source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
hd_joint_probability_generator_inl.h | /*
*
* Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef HD_JOINT_PROBABILITY_GENERATOR_INL
#define HD_JOINT_PROBABILITY_GENERATOR_INL
#include "hdi/dimensionality_reduction/hd_joint_probability_generator.h"
#include "hdi/utils/math_utils.h"
#include "hdi/utils/log_helper_functions.h"
#include "hdi/utils/scoped_timers.h"
#include <random>
#include <chrono>
#include <unordered_set>
#include <numeric>
#ifdef __APPLE__
#include <dispatch/dispatch.h>
#else
#define __block
#endif
#pragma warning( push )
#pragma warning( disable : 4267)
#pragma warning( push )
#pragma warning( disable : 4291)
#pragma warning( push )
#pragma warning( disable : 4996)
#pragma warning( push )
#pragma warning( disable : 4018)
#pragma warning( push )
#pragma warning( disable : 4244)
#include "flann/flann.h"
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
namespace hdi{
namespace dr{
/////////////////////////////////////////////////////////////////////////
//! Default parameters: perplexity 30, a 3x perplexity multiplier for the
//! kNN neighborhood size, 4 randomized kd-trees and 1024 checks for FLANN.
template <typename scalar, typename sparse_scalar_matrix>
HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::Parameters::Parameters()
    : _perplexity(30)
    , _perplexity_multiplier(3)
    , _num_trees(4)
    , _num_checks(1024)
{}
/////////////////////////////////////////////////////////////////////////
//! All timings start at zero; they are filled in by the compute* methods.
template <typename scalar, typename sparse_scalar_matrix>
HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::Statistics::Statistics()
    : _total_time(0)
    , _trees_construction_time(0)
    , _aknn_time(0)
    , _distribution_time(0)
{}
//! Zero every timer so the next run starts from a clean slate.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::Statistics::reset(){
  _total_time              = 0;
  _trees_construction_time = 0;
  _aknn_time               = 0;
  _distribution_time       = 0;
}
//! Pretty-print the collected timings through the given logger.
//! The indented entries are the components of the total time
//! (presumably secureLog* tolerate a null logger -- see log_helper_functions).
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::Statistics::log(utils::AbstractLog* logger)const{
utils::secureLog(logger,"\n-------- HD Joint Probability Generator Statistics -----------");
utils::secureLogValue(logger,"Total time",_total_time);
utils::secureLogValue(logger,"\tTrees construction time",_trees_construction_time,true,1);
utils::secureLogValue(logger,"\tAKNN time",_aknn_time,true,3);
utils::secureLogValue(logger,"\tDistributions time",_distribution_time,true,2);
utils::secureLog(logger,"--------------------------------------------------------------\n");
}
/////////////////////////////////////////////////////////////////////////
//! Construct a generator with no logger attached (logging disabled until set).
template <typename scalar, typename sparse_scalar_matrix>
HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::HDJointProbabilityGenerator()
    : _logger(nullptr)
{}
//! Build the symmetric joint probability distribution P used by tSNE-like
//! embeddings: approximate kNN search, per-point Gaussian conditional
//! distributions with fixed perplexity, then symmetrization.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeJointProbabilityDistribution(scalar_type* high_dimensional_data, unsigned int num_dim, unsigned int num_dps, sparse_scalar_matrix& distribution, Parameters params){
  utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time);
  hdi::utils::secureLog(_logger,"Computing the HD joint probability distribution...");
  distribution.resize(num_dps);

  // Squared distances to (and indices of) the approximated nearest neighbors.
  std::vector<scalar_type> knn_squared_distances;
  std::vector<int> knn_indices;
  computeHighDimensionalDistances(high_dimensional_data, num_dim, num_dps, knn_squared_distances, knn_indices, params);

  computeGaussianDistributions(knn_squared_distances, knn_indices, distribution, params);
  symmetrize(distribution);
}
//! Like computeJointProbabilityDistribution but WITHOUT the final
//! symmetrization: each row of `distribution` holds the conditional
//! Gaussian distribution of one data point.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeProbabilityDistributions(scalar_type* high_dimensional_data, unsigned int num_dim, unsigned int num_dps, sparse_scalar_matrix& distribution, Parameters params){
  utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time);
  hdi::utils::secureLog(_logger,"Computing the HD joint probability distribution...");
  distribution.resize(num_dps);

  // Squared distances to (and indices of) the approximated nearest neighbors.
  std::vector<scalar_type> knn_squared_distances;
  std::vector<int> knn_indices;
  computeHighDimensionalDistances(high_dimensional_data, num_dim, num_dps, knn_squared_distances, knn_indices, params);

  computeGaussianDistributions(knn_squared_distances, knn_indices, distribution, params);
}
//! Compute the per-point conditional Gaussian distributions in flat form:
//! on return probabilities[j*nn + k] / indices[j*nn + k] describe the k-th
//! neighbor of point j, where nn = perplexity * perplexity_multiplier + 1.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeProbabilityDistributions(scalar_type* high_dimensional_data, unsigned int num_dim, unsigned int num_dps, std::vector<scalar_type>& probabilities, std::vector<int>& indices, Parameters params){
  utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time);
  hdi::utils::secureLog(_logger,"Computing the HD joint probability distribution...");

  // Squared distances to the approximated nearest neighbors of every point.
  std::vector<scalar_type> knn_squared_distances;
  computeHighDimensionalDistances(high_dimensional_data, num_dim, num_dps, knn_squared_distances, indices, params);

  // Turn the squared distances into fixed-perplexity Gaussian distributions.
  computeGaussianDistributions(knn_squared_distances, indices, probabilities, params);
}
//! Approximate kNN search with FLANN randomized kd-trees.
//! Fills `indices`/`distances_squared` as flat row-major buffers of
//! num_dps rows and nn columns (nn = perplexity*multiplier + 1; the +1
//! accounts for the query point itself, since query == dataset).
//! Note: FLANN's L2 "distance" is the SQUARED euclidean distance.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeHighDimensionalDistances(scalar_type* high_dimensional_data, unsigned int num_dim, unsigned int num_dps, std::vector<scalar_type>& distances_squared, std::vector<int>& indices, Parameters& params){
hdi::utils::secureLog(_logger,"Computing nearest neighborhoods...");
// dataset and query wrap the SAME buffer: every point queries all points
flann::Matrix<scalar_type> dataset (high_dimensional_data,num_dps,num_dim);
flann::Matrix<scalar_type> query (high_dimensional_data,num_dps,num_dim);
flann::Index<flann::L2<scalar_type> > index(dataset, flann::KDTreeIndexParams(params._num_trees));
const unsigned int nn = params._perplexity*params._perplexity_multiplier + 1;
distances_squared.resize(num_dps*nn);
indices.resize(num_dps*nn);
{
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._trees_construction_time);
index.buildIndex();
}
{
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._aknn_time);
// FLANN writes straight into the pre-sized output vectors
flann::Matrix<int> indices_mat(indices.data(), query.rows, nn);
flann::Matrix<scalar_type> dists_mat(distances_squared.data(), query.rows, nn);
flann::SearchParams flann_params(params._num_checks);
flann_params.cores = 0; //all cores
index.knnSearch(query, indices_mat, dists_mat, nn, flann_params);
}
}
//! Convert the squared kNN distances into per-point Gaussian conditional
//! distributions with fixed perplexity and scatter them into the sparse
//! matrix `distribution` (one row per point, keyed by neighbor index).
//! `distribution` must already be sized to the number of data points.
//! Parallelized with GCD on Apple and OpenMP elsewhere.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeGaussianDistributions(const std::vector<scalar_type>& distances_squared, const std::vector<int>& indices, sparse_scalar_matrix& distribution, Parameters& params){
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._distribution_time);
utils::secureLog(_logger,"Computing joint-probability distribution...");
const int n = distribution.size();
// nn includes the query point itself (first neighbor returned by FLANN)
const unsigned int nn = params._perplexity*params._perplexity_multiplier + 1;
// flat n*nn probability buffer; __block so the GCD block may write to it on Apple
__block scalar_vector_type temp_vector(distances_squared.size(),0);
#ifdef __APPLE__
// NOTE(review): "193" looks like a stale source-line reference -- confirm
std::cout << "GCD dispatch, hd_joint_probability_generator 193.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__APPLE__
// Fit the bandwidth for row j so its distribution matches params._perplexity
// (200 and 1e-5 are presumably the iteration cap and tolerance -- see math_utils)
const auto sigma = utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>(
distances_squared.begin() + j*nn, //check squared
distances_squared.begin() + (j + 1)*nn,
temp_vector.begin() + j*nn,
temp_vector.begin() + (j + 1)*nn,
params._perplexity,
200,
1e-5,
0
);
}
#ifdef __APPLE__
);
#endif
// Scatter into the sparse rows; k starts at 1 to skip the first neighbor,
// which is the point itself (query == dataset in the aKNN search).
for(int j = 0; j < n; ++j){
for(int k = 1; k < nn; ++k){
const unsigned int i = j*nn+k;
distribution[j][indices[i]] = temp_vector[i];
}
}
}
//! Convert the squared kNN distances into per-point Gaussian conditional
//! distributions with fixed perplexity, written into the flat vector
//! `probabilities` (row-major, n rows of nn entries, mirroring
//! `distances_squared`/`indices`). Parallelized with GCD on Apple and
//! OpenMP elsewhere.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeGaussianDistributions(const std::vector<scalar_type>& distances_squared, const std::vector<int>& indices, std::vector<scalar_type>& probabilities, Parameters& params){
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._distribution_time);
utils::secureLog(_logger,"Computing joint-probability distribution...");
// nn includes the query point itself (first neighbor returned by FLANN)
const unsigned int nn = params._perplexity*params._perplexity_multiplier + 1;
const int n = indices.size()/nn;
// FIX: the output buffer was never allocated by the caller
// (computeProbabilityDistributions passes it through untouched), so the
// writes through probabilities.begin() + j*nn below were out of bounds.
// Size it to mirror the flat distance buffer; a no-op if already sized.
probabilities.resize(distances_squared.size(), 0);
#ifdef __APPLE__
std::cout << "GCD dispatch, hd_joint_probability_generator 232.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__APPLE__
// Fit the bandwidth for row j so its distribution matches params._perplexity
// (200 and 1e-5 are presumably the iteration cap and tolerance -- see math_utils)
const auto sigma = utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>(
distances_squared.begin() + j*nn, //check squared
distances_squared.begin() + (j + 1)*nn,
probabilities.begin() + j*nn,
probabilities.begin() + (j + 1)*nn,
params._perplexity,
200,
1e-5,
0
);
}
#ifdef __APPLE__
);
#endif
}
//! Symmetrize the distribution in place: P_ij = P_ji = (P_ij + P_ji) / 2.
//! distribution[i][j] may INSERT a missing element into row i; because the
//! averaging is idempotent, re-visiting the pair later from row i is harmless.
//! NOTE(review): assumes sparse rows behave like std::map, where insertion
//! does not invalidate iterators into the row being walked -- the only row
//! that can receive an in-loop write while iterated is row j itself, and
//! there the key e.first already exists, so no insertion happens. Confirm
//! for other sparse_scalar_matrix instantiations.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::symmetrize(sparse_scalar_matrix& distribution){
const int n = distribution.size();
for(int j = 0; j < n; ++j){
for(auto& e: distribution[j]){
const unsigned int i = e.first;
scalar new_val = (distribution[j][i]+distribution[i][j])*0.5;
distribution[j][i] = new_val;
distribution[i][j] = new_val;
}
}
}
//! Compute the conditional Gaussian distributions directly from a FULL
//! (dense, row-major, num_dps x num_dps) squared-distance matrix, bypassing
//! the aKNN search. Every pairwise entry is written to `distribution`, so
//! the resulting "sparse" matrix is in fact dense -- intended for small inputs.
//! The last argument `j` passed to the perplexity search presumably tells it
//! to ignore the self-distance at index j -- confirm in math_utils.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeProbabilityDistributionsFromDistanceMatrix(const std::vector<scalar_type>& squared_distance_matrix, unsigned int num_dps, sparse_scalar_matrix& distribution, Parameters params){
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._distribution_time);
utils::secureLog(_logger,"Computing joint-probability distribution...");
const int n = num_dps;
// each "neighborhood" is a full row of the distance matrix
const unsigned int nn = num_dps;
// flat n*n probability buffer; __block so the GCD block may write to it on Apple
__block scalar_vector_type temp_vector(num_dps*num_dps,0);
distribution.clear();
distribution.resize(n);
#ifdef __APPLE__
// NOTE(review): "193" looks like a stale source-line reference -- confirm
std::cout << "GCD dispatch, hd_joint_probability_generator 193.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__APPLE__
const auto sigma = utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>(
squared_distance_matrix.begin() + j*nn, //check squared
squared_distance_matrix.begin() + (j + 1)*nn,
temp_vector.begin() + j*nn,
temp_vector.begin() + (j + 1)*nn,
params._perplexity,
200,
1e-5,
j
);
}
#ifdef __APPLE__
);
#endif
// copy every entry (k starts at 0 here: the whole row, including self)
for(int j = 0; j < n; ++j){
for(int k = 0; k < nn; ++k){
const unsigned int i = j*nn+k;
distribution[j][k] = temp_vector[i];
}
}
}
///////////////////////////////////////////////////////////////////////////////////7
}
}
#endif
|
test.c | /******************************************************************************
* FILE: omp_hello.c
* DESCRIPTION:
* OpenMP Example - Hello World - C/C++ Version
* In this simple example, the master thread forks a parallel region.
* All threads in the team obtain their unique thread number and print it.
* The master thread only prints the total number of threads. Two OpenMP
* library routines are used to obtain the number of threads and each
* thread's number.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include "omp.h"
#include <stdio.h>
#include <stdlib.h>
int main (int argc, char *argv[])
{
  int thread_count, thread_id;

  /* Spawn the team; each thread gets private copies of both variables. */
  #pragma omp parallel private(thread_count, thread_id)
  {
    /* Every thread reports its own id. */
    thread_id = omp_get_thread_num();
    printf("Hello World from thread = %d\n", thread_id);

    /* Only the master thread (id 0) reports the team size. */
    if (thread_id == 0)
    {
      thread_count = omp_get_num_threads();
      printf("Number of threads = %d\n", thread_count);
    }
  } /* implicit barrier: the team joins the master thread and disbands */
}
|
vecAdd_deadlock.c | /******************************************************************************
* FILE: omp_bug5.c
* DESCRIPTION:
* Using SECTIONS, two threads initialize their own array and then add
* it to the other's array, however a deadlock occurs.
* AUTHOR: Blaise Barney 01/29/04
* LAST REVISED: 04/06/05
******************************************************************************/
/**
* The first thread acquires locka and then tries to get lockb before releasing
* locka. Meanwhile, the second thread has acquired lockb and then tries to get
* locka before releasing lockb.
* Online source:
* https://computing.llnl.gov/tutorials/openMP/samples/C/omp_bug5.c
**/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 10 //Originally 1000000
#define PI 3.1415926535
#define DELTA .01415926535
/* Demonstrates the classic lock-ordering deadlock described in the header:
 * the two sections acquire the same pair of locks in OPPOSITE order
 * (locka->lockb vs lockb->locka). The bug is intentional -- do not "fix" it.
 * NOTE(review): `i` is shared by default in the parallel region (it is not
 * in the private clause), so the two sections also race on it. */
int main (int argc, char *argv[])
{
int nthreads, tid, i;
float a[N], b[N];
omp_lock_t locka, lockb;
/* Initialize the locks */
omp_init_lock(&locka);
omp_init_lock(&lockb);
/* Fork a team of threads giving them their own copies of variables */
#pragma omp parallel shared(a, b, nthreads, locka, lockb) private(tid)
{
/* Obtain thread number and number of threads */
tid = omp_get_thread_num();
#pragma omp master
{
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
printf("Thread %d starting...\n", tid);
#pragma omp barrier
#pragma omp sections nowait
{
#pragma omp section
{
printf("Thread %d initializing a[]\n",tid);
/* Section 1 takes locka FIRST, then blocks waiting for lockb... */
omp_set_lock(&locka);
for (i=0; i<N; i++)
a[i] = i * DELTA;
omp_set_lock(&lockb);
printf("Thread %d adding a[] to b[]\n",tid);
for (i=0; i<N; i++)
b[i] += a[i];
omp_unset_lock(&lockb);
omp_unset_lock(&locka);
}
#pragma omp section
{
printf("Thread %d initializing b[]\n",tid);
/* ...while section 2 takes lockb FIRST, then blocks waiting for locka:
 * with two threads, each holds what the other needs => deadlock. */
omp_set_lock(&lockb);
for (i=0; i<N; i++)
b[i] = i * PI;
omp_set_lock(&locka);
printf("Thread %d adding b[] to a[]\n",tid);
for (i=0; i<N; i++)
a[i] += b[i];
omp_unset_lock(&locka);
omp_unset_lock(&lockb);
}
} /* end of sections */
} /* end of parallel region */
}
|
hermv_c_dia_n_lo_trans.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include <stdlib.h>
/*
 * Hermitian sparse matrix-vector product on DIA (diagonal) storage:
 * y := beta * y + alpha * op(A) * x, with only the lower triangle of A
 * stored (per the kernel name hermv_c_dia_n_lo_trans op is presumably the
 * conjugate transpose -- confirm against the dispatcher; the alpha_* macro
 * semantics live in the alphasparse headers).
 * Each thread accumulates into its own scratch row tmp[threadId] to avoid
 * races while diagonals are processed in parallel; the rows are reduced
 * into y at the end.
 * NOTE(review): the malloc results are not checked for NULL.
 */
alphasparse_status_t
ONAME(const ALPHA_Complex alpha,
const ALPHA_SPMAT_DIA *A,
const ALPHA_Complex *x,
const ALPHA_Complex beta,
ALPHA_Complex *y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
/* a hermitian matrix must be square */
if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
const ALPHA_INT thread_num = alpha_get_thread_num();
/* one zero-initialized accumulator row per thread */
ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(int i = 0; i < thread_num; ++i)
{
tmp[i] = malloc(sizeof(ALPHA_Number) * m);
memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
}
const ALPHA_INT diags = A->ndiag;
/* parallel over stored diagonals; only dis <= 0 (main + lower) exist in
 * lower-triangle DIA storage, the upper part is implied by hermitian symmetry */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < diags; ++i)
{
const ALPHA_INT threadId = alpha_get_thread_id();
const ALPHA_INT dis = A->distance[i];
if(dis == 0)
{
/* main diagonal: contributes once per row.
 * NOTE(review): alpha_mul_3c presumably multiplies by the conjugate of
 * the diagonal entry -- confirm the macro definition. */
const ALPHA_INT start = i * A->lval;
for(ALPHA_INT j = 0; j < m; ++j)
{
ALPHA_Number v;
alpha_mul_3c(v, alpha, A->values[start + j]);
alpha_madde(tmp[threadId][j], v, x[j]);
}
}
else if(dis < 0)
{
/* sub-diagonal at offset -dis: each stored element contributes to two
 * output positions, once as stored and once conjugated (mirrored entry) */
const ALPHA_INT row_start = -dis;
const ALPHA_INT col_start = 0;
const ALPHA_INT nnz = m + dis;
const ALPHA_INT start = i * A->lval;
for(ALPHA_INT j = 0; j < nnz; ++j)
{
ALPHA_Complex v,v_c;
ALPHA_Complex val_orig = A->values[start + row_start + j];
ALPHA_Complex val_conj = {val_orig.real,-val_orig.imag};
alpha_mul(v, alpha, val_orig);
alpha_mul(v_c, alpha, val_conj);
alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]);
alpha_madde(tmp[threadId][row_start + j], v_c, x[col_start + j]);
}
}
}
/* reduction: y = beta*y + sum over the per-thread accumulator rows */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
alpha_mul(y[i], beta, y[i]);
for(ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(y[i], y[i], tmp[j][i]);
}
}
/* release the scratch rows */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < thread_num; ++i)
{
alpha_free(tmp[i]);
}
alpha_free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
4.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <math.h>
/*
 * Parallel inclusive prefix sum (Hillis-Steele scan) over n doubles.
 * Reads n and the values from 4_in.txt and writes n plus the prefix sums
 * to 4_out.txt. Each of the ceil(log2(n)) passes adds, in parallel, the
 * element 2^i positions to the left.
 *
 * Fixes vs. the original:
 *  - memcpy was called without including <string.h> (implicit declaration;
 *    a hard error in C++ and UB in modern C) -- replaced by an O(1)
 *    pointer swap of the two buffers, which also removes the O(n) copy;
 *  - pow(2, i) replaced by the exact integer shift 1 << i;
 *  - fopen results are checked instead of crashing in fscanf/fprintf.
 */
int main() {
    FILE *in = fopen("4_in.txt", "r");
    FILE *out = fopen("4_out.txt", "w");
    if (in == NULL || out == NULL) {   /* fail fast on missing files */
        fprintf(stderr, "cannot open 4_in.txt / 4_out.txt\n");
        if (in) fclose(in);
        if (out) fclose(out);
        return 1;
    }
    int n;
    fscanf(in, "%d", &n);
    /* src holds the values read so far, dst receives the next pass */
    double *src = (double*) calloc(n, sizeof(double));
    double *dst = (double*) calloc(n, sizeof(double));
    int i, j;
    for (i = 0; i < n; i++) {
        fscanf(in, "%lf", &(src[i]));
        dst[i] = src[i];
    }
    int ln = ceil(log2(n));
    for (i = 0; i < ln; i++) {
        int is = 1 << i;   /* stride 2^i, exact integer instead of pow() */
        #pragma omp parallel for private(j) shared(dst, src)
        for (j = 0; j < n; j++) {
            dst[j] = src[j];
            if (j >= is) {
                dst[j] += src[j - is];
            }
        }
        /* swap buffer roles instead of memcpy: src now holds the results */
        double *tmp = src;
        src = dst;
        dst = tmp;
    }
    fprintf(out, "%d\n", n);
    for (i = 0; i < n; i++) {
        fprintf(out, "%.4lf\n", src[i]);
    }
    free(src);
    free(dst);
    fclose(in);
    fclose(out);
    return 0;
}
pyfr_driver_asp_reg.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <math.h>
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
# include <mkl.h>
#else /* prototypes for GEMM */
/* Scalar reference DGEMM used when MKL is unavailable:
 *   C = beta*C + alpha*A*B, column-major A (LDA x K), B (LDB x N), C (LDC x N).
 * FIX: alpha and beta were truncated to float, silently degrading this
 * double-precision reference to single precision -- keep them double. */
void my_dgemm( const int* M, const int* N, const int* K, const double* alpha,
const double* a, const int* LDA, const double* b, const int* LDB,
const double* beta, double* c, const int* LDC ) {
const int my_M = *M;
const int my_N = *N;
const int my_K = *K;
const int my_LDA = *LDA;
const int my_LDB = *LDB;
const int my_LDC = *LDC;
const double my_alpha = *alpha;
const double my_beta = *beta;
int m = 0, n = 0, k = 0;
for ( n = 0; n < my_N; ++n ) {
for ( m = 0; m < my_M; ++m ) {
/* scale the existing C entry, then accumulate the dot product */
c[(n * my_LDC) + m] = my_beta * c[(n * my_LDC) + m];
for ( k = 0; k < my_K; ++k ) {
c[(n * my_LDC) + m] += my_alpha * a[(k * my_LDA) + m] * b[(n * my_LDB) + k];
}
}
}
}
#endif
#define REPS 100
#define REALTYPE double
/*
 * Read a CSR matrix from an MTX-like text file: '%' comment header, then
 * one "rows cols nnz" line, then one "row col value" triplet per line
 * (1-based indices in the file, converted to 0-based here). Allocates and
 * fills o_row_idx (size rows+1), o_column_idx and o_values (size nnz).
 * Returns 0 on success, -1 on any error.
 * NOTE(review): the error paths leak the file handle and any buffers
 * already allocated; the only caller exits immediately on failure, so this
 * is benign there, but beware of reuse.
 * NOTE(review): triplets are assumed to be grouped by row in ascending
 * order -- (*o_row_idx)[l_row+1] = l_i only works under that assumption.
 */
int my_csr_reader( const char* i_csr_file_in,
unsigned int** o_row_idx,
unsigned int** o_column_idx,
REALTYPE** o_values,
unsigned int* o_row_count,
unsigned int* o_column_count,
unsigned int* o_element_count ) {
FILE *l_csr_file_handle;
const unsigned int l_line_length = 512;
char l_line[512/*l_line_length*/+1];
unsigned int l_header_read = 0;
unsigned int* l_row_idx_id = NULL;
unsigned int l_i = 0;
l_csr_file_handle = fopen( i_csr_file_in, "r" );
if ( l_csr_file_handle == NULL ) {
fprintf( stderr, "cannot open CSR file!\n" );
return -1;
}
while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) {
/* a full buffer means the line was longer than we can handle */
if ( strlen(l_line) == l_line_length ) {
fprintf( stderr, "could not read file length!\n" );
return -1;
}
/* check if we are still reading comments header */
if ( l_line[0] == '%' ) {
continue;
} else {
/* if we are the first line after comment header, we allocate our data structures */
if ( l_header_read == 0 ) {
if (3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) &&
0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count)
{
/* allocate CSC datastructure matching mtx file */
*o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * ((size_t)*o_element_count));
*o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * ((size_t)*o_row_count + 1));
*o_values = (REALTYPE*) malloc(sizeof(double) * ((size_t)*o_element_count));
l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * ((size_t)*o_row_count));
/* check if mallocs were successful */
if ( ( *o_row_idx == NULL ) ||
( *o_column_idx == NULL ) ||
( *o_values == NULL ) ||
( l_row_idx_id == NULL ) ) {
fprintf( stderr, "could not allocate sp data!\n" );
return -1;
}
/* set everything to zero for init */
memset(*o_row_idx, 0, sizeof(unsigned int)*((size_t)*o_row_count + 1));
memset(*o_column_idx, 0, sizeof(unsigned int)*((size_t)*o_element_count));
memset(*o_values, 0, sizeof(double)*((size_t)*o_element_count));
memset(l_row_idx_id, 0, sizeof(unsigned int)*((size_t)*o_row_count));
/* init column idx: default every row-pointer to nnz so that trailing
 * empty rows end up with an empty [start, end) range */
for ( l_i = 0; l_i < (*o_row_count + 1); l_i++)
(*o_row_idx)[l_i] = (*o_element_count);
/* init */
(*o_row_idx)[0] = 0;
l_i = 0;
l_header_read = 1;
} else {
fprintf( stderr, "could not csr description!\n" );
return -1;
}
/* now we read the actual content */
} else {
unsigned int l_row, l_column;
REALTYPE l_value;
/* read a line of content */
if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) {
fprintf( stderr, "could not read element!\n" );
return -1;
}
/* adjust numbers to zero termination */
l_row--;
l_column--;
/* add these values to row and value structure */
(*o_column_idx)[l_i] = l_column;
(*o_values)[l_i] = l_value;
l_i++;
/* handle columns, set id to own for this column, yeah we need to handle empty columns */
l_row_idx_id[l_row] = 1;
(*o_row_idx)[l_row+1] = l_i;
}
}
}
/* close mtx file */
fclose( l_csr_file_handle );
/* check if we read a file which was consistent */
if ( l_i != (*o_element_count) ) {
fprintf( stderr, "we were not able to read all elements!\n" );
return -1;
}
/* let's handle empty rows: give them the same start as the previous row */
for ( l_i = 0; l_i < (*o_row_count); l_i++) {
assert(NULL != l_row_idx_id);
if ( l_row_idx_id[l_i] == 0 ) {
(*o_row_idx)[l_i+1] = (*o_row_idx)[l_i];
}
}
/* free helper data structure */
if ( l_row_idx_id != NULL ) {
free( l_row_idx_id );
}
return 0;
}
/*
 * Benchmark driver: reads a sparse CSR matrix A from an MTX-like file and
 * measures C = alpha*A*B + beta*C (beta in {0,1}) with (1) libxsmm's
 * sparse-A fsspmdm kernels and (2) a dense GEMM (MKL if available,
 * otherwise the local my_dgemm fallback), validating both against a
 * straightforward gold computation from the CSR data.
 * Usage: <csr-filename> <N> <reps>.
 * NOTE(review): the column loops step by l_n_block (48), so N is implicitly
 * assumed to be a multiple of 48 -- confirm for arbitrary N.
 * NOTE(review): only the two libxsmm-vs-gold checks are folded into the
 * exit status `ret`; the dense-vs-gold errors are printed only.
 */
int main(int argc, char* argv[]) {
int ret = 0;
char* l_csr_file;
REALTYPE* l_a_sp;
unsigned int* l_rowptr;
unsigned int* l_colidx;
unsigned int l_rowcount, l_colcount, l_elements;
REALTYPE* l_a_dense;
REALTYPE* l_b;
REALTYPE* l_c_betaone;
REALTYPE* l_c_betazero;
REALTYPE* l_c_gold_betaone;
REALTYPE* l_c_gold_betazero;
REALTYPE* l_c_dense_betaone;
REALTYPE* l_c_dense_betazero;
REALTYPE l_max_error = 0.0;
int l_m;
int l_n;
int l_k;
int l_i;
int l_j;
int l_z;
int l_elems;
int l_reps;
int l_n_block;
libxsmm_timer_tickint l_start, l_end;
double l_total;
double alpha = 1.0;
double beta = 1.0;
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
char trans = 'N';
#endif
libxsmm_dfsspmdm* gemm_op_betazero = NULL;
libxsmm_dfsspmdm* gemm_op_betaone = NULL;
if (argc != 4) {
fprintf( stderr, "need csr-filename N reps!\n" );
exit(-1);
}
/* read sparse A */
l_csr_file = argv[1];
l_n = atoi(argv[2]);
l_reps = atoi(argv[3]);
if (my_csr_reader( l_csr_file,
&l_rowptr,
&l_colidx,
&l_a_sp,
&l_rowcount, &l_colcount, &l_elements ) != 0 )
{
exit(-1);
}
l_m = l_rowcount;
l_k = l_colcount;
printf("CSR matrix data structure we just read:\n");
printf("rows: %u, columns: %u, elements: %u\n", l_rowcount, l_colcount, l_elements);
/* allocate dense matrices (64-byte aligned for vectorized kernels) */
l_a_dense = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_k * l_m, 64);
l_b = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_k * l_n, 64);
l_c_betazero = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64);
l_c_betaone = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64);
l_c_gold_betazero = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64);
l_c_gold_betaone = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64);
l_c_dense_betazero = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64);
l_c_dense_betaone = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64);
/* touch B */
for ( l_i = 0; l_i < l_k*l_n; l_i++) {
l_b[l_i] = (REALTYPE)libxsmm_rng_f64();
}
/* touch dense A */
for ( l_i = 0; l_i < l_k*l_m; l_i++) {
l_a_dense[l_i] = (REALTYPE)0.0;
}
/* init dense A using sparse A (row-major scatter of CSR entries) */
for ( l_i = 0; l_i < l_m; l_i++ ) {
l_elems = l_rowptr[l_i+1] - l_rowptr[l_i];
for ( l_z = 0; l_z < l_elems; l_z++ ) {
l_a_dense[(l_i*l_k)+l_colidx[l_rowptr[l_i]+l_z]] = l_a_sp[l_rowptr[l_i]+l_z];
}
}
/* touch C: all variants start from identical random contents so the
 * beta=1 results stay comparable across implementations */
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
l_c_gold_betaone[l_i] = (REALTYPE)libxsmm_rng_f64();
}
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
l_c_betaone[l_i] = l_c_gold_betaone[l_i];
}
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
l_c_dense_betaone[l_i] = l_c_gold_betaone[l_i];
}
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
l_c_betazero[l_i] = l_c_betaone[l_i];
}
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
l_c_gold_betazero[l_i] = l_c_gold_betaone[l_i];
}
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
l_c_dense_betazero[l_i] = l_c_dense_betaone[l_i];
}
/* setting up fsspmdm: kernels operate on 48-column panels of B/C */
l_n_block = 48;
beta = 0.0;
gemm_op_betazero = libxsmm_dfsspmdm_create( l_m, l_n_block, l_k, l_k, l_n, l_n, 1.0, beta, 1, l_a_dense );
beta = 1.0;
gemm_op_betaone = libxsmm_dfsspmdm_create( l_m, l_n_block, l_k, l_k, l_n, l_n, 1.0, beta, 0, l_a_dense );
/* compute golden results directly from the CSR structure */
printf("computing golden solution...\n");
for ( l_j = 0; l_j < l_n; l_j++ ) {
for (l_i = 0; l_i < l_m; l_i++ ) {
l_elems = l_rowptr[l_i+1] - l_rowptr[l_i];
l_c_gold_betazero[(l_n*l_i) + l_j] = 0.0;
for (l_z = 0; l_z < l_elems; l_z++) {
l_c_gold_betazero[(l_n*l_i) + l_j] += l_a_sp[l_rowptr[l_i]+l_z] * l_b[(l_n*l_colidx[l_rowptr[l_i]+l_z])+l_j];
}
}
}
for ( l_j = 0; l_j < l_n; l_j++ ) {
for (l_i = 0; l_i < l_m; l_i++ ) {
l_elems = l_rowptr[l_i+1] - l_rowptr[l_i];
for (l_z = 0; l_z < l_elems; l_z++) {
l_c_gold_betaone[(l_n*l_i) + l_j] += l_a_sp[l_rowptr[l_i]+l_z] * l_b[(l_n*l_colidx[l_rowptr[l_i]+l_z])+l_j];
}
}
}
printf("...done!\n");
/* libxsmm generated code: one 48-column panel per loop iteration */
printf("computing libxsmm (A sparse) solution...\n");
#ifdef _OPENMP
#pragma omp parallel for private(l_z)
#endif
for (l_z = 0; l_z < l_n; l_z+=l_n_block) {
libxsmm_dfsspmdm_execute( gemm_op_betazero, l_b+l_z, l_c_betazero+l_z );
}
#ifdef _OPENMP
#pragma omp parallel for private(l_z)
#endif
for (l_z = 0; l_z < l_n; l_z+=l_n_block) {
libxsmm_dfsspmdm_execute( gemm_op_betaone, l_b+l_z, l_c_betaone+l_z );
}
printf("...done!\n");
/* BLAS code: note row-major data through a column-major dgemm by
 * computing C^T = B^T * A^T (swapped operand order) */
printf("computing BLAS (A dense) solution...\n");
beta = 0.0;
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
dgemm( &trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betazero, &l_n );
#else
my_dgemm( &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betazero, &l_n );
#endif
beta = 1.0;
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
dgemm( &trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betaone, &l_n );
#else
my_dgemm( &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betaone, &l_n );
#endif
printf("...done!\n");
/* check for errors (max absolute deviation from gold) */
l_max_error = (REALTYPE)0.0;
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
if (fabs(l_c_betazero[l_i]-l_c_gold_betazero[l_i]) > l_max_error ) {
l_max_error = fabs(l_c_betazero[l_i]-l_c_gold_betazero[l_i]);
}
}
ret |= l_max_error > 1e-4;
printf("max error beta=0 (libxmm vs. gold): %f\n", l_max_error);
l_max_error = (REALTYPE)0.0;
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
if (fabs(l_c_betaone[l_i]-l_c_gold_betaone[l_i]) > l_max_error ) {
l_max_error = fabs(l_c_betaone[l_i]-l_c_gold_betaone[l_i]);
}
}
ret |= l_max_error > 1e-4;
printf("max error beta=1 (libxmm vs. gold): %f\n", l_max_error);
l_max_error = (REALTYPE)0.0;
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
if (fabs(l_c_dense_betazero[l_i]-l_c_gold_betazero[l_i]) > l_max_error ) {
l_max_error = fabs(l_c_dense_betazero[l_i]-l_c_gold_betazero[l_i]);
}
}
printf("max error beta=0 (dense vs. gold): %f\n", l_max_error);
l_max_error = (REALTYPE)0.0;
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
if (fabs(l_c_dense_betaone[l_i]-l_c_gold_betaone[l_i]) > l_max_error ) {
l_max_error = fabs(l_c_dense_betaone[l_i]-l_c_gold_betaone[l_i]);
}
}
printf("max error beta=1 (dense vs. gold): %f\n", l_max_error);
/* Let's measure performance: libxsmm, beta=0 */
l_start = libxsmm_timer_tick();
for ( l_j = 0; l_j < l_reps; l_j++ ) {
#ifdef _OPENMP
#pragma omp parallel for private(l_z)
#endif
for (l_z = 0; l_z < l_n; l_z+=l_n_block) {
libxsmm_dfsspmdm_execute( gemm_op_betazero, l_b+l_z, l_c_betazero+l_z );
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
fprintf(stdout, "time[s] LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, l_total/(double)l_reps );
fprintf(stdout, "GFLOPS  LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f (sparse)\n", l_m, l_n, l_k, (2.0 * (double)l_elements * (double)l_n * (double)l_reps * 1.0e-9) / l_total );
fprintf(stdout, "GFLOPS  LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f (dense)\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total );
fprintf(stdout, "GB/s    LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * (((double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total );
/* libxsmm, beta=1 */
l_start = libxsmm_timer_tick();
for ( l_j = 0; l_j < l_reps; l_j++ ) {
#ifdef _OPENMP
#pragma omp parallel for private(l_z)
#endif
for (l_z = 0; l_z < l_n; l_z+=l_n_block) {
libxsmm_dfsspmdm_execute( gemm_op_betaone, l_b+l_z, l_c_betaone+l_z );
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
fprintf(stdout, "time[s] LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, l_total/(double)l_reps );
fprintf(stdout, "GFLOPS  LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f (sparse)\n", l_m, l_n, l_k, (2.0 * (double)l_elements * (double)l_n * (double)l_reps * 1.0e-9) / l_total );
fprintf(stdout, "GFLOPS  LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f (dense)\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total );
fprintf(stdout, "GB/s    LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total );
/* dense BLAS, beta=0 */
l_start = libxsmm_timer_tick();
beta = 0.0;
for ( l_j = 0; l_j < l_reps; l_j++ ) {
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
dgemm( &trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betazero, &l_n );
#else
my_dgemm( &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betazero, &l_n );
#endif
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
fprintf(stdout, "time[s] MKL (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, l_total/(double)l_reps );
fprintf(stdout, "GFLOPS  MKL (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total );
fprintf(stdout, "GB/s    MKL (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total );
/* dense BLAS, beta=1 */
l_start = libxsmm_timer_tick();
beta = 1.0;
for ( l_j = 0; l_j < l_reps; l_j++ ) {
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
dgemm( &trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betaone, &l_n );
#else
my_dgemm( &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betaone, &l_n );
#endif
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
fprintf(stdout, "time[s] MKL (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, l_total/(double)l_reps );
fprintf(stdout, "GFLOPS  MKL (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total );
fprintf(stdout, "GB/s    MKL (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total );
/* free */
libxsmm_dfsspmdm_destroy( gemm_op_betazero );
libxsmm_dfsspmdm_destroy( gemm_op_betaone );
return ret;
}
|
Metric.h | //
// Created by Jin Zhu on 2020/2/18.
//
// #define R_BUILD
#ifndef SRC_METRICS_H
#define SRC_METRICS_H
#include "Data.h"
#include "Algorithm.h"
#include "model_fit.h"
// #include "path.h"
#include <vector>
#include <random>
#include <algorithm>
#include "utilities.h"
template <class T1, class T2, class T3, class T4>
// To do: calculate loss && all to one && lm poisson cox
//
// Model-selection helper: scores a fitted Algorithm either by an
// information criterion (AIC/BIC-like, GIC, EBIC) or by K-fold
// cross-validated prediction loss.
class Metric
{
public:
    bool is_cv;  // true: score models by K-fold CV; false: by information criterion
    int Kfold;   // number of CV folds (meaningful only when is_cv)
    int ic_type; // which criterion ic() computes: 1, 2, 3 (GIC) or 4 (EBIC-like)

    // Per-fold cross-validation state (all sized Kfold when is_cv).
    std::vector<Eigen::VectorXi> cv_initial_A;
    std::vector<Eigen::VectorXi> cv_initial_I;
    std::vector<Eigen::VectorXi> train_mask_list;
    std::vector<Eigen::VectorXi> test_mask_list;
    std::vector<T4> train_X_list;
    std::vector<T4> test_X_list;
    std::vector<T1> train_y_list;
    std::vector<T1> test_y_list;
    std::vector<Eigen::VectorXd> train_weight_list;
    std::vector<Eigen::VectorXd> test_weight_list;
    std::vector<FIT_ARG<T2, T3>> cv_init_fit_arg; // warm-start arguments, one per fold

    double ic_coef; // multiplier on the penalty term of the information criterion

    Metric() = default;

    Metric(int ic_type, double ic_coef = 1.0, bool is_cv = false, int Kfold = 5)
    {
        this->is_cv = is_cv;
        this->ic_type = ic_type;
        this->Kfold = Kfold;
        this->ic_coef = ic_coef;
        if (is_cv)
        {
            cv_init_fit_arg.resize(Kfold);
            train_X_list.resize(Kfold);
            test_X_list.resize(Kfold);
            train_y_list.resize(Kfold);
            test_y_list.resize(Kfold);
            test_weight_list.resize(Kfold);
            train_weight_list.resize(Kfold);
        }
    };

    // Initialize each fold's warm-start argument with zero coefficients.
    // p: number of predictors; M: response dimension.
    void set_cv_init_fit_arg(int p, int M)
    {
        for (int i = 0; i < this->Kfold; i++)
        {
            T2 beta_init;
            T3 coef0_init;
            coef_set_zero(p, M, beta_init, coef0_init);
            Eigen::VectorXi A_init;
            Eigen::VectorXd bd_init;

            FIT_ARG<T2, T3> fit_arg(0, 0., beta_init, coef0_init, bd_init, A_init);
            cv_init_fit_arg[i] = fit_arg;
        }
    }

    // Split the n samples into Kfold train/test partitions and slice X, y and
    // the observation weights accordingly.
    // If cv_fold_id is empty, folds come from a seeded random shuffle
    // (reproducible); otherwise samples sharing a cv_fold_id value form a fold.
    void set_cv_train_test_mask(Data<T1, T2, T3, T4> &data, int n, Eigen::VectorXi &cv_fold_id)
    {
        Eigen::VectorXi index_list(n);
        std::vector<int> index_vec((unsigned int)n);
        std::vector<Eigen::VectorXi> group_list((unsigned int)this->Kfold);
        for (int i = 0; i < n; i++)
        {
            index_vec[i] = i;
        }

        if (cv_fold_id.size() == 0)
        {
            // fixed seed so fold assignment is reproducible across runs
            std::mt19937 g(123);
            std::shuffle(index_vec.begin(), index_vec.end(), g);
            for (int i = 0; i < n; i++)
            {
                index_list(i) = index_vec[i];
            }

            // first Kfold-1 groups get floor(n/Kfold) samples; the last gets the remainder
            int group_size = int(n / this->Kfold);
            for (int k = 0; k < (this->Kfold - 1); k++)
            {
                group_list[k] = index_list.segment(int(k * group_size), group_size);
            }
            group_list[this->Kfold - 1] = index_list.segment(int((this->Kfold - 1) * group_size),
                                                             n - int(int(this->Kfold - 1) * group_size));
        }
        else
        {
            // given cv_fold_id: sort sample indices by fold id, then cut at id changes
            auto rule = [cv_fold_id](int i, int j) -> bool
            {
                return cv_fold_id(i) < cv_fold_id(j);
            };
            std::sort(index_vec.begin(), index_vec.end(), rule);
            for (int i = 0; i < n; i++)
            {
                index_list(i) = index_vec[i];
            }

            int k = 0, st = 0, ed = 1;
            while (k < this->Kfold && ed < n)
            {
                int mask = cv_fold_id(index_list(st));
                while (ed < n && mask == cv_fold_id(index_list(ed))) ed++;
                group_list[k] = index_list.segment(st, ed - st);
                st = ed; ed++; k++;
            }
        }

        for (int k = 0; k < this->Kfold; k++)
        {
            std::sort(group_list[k].data(), group_list[k].data() + group_list[k].size());
        }

        // cv train-test partition: fold k tests on group k, trains on all others
        std::vector<Eigen::VectorXi> train_mask_list_tmp((unsigned int)this->Kfold);
        std::vector<Eigen::VectorXi> test_mask_list_tmp((unsigned int)this->Kfold);
        for (int k = 0; k < this->Kfold; k++)
        {
            int train_x_size = n - group_list[k].size();
            // train_mask = indices of every group except group k
            Eigen::VectorXi train_mask(train_x_size);
            int i = 0;
            for (int j = 0; j < this->Kfold; j++)
            {
                if (j != k)
                {
                    for (int s = 0; s < group_list[j].size(); s++)
                    {
                        train_mask(i) = group_list[j](s);
                        i++;
                    }
                }
            }
            std::sort(train_mask.data(), train_mask.data() + train_mask.size());
            train_mask_list_tmp[k] = train_mask;
            test_mask_list_tmp[k] = group_list[k];

            slice(data.x, train_mask, this->train_X_list[k]);
            slice(data.x, group_list[k], this->test_X_list[k]);
            slice(data.y, train_mask, this->train_y_list[k]);
            slice(data.y, group_list[k], this->test_y_list[k]);
            slice(data.weight, train_mask, this->train_weight_list[k]);
            slice(data.weight, group_list[k], this->test_weight_list[k]);
        }
        this->train_mask_list = train_mask_list_tmp;
        this->test_mask_list = test_mask_list_tmp;
    };

    // Information criterion of a fitted model.
    // train_n: training sample size; M: response dimension (unused here);
    // N: number of variable groups; algorithm: the fitted algorithm.
    double ic(int train_n, int M, int N, Algorithm<T1, T2, T3, T4> *algorithm)
    {
        double loss;
        if (algorithm->model_type == 1 || algorithm->model_type == 5)
        {
            // least-squares style models: log of the (ridge-corrected) residual loss
            loss = train_n * log(algorithm->get_train_loss() - algorithm->lambda_level * algorithm->beta.cwiseAbs2().sum());
        }
        else
        {
            // likelihood models: deviance scale, ridge part subtracted
            loss = 2 * (algorithm->get_train_loss() - algorithm->lambda_level * algorithm->beta.cwiseAbs2().sum());
        }

        if (ic_type == 1)
        {
            // AIC: 2 * effective degrees of freedom
            return loss + 2.0 * algorithm->get_effective_number();
        }
        else if (ic_type == 2)
        {
            // NOTE(review): penalty is n * df here; a BIC-style criterion would
            // use log(n) * df — confirm this is intentional.
            return loss + this->ic_coef * (double(train_n)) * algorithm->get_effective_number();
        }
        else if (ic_type == 3)
        {
            // GIC: log(N) * log(log(n)) * df
            return loss + this->ic_coef * log(double(N)) * log(log(double(train_n))) * algorithm->get_effective_number();
        }
        else if (ic_type == 4)
        {
            // EBIC-like: (log(n) + 2 log(N)) * df
            return loss + this->ic_coef * (log(double(train_n)) + 2 * log(double(N))) * algorithm->get_effective_number();
        }
        else
            return 0;
    };

    // Negative log-likelihood of the fitted model evaluated on
    // (train_x, train_y), restricted to the active set A chosen by the
    // algorithm (no ridge penalty: lambda passed as 0.0).
    double neg_loglik_loss(T4 &train_x, T1 &train_y, Eigen::VectorXd &train_weight, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, int train_n, int p, int N, Algorithm<T1, T2, T3, T4> *algorithm)
    {
        Eigen::VectorXi A = algorithm->get_A_out();
        T2 beta = algorithm->get_beta();
        T3 coef0 = algorithm->get_coef0();

        // expand group-level active set to individual column indices
        Eigen::VectorXi A_ind = find_ind(A, g_index, g_size, p, N);
        T4 X_A = X_seg(train_x, train_n, A_ind);
        T2 beta_A;
        slice(beta, A_ind, beta_A);

        double L0 = algorithm->neg_loglik_loss(X_A, train_y, train_weight, beta_A, coef0, A, g_index, g_size, 0.0);
        return L0;
    }

    // Fit on the full data, then score the fit: mean CV test loss when is_cv,
    // otherwise the information criterion. Warm-start state is updated as a
    // side effect (both fit_arg and the per-fold cv_init_fit_arg).
    double fit_and_evaluate_in_metric(Algorithm<T1, T2, T3, T4> *algorithm, Data<T1, T2, T3, T4> &data, std::vector<Algorithm<T1, T2, T3, T4> *> algorithm_list, FIT_ARG<T2, T3> &fit_arg)
    {
        int N = data.g_num;
        algorithm->update_sparsity_level(fit_arg.support_size);
        algorithm->update_lambda_level(fit_arg.lambda);
        algorithm->update_beta_init(fit_arg.beta_init);
        algorithm->update_bd_init(fit_arg.bd_init);
        algorithm->update_coef0_init(fit_arg.coef0_init);
        algorithm->update_A_init(fit_arg.A_init, N);

        algorithm->fit(data.x, data.y, data.weight, data.g_index, data.g_size, data.n, data.p, data.g_num, data.status, algorithm->Sigma);

        if (algorithm->get_warm_start())
        {
            fit_arg.beta_init = algorithm->get_beta();
            fit_arg.coef0_init = algorithm->get_coef0();
            fit_arg.bd_init = algorithm->get_bd();
        }

        if (is_cv)
        {
            Eigen::VectorXi g_index = data.g_index;
            Eigen::VectorXi g_size = data.g_size;
            int p = data.p;
            Eigen::VectorXd loss_list(this->Kfold);

            // folds are independent: each uses its own algorithm_list[k],
            // cv_init_fit_arg[k] and loss_list(k), so this loop is safe to parallelize
#pragma omp parallel for
            for (int k = 0; k < this->Kfold; k++)
            {
                // fold k: fit on the train split, evaluate on the test split
                int test_n = this->test_mask_list[k].size();
                int train_n = this->train_mask_list[k].size();

                algorithm_list[k]->update_sparsity_level(fit_arg.support_size);
                algorithm_list[k]->update_lambda_level(fit_arg.lambda);
                if (algorithm_list[k]->get_warm_start())
                {
                    algorithm_list[k]->update_beta_init(this->cv_init_fit_arg[k].beta_init);
                    algorithm_list[k]->update_bd_init(this->cv_init_fit_arg[k].bd_init);
                    algorithm_list[k]->update_coef0_init(this->cv_init_fit_arg[k].coef0_init);
                    algorithm_list[k]->update_A_init(this->cv_init_fit_arg[k].A_init, N);
                }

                algorithm_list[k]->fit(this->train_X_list[k], this->train_y_list[k], this->train_weight_list[k], g_index, g_size, train_n, p, N, data.status, algorithm_list[k]->Sigma);

                if (algorithm_list[k]->get_warm_start())
                {
                    // BUGFIX: save this fold's own solution. Previously these
                    // read from the shared full-data `algorithm`, overwriting
                    // every fold's warm start with the full-data fit.
                    this->cv_init_fit_arg[k].beta_init = algorithm_list[k]->get_beta();
                    this->cv_init_fit_arg[k].coef0_init = algorithm_list[k]->get_coef0();
                    this->cv_init_fit_arg[k].bd_init = algorithm_list[k]->get_bd();
                }

                loss_list(k) = this->neg_loglik_loss(this->test_X_list[k], this->test_y_list[k], this->test_weight_list[k], g_index, g_size, test_n, p, N, algorithm_list[k]);
            }

            return loss_list.mean();
        }
        else
        {
            return this->ic(data.n, data.M, data.g_num, algorithm);
        }
    };
};
#endif //SRC_METRICS_H |
SparseDenseProduct.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H
namespace Eigen {
namespace internal {
// An outer product of a sparse and a dense operand yields a sparse result.
template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };

// Primary declaration; the specializations below dispatch on the storage order
// of the sparse lhs and on whether the dense rhs can be processed one column
// at a time (ColPerCol).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
         typename AlphaType,
         int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
         bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;
// Row-major sparse lhs, dense rhs processed one column at a time.
// Each result coefficient res(i,c) is the dot product of sparse row i of lhs
// with column c of rhs, so rows are independent and can be processed in
// parallel under OpenMP when the matrix is large enough.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  typedef evaluator<Lhs> LhsEval;
  // res += alpha * lhs * rhs
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    LhsEval lhsEval(lhs);

    Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
    Eigen::initParallel();
    Index threads = Eigen::nbThreads();
#endif

    for(Index c=0; c<rhs.cols(); ++c)
    {
#ifdef EIGEN_HAS_OPENMP
      // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
      // It basically represents the minimal amount of work to be done to be worth it.
      if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
      {
        // #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
        // chunk size is precomputed into a named variable because some
        // compilers reject a full expression inside the schedule clause
        Index hh_omp = (n+threads*4-1)/(threads*4);
        #pragma omp parallel for schedule(dynamic,hh_omp) num_threads(threads)
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
      else
#endif
      {
        // sequential fallback (small problem, single thread, or no OpenMP)
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
    }
  }

  // res(i,col) += alpha * dot(lhs.row(i), rhs.col(col))
  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
  {
    typename Res::Scalar tmp(0);
    for(LhsInnerIterator it(lhsEval,i); it ;++it)
      tmp += it.value() * rhs.coeff(it.index(),col);
    res.coeffRef(i,col) += alpha * tmp;
  }
};
// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> >
// {
// enum {
// Defined = 1
// };
// typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
// };
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index c=0; c<rhs.cols(); ++c)
{
for(Index j=0; j<lhs.outerSize(); ++j)
{
// typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.coeffRef(it.index(),c) += it.value() * rhs_j;
}
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Res::RowXpr res_j(res.row(j));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res_j += (alpha*it.value()) * rhs.row(it.index());
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.row(it.index()) += (alpha*it.value()) * rhs_j;
}
}
};
// Entry point: res += alpha * lhs (sparse) * rhs (dense). Dispatches to the
// specialization selected by the sparse operand's storage order and the
// rhs traversal strategy (see the primary template's default arguments).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}
} // end namespace internal
namespace internal {
// sparse * dense product: nests the operands and forwards to
// sparse_time_dense_product.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
 : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    // nested_eval evaluates an operand into a temporary when it would
    // otherwise be traversed multiple times by the kernel
    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested;
    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 1 : Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
  }
};

// a sparse triangular view times a dense operand reuses the sparse-dense kernel
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
 : generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};
// dense * sparse product: evaluated as the transposed sparse * dense product,
// (dense * sparse)^T = sparse^T * dense^T.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
 : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  template<typename Dst>
  static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested;
    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 1 : Lhs::RowsAtCompileTime>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);

    // transpose everything
    Transpose<Dst> dstT(dst);
    internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};

// a dense operand times a sparse triangular view reuses the dense-sparse kernel
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
 : generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
{};
// Evaluator for a sparse-dense (or dense-sparse) outer product.
// The "lhs" factor (after the optional logical swap selected by
// NeedToTranspose) provides the sparse iteration pattern; each inner iterator
// scales those entries by one coefficient of the other factor.
template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
  typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
  typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
  typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;

  // if the actual left-hand side is a dense vector,
  // then build a sparse-view so that we can seamlessly iterate over it.
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1, SparseView<Lhs1> >::type ActualLhs;
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1 const&, SparseView<Lhs1> >::type LhsArg;

  typedef evaluator<ActualLhs> LhsEval;
  typedef evaluator<ActualRhs> RhsEval;
  typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
  typedef typename ProdXprType::Scalar Scalar;

public:
  enum {
    Flags = NeedToTranspose ? RowMajorBit : 0,
    CoeffReadCost = HugeCost
  };

  // Iterates one outer slice of the product: the lhs nonzeros, each
  // multiplied by the rhs coefficient (m_factor) associated with that slice.
  class InnerIterator : public LhsIterator
  {
  public:
    InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
      : LhsIterator(xprEval.m_lhsXprImpl, 0),
        m_outer(outer),
        m_empty(false),
        m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
    {}

    EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
    EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
    EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }

    EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
    EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }

  protected:
    // dense rhs: just read the coefficient of this outer slice
    Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
    {
      return rhs.coeff(outer);
    }
    // sparse rhs: the slice contributes only if its single candidate entry is
    // present and nonzero; otherwise mark the whole iterator as empty
    Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
    {
      typename RhsEval::InnerIterator it(rhs, outer);
      if (it && it.index()==0 && it.value()!=Scalar(0))
        return it.value();
      m_empty = true;
      return Scalar(0);
    }

    Index m_outer;   // current outer slice
    bool m_empty;    // true when a sparse rhs has no entry for this slice
    Scalar m_factor; // rhs coefficient multiplying every lhs nonzero
  };

  sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
    : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  // transpose case
  sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
    : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

protected:
  const LhsArg m_lhs;
  evaluator<ActualLhs> m_lhsXprImpl;
  evaluator<ActualRhs> m_rhsXprImpl;
};
// sparse * dense outer product
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}
};

// dense * sparse outer product (the base evaluator's second constructor
// handles the swapped-operand case)
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEDENSEPRODUCT_H
|
DCSRTile.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_DCSRTILE_H_
#define SRC_DCSRTILE_H_
#include <string>
#include <algorithm>
#include <vector>
#include "GMDP/utils/binary_search.h"
template <typename T>
// Doubly-compressed sparse row (DCSR) tile: a CSR structure over only the
// non-empty rows of an m-by-n tile, plus a row-id list and a row partitioning
// used for parallel processing. Uses 1-based global edge coordinates on the
// way in/out and 0-based tile-local indices internally.
class DCSRTile {
 public:
  std::string name;
  int m;                // tile row count
  int n;                // tile column count
  int num_rows;         // number of non-empty rows actually stored
  int nnz;              // number of nonzeros; <= 0 means the tile is empty
  int num_partitions;   // number of row partitions for threading
  int * partition_ptrs; // [num_partitions+1] offsets into row_ids
  T* a;                 // [nnz] values
  int* ja;              // [nnz] 0-based column indices
  int* ia;              // [num_rows+1] row pointers over the stored rows
  int* row_ids;         // [num_rows] 0-based ids of the stored rows

  // Serialize
  friend boost::serialization::access;
  template<class Archive>
  void save(Archive& ar, const unsigned int version) const {
    ar & name;
    ar & m;
    ar & n;
    ar & num_rows;
    ar & nnz;
    ar & num_partitions;
    if(!isEmpty())
    {
      for(int i = 0 ; i < nnz ; i++)
      {
        ar & a[i];
      }
      for(int i = 0 ; i < nnz ; i++)
      {
        ar & ja[i];
      }
      // BUGFIX: ia holds num_rows+1 entries (see the edge constructor), not
      // m+1; iterating to m+1 read past the end of the allocation whenever
      // some rows of the tile were empty.
      for(int i = 0 ; i < num_rows+1 ; i++)
      {
        ar & ia[i];
      }
      for(int i = 0 ; i < num_rows ; i++)
      {
        ar & row_ids[i];
      }
    }
  }

  template<class Archive>
  void load(Archive& ar, const unsigned int version) {
    ar & name;
    ar & m;
    ar & n;
    ar & num_rows;
    ar & nnz;
    ar & num_partitions;
    if(!isEmpty())
    {
      a = reinterpret_cast<T*>(
          _mm_malloc((uint64_t)nnz * (uint64_t)sizeof(T), 64));
      ja = reinterpret_cast<int*>(
          _mm_malloc((uint64_t)nnz * (uint64_t)sizeof(int), 64));
      // BUGFIX: allocate and read num_rows+1 entries, matching save() and the
      // edge constructor (previously allocated m+1 and read m+1 entries).
      ia = reinterpret_cast<int*>(_mm_malloc((num_rows + 1) * sizeof(int), 64));
      row_ids = reinterpret_cast<int*>(_mm_malloc(num_rows * sizeof(int), 64));
      for(int i = 0 ; i < nnz ; i++)
      {
        ar & a[i];
      }
      for(int i = 0 ; i < nnz ; i++)
      {
        ar & ja[i];
      }
      for(int i = 0 ; i < num_rows+1 ; i++)
      {
        ar & ia[i];
      }
      for(int i = 0 ; i < num_rows ; i++)
      {
        ar & row_ids[i];
      }
    }
  }
  BOOST_SERIALIZATION_SPLIT_MEMBER()

  // Empty tiles: initialize all scalars/pointers so the destructor and
  // accessors never see indeterminate values.
  DCSRTile()
      : name("TEMP"), m(0), n(0), num_rows(0), nnz(0), num_partitions(0),
        partition_ptrs(NULL), a(NULL), ja(NULL), ia(NULL), row_ids(NULL) {}
  DCSRTile(int _m, int _n)
      : name("TEMP"), m(_m), n(_n), num_rows(0), nnz(0), num_partitions(0),
        partition_ptrs(NULL), a(NULL), ja(NULL), ia(NULL), row_ids(NULL) {}

  // Build the tile from an (unsorted) edge list with 1-based global
  // coordinates; row_start/col_start are the tile's global offsets.
  DCSRTile(edge_t<T>* edges, int _m, int _n, int _nnz, int row_start,
           int col_start)
      : name("TEMP"), m(_m), n(_n), nnz(_nnz) {
    if(nnz > 0)
    {
      // sort edges by (src, dst) so rows are contiguous
      __gnu_parallel::sort(edges, edges + nnz, [](const edge_t<T>& a, const edge_t<T>& b)
      {
        if (a.src < b.src) return true; else if (a.src > b.src) return false;
        if (a.dst < b.dst) return true; else if (a.dst > b.dst) return false;
        return false;
      });

      // tmp_buf[i] = index of the stored row that edge i belongs to
      int * tmp_buf = new int[nnz];
      tmp_buf[0] = 0;
      for(int i = 0 ; i < nnz-1 ; i++)
      {
        if(edges[i+1].src > edges[i].src)
        {
          tmp_buf[i+1] = tmp_buf[i] + 1;
        }
        else
        {
          tmp_buf[i+1] = tmp_buf[i];
        }
      }
      num_rows = tmp_buf[nnz-1]+1;
      row_ids = reinterpret_cast<int*>(_mm_malloc(((num_rows)) * sizeof(int), 64));
      ia = reinterpret_cast<int*>(_mm_malloc(((num_rows) + 1) * sizeof(int), 64));
      row_ids[0] = (edges[0].src - row_start) - 1;
      ia[0] = 0;
      for(int i = 0 ; i < nnz-1 ; i++)
      {
        if(edges[i+1].src > edges[i].src)
        {
          row_ids[tmp_buf[i+1]] = (edges[i+1].src - row_start) - 1;
          ia[tmp_buf[i+1]] = i+1;
        }
      }
      ia[num_rows] = nnz;
      delete [] tmp_buf;

      // Partition the stored rows for threading, snapping each boundary to
      // the next 32-row block so partitions never split a 32-bit row group.
      num_partitions = omp_get_max_threads() * 4;
      partition_ptrs = new int[num_partitions+1];
      int rows_per_partition = ((num_rows + num_partitions) - 1) / num_partitions;
      partition_ptrs[0] = 0;
      for(int p = 1 ; p < num_partitions ; p++)
      {
        int new_row = partition_ptrs[p-1] + rows_per_partition;
        if(new_row >= num_rows)
        {
          // BUGFIX: when the boundary is clamped to (or beyond) num_rows we
          // must not read row_ids[new_row] below (out-of-bounds).
          partition_ptrs[p] = num_rows;
          continue;
        }
        // Increase new row to next 32-bit boundary
        int row32 = row_ids[new_row] / 32;
        while((new_row < num_rows) && ((row_ids[new_row] / 32) == row32))
        {
          new_row++;
        }
        partition_ptrs[p] = new_row;
      }
      partition_ptrs[num_partitions] = num_rows;

      ja = reinterpret_cast<int*>(_mm_malloc((nnz ) * sizeof(int), 64));
      a = reinterpret_cast<T*>(_mm_malloc((nnz) * sizeof(T), 64));
      for(int i = 0 ; i < num_rows ; i++)
      {
        for(int j = ia[i] ; j < ia[i+1] ; j++)
        {
          // convert 1-based global coordinates to 0-based tile-local
          ja[j] = (edges[j].dst - col_start) - 1;
          a[j] = edges[j].val;
        }
      }
#ifdef __DEBUG
      // verify that the DCSR structure round-trips back to the sorted edges
      unsigned long int nzcnt = 0;
      for(int p = 0 ; p < num_partitions ; p++)
      {
        for(int _row = partition_ptrs[p] ; _row < partition_ptrs[p+1]; _row++)
        {
          int row = row_ids[_row];
          for(int j = ia[_row] ; j < ia[_row+1] ; j++)
          {
            assert(edges[nzcnt].src == (row + row_start + 1) );
            assert(edges[nzcnt].dst == (ja[j] + col_start + 1));
            assert(edges[nzcnt].val == (a[j]));
            nzcnt++;
          }
        }
      }
      assert(nzcnt == (unsigned long int)nnz);
#endif
    }
  }

  bool isEmpty() const { return nnz <= 0; }

  // Write the tile's nonzeros back out as 1-based global edges.
  void get_edges(edge_t<T>* edges, int row_start, int col_start) {
    unsigned int nnzcnt = 0;
    if(this->nnz > 0)
    {
#pragma omp parallel for reduction(+:nnzcnt)
      for (int i = 0; i < this->num_rows; i++) {
        for (int nz_id = ia[i]; nz_id < ia[i + 1]; nz_id++) {
          edges[nz_id].src = row_ids[i] + row_start + 1;
          edges[nz_id].dst = ja[nz_id] + col_start + 1;
          edges[nz_id].val = a[nz_id];
          nnzcnt++;
        }
      }
      assert(nnzcnt == (unsigned int)this->nnz);
    }
  }

  // Shallow assignment: both tiles alias the same buffers afterwards
  // (matches the original semantics; ownership discipline is the caller's).
  DCSRTile& operator=(DCSRTile other) {
    this->name = other.name;
    this->m = other.m;
    this->n = other.n;
    this->num_rows = other.num_rows;
    this->nnz = other.nnz;
    this->a = other.a;
    this->ia = other.ia;
    this->row_ids = other.row_ids;
    this->ja = other.ja;
    this->num_partitions = other.num_partitions;
    this->partition_ptrs = other.partition_ptrs;
    // BUGFIX: the function is declared to return DCSRTile& but had no return
    // statement (undefined behavior for any caller using the result).
    return *this;
  }

  // Mark the tile empty; buffers are intentionally kept (and leaked by the
  // destructor's isEmpty() check) to preserve the original semantics.
  void clear() {
    nnz = 0;
  }

  ~DCSRTile(void) {
    if (!isEmpty()) {
      _mm_free(a);
      _mm_free(ja);
      _mm_free(ia);
      _mm_free(row_ids);
      delete [] partition_ptrs;
    }
  }
};
#endif // SRC_DCSRTILE_H_
|
GB_binop__rminus_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fc64)
// A*D function (colscale): GB (_AxD__rminus_fc64)
// D*A function (rowscale): GB (_DxB__rminus_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fc64)
// C=scalar+B GB (_bind1st__rminus_fc64)
// C=scalar+B' GB (_bind1st_tran__rminus_fc64)
// C=A+scalar GB (_bind2nd__rminus_fc64)
// C=A'+scalar GB (_bind2nd_tran__rminus_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_minus (bij, aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_minus (y, x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_FC64 || GxB_NO_RMINUS_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the actual loop lives in the
// shared template.  Unlike the other kernels in this file it returns void and
// has no GB_DISABLE guard -- presumably the caller checks that first.
void GB (_Cdense_ewise3_accum__rminus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation).
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE),
// otherwise runs the shared template and returns GrB_SUCCESS.
GrB_Info GB (_Cdense_ewise3_noaccum__rminus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, parallelized
// over the precomputed B_ek_slicing task decomposition.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB (_Cdense_accumB__rminus_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as untyped GB_void*, reinterpreted as
// GxB_FC64_t) into the dense matrix C.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE),
// otherwise runs the shared template and returns GrB_SUCCESS.
// Fix: removed the duplicate, unreachable "return (GrB_SUCCESS) ;" that
// followed the inner block, which already returns on every path.
GrB_Info GB (_Cdense_accumb__rminus_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// typed result into C->x via the colscale template.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB (_AxD__rminus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing the typed
// result into C->x via the rowscale template.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB (_DxB__rminus_fc64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, driven by the shared add template.
// The three GB_WERK_DECLARE workspaces are slicing scratch used by the
// template and released by GB_FREE_WORK before returning.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB (_AaddB__rminus_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with optional mask, where C is sparse/hyper.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB (_AemultB_08__rminus_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  For this operator GB_BINOP_FLIP is 0, so only the
// non-flipped branch below is compiled in; the flipxy argument is then
// effectively unused (the flip was already folded into the operator).
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB (_AemultB_02__rminus_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper and A, B
// bitmap/full, parallelized over the M_ek_slicing task decomposition.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB (_AemultB_04__rminus_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) where the result C is bitmap.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB (_AemultB_bitmap__rminus_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply rminus with the scalar x bound as the first operand,
// computing Cx [p] = GB_FC64_minus (bij, x) for every present entry of B.
// Entries absent from the bitmap Bb are skipped; a NULL Bb (via GBB) means
// all entries are present.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB (_bind1st__rminus_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC64_minus (bij, x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply rminus with the scalar y bound as the second
// operand, computing Cx [p] = GB_FC64_minus (y, aij) for every present
// entry of A (entries absent from the bitmap Ab are skipped).
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
GrB_Info GB (_bind2nd__rminus_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC64_minus (y, aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_minus (aij, x) ; \
}
// C = op (x, A'): transpose A and apply rminus with scalar x bound first,
// via GB_unop_transpose.c and the GB_CAST_OP macro defined just above.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
GrB_Info GB (_bind1st_tran__rminus_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the translation unit (generated
// boilerplate: the redefinition here happens to match the one above)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_minus (y, aij) ; \
}
// C = op (A', y): transpose A and apply rminus with scalar y bound second,
// via GB_unop_transpose.c and the GB_CAST_OP macro defined just above.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
GrB_Info GB (_bind2nd_tran__rminus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
target_teams_distribute_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute'}}
#pragma omp target teams distribute
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute'}}
#pragma omp target teams distribute foo
// clang -verify test: the bare directive accepts a for loop; any other
// associated statement must be diagnosed.  (Do not reflow: the expected-*
// directives below are line-offset sensitive.)
void test_no_clause() {
int i;
#pragma omp target teams distribute
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp target teams distribute' must be a for loop}}
#pragma omp target teams distribute
++i;
}
// clang -verify test: branches (goto/return) may not cross the OpenMP region
// boundary in either direction; jumps wholly inside the region are fine.
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target teams distribute
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
// clang -verify test: unknown tokens after the directive are warned about
// and ignored.
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute foo bar
for (i = 0; i < 16; ++i)
;
}
// clang -verify test: stray punctuation (';' or ',') after the directive or
// its clauses produces the extra-tokens warning.
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
// clang -verify test for the 'collapse' clause: malformed argument lists,
// non-constant / non-positive expressions, insufficient loop nests for the
// requested collapse depth, and the firstprivate-loop-variable restriction.
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
#pragma omp target teams distribute collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target teams distribute collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target teams distribute collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{loop iteration variable in the associated loop of 'omp target teams distribute' directive may not be firstprivate, predetermined as private}}
// expected-note@+1 {{defined as firstprivate}}
#pragma omp target teams distribute collapse(2) firstprivate(i)
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
#pragma omp parallel for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
// clang -verify test for the 'private' clause: malformed argument lists,
// non-variable arguments, and well-formed single/multi-variable uses.
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// clang -verify test for the 'lastprivate' clause: same malformed-argument
// matrix as test_private, plus well-formed uses.
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// clang -verify test for the 'firstprivate' clause, including the legal
// combination of lastprivate and firstprivate on the same variables.
void test_firstprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// clang -verify test: the canonical-loop iteration variable must be of
// integer or pointer type; float/double loop variables are diagnosed.
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
GB_unaryop__minv_uint32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint32_bool
// op(A') function: GB_tran__minv_uint32_bool
// C type: uint32_t
// A type: bool
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 32)
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 32) ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): for each of the anz entries, cast bool aij to
// uint32_t and apply GB_IMINV_UNSIGNED (x, 32) via the GB_CAST_OP macro,
// in parallel over nthreads.  Returns GrB_NO_VALUE when compiled out
// (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB_unop__minv_uint32_bool
(
uint32_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast bool->uint32_t, and apply the
// minv operator, all via the shared transpose template (phase 2 of 2).
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE), else GrB_SUCCESS.
GrB_Info GB_tran__minv_uint32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
chkompt.c | // RUN: %libomp-compile && env OMP_CANCELLATION=true %libomp-run | %sort-threads | FileCheck %s
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <omp-tools.h>
#include "chkompt.h"
// Global failure counter, incremented by the validation callbacks and
// checked at the end of main().
int nfails = 0;
// Calls go through function pointers so the compiler cannot inline the
// delay/skew helpers -- presumably to keep their frames visible to OMPT;
// TODO confirm the original motivation.
void (*skew_delay_ptr)(int) = skew_delay;
void (*delay_ptr)(int) = delay;
// ------------------------------------------------------------------------
// Main program -- drive various tests
// ------------------------------------------------------------------------
// Drives each OMPT scenario in turn, separated by CPU-burning delays so the
// per-test output does not interleave.  Exits 1 if any validation failed,
// else prints "No failures" (matched by the FileCheck line below) and exits 0.
int main(int argc, char **argv)
{
char buf[256];
starttime = gethrtime();
// fprintf(stderr, "main invoked\n");
// Set thread count; causes the initialization of the OMPT code
omp_set_num_threads(NUMTHREADS);
// test lock callbacks
lockcbtest();
(*delay_ptr)(10);
// test reduction
reductiontest();
(*delay_ptr)(10);
// Test frames for multiple loops in a single parallel region
testparallel();
(*delay_ptr)(10);
// Test frames for independent parallel for loops with static scheduling
testparallelfor();
(*delay_ptr)(10);
// test parallel sections
testparallelsections();
(*delay_ptr)(10);
// test explicit tasks
testtasks();
(*delay_ptr)(10);
// test triply nested loops
testtriple_nest();
(*delay_ptr)(10);
#ifndef NO_NONEST
testtriple_nonest();
(*delay_ptr)(10);
#endif
// Check for failures
if (nfails != 0 ) {
sprintf(buf, "\n FAILURE:\n\t%d ERROR%s detected\n\n",
nfails,
nfails == 1 ? "" : "s" );
ts_write(buf);
printf("\n FAILURE:\n\t%d ERROR%s detected\n\n",
nfails,
nfails == 1 ? "" : "s" );
exit(1);
} else {
ts_write("\n No failures\n\n");
printf("No failures\n");
exit(0);
}
// CHECK: No failures
}
// ------------------------------------------------------------------------
// Test "omp parallel" with "omp for" loops with various schedules
// ------------------------------------------------------------------------
// One parallel region containing four worksharing loops (default, static,
// dynamic, guided schedules); validates OMPT state at region entry/exit and
// on every loop iteration.  The "#pragma omp master" blocks keep logging and
// delays to a single thread.
void testparallel()
{
int i;
ts_write("\n starting testparallel\n\n");
#pragma omp parallel private(i)
{
(*validate_ptr)("parallel start");
#pragma omp master
ts_write("\n starting for\n\n");
#pragma omp for
for(i = 0; i < N; i++) (*validate_ptr)("for");
#pragma omp master
(*delay_ptr)(10);
#pragma omp master
ts_write("\n starting for static\n\n");
#pragma omp for schedule(static)
for(i = 0; i < N; i++) (*validate_ptr)("for schedule(static)");
#pragma omp master
(*delay_ptr)(10);
#pragma omp master
ts_write("\n starting for dynamic\n\n");
#pragma omp for schedule(dynamic)
for(i = 0; i < N; i++) (*validate_ptr)("for schedule(dynamic)");
#pragma omp master
(*delay_ptr)(10);
#pragma omp master
ts_write("\n starting for guided\n\n");
#pragma omp for schedule(guided)
for(i = 0; i < N; i++) (*validate_ptr)("for schedule(guided)");
#pragma omp master
(*delay_ptr)(10);
(*validate_ptr)("parallel end");
}
}
// ------------------------------------------------------------------------
// Test "omp parallel for" loops with various schedules
// ------------------------------------------------------------------------
// Runs the independent combined parallel-for tests (static, dynamic, guided)
// and the sections test, with a delay after each to separate their output.
void
testparallelfor()
{
teststatic();
(*delay_ptr)(10);
testdynamic();
(*delay_ptr)(10);
testguided();
(*delay_ptr)(10);
testsections();
(*delay_ptr)(10);
}
// ------------------------------------------------------------------------
// Test "omp parallel for" loops with various schedules
// ------------------------------------------------------------------------
// Combined "parallel for" with static scheduling; validates every iteration.
void teststatic()
{
int i;
ts_write("\n starting teststatic\n\n");
#pragma omp parallel for schedule(static) private(i)
for(i = 0; i < N; i++) (*validate_ptr)("parallel for static");
}
// Combined "parallel for" with dynamic scheduling; validates every iteration.
void testdynamic()
{
int i;
ts_write("\n starting testdynamic\n\n");
#pragma omp parallel for schedule(dynamic) private(i)
for(i = 0; i < N; i++) (*validate_ptr)("parallel for dynamic");
}
// Combined "parallel for" with guided scheduling; validates every iteration.
void testguided()
{
int i;
ts_write("\n starting testguided\n\n");
#pragma omp parallel for schedule(guided) private(i)
for(i = 0; i < N; i++) (*validate_ptr)("parallel for guided");
}
// ------------------------------------------------------------------------
// Test "omp sections"
// ------------------------------------------------------------------------
// "omp sections" inside an explicit parallel region: three sections, each
// validating OMPT state; optional per-section skew delays stagger the
// threads when RUN_SKEW is defined.
void testsections()
{
ts_write("\n starting testsections\n\n");
#pragma omp parallel
{
#pragma omp sections
{
#pragma omp section
{
(*validate_ptr)("omp section 1");
#ifdef RUN_SKEW
(*skew_delay_ptr)(1);
#endif
}
#pragma omp section
{
(*validate_ptr)("omp section 2");
#ifdef RUN_SKEW
(*skew_delay_ptr)(2);
#endif
}
#pragma omp section
{
(*validate_ptr)("omp section 3");
#ifdef RUN_SKEW
(*skew_delay_ptr)(3);
#endif
}
}
}
}
// Combined "parallel sections" with an explicit thread count; each section
// validates OMPT state once.
void testparallelsections()
{
ts_write("\n starting testparallelsections\n\n");
#pragma omp parallel sections num_threads(NUMTHREADS)
{
#pragma omp section
(*validate_ptr)("omp parallel section 1");
#pragma omp section
(*validate_ptr)("omp parallel section 2");
#pragma omp section
(*validate_ptr)("omp parallel section 3");
}
}
// Explicit tasks: a single construct spawns nine tasks; the team's other
// threads pick them up, and each task validates OMPT state.
void testtasks()
{
ts_write("\n starting testtasks\n\n");
#pragma omp parallel
{
#pragma omp single
{
#pragma omp task
(*validate_ptr)("omp task 1");
#pragma omp task
(*validate_ptr)("omp task 2");
#pragma omp task
(*validate_ptr)("omp task 3");
#pragma omp task
(*validate_ptr)("omp task 4");
#pragma omp task
(*validate_ptr)("omp task 5");
#pragma omp task
(*validate_ptr)("omp task 6");
#pragma omp task
(*validate_ptr)("omp task 7");
#pragma omp task
(*validate_ptr)("omp task 8");
#pragma omp task
(*validate_ptr)("omp task 9");
}
}
}
void loop0();
void loop1();
void loop2();
void loop3();
// testtriple_nest -- test a triply-nested set of loops, with nesting enabled
//
// testtriple_nest -- test a triply-nested set of loops, with nesting enabled
//
void testtriple_nest()
{
ts_write("\n starting testtriple_nest\n\n");
// Set omp_max_active_levels, to allow nested loops
omp_set_max_active_levels(5);
// now invoke the triply-nested loop
testtriple();
}
// testtriple_nonest -- test a triply-nested set of loops, with nesting disabled
//
// testtriple_nonest -- test a triply-nested set of loops, with nesting
// disabled: inner regions should then run serialized on one level.
//
void testtriple_nonest()
{
ts_write("\n starting testtriple_nonest\n\n");
// Set omp_max_active_levels, to 1, disallowing nesting
omp_set_max_active_levels(1);
// now invoke the triply-nested loop
testtriple();
}
// testtriple --the actual code, triply nested in source
// testtriple -- four textually nested parallel regions (2 x 2 x 3 x 3
// threads when nesting is active); each level calls its loopN helper.
void
testtriple()
{
#pragma omp parallel num_threads(2)
{
loop0();
#pragma omp parallel num_threads(2)
{
loop1();
#pragma omp parallel num_threads(3)
{
loop2();
#pragma omp parallel num_threads(3)
{
loop3();
}
}
}
}
// omp_set_num_threads(NUMTHREADS);
}
#define ITERATIONS 100000000
// Formats "Begin <label> t=<thread> l=<level> pt=<ancestor>" into buffer,
// recording the current nesting level, thread id, and the calling thread's
// id at that level.  buffer must be large enough for the result (callers
// pass 100-byte buffers).
void
form_label(char *buffer, char *label)
{
int level, thread, pthread;
level = omp_get_level();
thread = omp_get_thread_num();
pthread = omp_get_ancestor_thread_num(level);
sprintf(buffer, "Begin %s t=%d l=%d pt=%d", label, thread, level, pthread);
}
// Level-0 body: validate on entry, burn ITERATIONS loop trips (j advances by
// a net +1 per iteration: j += 2 then j--), then validate again on exit.
void
loop0()
{
int j;
char buf[100];
form_label( buf, "loop0");
(*validate_ptr)(buf);
for(j=0;j<ITERATIONS;j+=2) j--;
// strncpy truncates buf to just "End " (NUL-terminated), discarding the
// rest of the label -- presumably intentional; confirm against the
// expected trace output.
strncpy (buf, "End ", 6);
(*validate_ptr)(buf);
}
// Level-1 body: same shape as loop0 (validate, burn CPU, validate).
void
loop1()
{
int j;
char buf[100];
form_label( buf, "loop1");
(*validate_ptr)(buf);
for(j=0;j<ITERATIONS;j+=2) j--;
// truncates buf to "End " -- see note in loop0
strncpy (buf, "End ", 6);
(*validate_ptr)(buf);
}
// Level-2 body: same shape as loop0 (validate, burn CPU, validate).
void
loop2()
{
int j;
char buf[100];
form_label( buf, "loop2");
(*validate_ptr)(buf);
for(j=0;j<ITERATIONS;j+=2) j--;
// truncates buf to "End " -- see note in loop0
strncpy (buf, "End ", 6);
(*validate_ptr)(buf);
}
// Level-3 (innermost) body: same shape as loop0 (validate, burn CPU,
// validate).
void
loop3()
{
int j;
char buf[100];
form_label( buf, "loop3");
(*validate_ptr)(buf);
for(j=0;j<ITERATIONS;j+=2) j--;
// truncates buf to "End " -- see note in loop0
strncpy (buf, "End ", 6);
(*validate_ptr)(buf);
}
// reductiontest -- check for appropriate callbacks
//
// Runs a parallel-for with a '+' reduction on sum and fires the validation
// callback once per iteration. N is defined elsewhere in this translation
// unit. The reduced value itself is discarded; only the callback sequence
// generated by the reduction construct is under test.
void
reductiontest()
{
    int sum, i;
    ts_write("\n starting reductiontest\n\n");
    sum = 0;
#pragma omp parallel for reduction(+:sum)
    for(i = 0; i < N; i++) {
        sum += i;
        (*validate_ptr)("reductiontest");
    }
}
// -----------------------------------------------------------
// lockcbtest -- make various omp lock calls and verify that
//   the code pointers are plausible
//
// Exercises plain and nested OpenMP locks from the master thread of a
// parallel region. The labels mark the return addresses the lock-callback
// code pointers are expected to approximate; do not move them relative to
// the lock calls.
//
void
lockcbtest()
{
    omp_lock_t lock1, lock2;
    omp_nest_lock_t lock3;
    ts_write("\n starting lockcbtest\n\n");
    // initialize the locks
    omp_init_lock(&lock1);
    omp_init_lock(&lock2);
    omp_init_nest_lock(&lock3);
#pragma omp parallel
    {
        (*validate_ptr)("lockcb start");
        // Only the master thread takes the locks, so there is no contention.
#pragma omp master
        {
            omp_set_lock(&lock1); // code pointer should be approximately label1
label1:     omp_unset_lock(&lock1);
            omp_set_lock(&lock2); // code pointer should be approximately label2
label2:     omp_unset_lock(&lock2);
            // now try a nested lock: three acquisitions by the same thread,
            // balanced by three releases.
            omp_set_nest_lock(&lock3);
            omp_set_nest_lock(&lock3);
            omp_set_nest_lock(&lock3);
            omp_unset_nest_lock(&lock3);
            omp_unset_nest_lock(&lock3);
            omp_unset_nest_lock(&lock3);
        }
        (*validate_ptr)("lockcb end");
    }
    omp_destroy_lock(&lock1);
    omp_destroy_lock(&lock2);
    omp_destroy_nest_lock(&lock3);
}
// ------------------------------------------------------------------------
// skew_delay -- burn CPU time to delay threads
//
// Performs 7*count outer passes; each pass counts a volatile float up to
// NSKEW so the compiler cannot optimize the work away.
// ------------------------------------------------------------------------
void
skew_delay(int count)
{
    volatile float x;
    int outer;
    int inner;
    const int passes = 7 * count;
    for (outer = 0; outer < passes; outer++) {
        x = 0;
        for (inner = 0; inner < NSKEW; inner++) {
            x = x + 1.0;
        }
    }
}
// ------------------------------------------------------------------------
// delay -- burn CPU time in main program to space out operations
//
// Identical workload to skew_delay(): 7*count passes of NSKEW volatile
// float increments, immune to dead-code elimination.
// ------------------------------------------------------------------------
void
delay(int count)
{
    volatile float x;
    int pass;
    int step;
    const int total = 7 * count;
    for (pass = 0; pass < total; pass++) {
        x = 0;
        for (step = 0; step < NSKEW; step++) {
            x = x + 1.0;
        }
    }
}
#include "omptcb.h"
|
residualbased_predictorcorrector_velocity_bossak_scheme_turbulent.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
//
#if !defined(KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME )
#define KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME
/* System includes */
/* External includes */
#include "boost/smart_ptr.hpp"
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "solving_strategies/schemes/scheme.h"
#include "includes/variables.h"
#include "includes/cfd_variables.h"
#include "containers/array_1d.h"
#include "utilities/openmp_utils.h"
#include "utilities/dof_updater.h"
#include "utilities/coordinate_transformation_utilities.h"
#include "processes/process.h"
namespace Kratos {
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/// Bossak time scheme for the incompressible flow problem.
/** This class provides a second order time scheme of the generalized-alpha Newmark
family of methods. It also includes code required to implement slip conditions
on the incompressible flow problem and provides the possibility of using a RANS
model by passing a turbulence model as an argument to the constructor.
This time scheme is intended to be used in combination with elements of type
ASGS2D, ASGS3D, VMS or derived classes.
To use the slip condition, set the SLIP flag on slip wall nodes. To use
a wall law in combination with the slip condition, use MonolithicWallCondition to
mesh the boundary
@see ASGS2D, ASGS3D, VMS, MonolithicWallConditon
*/
template<class TSparseSpace,
class TDenseSpace //= DenseSpace<double>
>
class ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent : public Scheme<TSparseSpace, TDenseSpace> {
public:
/**@name Type Definitions */
/*@{ */
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent);
typedef Scheme<TSparseSpace, TDenseSpace> BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef Element::GeometryType GeometryType;
/*@} */
/**@name Life Cycle
*/
/*@{ */
/** Constructor without a turbulence model
*/
ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
double NewAlphaBossak,
double MoveMeshStrategy,
unsigned int DomainSize)
:
Scheme<TSparseSpace, TDenseSpace>(),
mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs.
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
{
//default values for the Newmark Scheme
mAlphaBossak = NewAlphaBossak;
mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
mGammaNewmark = 0.5 - mAlphaBossak;
mMeshVelocity = MoveMeshStrategy;
//Allocate auxiliary memory
int NumThreads = OpenMPUtils::GetNumThreads();
mMass.resize(NumThreads);
mDamp.resize(NumThreads);
mvel.resize(NumThreads);
macc.resize(NumThreads);
maccold.resize(NumThreads);
}
/** Constructor without a turbulence model with periodic conditions
*/
ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
double NewAlphaBossak,
unsigned int DomainSize,
const Variable<int>& rPeriodicIdVar)
:
Scheme<TSparseSpace, TDenseSpace>(),
mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs.
mrPeriodicIdVar(rPeriodicIdVar)
{
//default values for the Newmark Scheme
mAlphaBossak = NewAlphaBossak;
mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
mGammaNewmark = 0.5 - mAlphaBossak;
mMeshVelocity = 0.0;
//Allocate auxiliary memory
int NumThreads = OpenMPUtils::GetNumThreads();
mMass.resize(NumThreads);
mDamp.resize(NumThreads);
mvel.resize(NumThreads);
macc.resize(NumThreads);
maccold.resize(NumThreads);
}
/** Constructor without a turbulence model
*/
ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
double NewAlphaBossak,
double MoveMeshStrategy,
unsigned int DomainSize,
Kratos::Flags& rSlipFlag)
:
Scheme<TSparseSpace, TDenseSpace>(),
mRotationTool(DomainSize,DomainSize+1,rSlipFlag), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs.
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
{
//default values for the Newmark Scheme
mAlphaBossak = NewAlphaBossak;
mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
mGammaNewmark = 0.5 - mAlphaBossak;
mMeshVelocity = MoveMeshStrategy;
//Allocate auxiliary memory
int NumThreads = OpenMPUtils::GetNumThreads();
mMass.resize(NumThreads);
mDamp.resize(NumThreads);
mvel.resize(NumThreads);
macc.resize(NumThreads);
maccold.resize(NumThreads);
}
/** Constructor with a turbulence model
*/
ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
double NewAlphaBossak,
double MoveMeshStrategy,
unsigned int DomainSize,
Process::Pointer pTurbulenceModel)
:
Scheme<TSparseSpace, TDenseSpace>(),
mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()),
mpTurbulenceModel(pTurbulenceModel)
{
//default values for the Newmark Scheme
mAlphaBossak = NewAlphaBossak;
mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
mGammaNewmark = 0.5 - mAlphaBossak;
mMeshVelocity = MoveMeshStrategy;
//Allocate auxiliary memory
int NumThreads = OpenMPUtils::GetNumThreads();
mMass.resize(NumThreads);
mDamp.resize(NumThreads);
mvel.resize(NumThreads);
macc.resize(NumThreads);
maccold.resize(NumThreads);
}
/** Constructor with a turbulence model and relaxation factor
*/
ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
double NewAlphaBossak,
double MoveMeshStrategy,
unsigned int DomainSize,
const double RelaxationFactor,
Process::Pointer pTurbulenceModel)
:
Scheme<TSparseSpace, TDenseSpace>(),
mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs
mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()),
mpTurbulenceModel(pTurbulenceModel)
{
//default values for the Newmark Scheme
mAlphaBossak = NewAlphaBossak;
mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
mGammaNewmark = 0.5 - mAlphaBossak;
mMeshVelocity = MoveMeshStrategy;
mRelaxationFactor = RelaxationFactor;
//Allocate auxiliary memory
int NumThreads = OpenMPUtils::GetNumThreads();
mMass.resize(NumThreads);
mDamp.resize(NumThreads);
mvel.resize(NumThreads);
macc.resize(NumThreads);
maccold.resize(NumThreads);
}
/** Destructor.
*/
~ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent() override {
}
/*@} */
/**@name Operators
*/
/*@{ */
/**
Performing the update of the solution.
*/
//***************************************************************************
    /** Apply the (relaxed) solution increment to the DOFs and update time derivatives.
     * @param r_model_part Model part being solved.
     * @param rDofSet Set of degrees of freedom to update.
     * @param A System matrix (forwarded, not used directly here).
     * @param Dv Solution increment; scaled in place by mRelaxationFactor.
     * @param b System RHS (forwarded, not used directly here).
     */
    void Update(ModelPart& r_model_part,
                DofsArrayType& rDofSet,
                TSystemMatrixType& A,
                TSystemVectorType& Dv,
                TSystemVectorType& b) override
    {
        KRATOS_TRY;
        // Work in the rotated (slip-aligned) frame while applying the increment.
        mRotationTool.RotateVelocities(r_model_part);
        // Relax the increment before applying it (mRelaxationFactor defaults to 1.0).
        TSparseSpace::InplaceMult(Dv, mRelaxationFactor);
        mpDofUpdater->UpdateDofs(rDofSet,Dv);
        // Return nodal velocities to the global frame.
        mRotationTool.RecoverVelocities(r_model_part);
        // Recompute accelerations (and displacements, if Lagrangian) from the new velocities.
        AdditionalUpdateOperations(r_model_part, rDofSet, A, Dv, b);
        KRATOS_CATCH("")
    }
//***************************************************************************
    /** Update nodal time derivatives after a solution update.
     * Recomputes ACCELERATION from the new velocity (Bossak/Newmark rule) and,
     * in the Lagrangian case (mMeshVelocity == 2), also updates DISPLACEMENT
     * and MESH_VELOCITY. Nodes are processed in parallel, one partition per thread.
     * @param rModelPart Model part being solved.
     * @param rDofSet Unused here; kept for interface symmetry with Update().
     * @param A,Dv,b System objects, unused here.
     */
    void AdditionalUpdateOperations(ModelPart& rModelPart,
                                    DofsArrayType& rDofSet,
                                    TSystemMatrixType& A,
                                    TSystemVectorType& Dv,
                                    TSystemVectorType& b)
    {
        KRATOS_TRY
        int NumThreads = OpenMPUtils::GetNumThreads();
        OpenMPUtils::PartitionVector NodePartition;
        OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition);
        //updating time derivatives (nodally for efficiency)
        #pragma omp parallel
        {
            array_1d<double, 3 > DeltaVel;
            int k = OpenMPUtils::ThisThread();
            // Each thread iterates only its own contiguous node range.
            ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k];
            ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1];
            for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++) {
                noalias(DeltaVel) = (itNode)->FastGetSolutionStepValue(VELOCITY) - (itNode)->FastGetSolutionStepValue(VELOCITY, 1);
                array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0);
                array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1);
                UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration);
                if (mMeshVelocity == 2)//Lagrangian
                {
                    // IS_LAGRANGIAN_INLET below ~0 means a regular (moving) node.
                    if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15)
                    {
                        array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0);
                        array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1);
                        array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1);
                        noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY);
                        UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration);
                    }
                    else
                    {
                        // Lagrangian inlet nodes stay fixed: zero mesh velocity and displacement.
                        noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = ZeroVector(3);
                        noalias(itNode->FastGetSolutionStepValue(DISPLACEMENT)) = ZeroVector(3);
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }
//***************************************************************************
    //predicts the solution at the current step as
    // v = vold
    /** Initialize the current step's unknowns from the previous step.
     * Free velocity/pressure DOFs are set to their old values; accelerations
     * (and displacements, if Lagrangian) are then made consistent with the
     * predicted velocities. Runs node-parallel with one partition per thread.
     */
    void Predict(ModelPart& rModelPart,
                 DofsArrayType& rDofSet,
                 TSystemMatrixType& A,
                 TSystemVectorType& Dv,
                 TSystemVectorType& b) override
    {
        // if (rModelPart.GetCommunicator().MyPID() == 0)
        //     std::cout << "prediction" << std::endl;
        int NumThreads = OpenMPUtils::GetNumThreads();
        OpenMPUtils::PartitionVector NodePartition;
        OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition);
        #pragma omp parallel
        {
            //array_1d<double, 3 > DeltaDisp;
            int k = OpenMPUtils::ThisThread();
            // Each thread iterates only its own contiguous node range.
            ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k];
            ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1];
            for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++) {
                array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1);
                double& OldPressure = (itNode)->FastGetSolutionStepValue(PRESSURE, 1);
                //predicting velocity
                //ATTENTION::: the prediction is performed only on free nodes
                array_1d<double, 3 > & CurrentVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY);
                double& CurrentPressure = (itNode)->FastGetSolutionStepValue(PRESSURE);
                if ((itNode->pGetDof(VELOCITY_X))->IsFree())
                    (CurrentVelocity[0]) = OldVelocity[0];
                if (itNode->pGetDof(VELOCITY_Y)->IsFree())
                    (CurrentVelocity[1]) = OldVelocity[1];
                // VELOCITY_Z may not exist in 2D problems, hence the HasDofFor guard.
                if (itNode->HasDofFor(VELOCITY_Z))
                    if (itNode->pGetDof(VELOCITY_Z)->IsFree())
                        (CurrentVelocity[2]) = OldVelocity[2];
                if (itNode->pGetDof(PRESSURE)->IsFree())
                    CurrentPressure = OldPressure;
                // updating time derivatives ::: please note that displacements and
                // their time derivatives can not be consistently fixed separately
                array_1d<double, 3 > DeltaVel;
                noalias(DeltaVel) = CurrentVelocity - OldVelocity;
                array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1);
                array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION);
                UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration);
                if (mMeshVelocity == 2) //Lagrangian
                {
                    array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1);
                    array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0);
                    // IS_LAGRANGIAN_INLET below ~0 means a regular (moving) node.
                    if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15)
                    {
                        noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY);
                        UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration);
                    }
                    else
                    {
                        // NOTE(review): only the X and Y components are zeroed here,
                        // unlike AdditionalUpdateOperations which zeroes full vectors
                        // -- presumably the Lagrangian inlet is 2D-only; confirm.
                        itNode->FastGetSolutionStepValue(MESH_VELOCITY_X) = 0.0;
                        itNode->FastGetSolutionStepValue(MESH_VELOCITY_Y) = 0.0;
                        itNode->FastGetSolutionStepValue(DISPLACEMENT_X) = 0.0;
                        itNode->FastGetSolutionStepValue(DISPLACEMENT_Y) = 0.0;
                    }
                }
            }
        }
        // if (rModelPart.GetCommunicator().MyPID() == 0)
        //     std::cout << "end of prediction" << std::endl;
    }
//***************************************************************************
    /** Compute the elemental LHS and RHS including the time-integration terms.
     * Designed to be called from the builder-and-solver: asks the element for
     * its local system, mass matrix and velocity (damping) contribution, adds
     * the Bossak dynamic terms, and finally rotates the local system and
     * applies the slip condition where needed.
     * @param rCurrentElement Element being assembled.
     * @param[out] LHS_Contribution Elemental dynamic LHS.
     * @param[out] RHS_Contribution Elemental dynamic RHS.
     * @param[out] EquationId Global equation ids for the element's DOFs.
     * @param CurrentProcessInfo ProcessInfo of the model part.
     */
    void CalculateSystemContributions(Element::Pointer rCurrentElement,
                                      LocalSystemMatrixType& LHS_Contribution,
                                      LocalSystemVectorType& RHS_Contribution,
                                      Element::EquationIdVectorType& EquationId,
                                      ProcessInfo& CurrentProcessInfo) override
    {
        KRATOS_TRY
        // Per-thread index into the scratch matrices (mMass/mDamp).
        int k = OpenMPUtils::ThisThread();
        //Initializing the non linear iteration for the current element
        (rCurrentElement) -> InitializeNonLinearIteration(CurrentProcessInfo);
        //KRATOS_WATCH(LHS_Contribution);
        //basic operations for the element considered
        (rCurrentElement)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
        //std::cout << rCurrentElement->Id() << " RHS = " << RHS_Contribution << std::endl;
        (rCurrentElement)->CalculateMassMatrix(mMass[k], CurrentProcessInfo);
        (rCurrentElement)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo);
        (rCurrentElement)->EquationIdVector(EquationId, CurrentProcessInfo);
        //adding the dynamic contributions (statics is already included)
        AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);
        AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);
        // If there is a slip condition, apply it on a rotated system of coordinates
        mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry());
        mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry());
        KRATOS_CATCH("")
    }
    /** RHS-only counterpart of CalculateSystemContributions().
     * Computes the elemental RHS with dynamic contributions, then rotates it
     * and applies the slip condition. The LHS is not assembled.
     */
    void Calculate_RHS_Contribution(Element::Pointer rCurrentElement,
                                    LocalSystemVectorType& RHS_Contribution,
                                    Element::EquationIdVectorType& EquationId,
                                    ProcessInfo& CurrentProcessInfo) override
    {
        // Per-thread index into the scratch matrices (mMass/mDamp).
        int k = OpenMPUtils::ThisThread();
        //Initializing the non linear iteration for the current element
        (rCurrentElement) -> InitializeNonLinearIteration(CurrentProcessInfo);
        //basic operations for the element considered
        (rCurrentElement)->CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo);
        (rCurrentElement)->CalculateMassMatrix(mMass[k], CurrentProcessInfo);
        (rCurrentElement)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo);
        (rCurrentElement)->EquationIdVector(EquationId, CurrentProcessInfo);
        //adding the dynamic contributions (static is already included)
        AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);
        // If there is a slip condition, apply it on a rotated system of coordinates
        mRotationTool.Rotate(RHS_Contribution,rCurrentElement->GetGeometry());
        mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentElement->GetGeometry());
    }
    /** Condition counterpart of CalculateSystemContributions():
     * assembles the condition's local system with dynamic terms, then rotates
     * and applies the slip condition.
     */
    void Condition_CalculateSystemContributions(Condition::Pointer rCurrentCondition,
                                                LocalSystemMatrixType& LHS_Contribution,
                                                LocalSystemVectorType& RHS_Contribution,
                                                Element::EquationIdVectorType& EquationId,
                                                ProcessInfo& CurrentProcessInfo) override
    {
        KRATOS_TRY
        // Per-thread index into the scratch matrices (mMass/mDamp).
        int k = OpenMPUtils::ThisThread();
        //KRATOS_WATCH("CONDITION LOCALVELOCITYCONTRIBUTION IS NOT DEFINED");
        (rCurrentCondition) -> InitializeNonLinearIteration(CurrentProcessInfo);
        (rCurrentCondition)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
        (rCurrentCondition)->CalculateMassMatrix(mMass[k], CurrentProcessInfo);
        //(rCurrentCondition)->CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo);
        (rCurrentCondition)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo);
        (rCurrentCondition)->EquationIdVector(EquationId, CurrentProcessInfo);
        //adding the dynamic contributions (statics is already included)
        AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);
        AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo);
        // Rotate contributions (to match coordinates for slip conditions)
        mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());
        mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());
        KRATOS_CATCH("")
    }
    /** RHS-only counterpart of Condition_CalculateSystemContributions().
     * Computes the condition's RHS with dynamic contributions, then rotates
     * it and applies the slip condition.
     */
    void Condition_Calculate_RHS_Contribution(Condition::Pointer rCurrentCondition,
                                              LocalSystemVectorType& RHS_Contribution,
                                              Element::EquationIdVectorType& EquationId,
                                              ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY;
        // Per-thread index into the scratch matrices (mMass/mDamp).
        int k = OpenMPUtils::ThisThread();
        //KRATOS_WATCH("CONDITION LOCALVELOCITYCONTRIBUTION IS NOT DEFINED");
        //Initializing the non linear iteration for the current condition
        (rCurrentCondition) -> InitializeNonLinearIteration(rCurrentProcessInfo);
        //basic operations for the element considered
        (rCurrentCondition)->CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo);
        (rCurrentCondition)->CalculateMassMatrix(mMass[k],rCurrentProcessInfo);
        //(rCurrentCondition)->CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo);
        (rCurrentCondition)->CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution,rCurrentProcessInfo);
        (rCurrentCondition)->EquationIdVector(EquationId,rCurrentProcessInfo);
        //adding the dynamic contributions (static is already included)
        AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k],rCurrentProcessInfo);
        // Rotate contributions (to match coordinates for slip conditions)
        mRotationTool.Rotate(RHS_Contribution,rCurrentCondition->GetGeometry());
        mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentCondition->GetGeometry());
        KRATOS_CATCH("");
    }
//*************************************************************************************
//*************************************************************************************
    /** Initialize the step: validate DELTA_TIME and precompute the Bossak/Newmark
     * integration constants used by the update and dynamic-assembly routines.
     * @throws std::logic_error (via KRATOS_THROW_ERROR) if DELTA_TIME is exactly 0.
     */
    void InitializeSolutionStep(ModelPart& r_model_part,
                                TSystemMatrixType& A,
                                TSystemVectorType& Dx,
                                TSystemVectorType& b) override
    {
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b);
        double DeltaTime = CurrentProcessInfo[DELTA_TIME];
        // Exact-zero check: an unset time step, not a tolerance issue.
        if (DeltaTime == 0)
            KRATOS_THROW_ERROR(std::logic_error, "detected delta_time = 0 in the Bossak Scheme ... check if the time step is created correctly for the current model part", "");
        //initializing constants
        ma0 = 1.0 / (mGammaNewmark * DeltaTime);                        // 1/(gamma*dt)
        ma1 = DeltaTime * mBetaNewmark / mGammaNewmark;                 // dt*beta/gamma
        ma2 = (-1 + mGammaNewmark) / mGammaNewmark;                     // (gamma-1)/gamma
        ma3 = DeltaTime;                                                // dt
        ma4 = pow(DeltaTime, 2)*(-2.0 * mBetaNewmark + 1.0) / 2.0;      // dt^2*(1-2*beta)/2
        ma5 = pow(DeltaTime, 2) * mBetaNewmark;                         // dt^2*beta
        mam = (1.0 - mAlphaBossak) / (mGammaNewmark * DeltaTime);       // (1-alpha)/(gamma*dt)
    }
//*************************************************************************************
//*************************************************************************************
    /** Finalize a non-linear iteration: run the turbulence model (if any) and,
     * when OSS_SWITCH is active, recompute the orthogonal-subscale projections
     * (ADVPROJ, DIVPROJ) normalized by NODAL_AREA.
     */
    void FinalizeNonLinIteration(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override
    {
        const auto& r_current_process_info = rModelPart.GetProcessInfo();
        if (mpTurbulenceModel) // If not null
            mpTurbulenceModel->Execute();
        //if orthogonal subscales are computed
        if (r_current_process_info[OSS_SWITCH] == 1.0) {
            KRATOS_INFO("Bossak Scheme") << "Computing OSS projections" << std::endl;
            // Pass 1: reset the projection variables on every node.
            const int nnodes = static_cast<int>(rModelPart.Nodes().size());
            auto nbegin = rModelPart.NodesBegin();
            #pragma omp parallel for firstprivate(nbegin,nnodes)
            for(int i=0; i<nnodes; ++i)
            {
                auto ind = nbegin + i;
                noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3);
                ind->FastGetSolutionStepValue(DIVPROJ) = 0.0;
                ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
            }//end of loop over nodes
            // Pass 2: elements accumulate ADVPROJ / DIVPROJ / NODAL_AREA on their nodes.
            array_1d<double, 3 > output = ZeroVector(3);
            const int nel = static_cast<int>(rModelPart.Elements().size());
            auto elbegin = rModelPart.ElementsBegin();
            #pragma omp parallel for firstprivate(elbegin,nel,output)
            for(int i=0; i<nel; ++i)
            {
                auto elem = elbegin + i;
                elem->Calculate(ADVPROJ, output, r_current_process_info);
            }
            // Sum contributions across MPI partitions.
            rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
            rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
            rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);
            // Correction for periodic conditions
            this->PeriodicConditionProjectionCorrection(rModelPart);
            // Pass 3: divide accumulated projections by the nodal area.
            #pragma omp parallel for firstprivate(nbegin,nnodes)
            for(int i=0; i<nnodes; ++i)
            {
                auto ind = nbegin + i;
                // Guard against division by zero on nodes no element touched.
                if (ind->FastGetSolutionStepValue(NODAL_AREA) == 0.0)
                {
                    ind->FastGetSolutionStepValue(NODAL_AREA) = 1.0;
                    //KRATOS_WATCH("*********ATTENTION: NODAL AREA IS ZERRROOOO************");
                }
                const double Area = ind->FastGetSolutionStepValue(NODAL_AREA);
                ind->FastGetSolutionStepValue(ADVPROJ) /= Area;
                ind->FastGetSolutionStepValue(DIVPROJ) /= Area;
            }
        }
    }
    /** Finalize the step: compute the Bossak-relaxed acceleration per node,
     * assemble nodal REACTION values from the elemental dynamic RHS, and
     * forward to the base Scheme's FinalizeSolutionStep.
     */
    void FinalizeSolutionStep(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override
    {
        // Per-thread scratch (firstprivate below gives each thread its own copy).
        Element::EquationIdVectorType EquationId;
        LocalSystemVectorType RHS_Contribution;
        LocalSystemMatrixType LHS_Contribution;
        ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
        // Pass 1: reset reactions and store the relaxed acceleration on each node.
        //for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); ++itNode)
        #pragma omp parallel for
        for(int k = 0; k<static_cast<int>(rModelPart.Nodes().size()); k++)
        {
            auto itNode = rModelPart.NodesBegin() + k;
            (itNode->FastGetSolutionStepValue(REACTION)).clear();
            // calculating relaxed acceleration: (1-alpha)*a_new + alpha*a_old
            const array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0);
            const array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1);
            const array_1d<double, 3> relaxed_acceleration = (1 - mAlphaBossak) * CurrentAcceleration
                                                             + mAlphaBossak * OldAcceleration;
            (itNode)->SetValue(RELAXED_ACCELERATION, relaxed_acceleration);
        }
        // Pass 2: rebuild each element's dynamic RHS and subtract it from the
        // nodal REACTION values (atomics guard nodes shared between elements).
        //for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin(); itElem != rModelPart.Elements().ptr_end(); ++itElem)
        #pragma omp parallel for firstprivate(EquationId,RHS_Contribution,LHS_Contribution)
        for(int k = 0; k<static_cast<int>(rModelPart.Elements().size()); k++)
        {
            auto itElem = rModelPart.Elements().ptr_begin()+k;
            int thread_id = OpenMPUtils::ThisThread();
            (*itElem)->InitializeNonLinearIteration(CurrentProcessInfo);
            //KRATOS_WATCH(LHS_Contribution);
            //basic operations for the element considered
            (*itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
            //std::cout << rCurrentElement->Id() << " RHS = " << RHS_Contribution << std::endl;
            (*itElem)->CalculateMassMatrix(mMass[thread_id], CurrentProcessInfo);
            (*itElem)->CalculateLocalVelocityContribution(mDamp[thread_id], RHS_Contribution, CurrentProcessInfo);
            (*itElem)->EquationIdVector(EquationId, CurrentProcessInfo);
            //adding the dynamic contributions (statics is already included)
            AddDynamicsToLHS(LHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo);
            AddDynamicsToRHS((*itElem), RHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo);
            GeometryType& rGeom = (*itElem)->GetGeometry();
            unsigned int NumNodes = rGeom.PointsNumber();
            unsigned int Dimension = rGeom.WorkingSpaceDimension();
            // RHS layout per node: [vx, vy, (vz), p] -- hence the index bookkeeping.
            unsigned int index = 0;
            for (unsigned int i = 0; i < NumNodes; i++)
            {
                auto& reaction = rGeom[i].FastGetSolutionStepValue(REACTION);
                double& target_value0 = reaction[0];
                const double& origin_value0 = RHS_Contribution[index++];
                #pragma omp atomic
                target_value0 -= origin_value0;
                double& target_value1 = reaction[1];
                const double& origin_value1 = RHS_Contribution[index++];
                #pragma omp atomic
                target_value1 -= origin_value1;
                if (Dimension == 3)
                {
                    double& target_value2 = reaction[2];
                    const double& origin_value2 = RHS_Contribution[index++];
                    #pragma omp atomic
                    target_value2 -= origin_value2;
                }
                // rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++];
                // rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++];
                // if (Dimension == 3) rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++];
                index++; // skip pressure dof
            }
        }
        // Sum reactions across MPI partitions.
        rModelPart.GetCommunicator().AssembleCurrentData(REACTION);
        // Base scheme calls FinalizeSolutionStep method of elements and conditions
        Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, A, Dx, b);
    }
//************************************************************************************************
//************************************************************************************************
    /// Free memory allocated by this object.
    void Clear() override
    {
        // Release the DOF updater's internal storage ('this->' keeps the
        // member lookup dependent in this class template).
        this->mpDofUpdater->Clear();
    }
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
    double mAlphaBossak;    // Bossak alpha: weights old vs new acceleration in the inertial term.
    double mBetaNewmark;    // Newmark beta, set to 0.25*(1-alpha)^2 in the constructors.
    double mGammaNewmark;   // Newmark gamma, set to 0.5 - alpha in the constructors.
    double mMeshVelocity;   // Mesh movement strategy flag (2 == Lagrangian displacement update).
    double mRelaxationFactor = 1.0; // Scaling applied to the solution increment Dv in Update().
    // Integration constants, recomputed from DELTA_TIME in InitializeSolutionStep():
    double ma0;   // 1/(gamma*dt)
    double ma1;   // dt*beta/gamma
    double ma2;   // (gamma-1)/gamma
    double ma3;   // dt
    double ma4;   // dt^2*(1-2*beta)/2
    double ma5;   // dt^2*beta
    double mam;   // (1-alpha)/(gamma*dt)
    // Per-OpenMP-thread scratch storage, sized NumThreads in the constructors:
    std::vector< Matrix > mMass;     // elemental mass matrices
    std::vector< Matrix > mDamp;     // elemental damping / velocity-contribution matrices
    std::vector< Vector > mvel;      // NOTE(review): not used in the visible code -- confirm before removing.
    std::vector< Vector > macc;      // current accelerations (GetSecondDerivativesVector, step 0)
    std::vector< Vector > maccold;   // old accelerations (GetSecondDerivativesVector, step 1)
/*@} */
/**@name Protected Operators*/
/*@{ */
/** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on
* both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n
* 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n
* 2- The non-historical containers are added across processes, communicating the right value from the condition owner to all partitions.\n
* 3- The value on all periodic nodes is replaced by the one received in step 2.
*/
void PeriodicConditionProjectionCorrection(ModelPart& rModelPart)
{
const int num_nodes = rModelPart.NumberOfNodes();
const int num_conditions = rModelPart.NumberOfConditions();
#pragma omp parallel for
for (int i = 0; i < num_nodes; i++) {
auto it_node = rModelPart.NodesBegin() + i;
it_node->SetValue(NODAL_AREA,0.0);
it_node->SetValue(ADVPROJ,ZeroVector(3));
it_node->SetValue(DIVPROJ,0.0);
}
#pragma omp parallel for
for (int i = 0; i < num_conditions; i++) {
auto it_cond = rModelPart.ConditionsBegin() + i;
if(it_cond->Is(PERIODIC)) {
this->AssemblePeriodicContributionToProjections(it_cond->GetGeometry());
}
}
rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA);
rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ);
rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);
#pragma omp parallel for
for (int i = 0; i < num_nodes; i++) {
auto it_node = rModelPart.NodesBegin() + i;
this->CorrectContributionsOnPeriodicNode(*it_node);
}
}
void AssemblePeriodicContributionToProjections(Geometry< Node<3> >& rGeometry)
{
unsigned int nodes_in_cond = rGeometry.PointsNumber();
double nodal_area = 0.0;
array_1d<double,3> momentum_projection = ZeroVector(3);
double mass_projection = 0.0;
for ( unsigned int i = 0; i < nodes_in_cond; i++ )
{
auto& r_node = rGeometry[i];
nodal_area += r_node.FastGetSolutionStepValue(NODAL_AREA);
noalias(momentum_projection) += r_node.FastGetSolutionStepValue(ADVPROJ);
mass_projection += r_node.FastGetSolutionStepValue(DIVPROJ);
}
for ( unsigned int i = 0; i < nodes_in_cond; i++ )
{
auto& r_node = rGeometry[i];
/* Note that this loop is expected to be threadsafe in normal conditions,
* since each node should belong to a single periodic link. However, I am
* setting the locks for openmp in case that we try more complicated things
* in the future (like having different periodic conditions for different
* coordinate directions).
*/
r_node.SetLock();
r_node.GetValue(NODAL_AREA) = nodal_area;
noalias(r_node.GetValue(ADVPROJ)) = momentum_projection;
r_node.GetValue(DIVPROJ) = mass_projection;
r_node.UnSetLock();
}
}
void CorrectContributionsOnPeriodicNode(Node<3>& rNode)
{
if (rNode.GetValue(NODAL_AREA) != 0.0) // Only periodic nodes will have a non-historical NODAL_AREA set.
{
rNode.FastGetSolutionStepValue(NODAL_AREA) = rNode.GetValue(NODAL_AREA);
noalias(rNode.FastGetSolutionStepValue(ADVPROJ)) = rNode.GetValue(ADVPROJ);
rNode.FastGetSolutionStepValue(DIVPROJ) = rNode.GetValue(DIVPROJ);
}
}
//*********************************************************************************
//Updating first time Derivative
//*********************************************************************************
    /** Newmark displacement update:
     *  d_new = d_old + dt*v_old + dt^2*(1-2*beta)/2 * a_old + dt^2*beta * a_new
     *  where ma3, ma4, ma5 are precomputed in InitializeSolutionStep().
     */
    void UpdateDisplacement(array_1d<double, 3 > & CurrentDisplacement,
                            const array_1d<double, 3 > & OldDisplacement,
                            const array_1d<double, 3 > & OldVelocity,
                            const array_1d<double, 3 > & OldAcceleration,
                            const array_1d<double, 3 > & CurrentAcceleration)
    {
        noalias(CurrentDisplacement) = OldDisplacement + ma3 * OldVelocity + ma4 * OldAcceleration + ma5*CurrentAcceleration;
    }
//**************************************************************************
void UpdateAcceleration(array_1d<double, 3 > & CurrentAcceleration,
                        const array_1d<double, 3 > & DeltaVel,
                        const array_1d<double, 3 > & OldAcceleration)
{
    // Acceleration update of the scheme:
    //   acc_new = ma0 * delta_v + ma2 * acc_old
    // with ma0/ma2 the member time-integration coefficients.
    noalias(CurrentAcceleration) = ma0 * DeltaVel;
    noalias(CurrentAcceleration) += ma2 * OldAcceleration;
}
//****************************************************************************
/**
Kdyn = am*M + D + a1*K
*/
void AddDynamicsToLHS(LocalSystemMatrixType& LHS_Contribution,
    LocalSystemMatrixType& D,
    LocalSystemMatrixType& M,
    ProcessInfo& CurrentProcessInfo)
{
    // Multiply by the time scheme factor: LHS holds K on entry, a1*K after this
    LHS_Contribution *= ma1;
    // Adding mass contribution to the dynamic stiffness (scaled by the Bossak
    // mass coefficient mam)
    if (M.size1() != 0) // if M matrix declared
    {
        noalias(LHS_Contribution) += mam*M;
    }
    // Adding damping contribution
    if (D.size1() != 0) // if D matrix declared
    {
        noalias(LHS_Contribution) += D;
    }
}
//****************************************************************************
/// Add Bossak contributions from the inertial term to the RHS vector.
/** This essentially performs bdyn = b - M*acc for the current element.
* Note that viscous/pressure contributions to the RHS are expected to be added by the element itself.
* @param[in] rCurrentElement The fluid element we are assembling.
* @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added.
* @param[in] rD The elemental velocity/pressure LHS matrix.
* @param[in] rM The elemental acceleration LHS matrix.
* @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart.
*/
void AddDynamicsToRHS(Element::Pointer rCurrentElement,
                      LocalSystemVectorType& rRHS_Contribution,
                      LocalSystemMatrixType& rD,
                      LocalSystemMatrixType& rM,
                      ProcessInfo& rCurrentProcessInfo)
{
    // Nothing to add when the element provided no mass matrix.
    if (rM.size1() == 0)
        return;

    // Per-thread scratch vectors (macc/maccold members) avoid reallocation.
    const int thread_id = OpenMPUtils::ThisThread();

    // Bossak-weighted acceleration: (1 - alpha)*acc_current + alpha*acc_old
    rCurrentElement->GetSecondDerivativesVector(macc[thread_id], 0);
    macc[thread_id] *= (1.00 - mAlphaBossak);
    rCurrentElement->GetSecondDerivativesVector(maccold[thread_id], 1);
    noalias(macc[thread_id]) += mAlphaBossak * maccold[thread_id];

    // b -= M * acc (inertial contribution to the residual)
    noalias(rRHS_Contribution) -= prod(rM, macc[thread_id]);
}
/// Add Bossak contributions from the inertial term to the RHS vector.
/** This essentially performs bdyn = b - M*acc for the current condition.
* Note that viscous/pressure contributions to the RHS are expected to be added by the element condition.
* @param[in] rCurrentCondition The fluid condition we are assembling.
* @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added.
* @param[in] rD The elemental velocity/pressure LHS matrix.
* @param[in] rM The elemental acceleration LHS matrix.
* @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart.
*/
void AddDynamicsToRHS(
Condition::Pointer rCurrentCondition,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& D,
LocalSystemMatrixType& rM,
ProcessInfo& rCurrentProcessInfo)
{
//adding inertia contribution
if (rM.size1() != 0)
{
int k = OpenMPUtils::ThisThread();
rCurrentCondition->GetSecondDerivativesVector(macc[k], 0);
(macc[k]) *= (1.00 - mAlphaBossak);
rCurrentCondition->GetSecondDerivativesVector(maccold[k], 1);
noalias(macc[k]) += mAlphaBossak * maccold[k];
noalias(rRHS_Contribution) -= prod(rM, macc[k]);
}
}
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool;
const Variable<int>& mrPeriodicIdVar;
Process::Pointer mpTurbulenceModel;
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();
/*@} */
/**@name Private Operators*/
/*@{ */
/*@} */
/**@name Private Operations*/
/*@{ */
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class Scheme */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_BOSSAK_SCHEME defined */
|
residualbased_block_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Collaborators: Vicente Mataix
//
//
#if !defined(KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER
/* System includes */
#include <unordered_set>
/* External includes */
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "includes/key_hash.h"
#include "utilities/timer.h"
#include "utilities/variable_utils.h"
#include "includes/kratos_flags.h"
#include "includes/lock_object.h"
#include "utilities/sparse_matrix_multiplication_utility.h"
#include "utilities/builtin_timer.h"
#include "utilities/atomic_utilities.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBlockBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similar to the calculation of the total residual
* @tparam TSparseSpace The sparse system considered
* @tparam TDenseSpace The dense system considered
* @tparam TLinearSolver The linear solver considered
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedBlockBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/// Definition of the flags
KRATOS_DEFINE_LOCAL_FLAG( SILENT_WARNINGS );
// Scaling enum
enum class SCALING_DIAGONAL {NO_SCALING = 0, CONSIDER_NORM_DIAGONAL = 1, CONSIDER_MAX_DIAGONAL = 2, CONSIDER_PRESCRIBED_DIAGONAL = 3};
/// Definition of the pointer
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedBlockBuilderAndSolver);
/// Definition of the base class
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
/// The definition of the current class
typedef ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
// The size_t types
typedef std::size_t SizeType;
typedef std::size_t IndexType;
/// Definition of the classes from the base class
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
/// Additional definitions
typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType;
typedef Element::EquationIdVectorType EquationIdVectorType;
typedef Element::DofsVectorType DofsVectorType;
typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType;
/// DoF types definition
typedef Node<3> NodeType;
typedef typename NodeType::DofType DofType;
typedef typename DofType::Pointer DofPointerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
*/
explicit ResidualBasedBlockBuilderAndSolver() : BaseType()
{
    // Default-constructs the base class only; no linear solver is assigned here.
}
/**
* @brief Default constructor. (with parameters)
*/
explicit ResidualBasedBlockBuilderAndSolver(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
    ) : BaseType(pNewLinearSystemSolver)
{
    // Merge the user-supplied parameters with the class defaults, then
    // apply the resulting settings to this instance.
    ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
    this->AssignSettings(ThisParameters);
}
/**
* @brief Default constructor.
*/
explicit ResidualBasedBlockBuilderAndSolver(typename TLinearSolver::Pointer pNewLinearSystemSolver)
    : BaseType(pNewLinearSystemSolver)
{
    // When constructed without a Parameters object, diagonal scaling is disabled.
    mScalingDiagonal = SCALING_DIAGONAL::NO_SCALING;
}
/** Destructor.
*/
~ResidualBasedBlockBuilderAndSolver() override
{
    // No manual cleanup required; members release their own resources.
}
/**
* @brief Create method
* @param pNewLinearSystemSolver The linear solver for the system of equations
* @param ThisParameters The configuration parameters
*/
typename BaseType::Pointer Create(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
    ) const override
{
    // Factory: construct a new instance of this builder-and-solver
    // configured with the given linear solver and parameters.
    auto p_new_builder_and_solver = Kratos::make_shared<ClassType>(pNewLinearSystemSolver, ThisParameters);
    return p_new_builder_and_solver;
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Function to perform the build of the RHS. The vector could be sized as the total number
* of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param b The RHS vector
*/
void Build(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& b) override
{
    KRATOS_TRY
    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
    // Getting the elements from the model
    const int nelements = static_cast<int>(rModelPart.Elements().size());
    // Getting the array of the conditions
    const int nconditions = static_cast<int>(rModelPart.Conditions().size());
    const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
    ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
    ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
    // Contributions to the system: local LHS/RHS buffers, resized by the
    // scheme for each entity as needed
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
    // Vector containing the localization in the system of the different
    // terms (global equation ids of the local contribution)
    Element::EquationIdVectorType EquationId;
    // Assemble all elements and conditions in parallel; the local buffers are
    // firstprivate so every thread works on its own copies
    const auto timer = BuiltinTimer();
    #pragma omp parallel firstprivate(nelements,nconditions, LHS_Contribution, RHS_Contribution, EquationId )
    {
        # pragma omp for schedule(guided, 512) nowait
        for (int k = 0; k < nelements; k++)
        {
            ModelPart::ElementsContainerType::iterator it = el_begin + k;
            // Detect if the element is active or not. If the user did not make
            // any choice the element is active by default
            bool element_is_active = true;
            if ((it)->IsDefined(ACTIVE))
                element_is_active = (it)->Is(ACTIVE);
            if (element_is_active)
            {
                // Calculate elemental contribution
                pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
                // Assemble the elemental contribution into the global system
                Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
            }
        }
        #pragma omp for schedule(guided, 512)
        for (int k = 0; k < nconditions; k++)
        {
            ModelPart::ConditionsContainerType::iterator it = cond_begin + k;
            // Detect if the condition is active or not. If the user did not make
            // any choice the condition is active by default
            bool condition_is_active = true;
            if ((it)->IsDefined(ACTIVE))
                condition_is_active = (it)->Is(ACTIVE);
            if (condition_is_active)
            {
                // Calculate the condition contribution
                pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
                // Assemble the condition contribution into the global system
                Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
            }
        }
    }
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >= 1) << "Build time: " << timer.ElapsedSeconds() << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl;
    KRATOS_CATCH("")
}
/**
* @brief Function to perform the building of the LHS
* @details Depending on the implementation chosen the size of the matrix could
* be equal to the total number of Dofs or to the number of unrestrained dofs
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA
    ) override
{
    KRATOS_TRY
    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
    // Getting the elements from the model
    const int nelements = static_cast<int>(rModelPart.Elements().size());
    // Getting the array of the conditions
    const int nconditions = static_cast<int>(rModelPart.Conditions().size());
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
    const auto it_elem_begin = rModelPart.ElementsBegin();
    const auto it_cond_begin = rModelPart.ConditionsBegin();
    // Contributions to the system: local LHS buffer, resized by the scheme per entity
    LocalSystemMatrixType lhs_contribution(0, 0);
    // Vector containing the localization in the system of the different terms
    Element::EquationIdVectorType equation_id;
    // Assemble all elements and conditions in parallel; the local buffers are
    // firstprivate so every thread works on its own copies
    const auto timer = BuiltinTimer();
    #pragma omp parallel firstprivate(nelements, nconditions, lhs_contribution, equation_id )
    {
        # pragma omp for schedule(guided, 512) nowait
        for (int k = 0; k < nelements; ++k) {
            auto it_elem = it_elem_begin + k;
            // Detect if the element is active or not. If the user did not make any choice the element is active by default
            bool element_is_active = true;
            if (it_elem->IsDefined(ACTIVE))
                element_is_active = it_elem->Is(ACTIVE);
            if (element_is_active) {
                // Calculate elemental contribution (LHS only)
                pScheme->CalculateLHSContribution(*it_elem, lhs_contribution, equation_id, r_current_process_info);
                // Assemble the elemental contribution into the global matrix
                AssembleLHS(rA, lhs_contribution, equation_id);
            }
        }
        #pragma omp for schedule(guided, 512)
        for (int k = 0; k < nconditions; ++k) {
            auto it_cond = it_cond_begin + k;
            // Detect if the condition is active or not. If the user did not make any choice the condition is active by default
            bool condition_is_active = true;
            if (it_cond->IsDefined(ACTIVE))
                condition_is_active = it_cond->Is(ACTIVE);
            if (condition_is_active)
            {
                // Calculate the condition contribution (LHS only)
                pScheme->CalculateLHSContribution(*it_cond, lhs_contribution, equation_id, r_current_process_info);
                // Assemble the condition contribution into the global matrix
                AssembleLHS(rA, lhs_contribution, equation_id);
            }
        }
    }
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >= 1) << "Build time LHS: " << timer.ElapsedSeconds() << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 2) << "Finished parallel building LHS" << std::endl;
    KRATOS_CATCH("")
}
/**
* @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
* and "N" is the total number of degrees of freedom involved.
* @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
* degrees of freedom (but keeping the columns!!)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS_CompleteOnFreeRows(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A) override
{
    KRATOS_TRY

    // Delegate to the full build; the RHS result is not needed, so a
    // zero-initialized scratch vector stands in for b.
    TSystemVectorType scratch_rhs(A.size1(), 0.0);
    this->Build(pScheme, rModelPart, A, scratch_rhs);

    KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void SystemSolve(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b
    ) override
{
    KRATOS_TRY

    // Skip the solver call entirely when the RHS is empty or identically zero.
    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (norm_b != 0.00) {
        // Do solve
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    } else {
        TSparseSpace::SetToZero(Dx);
    }

    // If there are master-slave constraints, recover the solution of the
    // original problem from the solved unknowns: Dx = mT * Dx
    if (mT.size1() != 0) {
        TSystemVectorType dx_modified = Dx;
        TSparseSpace::Mult(mT, dx_modified, Dx);
    }

    // Prints information about the linear solver at high echo levels
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
* @param rModelPart The model part of the problem to solve
*/
virtual void SystemSolveWithPhysics(
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb,
    ModelPart& rModelPart
    )
{
    if (rModelPart.MasterSlaveConstraints().size() == 0) {
        // No master-slave constraints: solve directly for the problem unknowns
        InternalSystemSolveWithPhysics(rA, rDx, rb, rModelPart);
        return;
    }

    // With master-slave constraints: solve the modified system, then recover
    // the solution of the original problem through the relation matrix mT.
    TSystemVectorType dx_modified(rb.size());
    InternalSystemSolveWithPhysics(rA, dx_modified, rb, rModelPart);
    TSparseSpace::Mult(mT, dx_modified, rDx);
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void InternalSystemSolveWithPhysics(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY

    // An empty or identically-zero RHS means the increment is zero: skip the solver.
    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (norm_b != 0.00) {
        // Some solvers require extra problem data (DofSet, ModelPart) — provide it on demand
        if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);

        // Do solve
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    } else {
        TSparseSpace::SetToZero(Dx);
        KRATOS_WARNING_IF("ResidualBasedBlockBuilderAndSolver", mOptions.IsNot(SILENT_WARNINGS)) << "ATTENTION! setting the RHS to zero!" << std::endl;
    }

    // Prints information about the linear solver at high echo levels
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safer function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY
    // 1. Assemble LHS and RHS from all elements and conditions
    Timer::Start("Build");
    Build(pScheme, rModelPart, A, b);
    Timer::Stop("Build");
    // 2. Apply master-slave constraints to the assembled system, if any
    if(rModelPart.MasterSlaveConstraints().size() != 0) {
        const auto timer_constraints = BuiltinTimer();
        Timer::Start("ApplyConstraints");
        ApplyConstraints(pScheme, rModelPart, A, b);
        Timer::Stop("ApplyConstraints");
        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >=1) << "Constraints build time: " << timer_constraints.ElapsedSeconds() << std::endl;
    }
    // 3. Impose the Dirichlet (fixed DoF) conditions
    ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
    // 4. Solve for the unknown increments
    const auto timer = BuiltinTimer();
    Timer::Start("Solve");
    SystemSolveWithPhysics(A, Dx, b, rModelPart);
    Timer::Stop("Solve");
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << timer.ElapsedSeconds() << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
    KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time Linearizing with the database at the old iteration
* @details It is ideally the fastest and safer function to use when it is possible to solve just after building
* @param pScheme The pointer to the integration scheme
* @param rModelPart The model part to compute
* @param rA The LHS matrix of the system of equations
* @param rDx The vector of unknowns
* @param rb The RHS vector of the system of equations
* @param MoveMesh tells if the update of the scheme needs to be performed when calling the Update of the scheme
*/
void BuildAndSolveLinearizedOnPreviousIteration(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb,
    const bool MoveMesh
    ) override
{
    Timer::Start("Linearizing on Old iteration");
    KRATOS_INFO_IF("BlockBuilderAndSolver", this->GetEchoLevel() > 0) << "Linearizing on Old iteration" << std::endl;
    // A buffer size of at least 2 is required to access the converged values
    // of the previous step (GetSolutionStepValue(1) below)
    KRATOS_ERROR_IF(rModelPart.GetBufferSize() == 1) << "BlockBuilderAndSolver: \n"
        << "The buffer size needs to be at least 2 in order to use \n"
        << "BuildAndSolveLinearizedOnPreviousIteration \n"
        << "current buffer size for modelpart: " << rModelPart.Name() << std::endl
        << "is :" << rModelPart.GetBufferSize()
        << " Please set IN THE STRATEGY SETTINGS "
        << " UseOldStiffnessInFirstIteration=false " << std::endl;
    // Temporarily free every fixed DoF (remembering them in fixed_dofs) so the
    // update below treats the whole system uniformly; fixity is restored later
    DofsArrayType fixed_dofs;
    for(auto& r_dof : BaseType::mDofSet){
        if(r_dof.IsFixed()){
            fixed_dofs.push_back(&r_dof);
            r_dof.FreeDof();
        }
    }
    //TODO: Here we need to take the vector from other ones because
    // We cannot create a trilinos vector without a communicator. To be improved!
    TSystemVectorType dx_prediction(rDx);
    TSystemVectorType rhs_addition(rb); //we know it is zero here, so we do not need to set it
    // Here we bring back the database to before the prediction,
    // but we store the prediction increment in dx_prediction.
    // The goal is that the stiffness is computed with the
    // converged configuration at the end of the previous step.
    block_for_each(BaseType::mDofSet, [&](Dof<double>& rDof){
        // NOTE: this is initialized to MINUS the value of the dx prediction
        dx_prediction[rDof.EquationId()] = -(rDof.GetSolutionStepValue() - rDof.GetSolutionStepValue(1));
    });
    // Use UpdateDatabase to bring back the solution to how it was at the end of the previous step
    pScheme->Update(rModelPart, BaseType::mDofSet, rA, dx_prediction, rb);
    if (MoveMesh) {
        VariableUtils().UpdateCurrentPosition(rModelPart.Nodes(),DISPLACEMENT,0);
    }
    Timer::Stop("Linearizing on Old iteration");
    // Assemble LHS and RHS on the previous-step configuration
    Timer::Start("Build");
    this->Build(pScheme, rModelPart, rA, rb);
    Timer::Stop("Build");
    // Put back the prediction into the database
    TSparseSpace::InplaceMult(dx_prediction, -1.0); //change sign to dx_prediction
    TSparseSpace::UnaliasedAdd(rDx, 1.0, dx_prediction);
    // Use UpdateDatabase to bring back the solution
    // to where it was taking into account BCs
    // it is done here so that constraints are correctly taken into account right after
    pScheme->Update(rModelPart, BaseType::mDofSet, rA, dx_prediction, rb);
    if (MoveMesh) {
        VariableUtils().UpdateCurrentPosition(rModelPart.Nodes(),DISPLACEMENT,0);
    }
    // Apply rb -= A*dx_prediction
    TSparseSpace::Mult(rA, dx_prediction, rhs_addition);
    TSparseSpace::UnaliasedAdd(rb, -1.0, rhs_addition);
    // Restore the fixity of the DoFs freed above
    for(auto& dof : fixed_dofs)
        dof.FixDof();
    // Apply master-slave constraints to the assembled system, if any
    if (!rModelPart.MasterSlaveConstraints().empty()) {
        const auto timer_constraints = BuiltinTimer();
        Timer::Start("ApplyConstraints");
        this->ApplyConstraints(pScheme, rModelPart, rA, rb);
        Timer::Stop("ApplyConstraints");
        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >=1) << "Constraints build time: " << timer_constraints.ElapsedSeconds() << std::endl;
    }
    // Impose Dirichlet conditions and solve
    this->ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
    const auto timer = BuiltinTimer();
    Timer::Start("Solve");
    this->SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
    Timer::Stop("Solve");
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << timer.ElapsedSeconds() << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
}
/**
* @brief Corresponds to the previous function, but the System's matrix is considered already built and only the RHS is built again
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void BuildRHSAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY
    // Rebuild only the RHS; rA is assumed to be already built
    BuildRHS(pScheme, rModelPart, rb);
    // Apply master-slave constraints to the RHS, if any are present
    if(rModelPart.MasterSlaveConstraints().size() != 0) {
        Timer::Start("ApplyRHSConstraints");
        ApplyRHSConstraints(pScheme, rModelPart, rb);
        Timer::Stop("ApplyRHSConstraints");
    }
    // Impose Dirichlet conditions, then solve
    ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
    const auto timer = BuiltinTimer();
    Timer::Start("Solve");
    SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
    Timer::Stop("Solve");
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << timer.ElapsedSeconds() << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
    KRATOS_CATCH("")
}
/**
* @brief Function to perform the build of the RHS.
* @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void BuildRHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& b) override
{
    KRATOS_TRY

    Timer::Start("BuildRHS");

    // Assemble the residual without considering the Dirichlet conditions...
    BuildRHSNoDirichlet(pScheme, rModelPart, b);

    // ...then zero the entries corresponding to fixed DoFs.
    // NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    block_for_each(BaseType::mDofSet, [&b](Dof<double>& rDof){
        if (rDof.IsFixed()) {
            b[rDof.EquationId()] = 0.0;
        }
    });

    Timer::Stop("BuildRHS");

    KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart
    ) override
{
    KRATOS_TRY;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;
    // Gets the array of elements from the modeler
    ElementsArrayType& r_elements_array = rModelPart.Elements();
    const int number_of_elements = static_cast<int>(r_elements_array.size());
    DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations
    unsigned int nthreads = ParallelUtilities::GetNumThreads();
    typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of threads" << nthreads << "\n" << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl;
    /**
     * dof_global_set collects every DoF of the system. Each thread fills its
     * own temporary set below and merges it into this one under a critical
     * section, avoiding contention during the entity loops.
     */
    set_type dof_global_set;
    dof_global_set.reserve(number_of_elements*20);
    #pragma omp parallel firstprivate(dof_list, second_dof_list)
    {
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        // We create the temporal set and we reserve some space on it
        set_type dofs_tmp_set;
        dofs_tmp_set.reserve(20000);
        // Gets the array of elements from the modeler
        #pragma omp for schedule(guided, 512) nowait
        for (int i = 0; i < number_of_elements; ++i) {
            auto it_elem = r_elements_array.begin() + i;
            // Gets list of Dof involved on every element
            pScheme->GetDofList(*it_elem, dof_list, r_current_process_info);
            dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
        }
        // Gets the array of conditions from the modeler
        ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
        const int number_of_conditions = static_cast<int>(r_conditions_array.size());
        #pragma omp for schedule(guided, 512) nowait
        for (int i = 0; i < number_of_conditions; ++i) {
            auto it_cond = r_conditions_array.begin() + i;
            // Gets list of Dof involved on every condition
            pScheme->GetDofList(*it_cond, dof_list, r_current_process_info);
            dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
        }
        // Gets the array of constraints from the modeler
        auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
        const int number_of_constraints = static_cast<int>(r_constraints_array.size());
        #pragma omp for schedule(guided, 512) nowait
        for (int i = 0; i < number_of_constraints; ++i) {
            auto it_const = r_constraints_array.begin() + i;
            // Gets list of Dof involved on every constraint (slave and master lists)
            it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
            dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
            dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end());
        }
        // We merge all the sets in one thread
        #pragma omp critical
        {
            dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
        }
    }
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl;
    // Copy the (unordered) global set into the member DofSet and sort it
    DofsArrayType Doftemp;
    BaseType::mDofSet = DofsArrayType();
    Doftemp.reserve(dof_global_set.size());
    for (auto it= dof_global_set.begin(); it!= dof_global_set.end(); it++)
    {
        Doftemp.push_back( *it );
    }
    Doftemp.Sort();
    BaseType::mDofSet = Doftemp;
    //Throws an exception if there are no Degrees Of Freedom involved in the analysis
    KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;
    BaseType::mDofSetIsInitialized = true;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;
#ifdef KRATOS_DEBUG
    // If reactions are to be calculated, we check if all the dofs have reactions defined
    // This is to be done only in debug mode
    if (BaseType::GetCalculateReactionsFlag()) {
        for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
            KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
                << "Node : "<<dof_iterator->Id()<< std::endl
                << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
        }
    }
#endif
    KRATOS_CATCH("");
}
/**
 * @brief Organises the dofset in order to speed up the building phase
 * @details With the block builder the equation system size equals the total
 * number of dofs; equation ids are assigned consecutively in dof-set order.
 * @param rModelPart The model part of the problem to solve
 */
void SetUpSystem(
    ModelPart& rModelPart
    ) override
{
    // The whole dof set enters the system (no free/fixed partitioning here)
    BaseType::mEquationSystemSize = BaseType::mDofSet.size();

    // Assign consecutive equation ids following the dof-set ordering
    IndexPartition<std::size_t>(BaseType::mDofSet.size()).for_each([this](std::size_t i){
        (this->mDofSet.begin() + i)->SetEquationId(i);
    });
}
//**************************************************************************
//**************************************************************************

/**
 * @brief Resizes and initializes the system matrix and vectors to the current equation system size
 * @details Allocates the pointers when empty, (re)builds the sparse graph when needed and
 * finally builds the master-slave constraints relation structure.
 * @param pScheme The integration scheme considered
 * @param pA Pointer to the LHS matrix
 * @param pDx Pointer to the unknowns vector
 * @param pb Pointer to the RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void ResizeAndInitializeVectors(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixPointerType& pA,
    TSystemVectorPointerType& pDx,
    TSystemVectorPointerType& pb,
    ModelPart& rModelPart
    ) override
{
    KRATOS_TRY

    // If any pointer is not initialized, initialize it to an empty matrix/vector
    if (pA == nullptr) {
        TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
        pA.swap(pNewA);
    }
    if (pDx == nullptr) {
        TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
        pDx.swap(pNewDx);
    }
    if (pb == nullptr) {
        TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
        pb.swap(pNewb);
    }

    TSystemMatrixType& A = *pA;
    TSystemVectorType& Dx = *pDx;
    TSystemVectorType& b = *pb;

    // Resizing the system vectors and matrix
    if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag()) { // If the matrix is not initialized (or reshaping is requested)
        A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        ConstructMatrixStructure(pScheme, A, rModelPart);
    } else if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) {
        // A size change mid-simulation is not supported: fail loudly. NOTE: the resize and
        // graph rebuild that used to follow this macro were unreachable (KRATOS_ERROR throws)
        // and have been removed.
        KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permitted." << std::endl;
    }
    if (Dx.size() != BaseType::mEquationSystemSize)
        Dx.resize(BaseType::mEquationSystemSize, false);
    TSparseSpace::SetToZero(Dx);
    if (b.size() != BaseType::mEquationSystemSize) {
        b.resize(BaseType::mEquationSystemSize, false);
    }
    TSparseSpace::SetToZero(b);

    // The constraints relation structure is rebuilt alongside the system
    ConstructMasterSlaveConstraintsStructure(rModelPart);

    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************

/**
 * @brief Computes the reaction values on the dofs
 * @details The RHS is rebuilt without Dirichlet conditions and each dof
 * stores the negative of its residual entry as reaction.
 */
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    // Refresh the RHS (without Dirichlet BCs) to obtain the true residual
    TSparseSpace::SetToZero(b);
    BuildRHSNoDirichlet(pScheme, rModelPart, b);

    // NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    block_for_each(BaseType::mDofSet, [&b](Dof<double>& rDof){
        rDof.GetSolutionStepReactionValue() = -b[rDof.EquationId()];
    });
}
/**
 * @brief Applies the dirichlet conditions. This operation may be very heavy or completely
 * unexpensive depending on the implementation choosen and on how the System Matrix is built.
 * @details For explanation of how it works for a particular implementation the user
 * should refer to the particular Builder And Solver choosen. Here: fixed rows are zeroed
 * except for their diagonal, the matching columns are zeroed, and empty rows receive a
 * scaled diagonal entry so the system stays solvable.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rA The LHS matrix
 * @param rDx The Unknowns vector
 * @param rb The RHS vector
 */
void ApplyDirichletConditions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    const std::size_t system_size = rA.size1();

    // Per-row factor: 0.0 for fixed (Dirichlet) dofs, 1.0 for free ones
    Vector scaling_factors (system_size);

    const auto it_dof_iterator_begin = BaseType::mDofSet.begin();
    // NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    IndexPartition<std::size_t>(BaseType::mDofSet.size()).for_each([&](std::size_t Index){
        auto it_dof_iterator = it_dof_iterator_begin + Index;
        if (it_dof_iterator->IsFixed()) {
            scaling_factors[Index] = 0.0;
        } else {
            scaling_factors[Index] = 1.0;
        }
    });

    // Raw CSR arrays of the system matrix
    double* Avalues = rA.value_data().begin();
    std::size_t* Arow_indices = rA.index1_data().begin();
    std::size_t* Acol_indices = rA.index2_data().begin();

    // The diagonal considered (scale written on all-zero rows below)
    mScaleFactor = GetScaleNorm(rModelPart, rA);

    // Detect if there is a line of all zeros and set the diagonal to a 1 if this happens
    // (the value actually written is mScaleFactor, keeping the matrix well conditioned)
    IndexPartition<std::size_t>(system_size).for_each([&](std::size_t Index){
        bool empty = true;

        const std::size_t col_begin = Arow_indices[Index];
        const std::size_t col_end = Arow_indices[Index + 1];

        for (std::size_t j = col_begin; j < col_end; ++j) {
            if(Avalues[j] != 0.0) {
                empty = false;
                break;
            }
        }

        if(empty) {
            rA(Index, Index) = mScaleFactor;
            rb[Index] = 0.0;
        }
    });

    // Rows are processed independently, so this loop is parallel-safe
    IndexPartition<std::size_t>(system_size).for_each([&](std::size_t Index){
        const std::size_t col_begin = Arow_indices[Index];
        const std::size_t col_end = Arow_indices[Index+1];
        const double k_factor = scaling_factors[Index];
        if (k_factor == 0.0) {
            // Zero out the whole row, except the diagonal (the assembled diagonal value is kept)
            for (std::size_t j = col_begin; j < col_end; ++j)
                if (Acol_indices[j] != Index )
                    Avalues[j] = 0.0;

            // Zero out the RHS
            rb[Index] = 0.0;
        } else {
            // Zero out the column which is associated with the zero'ed row
            for (std::size_t j = col_begin; j < col_end; ++j)
                if(scaling_factors[ Acol_indices[j] ] == 0 )
                    Avalues[j] = 0.0;
        }
    });
}
/**
 * @brief Applies the constraints with master-slave relation matrix (RHS only)
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rb The RHS vector
 */
void ApplyRHSConstraints(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY

    if (rModelPart.MasterSlaveConstraints().size() != 0) {
        // Assemble the relation matrix T and the constant vector
        BuildMasterSlaveConstraints(rModelPart);

        // b <- T^t * b : project the RHS onto the master dofs
        TSystemMatrixType transposed_T(mT.size2(), mT.size1());
        SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(transposed_T, mT, 1.0);

        TSystemVectorType projected_rhs(rb.size());
        TSparseSpace::Mult(transposed_T, rb, projected_rhs);
        TSparseSpace::Copy(projected_rhs, rb);

        // Zero the RHS entries of the active slave dofs
        IndexPartition<std::size_t>(mSlaveIds.size()).for_each([&](std::size_t Index){
            const IndexType slave_equation_id = mSlaveIds[Index];
            if (mInactiveSlaveDofs.find(slave_equation_id) == mInactiveSlaveDofs.end()) {
                rb[slave_equation_id] = 0.0;
            }
        });
    }

    KRATOS_CATCH("")
}
/**
 * @brief Applies the constraints with master-slave relation matrix
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rA The LHS matrix
 * @param rb The RHS vector
 */
void ApplyConstraints(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY

    if (rModelPart.MasterSlaveConstraints().size() != 0) {
        // Assemble the relation matrix T and the constant vector
        BuildMasterSlaveConstraints(rModelPart);

        // Compute T^t once; it is used for both the RHS and the LHS projection
        TSystemMatrixType transposed_T(mT.size2(), mT.size1());
        SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(transposed_T, mT, 1.0);

        // b <- T^t * b
        TSystemVectorType projected_rhs(rb.size());
        TSparseSpace::Mult(transposed_T, rb, projected_rhs);
        TSparseSpace::Copy(projected_rhs, rb);

        // A <- T^t * A * T, freeing each intermediate product as soon as possible
        TSystemMatrixType left_product(mT.size2(), rA.size2());
        SparseMatrixMultiplicationUtility::MatrixMultiplication(transposed_T, rA, left_product); // left_product = T^t * A
        transposed_T.resize(0, 0, false); // free memory
        SparseMatrixMultiplicationUtility::MatrixMultiplication(left_product, mT, rA); // NOTE: here we are overwriting the old A matrix!
        left_product.resize(0, 0, false); // free memory

        // Put the max diagonal value on the active slave rows and zero their RHS
        const double max_diag = GetMaxDiagonal(rA);
        IndexPartition<std::size_t>(mSlaveIds.size()).for_each([&](std::size_t Index){
            const IndexType slave_equation_id = mSlaveIds[Index];
            if (mInactiveSlaveDofs.find(slave_equation_id) == mInactiveSlaveDofs.end()) {
                rA(slave_equation_id, slave_equation_id) = max_diag;
                rb[slave_equation_id] = 0.0;
            }
        });
    }

    KRATOS_CATCH("")
}
/**
 * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
 */
void Clear() override
{
    // Let the base class release the dof set and system storage first
    BaseType::Clear();

    // Drop the constraint bookkeeping
    mInactiveSlaveDofs.clear();
    mMasterIds.clear();
    mSlaveIds.clear();

    // Shrink the relation matrix and the constant vector to zero size
    mT.resize(0, 0, false);
    mConstantVector.resize(0, false);
}
/**
 * @brief This function is designed to be called once to perform all the checks needed
 * on the input provided. Checks can be "expensive" as the function is designed
 * to catch user's errors.
 * @param rModelPart The model part of the problem to solve
 * @return 0 all ok
 */
int Check(ModelPart& rModelPart) override
{
    KRATOS_TRY

    // No additional checks are performed at this level
    return 0;

    KRATOS_CATCH("");
}
/**
 * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
 * @return The default parameters
 */
Parameters GetDefaultParameters() const override
{
    // Defaults specific to this builder and solver
    Parameters default_parameters = Parameters(R"(
    {
        "name" : "block_builder_and_solver",
        "block_builder" : true,
        "diagonal_values_for_dirichlet_dofs" : "use_max_diagonal",
        "silent_warnings" : false
    })");

    // Merge in the defaults declared by the base class
    default_parameters.RecursivelyAddMissingParameters(BaseType::GetDefaultParameters());
    return default_parameters;
}
/**
 * @brief Returns the name of the class as used in the settings (snake_case format)
 * @return The name of the class
 */
static std::string Name()
{
    // Name under which this builder and solver is registered in the settings
    const std::string registered_name = "block_builder_and_solver";
    return registered_name;
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string (here: just the class name).
std::string Info() const override
{
    return "ResidualBasedBlockBuilderAndSolver";
}
/// Print information about this object (only the class name is emitted).
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << Info();
}
/// Print object's data (no extra data beyond the class name is printed).
void PrintData(std::ostream& rOStream) const override
{
    rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
TSystemMatrixType mT;                             /// This is matrix containing the global relation for the constraints
TSystemVectorType mConstantVector;                /// This is vector containing the rigid movement of the constraint
std::vector<IndexType> mSlaveIds;                 /// The equation ids of the slaves
std::vector<IndexType> mMasterIds;                /// The equation ids of the master
std::unordered_set<IndexType> mInactiveSlaveDofs; /// The set containing the inactive slave dofs
double mScaleFactor = 1.0;                        /// The manually set scale factor (recomputed in ApplyDirichletConditions via GetScaleNorm)
SCALING_DIAGONAL mScalingDiagonal;                /// We identify the scaling considered for the dirichlet dofs
Flags mOptions;                                   /// Some flags used internally
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief Assembles the RHS of the system without applying the Dirichlet conditions
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param b The RHS vector to be assembled (zeroed by the caller)
 */
void BuildRHSNoDirichlet(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& b)
{
    KRATOS_TRY

    //Getting the Elements
    ElementsArrayType& pElements = rModelPart.Elements();

    //getting the array of the conditions
    ConditionsArrayType& ConditionsArray = rModelPart.Conditions();

    const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    //contributions to the system
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

    //vector containing the localization in the system of the different
    //terms
    Element::EquationIdVectorType EquationId;

    // assemble all elements
    //for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
    const int nelements = static_cast<int>(pElements.size());
    // Each thread owns private copies of the local RHS and equation-id containers.
    // NOTE(review): LHS_Contribution is NOT firstprivate, so the resize below runs on the
    // shared instance from every thread — it is never read in this routine, but confirm this
    // is intentional.
    #pragma omp parallel firstprivate(nelements, RHS_Contribution, EquationId)
    {
        #pragma omp for schedule(guided, 512) nowait
        for (int i=0; i<nelements; i++) {
            typename ElementsArrayType::iterator it = pElements.begin() + i;
            //detect if the element is active or not. If the user did not make any choice the element
            //is active by default
            bool element_is_active = true;
            if( (it)->IsDefined(ACTIVE) ) {
                element_is_active = (it)->Is(ACTIVE);
            }

            if(element_is_active) {
                //calculate elemental Right Hand Side Contribution
                pScheme->CalculateRHSContribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);

                //assemble the elemental contribution (atomic adds inside AssembleRHS)
                AssembleRHS(b, RHS_Contribution, EquationId);
            }
        }

        LHS_Contribution.resize(0, 0, false);
        RHS_Contribution.resize(0, false);

        // assemble all conditions
        const int nconditions = static_cast<int>(ConditionsArray.size());
        #pragma omp for schedule(guided, 512)
        for (int i = 0; i<nconditions; i++) {
            auto it = ConditionsArray.begin() + i;
            //detect if the condition is active or not. If the user did not make any choice the condition
            //is active by default
            bool condition_is_active = true;
            if( (it)->IsDefined(ACTIVE) ) {
                condition_is_active = (it)->Is(ACTIVE);
            }

            if(condition_is_active) {
                //calculate conditional contribution
                pScheme->CalculateRHSContribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);

                //assemble the conditional contribution (atomic adds inside AssembleRHS)
                AssembleRHS(b, RHS_Contribution, EquationId);
            }
        }
    }

    KRATOS_CATCH("")
}
/**
 * @brief Builds the sparsity pattern of the master-slave relation matrix mT and sizes mConstantVector
 * @details Each active constraint links its slave rows to its master columns. Every dof also keeps
 * its own diagonal entry, so dofs that are slave of no constraint act as masters of themselves.
 * @param rModelPart The model part of the problem to solve
 */
virtual void ConstructMasterSlaveConstraintsStructure(ModelPart& rModelPart)
{
    if (rModelPart.MasterSlaveConstraints().size() > 0) {
        Timer::Start("ConstraintsRelationMatrixStructure");
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Vector containing the localization in the system of the different terms
        DofsVectorType slave_dof_list, master_dof_list;

        // Constraint initial iterator
        const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin();
        std::vector<std::unordered_set<IndexType>> indices(BaseType::mDofSet.size());

        // One lock per row to protect the merge below
        std::vector<LockObject> lock_array(indices.size());

        #pragma omp parallel firstprivate(slave_dof_list, master_dof_list)
        {
            Element::EquationIdVectorType slave_ids(3);
            Element::EquationIdVectorType master_ids(3);
            // Per-thread map: slave equation id -> set of master equation ids
            std::unordered_map<IndexType, std::unordered_set<IndexType>> temp_indices;

            #pragma omp for schedule(guided, 512) nowait
            for (int i_const = 0; i_const < static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i_const) {
                auto it_const = it_const_begin + i_const;

                // Detect if the constraint is active or not. If the user did not make any choice
                // it is active by default
                bool constraint_is_active = true;
                if( it_const->IsDefined(ACTIVE) ) {
                    constraint_is_active = it_const->Is(ACTIVE);
                }

                if(constraint_is_active) {
                    it_const->EquationIdVector(slave_ids, master_ids, r_current_process_info);

                    // Slave DoFs: every slave row couples with all the masters of the constraint
                    for (auto &id_i : slave_ids) {
                        temp_indices[id_i].insert(master_ids.begin(), master_ids.end());
                    }
                }
            }

            // Merging all the temporal indices.
            // BUGFIX: the previous code looped "for (i = 0; i < temp_indices.size(); ++i)" and accessed
            // temp_indices[i]. temp_indices is an unordered_map keyed by *equation id*, not by a dense
            // 0..N-1 counter, so operator[] default-inserted spurious empty entries (mutating the map,
            // and its size, while looping over it) and the real entries were merged into wrong rows.
            // Iterate the map itself and use its keys instead.
            for (auto& r_pair : temp_indices) {
                lock_array[r_pair.first].lock();
                indices[r_pair.first].insert(r_pair.second.begin(), r_pair.second.end());
                lock_array[r_pair.first].unlock();
            }
        }

        // Classify dofs: empty row -> master, non-empty -> slave
        mSlaveIds.clear();
        mMasterIds.clear();
        for (int i = 0; i < static_cast<int>(indices.size()); ++i) {
            if (indices[i].size() == 0) // Master dof!
                mMasterIds.push_back(i);
            else // Slave dof
                mSlaveIds.push_back(i);
            indices[i].insert(i); // Ensure that the diagonal is there in T
        }

        // Count the row sizes
        std::size_t nnz = 0;
        for (IndexType i = 0; i < indices.size(); ++i)
            nnz += indices[i].size();

        mT = TSystemMatrixType(indices.size(), indices.size(), nnz);
        mConstantVector.resize(indices.size(), false);

        double *Tvalues = mT.value_data().begin();
        IndexType *Trow_indices = mT.index1_data().begin();
        IndexType *Tcol_indices = mT.index2_data().begin();

        // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        Trow_indices[0] = 0;
        for (int i = 0; i < static_cast<int>(mT.size1()); i++)
            Trow_indices[i + 1] = Trow_indices[i] + indices[i].size();

        // Fill and sort the column indices of each row (rows are independent -> parallel-safe)
        IndexPartition<std::size_t>(mT.size1()).for_each([&](std::size_t Index){
            const IndexType row_begin = Trow_indices[Index];
            const IndexType row_end = Trow_indices[Index + 1];
            IndexType k = row_begin;
            for (auto it = indices[Index].begin(); it != indices[Index].end(); ++it) {
                Tcol_indices[k] = *it;
                Tvalues[k] = 0.0;
                k++;
            }

            indices[Index].clear(); //deallocating the memory
            std::sort(&Tcol_indices[row_begin], &Tcol_indices[row_end]);
        });

        mT.set_filled(indices.size() + 1, nnz);

        Timer::Stop("ConstraintsRelationMatrixStructure");
    }
}
/**
 * @brief Assembles the values of the master-slave relation matrix mT and of mConstantVector
 * @details Active constraints scatter their local transformation matrix and constant vector;
 * master dofs and inactive slave dofs receive an identity diagonal and a zero constant instead.
 * @param rModelPart The model part of the problem to solve
 */
virtual void BuildMasterSlaveConstraints(ModelPart& rModelPart)
{
    KRATOS_TRY

    TSparseSpace::SetToZero(mT);
    TSparseSpace::SetToZero(mConstantVector);

    // The current process info
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

    // Vector containing the localization in the system of the different terms
    DofsVectorType slave_dof_list, master_dof_list;

    // Contributions to the system
    Matrix transformation_matrix = LocalSystemMatrixType(0, 0);
    Vector constant_vector = LocalSystemVectorType(0);

    // Vector containing the localization in the system of the different terms
    Element::EquationIdVectorType slave_equation_ids, master_equation_ids;

    const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());

    // We clear the set
    mInactiveSlaveDofs.clear();

    // Each thread assembles with private local-system containers; inactive slave ids are
    // collected per thread and merged under the critical section below
    #pragma omp parallel firstprivate(transformation_matrix, constant_vector, slave_equation_ids, master_equation_ids)
    {
        std::unordered_set<IndexType> auxiliar_inactive_slave_dofs;

        #pragma omp for schedule(guided, 512)
        for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
            auto it_const = rModelPart.MasterSlaveConstraints().begin() + i_const;

            // Detect if the constraint is active or not. If the user did not make any choice the constraint
            // It is active by default
            bool constraint_is_active = true;
            if (it_const->IsDefined(ACTIVE))
                constraint_is_active = it_const->Is(ACTIVE);

            if (constraint_is_active) {
                it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info);
                it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);

                for (IndexType i = 0; i < slave_equation_ids.size(); ++i) {
                    const IndexType i_global = slave_equation_ids[i];

                    // Assemble matrix row (thread-safe: atomic adds inside)
                    AssembleRowContribution(mT, transformation_matrix, i_global, i, master_equation_ids);

                    // Assemble constant vector
                    const double constant_value = constant_vector[i];
                    double& r_value = mConstantVector[i_global];
                    AtomicAdd(r_value, constant_value);
                }
            } else { // Taking into account inactive constraints
                it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);
                auxiliar_inactive_slave_dofs.insert(slave_equation_ids.begin(), slave_equation_ids.end());
            }
        }

        // We merge all the sets in one thread
        #pragma omp critical
        {
            mInactiveSlaveDofs.insert(auxiliar_inactive_slave_dofs.begin(), auxiliar_inactive_slave_dofs.end());
        }
    }

    // Setting the master dofs into the T and C system (identity mapping, zero offset)
    for (auto eq_id : mMasterIds) {
        mConstantVector[eq_id] = 0.0;
        mT(eq_id, eq_id) = 1.0;
    }

    // Setting inactive slave dofs in the T and C system (treated as masters of themselves)
    for (auto eq_id : mInactiveSlaveDofs) {
        mConstantVector[eq_id] = 0.0;
        mT(eq_id, eq_id) = 1.0;
    }

    KRATOS_CATCH("")
}
/**
 * @brief Builds the sparsity pattern (CSR graph) of the system matrix from elements, conditions and constraints
 * @param pScheme The integration scheme considered
 * @param A The system matrix whose structure is created (values are zero-initialized)
 * @param rModelPart The model part of the problem to solve
 */
virtual void ConstructMatrixStructure(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType& A,
    ModelPart& rModelPart)
{
    //filling with zero the matrix (creating the structure)
    Timer::Start("MatrixStructure");

    const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    const std::size_t equation_size = BaseType::mEquationSystemSize;

    // One lock per row protects the concurrent insertion of column indices
    std::vector< LockObject > lock_array(equation_size);

    std::vector<std::unordered_set<std::size_t> > indices(equation_size);

    block_for_each(indices, [](std::unordered_set<std::size_t>& rIndices){
        rIndices.reserve(40); // heuristic per-row capacity to limit rehashing
    });

    Element::EquationIdVectorType ids(3, 0);

    // Every element couples all of its dofs: the full id set goes into each touched row
    block_for_each(rModelPart.Elements(), ids, [&](Element& rElem, Element::EquationIdVectorType& rIdsTLS){
        pScheme->EquationId(rElem, rIdsTLS, CurrentProcessInfo);
        for (std::size_t i = 0; i < rIdsTLS.size(); i++) {
            lock_array[rIdsTLS[i]].lock();
            auto& row_indices = indices[rIdsTLS[i]];
            row_indices.insert(rIdsTLS.begin(), rIdsTLS.end());
            lock_array[rIdsTLS[i]].unlock();
        }
    });

    // Same coupling for the conditions
    block_for_each(rModelPart.Conditions(), ids, [&](Condition& rCond, Element::EquationIdVectorType& rIdsTLS){
        pScheme->EquationId(rCond, rIdsTLS, CurrentProcessInfo);
        for (std::size_t i = 0; i < rIdsTLS.size(); i++) {
            lock_array[rIdsTLS[i]].lock();
            auto& row_indices = indices[rIdsTLS[i]];
            row_indices.insert(rIdsTLS.begin(), rIdsTLS.end());
            lock_array[rIdsTLS[i]].unlock();
        }
    });

    if (rModelPart.MasterSlaveConstraints().size() != 0) {
        struct TLS
        {
            Element::EquationIdVectorType master_ids = Element::EquationIdVectorType(3,0);
            Element::EquationIdVectorType slave_ids = Element::EquationIdVectorType(3,0);
        };
        TLS tls;

        // Constraints only guarantee the diagonal entries here (only the own id is inserted per row)
        block_for_each(rModelPart.MasterSlaveConstraints(), tls, [&](MasterSlaveConstraint& rConst, TLS& rTls){
            rConst.EquationIdVector(rTls.slave_ids, rTls.master_ids, CurrentProcessInfo);

            for (std::size_t i = 0; i < rTls.slave_ids.size(); i++) {
                lock_array[rTls.slave_ids[i]].lock();
                auto& row_indices = indices[rTls.slave_ids[i]];
                row_indices.insert(rTls.slave_ids[i]);
                lock_array[rTls.slave_ids[i]].unlock();
            }

            for (std::size_t i = 0; i < rTls.master_ids.size(); i++) {
                lock_array[rTls.master_ids[i]].lock();
                auto& row_indices = indices[rTls.master_ids[i]];
                row_indices.insert(rTls.master_ids[i]);
                lock_array[rTls.master_ids[i]].unlock();
            }
        });
    }

    //destroy locks
    lock_array = std::vector< LockObject >();

    //count the row sizes
    unsigned int nnz = 0;
    for (unsigned int i = 0; i < indices.size(); i++) {
        nnz += indices[i].size();
    }

    A = CompressedMatrixType(indices.size(), indices.size(), nnz);

    // Raw CSR arrays of the freshly allocated matrix
    double* Avalues = A.value_data().begin();
    std::size_t* Arow_indices = A.index1_data().begin();
    std::size_t* Acol_indices = A.index2_data().begin();

    //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
    Arow_indices[0] = 0;
    for (int i = 0; i < static_cast<int>(A.size1()); i++) {
        Arow_indices[i+1] = Arow_indices[i] + indices[i].size();
    }

    // Fill and sort each row's column indices (rows are disjoint -> parallel-safe)
    IndexPartition<std::size_t>(A.size1()).for_each([&](std::size_t i){
        const unsigned int row_begin = Arow_indices[i];
        const unsigned int row_end = Arow_indices[i+1];
        unsigned int k = row_begin;
        for (auto it = indices[i].begin(); it != indices[i].end(); it++) {
            Acol_indices[k] = *it;
            Avalues[k] = 0.0;
            k++;
        }

        indices[i].clear(); //deallocating the memory
        std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
    });

    A.set_filled(indices.size()+1, nnz);

    Timer::Stop("MatrixStructure");
}
/**
 * @brief Scatters one local system (LHS row block + RHS) into the global matrix and vector
 * @param A The global system matrix
 * @param b The global RHS vector
 * @param LHS_Contribution The local LHS matrix
 * @param RHS_Contribution The local RHS vector
 * @param EquationId Global equation ids of the local entries
 */
void Assemble(
    TSystemMatrixType& A,
    TSystemVectorType& b,
    const LocalSystemMatrixType& LHS_Contribution,
    const LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
    )
{
    const unsigned int local_size = LHS_Contribution.size1();

    for (unsigned int i_local = 0; i_local < local_size; ++i_local) {
        const unsigned int i_global = EquationId[i_local];

        // RHS: thread-safe accumulation into the global vector
        double& r_b_value = b[i_global];
        const double& local_rhs_value = RHS_Contribution(i_local);
        AtomicAdd(r_b_value, local_rhs_value);

        // LHS: scatter this local row into the matching global CSR row
        AssembleRowContribution(A, LHS_Contribution, i_global, i_local, EquationId);
    }
}
//**************************************************************************
/**
 * @brief Scatters a local LHS matrix into the global system matrix (no RHS)
 * @param rA The global system matrix
 * @param rLHSContribution The local LHS matrix
 * @param rEquationId Global equation ids of the local entries
 */
void AssembleLHS(
    TSystemMatrixType& rA,
    const LocalSystemMatrixType& rLHSContribution,
    Element::EquationIdVectorType& rEquationId
    )
{
    const SizeType number_of_local_rows = rLHSContribution.size1();
    for (IndexType i_local = 0; i_local < number_of_local_rows; ++i_local) {
        // Scatter row i_local into global row rEquationId[i_local]
        AssembleRowContribution(rA, rLHSContribution, rEquationId[i_local], i_local, rEquationId);
    }
}
//**************************************************************************
/**
 * @brief Thread-safe scatter of a local RHS vector into the global RHS
 * @param b The global RHS vector
 * @param RHS_Contribution The local RHS vector
 * @param EquationId Global equation ids of the local entries
 */
void AssembleRHS(
    TSystemVectorType& b,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
    )
{
    const unsigned int local_size = RHS_Contribution.size();

    for (unsigned int i_local = 0; i_local < local_size; ++i_local) {
        const unsigned int i_global = EquationId[i_local];

        // ASSEMBLING THE SYSTEM VECTOR (atomic: several threads may hit the same entry)
        const double rhs_value = RHS_Contribution[i_local];
        AtomicAdd(b[i_global], rhs_value);
    }
}
/**
 * @brief Adds one row of a local matrix into row i of the global CSR matrix A
 * @details Exploits the locality of consecutive equation ids in the sorted column array:
 * each entry is searched starting from the previous hit, walking forward or backward,
 * instead of re-scanning the whole row. Value updates are atomic, so multiple threads
 * may assemble into the same row concurrently. All ids in EquationId must already exist
 * in the matrix graph of row i.
 * @param A The global system matrix (CSR, structure built and columns sorted)
 * @param Alocal The local contribution matrix
 * @param i The global row index
 * @param i_local The local row index inside Alocal
 * @param EquationId The global column ids of the local entries
 */
inline void AssembleRowContribution(TSystemMatrixType& A, const Matrix& Alocal, const unsigned int i, const unsigned int i_local, Element::EquationIdVectorType& EquationId)
{
    double* values_vector = A.value_data().begin();
    std::size_t* index1_vector = A.index1_data().begin();
    std::size_t* index2_vector = A.index2_data().begin();

    size_t left_limit = index1_vector[i];
    // size_t right_limit = index1_vector[i+1];

    //find the first entry (linear scan from the start of the row)
    size_t last_pos = ForwardFind(EquationId[0],left_limit,index2_vector);
    size_t last_found = EquationId[0];

    double& r_a = values_vector[last_pos];
    const double& v_a = Alocal(i_local,0);
    AtomicAdd(r_a, v_a);

    //now find all of the other entries, starting each search from the last hit
    size_t pos = 0;
    for (unsigned int j=1; j<EquationId.size(); j++) {
        unsigned int id_to_find = EquationId[j];
        if(id_to_find > last_found) {
            pos = ForwardFind(id_to_find,last_pos+1,index2_vector);
        } else if(id_to_find < last_found) {
            pos = BackwardFind(id_to_find,last_pos-1,index2_vector);
        } else {
            pos = last_pos;
        }

        double& r = values_vector[pos];
        const double& v = Alocal(i_local,j);
        AtomicAdd(r, v);

        last_found = id_to_find;
        last_pos = pos;
    }
}
/**
 * @brief This method returns the scale norm considering for scaling the diagonal
 * @param rModelPart The problem model part
 * @param rA The LHS matrix
 * @return The scale norm
 */
double GetScaleNorm(
    ModelPart& rModelPart,
    TSystemMatrixType& rA
    )
{
    if (mScalingDiagonal == SCALING_DIAGONAL::NO_SCALING) {
        return 1.0;
    } else if (mScalingDiagonal == SCALING_DIAGONAL::CONSIDER_PRESCRIBED_DIAGONAL) {
        // The scale factor must have been prescribed by the user in the process info
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        KRATOS_ERROR_IF_NOT(r_current_process_info.Has(BUILD_SCALE_FACTOR)) << "Scale factor not defined at process info" << std::endl;
        return r_current_process_info.GetValue(BUILD_SCALE_FACTOR);
    } else if (mScalingDiagonal == SCALING_DIAGONAL::CONSIDER_NORM_DIAGONAL) {
        // Average of the diagonal norm
        return GetDiagonalNorm(rA)/static_cast<double>(rA.size1());
    }

    // CONSIDER_MAX_DIAGONAL and any other value fall back to the max diagonal entry
    return GetMaxDiagonal(rA);
}
/**
 * @brief This method returns the diagonal norm considering for scaling the diagonal
 * @param rA The LHS matrix
 * @return The diagonal norm
 */
double GetDiagonalNorm(TSystemMatrixType& rA)
{
    // Sum of squared diagonal entries, reduced in parallel
    const double squared_sum = IndexPartition<std::size_t>(TSparseSpace::Size1(rA)).for_each<SumReduction<double>>([&rA](std::size_t Index){
        return std::pow(rA(Index,Index), 2);
    });

    return std::sqrt(squared_sum);
}
/**
 * @brief This method returns the average of the max and min diagonal values
 * @note The previous comment claimed "the diagonal max value"; the implementation
 * actually returns 0.5 * (GetMaxDiagonal + GetMinDiagonal)
 * @param rA The LHS matrix
 * @return The average of max and min (absolute) diagonal values
 */
double GetAveragevalueDiagonal(TSystemMatrixType& rA)
{
    return 0.5 * (GetMaxDiagonal(rA) + GetMinDiagonal(rA));
}
/**
 * @brief This method returns the maximum absolute value on the diagonal
 * @param rA The LHS matrix
 * @return The diagonal max (absolute) value
 */
double GetMaxDiagonal(TSystemMatrixType& rA)
{
    // // NOTE: Reduction failing in MSVC
    // double max_diag = 0.0;
    // #pragma omp parallel for reduction(max:max_diag)
    // for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
    //     max_diag = std::max(max_diag, std::abs(rA(i,i)));
    // }
    // return max_diag;

    // Creating a buffer for parallel vector fill (one slot per thread avoids any locking)
    const int num_threads = ParallelUtilities::GetNumThreads();
    Vector max_vector(num_threads, 0.0);
    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
        const int id = OpenMPUtils::ThisThread();
        const double abs_value_ii = std::abs(rA(i,i));
        if (abs_value_ii > max_vector[id])
            max_vector[id] = abs_value_ii;
    }

    // Serial reduction over the per-thread maxima
    double max_diag = 0.0;
    for(int i = 0; i < num_threads; ++i) {
        max_diag = std::max(max_diag, max_vector[i]);
    }
    return max_diag;
}
/**
 * @brief This method returns the minimum absolute value on the diagonal
 * @param rA The LHS matrix
 * @return The diagonal min (absolute) value
 */
double GetMinDiagonal(TSystemMatrixType& rA)
{
    // // NOTE: Reduction failing in MSVC
    // double min_diag = std::numeric_limits<double>::max();
    // #pragma omp parallel for reduction(min:min_diag)
    // for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
    //     min_diag = std::min(min_diag, std::abs(rA(i,i)));
    // }
    // return min_diag;

    // Creating a buffer for parallel vector fill (one slot per thread avoids any locking)
    const int num_threads = ParallelUtilities::GetNumThreads();
    Vector min_vector(num_threads, std::numeric_limits<double>::max());
    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
        const int id = OpenMPUtils::ThisThread();
        const double abs_value_ii = std::abs(rA(i,i));
        if (abs_value_ii < min_vector[id])
            min_vector[id] = abs_value_ii;
    }

    // Serial reduction over the per-thread minima
    double min_diag = std::numeric_limits<double>::max();
    for(int i = 0; i < num_threads; ++i) {
        min_diag = std::min(min_diag, min_vector[i]);
    }
    return min_diag;
}
/**
 * @brief This method assigns settings to member variables
 * @param ThisParameters Parameters that are assigned to the member variables
 */
void AssignSettings(const Parameters ThisParameters) override
{
    BaseType::AssignSettings(ThisParameters);

    // Setting flags
    const std::string& r_diagonal_values_for_dirichlet_dofs = ThisParameters["diagonal_values_for_dirichlet_dofs"].GetString();

    // Validate the requested diagonal-scaling strategy before mapping it to the enum
    std::set<std::string> available_options_for_diagonal = {"no_scaling","use_max_diagonal","use_diagonal_norm","defined_in_process_info"};

    if (available_options_for_diagonal.find(r_diagonal_values_for_dirichlet_dofs) == available_options_for_diagonal.end()) {
        std::stringstream msg;
        msg << "Currently prescribed diagonal values for dirichlet dofs : " << r_diagonal_values_for_dirichlet_dofs << "\n";
        msg << "Admissible values for the diagonal scaling are : no_scaling, use_max_diagonal, use_diagonal_norm, or defined_in_process_info" << "\n";
        KRATOS_ERROR << msg.str() << std::endl;
    }

    // The first option will not consider any scaling (the diagonal values will be replaced with 1)
    if (r_diagonal_values_for_dirichlet_dofs == "no_scaling") {
        mScalingDiagonal = SCALING_DIAGONAL::NO_SCALING;
    } else if (r_diagonal_values_for_dirichlet_dofs == "use_max_diagonal") {
        mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_MAX_DIAGONAL;
    } else if (r_diagonal_values_for_dirichlet_dofs == "use_diagonal_norm") { // On this case the norm of the diagonal will be considered
        mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_NORM_DIAGONAL;
    } else { // Otherwise we will assume we impose a numerical value
        mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_PRESCRIBED_DIAGONAL;
    }
    mOptions.Set(SILENT_WARNINGS, ThisParameters["silent_warnings"].GetBool());
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
 * @brief Appends candidate to v only if it is not already present
 * @details Replaces the hand-rolled linear scan with std::find; the file already relies
 * on <algorithm> (std::sort is used when building the matrix graph).
 * @param v The vector of indices (kept duplicate-free by this helper)
 * @param candidate The index to insert
 */
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    if (std::find(v.begin(), v.end(), candidate) == v.end()) {
        v.push_back(candidate);
    }
}
//******************************************************************************************
//******************************************************************************************
/**
 * @brief Splits number_of_rows into number_of_threads contiguous chunks
 * @details The last partition absorbs the division remainder, since its upper
 * bound is pinned to number_of_rows explicitly.
 */
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);

    const int chunk_size = number_of_rows / number_of_threads;

    // Pin both ends, then fill the interior boundaries with equal-sized chunks
    partitions[0] = 0;
    partitions[number_of_threads] = number_of_rows;
    for (unsigned int k = 1; k < number_of_threads; ++k) {
        partitions[k] = partitions[k - 1] + chunk_size;
    }
}
/**
 * @brief Linear scan forward from @p start for the entry equal to @p id_to_find.
 * @return the position of the first match at or after @p start.
 * @warning Precondition: the value must occur at or after @p start, otherwise
 *          the scan runs past the end of the array.
 */
inline unsigned int ForwardFind(const unsigned int id_to_find,
                                const unsigned int start,
                                const size_t* index_vector)
{
    unsigned int position = start;
    for (; index_vector[position] != id_to_find; ++position) {}
    return position;
}
/**
 * @brief Linear scan backward from @p start for the entry equal to @p id_to_find.
 * @return the position of the first match at or before @p start.
 * @warning Precondition: the value must occur at or before @p start, otherwise
 *          the unsigned position wraps and the scan reads out of bounds.
 */
inline unsigned int BackwardFind(const unsigned int id_to_find,
                                 const unsigned int start,
                                 const size_t* index_vector)
{
    unsigned int position = start;
    for (; index_vector[position] != id_to_find; --position) {}
    return position;
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBlockBuilderAndSolver */
///@}
///@name Type Definitions
///@{
// Here one should use the KRATOS_CREATE_LOCAL_FLAG, but it does not play nice with template parameters
template<class TSparseSpace, class TDenseSpace, class TLinearSolver>
const Kratos::Flags ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::SILENT_WARNINGS(Kratos::Flags::Create(0));
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
|
wserver.c | /*
MIT License
Copyright (c) 2017 Emanuele Giona
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "wserver.h"
/* Bitwise XOR of two ints, kept as a named helper for readability at the
 * call sites that build the cipher stream. */
int XOR(int a, int b) {
    int combined = a ^ b;
    return combined;
}
/*
 * XOR-"encrypts" the first 'dim' bytes of srcfile into dstfile using a
 * rand()-based key stream seeded with 'seed' (XOR is its own inverse, so the
 * same routine both encrypts and decrypts), via Windows memory-mapped I/O.
 * Files up to 256KB are mapped whole; larger files are processed in 5MB
 * chunks, each XOR-ed in parallel by OpenMP threads (one per 256KB).
 * Returns HTTP-like status codes: 200 OK, 400 file unusable, 500 internal
 * error; on failure an (Italian) description is left in the global lastError.
 */
int fileXOR(char srcfile[], char dstfile[], long long dim, int seed) {
    // Open source read-only and destination read/write (created/truncated).
    HANDLE src = CreateFile(srcfile,GENERIC_READ,FILE_SHARE_READ,NULL,OPEN_EXISTING, FILE_FLAG_RANDOM_ACCESS,NULL);
    if (src==INVALID_HANDLE_VALUE) {
        sprintf(lastError, "Errore apertura file %s.\n", srcfile);
        return 400;
    }
    HANDLE dst = CreateFile(dstfile, GENERIC_READ | GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_FLAG_RANDOM_ACCESS, NULL);
    if (dst==INVALID_HANDLE_VALUE) {
        sprintf(lastError, "Errore apertura file %s.\n", dstfile);
        CloseHandle(src);
        return 400;
    }
    // Take exclusive, fail-fast locks on both files for the whole operation.
    // NOTE(review): LockFileEx's 4th/5th args are the LOW then HIGH dwords of
    // the byte count to lock; passing (0, dim) requests a lock of dim<<32
    // bytes, not dim. (dim, 0) looks intended -- verify. The UnlockFileEx
    // calls below use the same argument order, so lock/unlock stay symmetric.
    OVERLAPPED srcoverlap;
    memset(&srcoverlap,0,sizeof(srcoverlap));
    if (!LockFileEx(src, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, 0, dim, &srcoverlap)) {
        sprintf(lastError, "Errore lock su file %s.\n", srcfile);
        CloseHandle(src);
        CloseHandle(dst);
        return 500;
    }
    OVERLAPPED dstoverlap;
    memset(&dstoverlap, 0, sizeof(dstoverlap));
    if (!LockFileEx(dst, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, 0, dim, &dstoverlap)) {
        sprintf(lastError, "Errore lock su file %s.\n", dstfile);
        UnlockFileEx(src,0,0,dim,&srcoverlap);
        CloseHandle(src);
        CloseHandle(dst);
        return 500;
    }
    LARGE_INTEGER fileSize, fileMapSize, mapViewSize, fileMapStart; // fileSize/fileMapSize appear unused
    DWORD granularity;
    SYSTEM_INFO sysInfo;
    long offset;
    GetSystemInfo(&sysInfo);
    // MapViewOfFile offsets must be multiples of the allocation granularity.
    granularity = sysInfo.dwAllocationGranularity;
    // Pre-size the output file: seek near its end...
    LARGE_INTEGER LIrounded;
    LIrounded.HighPart = 0;
    // NOTE(review): together with the 2-byte write below this makes the file
    // exactly 'dim' bytes, but breaks for dim < 2 and silently truncates
    // dim >= 4GB (only LowPart is set) -- confirm intended limits.
    LIrounded.LowPart = dim - 2;
    if(!SetFilePointerEx(dst,LIrounded,NULL,FILE_BEGIN)){
        sprintf(lastError, "Errore stretch file %s.\n", dstfile);
        UnlockFileEx(src, 0, 0, dim, &srcoverlap);
        UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
        CloseHandle(src);
        CloseHandle(dst);
        return 500;
    }
    // ...and write a placeholder so the new size is committed before mapping.
    // "\0" is two bytes (NUL + string terminator), hence dim-2 above.
    // NOTE(review): per the WriteFile docs, lpNumberOfBytesWritten may be
    // NULL only when lpOverlapped is non-NULL; here both are NULL -- confirm.
    char buff[] = "\0";
    if (!WriteFile(dst, buff, sizeof(buff), NULL, NULL)) {
        sprintf(lastError, "Errore scrittura su file %s.\n", dstfile);
        UnlockFileEx(src, 0, 0, dim, &srcoverlap);
        UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
        CloseHandle(src);
        CloseHandle(dst);
        return 500;
    }
    // Seed rand() so the same seed reproduces the same key stream.
    srand(seed);
    // Create file mappings over both whole files.
    HANDLE handle_srcmap = CreateFileMapping(src, NULL, PAGE_READONLY, 0, 0, NULL);
    if (handle_srcmap == NULL) {
        sprintf(lastError, "Errore file mapping su file %s: %d\n", srcfile, GetLastError());
        UnlockFileEx(src, 0, 0, dim, &srcoverlap);
        UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
        CloseHandle(src);
        CloseHandle(dst);
        return 500;
    }
    HANDLE handle_dstmap = CreateFileMapping(dst, NULL, PAGE_READWRITE, 0, 0, NULL);
    if (handle_dstmap == NULL) {
        sprintf(lastError, "Errore file mapping su file %s: %d\n", dstfile, GetLastError());
        CloseHandle(handle_srcmap);
        UnlockFileEx(src, 0, 0, dim, &srcoverlap);
        UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
        CloseHandle(src);
        CloseHandle(dst);
        return 500;
    }
    // Small files (<= 256KB): no threads needed, map both files whole.
    if (dim <= 256 * 1024) {
        MEMORYSTATUSEX memstatus;
        memstatus.dwLength = sizeof(memstatus);
        GlobalMemoryStatusEx(&memstatus);
        // NOTE(review): ullAvailVirtual is 64-bit; storing it in a 32-bit
        // 'long' (Windows is LLP64) truncates the value -- verify.
        long freeMem = memstatus.ullAvailVirtual;
        if (freeMem <= 3 * dim) {
            sprintf(lastError, "RAM insufficiente per aprire il file %s.\n", srcfile);
            CloseHandle(handle_srcmap);
            CloseHandle(handle_dstmap);
            UnlockFileEx(src, 0, 0, dim, &srcoverlap);
            UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
            CloseHandle(src);
            CloseHandle(dst);
            return 500;
        }
        // Map both files entirely into memory.
        char *srcmap = (char *)MapViewOfFile(handle_srcmap,FILE_MAP_READ,0,0,0);
        if ((LPVOID)srcmap == NULL) {
            sprintf(lastError, "Errore mapview su file %s: %d.\n", srcfile, GetLastError());
            CloseHandle(handle_srcmap);
            CloseHandle(handle_dstmap);
            UnlockFileEx(src, 0, 0, dim, &srcoverlap);
            UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
            CloseHandle(src);
            CloseHandle(dst);
            return 500;
        }
        char *dstmap = (char *)MapViewOfFile(handle_dstmap, FILE_MAP_ALL_ACCESS, 0, 0, 0);
        if ((LPVOID)dstmap == NULL) {
            sprintf(lastError, "Errore mapview su file %s: %d.\n", dstfile, GetLastError());
            UnmapViewOfFile((LPVOID)srcmap);
            CloseHandle(handle_srcmap);
            CloseHandle(handle_dstmap);
            UnlockFileEx(src, 0, 0, dim, &srcoverlap);
            UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
            CloseHandle(src);
            CloseHandle(dst);
            return 500;
        }
        // Key array: one int per 4 consecutive bytes of payload.
        // NOTE(review): only ceil(dim/4) entries are ever consumed below, so
        // the extra "*4" over-allocates the key by a factor of 4 -- verify.
        long keyDim = (long)ceil((double)dim / 4) * 4;
        int *key;
        key = malloc(keyDim * sizeof(int));
        if (key == NULL) {
            sprintf(lastError, "Errore malloc.\n");
            UnmapViewOfFile((LPVOID)srcmap);
            UnmapViewOfFile((LPVOID)dstmap);
            CloseHandle(handle_srcmap);
            CloseHandle(handle_dstmap);
            UnlockFileEx(src, 0, 0, dim, &srcoverlap);
            UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
            CloseHandle(src);
            CloseHandle(dst);
            return 500;
        }
        for (long i = 0; i<keyDim; i++) {
            key[i] = rand() % 65536; // cap the generated value for portability across compilers
        }
        // XOR byte by byte into the destination mapping, 4 bytes per key entry.
        // NOTE(review): the body writes i+1..i+3 without re-checking against
        // 'dim', so the last group may touch up to 3 bytes past 'dim' when
        // dim is not a multiple of 4 -- confirm the mapping covers them.
        long i, j;
        for (i = 0, j = 0; i<dim && j<keyDim; i += 4, j++) {
            dstmap[i] = (char)(XOR((int)srcmap[i], key[j]));
            dstmap[i + 1] = (char)(XOR((int)srcmap[i + 1], key[j]));
            dstmap[i + 2] = (char)(XOR((int)srcmap[i + 2], key[j]));
            dstmap[i + 3] = (char)(XOR((int)srcmap[i + 3], key[j]));
        }
        free(key);
        UnmapViewOfFile((LPVOID)srcmap);
        UnmapViewOfFile((LPVOID)dstmap);
        CloseHandle(handle_srcmap);
        CloseHandle(handle_dstmap);
        UnlockFileEx(src, 0, 0, dim, &srcoverlap);
        UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
        CloseHandle(src);
        CloseHandle(dst);
    }
    // Larger files: process in 5MB chunks with OpenMP, one thread per 256KB.
    else {
        // NOTE(review): 'long' is 32-bit on Windows, so start/end below
        // overflow for files over ~2GB -- verify intended size limits.
        long fiveMB = 5 * pow(2, 20);
        int chunks = (int)ceil((double)dim / fiveMB);
        for (int c = 0; c<chunks; c++) {
            MEMORYSTATUSEX memstatus;
            memstatus.dwLength = sizeof(memstatus);
            GlobalMemoryStatusEx(&memstatus);
            // NOTE(review): same 64->32-bit truncation as the small-file path.
            long freeMem = memstatus.ullAvailVirtual;
            if (freeMem <= 2 * fiveMB) {
                sprintf(lastError, "RAM insufficiente per aprire il file %s.\n", srcfile);
                CloseHandle(handle_srcmap);
                CloseHandle(handle_dstmap);
                UnlockFileEx(src, 0, 0, dim, &srcoverlap);
                UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
                CloseHandle(src);
                CloseHandle(dst);
                return 500;
            }
            // Chunk byte range [start, realEnd), clipped to the file size.
            long start = (c)*fiveMB;
            long end = (c + 1)*fiveMB;
            long realEnd = end;
            if (dim<realEnd)
                realEnd = dim;
            long chunkDim = realEnd - start;
            // Align the map start down to the allocation granularity and
            // remember how far into the view the chunk actually begins.
            fileMapStart.QuadPart = (start/granularity)*granularity;
            offset = start - fileMapStart.QuadPart;
            if (dim - fileMapStart.LowPart < chunkDim)
                chunkDim = dim - fileMapStart.LowPart;
            mapViewSize.QuadPart = (start%granularity) + chunkDim;
            // Map chunk c of the source.
            char *srcmap = (char *)MapViewOfFile(handle_srcmap, FILE_MAP_READ, fileMapStart.HighPart, fileMapStart.LowPart, mapViewSize.QuadPart);
            if ((LPVOID)srcmap == NULL) {
                sprintf(lastError, "Errore mapview su file %s, chunk #%i: %d\n", srcfile, c, GetLastError());
                CloseHandle(handle_srcmap);
                CloseHandle(handle_dstmap);
                UnlockFileEx(src, 0, 0, dim, &srcoverlap);
                UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
                CloseHandle(src);
                CloseHandle(dst);
                return 500;
            }
            // Skip past the granularity padding to the real chunk start.
            srcmap += offset;
            char *dstmap = (char *)MapViewOfFile(handle_dstmap, FILE_MAP_ALL_ACCESS, fileMapStart.HighPart, fileMapStart.LowPart, mapViewSize.QuadPart);
            if ((LPVOID)dstmap == NULL) {
                sprintf(lastError, "Errore mapview su file %s: %d\n", dstfile, GetLastError());
                UnmapViewOfFile((LPVOID)srcmap);
                CloseHandle(handle_srcmap);
                CloseHandle(handle_dstmap);
                UnlockFileEx(src, 0, 0, dim, &srcoverlap);
                UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
                CloseHandle(src);
                CloseHandle(dst);
                return 500;
            }
            // Skip past the granularity padding to the real chunk start.
            dstmap += offset;
            // One OpenMP thread per 256KB of this chunk.
            int mpThreads = (int)ceil((double)chunkDim / (256 * 1024));
            // Key matrix: each OpenMP thread gets its own row of keyDimT
            // entries, one entry per 4 consecutive bytes.
            long keyDimT = (long)ceil((double)chunkDim / (mpThreads*4));
            int *key;
            key = malloc(mpThreads*keyDimT*sizeof(int));
            if (key == NULL) {
                sprintf(lastError, "Errore malloc.\n");
                UnmapViewOfFile((LPVOID)srcmap);
                UnmapViewOfFile((LPVOID)dstmap);
                CloseHandle(handle_srcmap);
                CloseHandle(handle_dstmap);
                UnlockFileEx(src, 0, 0, dim, &srcoverlap);
                UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
                CloseHandle(src);
                CloseHandle(dst);
                return 500;
            }
            // NOTE(review): rows are keyDimT entries long, so the row stride
            // here should presumably be j*keyDimT + i (and threadID*keyDimT
            // in the read below), not *mpThreads; with mpThreads < keyDimT
            // the rows overlap and indices can exceed the allocation -- verify.
            for (long j = 0; j<mpThreads; j++) {
                for (long i = 0; i<keyDimT; i++) {
                    key[j*mpThreads + i] = rand() % 65536; // cap the generated value for portability across compilers
                }
            }
#pragma omp parallel num_threads(mpThreads)
            {
                int threadID = omp_get_thread_num();
                // Each thread XORs only its own 256KB slice of the chunk.
                int min = (threadID) * 256 * 1024;
                int max = (threadID + 1) * 256 * 1024;
                for (long i = min; i<max && i<chunkDim; i += 4) {
                    int val = key[(threadID*mpThreads) + ((i - min) / 4)];
                    dstmap[i] = (char)(XOR((int)srcmap[i], val));
                    dstmap[i + 1] = (char)(XOR((int)srcmap[i + 1], val));
                    dstmap[i + 2] = (char)(XOR((int)srcmap[i + 2], val));
                    dstmap[i + 3] = (char)(XOR((int)srcmap[i + 3], val));
                }
            }
            free(key);
            UnmapViewOfFile((LPVOID)srcmap);
            UnmapViewOfFile((LPVOID)dstmap);
        }
        CloseHandle(handle_srcmap);
        CloseHandle(handle_dstmap);
        UnlockFileEx(src, 0, 0, dim, &srcoverlap);
        UnlockFileEx(dst, 0, 0, dim, &dstoverlap);
        CloseHandle(src);
        CloseHandle(dst);
    }
    return 200;
}
/* Sends 'message' to 'sock' as one fixed-size, zero-padded BUFSIZE packet.
 * Returns 0 on success; on failure logs the WinSock error code, closes the
 * socket and returns 1. */
int sendMessage(SOCKET sock, char message[]) {
    char packet[BUFSIZE];
    ZeroMemory(packet, BUFSIZE);
    strncpy(packet, message, BUFSIZE);
    if (send(sock, packet, BUFSIZE, 0) == SOCKET_ERROR) {
        printf("Errore send: %d\n", WSAGetLastError());
        closesocket(sock);
        //WSACleanup();
        return 1;
    }
    return 0;
}
/* Encrypts file 'src': writes "<src>_enc" via fileXOR(seed) and deletes the
 * original on success. Returns fileXOR's status (200/400/500); error text
 * is left in the global 'lastError'. 'sock' is currently unused here. */
int encrypt(char src[], int seed, SOCKET sock) {
    char dst[PATHLEN] = "";
    /* BUG FIX: build the output name with an explicit bound. The previous
     * strncpy(strlen(src)) + strncat pair could overflow 'dst' whenever
     * strlen(src) + 4 >= PATHLEN. */
    snprintf(dst, PATHLEN, "%s_enc", src);
    LARGE_INTEGER dim;
    HANDLE srcfile;
    /* Open only to query the size; fileXOR reopens the file itself. */
    srcfile = CreateFile(src, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
    if (srcfile == INVALID_HANDLE_VALUE) {
        sprintf(lastError, "File %s non esistente.\n", src);
        return 400;
    }
    if (!GetFileSizeEx(srcfile,&dim)) {
        sprintf(lastError, "Errore nel calcolo dimensione del file %s.\n", src);
        CloseHandle(srcfile);
        return 500;
    }
    CloseHandle(srcfile);
    int ret = fileXOR(src, dst, (long long)dim.QuadPart, seed);
    /* Remove the plaintext only if the encryption fully succeeded. */
    if (ret == 200 && !DeleteFile(src)) {
        sprintf(lastError, "Errore nella cancellazione del file %s: %d\n", src, GetLastError());
        return 500;
    }
    return ret;
}
/* Decrypts a file produced by encrypt(): verifies the name ends with "_enc",
 * XORs it back with the same seed (XOR is its own inverse) and deletes the
 * encrypted file on success. Returns 200/400/500; error text in 'lastError'.
 * 'sock' is currently unused here. */
int decrypt(char src[], int seed, SOCKET sock) {
    // Find the LAST occurrence of "_enc" in the name (a file may legitimately
    // contain "_enc" in the middle, e.g. "a_encoded_enc").
    char *enc = NULL;
    char *temp = strstr(src, "_enc");
    while (temp) {
        enc = temp++;
        temp = strstr(temp, "_enc");
    }
    // The suffix must sit at the very end of the name (exactly 4 chars left).
    if (enc == NULL || strlen(enc) != 4) {
        sprintf(lastError, "Il file %s non e' un file cifrato.\n", src);
        return 400;
    }
    char dst[PATHLEN] = "";
    // Strip the 4-char suffix to get the plaintext name; dst stays
    // NUL-terminated only because it was zero-initialized above.
    // NOTE(review): unbounded for strlen(src)-4 >= PATHLEN -- confirm inputs
    // are already limited to PATHLEN upstream.
    strncpy(dst, src, strlen(src) - 4);
    LARGE_INTEGER dim;
    HANDLE srcfile;
    // Open only to query the size; fileXOR reopens the file itself.
    srcfile = CreateFile(src, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
    if (srcfile == INVALID_HANDLE_VALUE) {
        sprintf(lastError, "File %s non esistente.\n", src);
        return 400;
    }
    if (!GetFileSizeEx(srcfile, &dim)) {
        sprintf(lastError, "Errore nel calcolo dimensione del file %s.\n", src);
        CloseHandle(srcfile);
        return 500;
    }
    CloseHandle(srcfile);
    int ret = fileXOR(src, dst, (long long)dim.QuadPart, seed);
    // Remove the encrypted file only if the decryption fully succeeded.
    if (ret == 200 && !DeleteFile(src)) {
        sprintf(lastError, "Errore nella cancellazione del file %s.\n", src);
        return 500;
    }
    return ret;
}
/* Lists the regular files directly inside 'folder' (non-recursive), sending
 * one "<size> <path>" entry plus a "\r\n" per file on 'sock'.
 * Returns 200, or 400 with a message in 'lastError' if the directory cannot
 * be opened. */
int listFolder(char folder[], SOCKET sock) {
    WIN32_FIND_DATA find_data;
    char pattern[MAX_PATH];
    LARGE_INTEGER size;
    snprintf(pattern, MAX_PATH, "%s\\*.*", folder);
    HANDLE finder = FindFirstFile(pattern, &find_data);
    if (finder == INVALID_HANDLE_VALUE) {
        sprintf(lastError, "Errore apertura directory %s.\n", folder);
        return 400;
    }
    do {
        /* Skip the "." / ".." pseudo-entries and sub-directories. */
        int is_dot = strcmp(find_data.cFileName, ".") == 0 || strcmp(find_data.cFileName, "..") == 0;
        if (is_dot || (find_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY))
            continue;
        char path[PATHLEN];
        char entry[PATHLEN+50];
        ZeroMemory(path, sizeof(path));
        ZeroMemory(entry, sizeof(entry));
        size.LowPart = find_data.nFileSizeLow;
        size.HighPart = find_data.nFileSizeHigh;
        snprintf(path, PATHLEN, "%s/%s", folder, find_data.cFileName);
        snprintf(entry, PATHLEN+50, "%llu %s", size.QuadPart, path);
        sendMessage(sock, entry);
        sendMessage(sock, "\r\n");
    } while (FindNextFile(finder, &find_data) != 0);
    FindClose(finder);
    return 200;
}
/* Recursively lists all regular files under 'folder', sending one
 * "<size> <path>" entry plus a "\r\n" per file on 'sock'.
 * Returns 200, or the first error code met (400 if a directory cannot be
 * opened), with a message in 'lastError'. */
int listRecursive(char folder[], SOCKET sock) {
    WIN32_FIND_DATA find_data;
    char dir[MAX_PATH];
    LARGE_INTEGER dim;
    HANDLE handle_find = INVALID_HANDLE_VALUE;
    snprintf(dir, MAX_PATH, "%s\\*.*", folder);
    handle_find = FindFirstFile(dir, &find_data);
    if (handle_find == INVALID_HANDLE_VALUE) {
        sprintf(lastError, "Errore apertura directory %s.\n", folder);
        return 400;
    }
    do {
        char path[PATHLEN];
        char entry[PATHLEN+50];
        ZeroMemory(path, sizeof(path));
        ZeroMemory(entry, sizeof(entry));
        if (strcmp(find_data.cFileName, ".") == 0 || strcmp(find_data.cFileName, "..") == 0) {
            continue;
        }
        else if(find_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY){
            /* BUG FIX: bounded snprintf instead of sprintf (listFolder already
             * uses snprintf); deep trees could overflow 'path'. */
            snprintf(path, PATHLEN, "%s/%s", folder, find_data.cFileName);
            int ret = listRecursive(path, sock);
            if (ret != 200) {
                /* BUG FIX: don't leak the find handle on early return. */
                FindClose(handle_find);
                return ret;
            }
        }
        else {
            dim.LowPart = find_data.nFileSizeLow;
            dim.HighPart = find_data.nFileSizeHigh;
            /* BUG FIX: bounded snprintf for both buffers, same rationale. */
            snprintf(path, PATHLEN, "%s/%s", folder, find_data.cFileName);
            snprintf(entry, PATHLEN+50, "%llu %s", dim.QuadPart, path);
            sendMessage(sock, entry);
            sendMessage(sock, "\r\n");
        }
    } while (FindNextFile(handle_find, &find_data) != 0);
    FindClose(handle_find);
    return 200;
}
/* Dispatches one protocol command (LSTF / LSTR / ENCR / DECR) found in
 * 'message' against base directory 'folder', replying on 'sock' with the
 * handler output followed by an HTTP-like status string.
 * Returns the handler's code (200/400/500), 0 for an unrecognized command,
 * or 1 if 'folder' is not a readable directory. */
int parseRequest(char folder[], char message[], SOCKET sock) {
    WIN32_FIND_DATA dirdata;
    char temp[MAX_PATH];
    /* Sanity-check that the served folder still exists before dispatching. */
    snprintf(temp,MAX_PATH,"%s\\*.*",folder);
    HANDLE dir = FindFirstFile(temp,&dirdata);
    if (dir == INVALID_HANDLE_VALUE) {
        return 1;
    }
    FindClose(dir);
    int ret = 0;
    if (strstr(message, "LSTF") != NULL) {
        sendMessage(sock, STATE_PENDING);
        ret = listFolder(folder, sock);
        sendMessage(sock, "\r\n.\r\n");
    }
    else if (strstr(message, "LSTR") != NULL) {
        sendMessage(sock, STATE_PENDING);
        ret = listRecursive(folder, sock);
        sendMessage(sock, "\r\n.\r\n");
    }
    else if (strstr(message, "ENCR") != NULL) {
        /* BUG FIX: the command token is 4 chars + NUL, so the buffer must be
         * 5 bytes and the conversion width-limited; the old "char s[4]" with
         * an unbounded "%s" overflowed the buffer by one byte. */
        char s[5] = "";
        unsigned int seed = -1; /* UINT_MAX sentinel: "seed not parsed" */
        char path[PATHLEN] = "errore";
        /* NOTE(review): %[^\n] into 'path' is still unbounded -- width-limit
         * it to PATHLEN-1 once PATHLEN's value is settled. */
        sscanf(message, "%4s %u %[^\n]%*s", s, &seed, path);
        if (seed != -1 && strcmp(path, "errore") != 0) {
            ret = encrypt(path, seed, sock);
        }
    }
    else if (strstr(message, "DECR") != NULL) {
        /* Same fix as the ENCR branch above. */
        char s[5] = "";
        unsigned int seed = -1;
        char path[PATHLEN] = "errore";
        sscanf(message, "%4s %u %[^\n]%*s", s, &seed, path);
        if (seed != -1 && strcmp(path, "errore") != 0) {
            ret = decrypt(path, seed, sock);
        }
    }
    /* Map the handler code to a protocol status reply. */
    if (ret == 200) {
        sendMessage(sock, STATE_OK);
    }
    else if (ret == 400) {
        sendMessage(sock, lastError);
        sendMessage(sock, STATE_ERROR);
    }
    else if (ret == 500) {
        sendMessage(sock, lastError);
        sendMessage(sock, STATE_UNAVAIL);
    }
    return ret;
}
/* Enqueues a client request for the worker pool and wakes the workers.
 * The message text is copied into heap storage owned by the request;
 * 'folder' and 'address' are stored by pointer, so their lifetime must cover
 * the request's processing. Returns 0 on success, 1 on allocation failure. */
int addRequest(SRWLOCK *mutex, CONDITION_VARIABLE *cond, char *folder, char *address, char *message, SOCKET sock) {
    struct request *req = (struct request *)malloc(sizeof(struct request));
    if (!req) {
        char toLog[BUFSIZE] = "";
        sprintf(toLog, "Errore malloc richiesta.\n");
        writeLog(LOGFILE, toLog);
        return 1;
    }
    /* BUG FIX: the original stored a pointer to a function-local stack buffer
     * in req->message, which dangled as soon as this function returned. Copy
     * the message to the heap instead. NOTE: task() currently frees only the
     * request struct, so this copy leaks -- still strictly better than a
     * use-after-return; TODO free req->message in the consumer. */
    char *msgcopy = (char *)malloc(PATHLEN + 100);
    if (!msgcopy) {
        free(req);
        char toLog[BUFSIZE] = "";
        sprintf(toLog, "Errore malloc richiesta.\n");
        writeLog(LOGFILE, toLog);
        return 1;
    }
    ZeroMemory(msgcopy, PATHLEN + 100);
    snprintf(msgcopy, PATHLEN + 100, "%s", message);
    AcquireSRWLockExclusive(mutex);
    req->ID = nextReqID;
    req->folder = folder;
    req->address = address;
    req->message = msgcopy;
    req->sock = sock;
    req->next = NULL;
    char toLog[BUFSIZE] = "";
    sprintf(toLog, "[Richiesta #%i] [%s] [%s]\n", nextReqID, address, message);
    writeLog(LOGFILE, toLog);
    /* Append to the singly-linked queue. */
    if (numReqs == 0)
        first = req;
    else
        last->next = req;
    last = req;
    numReqs++;
    /* BUG FIX: nextReqID was previously incremented after releasing the lock,
     * racing with concurrent enqueuers; bump it while the lock is held. */
    nextReqID++;
    WakeAllConditionVariable(cond);
    ReleaseSRWLockExclusive(mutex);
    return 0;
}
/* Pops and returns the head of the global request queue, or NULL when the
 * queue is empty. Thread-safe via the supplied SRW lock; the caller takes
 * ownership of the returned node. */
struct request* removeRequest(SRWLOCK *mutex) {
    struct request *head = NULL;
    AcquireSRWLockExclusive(mutex);
    if (numReqs > 0) {
        head = first;
        first = head->next;
        if (first == NULL)
            last = NULL;
        numReqs--;
    }
    ReleaseSRWLockExclusive(mutex);
    return head;
}
/* Worker-thread entry point: repeatedly pops requests from the global queue,
 * handles each via parseRequest(), logs the outcome and closes the client
 * socket; sleeps on the condition variable while the queue is empty.
 * 'arg' points at this worker's integer ID (owned by executeServer). */
DWORD WINAPI task(void *arg) {
    int *threadID = (int *)arg;
    struct request *req;
    // NOTE(review): 'run' is a plain global flag read without synchronization.
    while (run) {
        // Snapshot the queue length under the lock...
        AcquireSRWLockExclusive(&reqMutex);
        int r = numReqs;
        ReleaseSRWLockExclusive(&reqMutex);
        if (r>0) {
            // ...then pop (removeRequest re-acquires the lock itself).
            req = removeRequest(&reqMutex);
            if (req!=NULL) {
                char *folder = req->folder;
                char *message = req->message;
                SOCKET sock = req->sock;
                int reqID = req->ID;
                //printf("[Richiesta #%i] [Thread #%i - assegnata]\n",reqID,*threadID);
                int ret = parseRequest(folder, message, sock);
                char toLog[BUFSIZE] = "";
                sprintf(toLog, "[Richiesta #%i] [Thread #%i: %i]\n", reqID, *threadID, ret);
                writeLog(LOGFILE, toLog);
                //printf("[Richiesta #%i] [Thread #%i: %i]\n",reqID,*threadID,ret);
                // NOTE(review): only the struct is freed here; see addRequest
                // for how req->message is allocated -- confirm its ownership.
                free(req);
                // The worker owns the client socket once the reply is sent.
                closesocket(sock);
            }
        }
        else {
            // NOTE(review): releasing the lock after reading numReqs and
            // re-acquiring it here leaves a window in which a wakeup can be
            // missed (classic lost-wakeup). Conventionally the predicate is
            // re-checked in a loop around SleepConditionVariableSRW while
            // still holding the lock.
            AcquireSRWLockExclusive(&reqMutex);
            SleepConditionVariableSRW(&reqCond,&reqMutex,INFINITE,0);
            ReleaseSRWLockExclusive(&reqMutex);
        }
    }
    return 0;
}
/* Starts the TCP server: validates 'folder', initializes WinSock, binds and
 * listens on 'port', spawns a pool of 'threadNum' worker threads and loops
 * forever accepting clients, queueing each received message as a request.
 * Returns 1 on any fatal setup error, 0 if the accept loop exits because a
 * request could not be queued. */
int executeServer(char folder[], unsigned short port, int threadNum) {
    WIN32_FIND_DATA dirdata;
    char temp[PATHLEN];
    /* Verify the served folder is a readable directory before binding. */
    snprintf(temp,PATHLEN,"%s\\*.*",folder);
    HANDLE dir = FindFirstFile(temp,&dirdata);
    if (dir==INVALID_HANDLE_VALUE) {
        char toLog[BUFSIZE] = "";
        sprintf(toLog, "La cartella %s non e' una directory valida o non esiste.\n", folder);
        writeLog(LOGFILE, toLog);
        return 1;
    }
    FindClose(dir);
    WSADATA wsaData;
    int res;
    SOCKET serverSock;
    struct addrinfo *result = NULL;
    struct addrinfo serveraddr;
    char message[BUFSIZE];
    int msglen;
    char strPort[6];
    snprintf(strPort,6,"%hu",port);
    /* WinSock initialization. */
    res = WSAStartup(MAKEWORD(2,2), &wsaData);
    if (res!=0) {
        printf("Errore WSAStartup: %i\n",res);
        return 1;
    }
    /* Passive IPv4/TCP address for the listening socket. */
    ZeroMemory(&serveraddr,sizeof(serveraddr));
    serveraddr.ai_family = AF_INET;
    serveraddr.ai_socktype = SOCK_STREAM;
    serveraddr.ai_protocol = IPPROTO_TCP;
    serveraddr.ai_flags = AI_PASSIVE;
    res = getaddrinfo(NULL, strPort, &serveraddr, &result);
    if (res!=0) {
        printf("Errore getadddrinfo: %i\n",res);
        WSACleanup();
        return 1;
    }
    serverSock = socket(result->ai_family,result->ai_socktype,result->ai_protocol);
    if (serverSock==INVALID_SOCKET) {
        printf("Errore socket: %ld\n",WSAGetLastError());
        freeaddrinfo(result);
        WSACleanup();
        return 1;
    }
    res = bind(serverSock,result->ai_addr,(int)result->ai_addrlen);
    if (res==SOCKET_ERROR) {
        printf("Errore bind: %d\n",WSAGetLastError());
        freeaddrinfo(result);
        closesocket(serverSock);
        WSACleanup();
        return 1;
    }
    /* BUG FIX: the address list is released exactly once here; the original
     * code called freeaddrinfo(result) again on the listen/accept/getnameinfo
     * error paths below, double-freeing it. */
    freeaddrinfo(result);
    result = NULL;
    res = listen(serverSock,SOMAXCONN);
    if (res==SOCKET_ERROR) {
        printf("Errore listen: %d\n",WSAGetLastError());
        closesocket(serverSock);
        WSACleanup();
        return 1;
    }
    /* Worker thread pool; each thread receives a pointer to its own ID. */
    int *threadID = malloc(threadNum*sizeof(int));
    HANDLE *threads = malloc(threadNum*sizeof(HANDLE));
    if (threadID == NULL || threads == NULL) {
        printf("Errore malloc thread pool.\n");
        free(threadID);
        free(threads);
        closesocket(serverSock);
        WSACleanup();
        return 1;
    }
    for (int i = 0; i<threadNum; i++) {
        threadID[i] = i;
        threads[i] = CreateThread(NULL, 0, task, &threadID[i], 0, NULL);
    }
    SOCKET clientSock;
    struct sockaddr_in clientAddr;
    /* BUG FIX: accept() on WinSock takes an int*, not an unsigned int*. */
    int clientlen = sizeof(clientAddr);
    /* Accept loop: receive one message per connection and queue it for the
     * worker pool. NOTE(review): clientAddrReadable is stack memory whose
     * pointer addRequest stores -- confirm the request is consumed before
     * the next iteration overwrites it. */
    while (true) {
        clientSock = accept(serverSock, (struct sockaddr *)&clientAddr, &clientlen);
        if (clientSock == INVALID_SOCKET) {
            printf("Errore accept: %d\n", WSAGetLastError());
            closesocket(serverSock);
            WSACleanup();
            return 1;
        }
        char clientAddrReadable[NI_MAXHOST];
        /* BUG FIX: the service-buffer length must be 0 when the service
         * buffer is NULL; the original passed sizeof(NULL) (a pointer size). */
        if (getnameinfo((const struct sockaddr *)&clientAddr, clientlen, clientAddrReadable, sizeof(clientAddrReadable), NULL, 0, NI_NUMERICHOST) != 0) {
            printf("Errore risoluzione client.\n");
            closesocket(serverSock);
            WSACleanup();
            return 1;
        }
        ZeroMemory(message, BUFSIZE);
        msglen = recv(clientSock, message, BUFSIZE, 0);
        if (addRequest(&reqMutex, &reqCond, folder, clientAddrReadable, message, clientSock) != 0) {
            break;
        }
    }
    /* NOTE(review): the pool handles and ID array are not joined/freed here;
     * the process exits right after in main(). */
    closesocket(serverSock);
    WSACleanup();
    return 0;
}
/* Prints the usage message; if 'command' is anything other than "-h", it is
 * treated as an invalid command and an extra warning line is printed first. */
void showHelp(char *command) {
    printf("server~ ");
    if (strcmp(command, "-h") != 0)
        printf("Comando non valido.\n\t");
    printf("Usage: {comando_1} [valore_1] ... {comando_n} [valore_n]\n\t\
Ogni valore e' marcato come opzionale, ma puo' essere obbligatorio a seconda del comando che lo precede.\n\n\t\
Comandi (valori obbligatori):\n\t\
-c\t obbligatorio, specifica la cartella di partenza\n\t\
\t ignora la voce folder=<dir/to/start/with>\n\t\
-p\t specifica la porta TCP sulla quale restare in ascolto; default: 8888\n\t\
\t ignora la voce port=<portNum>\n\t\
-n\t specifica il numero di thread da utilizzare; default: 1\n\t\
\t ignora la voce threadNumber=<threadNum>\n\n\t\
Comandi (nessun valore necessario):\n\t\
-h\t mostra questo messaggio\n\n\t\
Dettagli:\n\t\
Tutti i parametri possono essere definiti tramite il file misc/server.conf, ma ignorati se specificati tramite riga di comando.\n\t\
In particolare, l'opzione -c non e' obbligatoria se la cartella e' specificata in tale file.\n");
    return;
}
/* Entry point: prepares the log directory and file, loads defaults from
 * misc/server.conf, applies command-line overrides (-c folder, -p port,
 * -n threads, -h help), then runs the server until it stops. */
int main(int argc, char *argv[]) {
    // Create the log directory; an already-existing directory is fine.
    BOOL r = CreateDirectory("misc",NULL);
    if (r != TRUE && GetLastError() != ERROR_ALREADY_EXISTS) {
        printf("Errore creazione directory di log.\n");
        return 1;
    }
    // Truncate/create the log file up front so writeLog can append later.
    FILE *srvlog = fopen(LOGFILE, "w");
    if (srvlog == NULL) {
        printf("Errore creazione file di log.\n");
        return 1;
    }
    fclose(srvlog);
    // Defaults, then configuration file, then command line (highest priority).
    memset(folder,0,PATHLEN);
    port = 0;
    threadNum = -1;
    loadConfig(&port, folder, &threadNum);
    if (argc>1) {
        for (int i = 1; i<argc; i++) {
            if (strcmp(argv[i], "-c") == 0) {
                // NOTE(review): strstr(value, "-") == NULL rejects ANY value
                // containing a dash (e.g. "my-folder"), not just option-like
                // values -- confirm this is intended for all three switches.
                if (i + 1<argc && strstr(argv[i + 1], "-") == NULL) {
                    memset(folder, 0, PATHLEN);
                    strncpy(folder, argv[i + 1], strlen(argv[i + 1]));
                    i++; // consume the value
                }
                else {
                    showHelp(argv[i]);
                }
            }
            else if (strcmp(argv[i], "-p") == 0) {
                if (i + 1<argc && strstr(argv[i + 1], "-") == NULL) {
                    port = (unsigned short)atoi(argv[i + 1]);
                    i++;
                }
                else {
                    showHelp(argv[i]);
                }
            }
            else if (strcmp(argv[i], "-n") == 0) {
                if (i + 1<argc && strstr(argv[i + 1], "-") == NULL) {
                    threadNum = atoi(argv[i + 1]);
                    i++;
                }
                else {
                    showHelp(argv[i]);
                }
            }
            else
                showHelp(argv[i]);
        }
    }
    // The folder is mandatory: either from -c or from the config file.
    // NOTE(review): port/threadNum are not validated here; if neither the
    // config file nor the command line set them, executeServer receives
    // port 0 / threadNum -1 -- verify loadConfig always provides defaults.
    if (strcmp(folder, "\0") == 0) {
        showHelp(argv[0]);
        return 1;
    }
    // Initialize the globals shared with the worker pool.
    nextReqID = 0;
    numReqs = 0;
    InitializeSRWLock(&reqMutex);
    InitializeConditionVariable(&reqCond);
    run = true;
    executeServer(folder,port,threadNum);
    // Signal background threads to stop.
    // NOTE(review): the workers are never joined before process exit.
    run = false;
    return 0;
}
|
FRICP.h | #ifndef FRICP_H
#define FRICP_H
#include "ICP.h"
#include "median.h"
#include <AndersonAcceleration.h>
#include <eigen/unsupported/Eigen/MatrixFunctions>
#include <cmath>
#include <limits>
#include <type_traits>
#include <vector>
#define SAME_THRESHOLD 1e-6
// Approximate floating-point equality within 'ulp' units in the last place:
// the machine epsilon is scaled to the magnitude of the operands, and any
// difference below the smallest normal value (subnormal result) is treated
// as equal. Enabled only for non-integer types.
template<class T>
typename std::enable_if<!std::numeric_limits<T>::is_integer, bool>::type
almost_equal(T x, T y, int ulp)
{
    const T diff = std::fabs(x - y);
    const T scaled_eps = std::numeric_limits<T>::epsilon() * std::fabs(x + y) * ulp;
    return diff <= scaled_eps || diff < std::numeric_limits<T>::min();
}
template<int N>
class FRICP
{
public:
typedef double Scalar;
typedef Eigen::Matrix<Scalar, N, Eigen::Dynamic> MatrixNX;
typedef Eigen::Matrix<Scalar, N, N> MatrixNN;
typedef Eigen::Matrix<Scalar, N+1, N+1> AffineMatrixN;
typedef Eigen::Transform<Scalar, N, Eigen::Affine> AffineNd;
typedef Eigen::Matrix<Scalar, N, 1> VectorN;
typedef nanoflann::KDTreeAdaptor<MatrixNX, N, nanoflann::metric_L2_Simple> KDtree;
typedef Eigen::Matrix<Scalar, 6, 1> Vector6;
double test_total_construct_time=.0;
double test_total_solve_time=.0;
int test_total_iters=0;
FRICP(){};
~FRICP(){};
private:
// Matrix logarithm of a rigid-motion matrix T via real Schur decomposition:
// T = U R U^T with R quasi-triangular. Each 2x2 rotation block found on R's
// diagonal contributes a rotation angle theta to the skew-symmetric part
// (mat_B) and to the factor mat_V used to map the translation column.
// The result is assembled in the Schur basis and rotated back with U.
AffineMatrixN LogMatrix(const AffineMatrixN& T)
{
    Eigen::RealSchur<AffineMatrixN> schur(T);
    AffineMatrixN U = schur.matrixU();
    AffineMatrixN R = schur.matrixT();
    std::vector<bool> selected(N, true);        // diagonal entries not yet paired into a block
    MatrixNN mat_B = MatrixNN::Zero(N, N);      // skew-symmetric log of the rotation part
    MatrixNN mat_V = MatrixNN::Identity(N, N);  // factor applied to the translation column
    for (int i = 0; i < N; i++)
    {
        // A diagonal entry != 1 marks one half of a 2x2 rotation block;
        // its partner j has (numerically) the same cosine on the diagonal.
        if (selected[i] && fabs(R(i, i) - 1)> SAME_THRESHOLD)
        {
            int pair_second = -1;
            for (int j = i + 1; j <N; j++)
            {
                if (fabs(R(j, j) - R(i, i)) < SAME_THRESHOLD)
                {
                    pair_second = j;
                    selected[j] = false;
                    break;
                }
            }
            if (pair_second > 0)
            {
                selected[i] = false;
                R(i, i) = R(i, i) < -1 ? -1 : R(i, i);  // clamp the cosine for acos
                double theta = acos(R(i, i));
                // The sign of the off-diagonal entry fixes the rotation direction.
                if (R(i, pair_second) < 0)
                {
                    theta = -theta;
                }
                mat_B(i, pair_second) += theta;
                mat_B(pair_second, i) += -theta;
                mat_V(i, pair_second) += -theta / 2;
                mat_V(pair_second, i) += theta / 2;
                // Diagonal correction of V for this block.
                double coeff = 1 - (theta * R(i, pair_second)) / (2 * (1 - R(i, i)));
                mat_V(i, i) += -coeff;
                mat_V(pair_second, pair_second) += -coeff;
            }
        }
    }
    // Assemble the log in the Schur basis: skew block + transformed translation.
    AffineMatrixN LogTrim = AffineMatrixN::Zero();
    LogTrim.block(0, 0, N, N) = mat_B;
    LogTrim.block(0, N, N, 1) = mat_V * R.block(0, N, N, 1);
    // Rotate back into the original basis.
    AffineMatrixN res = U * LogTrim * U.transpose();
    return res;
}
// Packs a rigid transform into a 6-vector: XYZ Euler angles of the rotation
// in the head, translation in the tail.
inline Vector6 RotToEuler(const AffineNd& T)
{
    Vector6 pose;
    pose.head(3) = T.rotation().eulerAngles(0, 1, 2);
    pose.tail(3) = T.translation();
    return pose;
}
// Builds a homogeneous (N+1)x(N+1) transform from a 6-vector: v(0..2) are
// XYZ Euler angles, v(3..5) the translation. Inverse of RotToEuler.
// NOTE(review): the 3x3 block sizes are hard-coded, so this only makes
// sense for N == 3; 'Vector3' is presumably brought in by ICP.h -- not
// visible in this file.
inline AffineMatrixN EulerToRot(const Vector6& v)
{
    // Compose the rotation as Rx * Ry * Rz from the three Euler angles.
    MatrixNN s (Eigen::AngleAxis<Scalar>(v(0), Vector3::UnitX())
                * Eigen::AngleAxis<Scalar>(v(1), Vector3::UnitY())
                * Eigen::AngleAxis<Scalar>(v(2), Vector3::UnitZ()));
    AffineMatrixN m = AffineMatrixN::Zero();
    m.block(0,0,3,3) = s;
    m(3,3) = 1;
    m.col(3).head(3) = v.tail(3);  // translation in the last column
    return m;
}
// Extracts the 6 twist coordinates from a 4x4 se(3) matrix: the rotation
// part (w) from the skew-symmetric block, the translation part (u) from
// the last column. Inverse of VecToLog.
inline Vector6 LogToVec(const Eigen::Matrix4d& LogT)
{
    Vector6 v;
    v << -LogT(1, 2), LogT(0, 2), -LogT(0, 1),
         LogT(0, 3), LogT(1, 3), LogT(2, 3);
    return v;
}
// Builds the 4x4 se(3) matrix from 6 twist coordinates: a skew-symmetric
// block from v[0..2] and the translation column from v[3..5]; the bottom
// row stays zero. Inverse of LogToVec.
inline AffineMatrixN VecToLog(const Vector6& v)
{
    AffineMatrixN m = AffineMatrixN::Zero();
    m(0, 1) = -v[2]; m(0, 2) =  v[1]; m(0, 3) = v[3];
    m(1, 0) =  v[2]; m(1, 2) = -v[0]; m(1, 3) = v[4];
    m(2, 0) = -v[1]; m(2, 1) =  v[0]; m(2, 3) = v[5];
    return m;
}
double FindKnearestMed(const KDtree& kdtree,
const MatrixNX& X, int nk)
{
Eigen::VectorXd X_nearest(X.cols());
#pragma omp parallel for
for(int i = 0; i<X.cols(); i++)
{
int* id = new int[nk];
double *dist = new double[nk];
kdtree.query(X.col(i).data(), nk, id, dist);
Eigen::VectorXd k_dist = Eigen::Map<Eigen::VectorXd>(dist, nk);
igl::median(k_dist.tail(nk-1), X_nearest[i]);
delete[]id;
delete[]dist;
}
double med;
igl::median(X_nearest, med);
return sqrt(med);
}
/// Find self normal edge median of point cloud
/// Find self normal edge median of point cloud: for each point, project the
/// offsets to its nk-1 nearest neighbors onto the point's own normal, take
/// the per-point median of those absolute projections, and return the
/// median of these medians over the whole cloud.
double FindKnearestNormMed(const KDtree& kdtree, const Eigen::Matrix3Xd & X, int nk, const Eigen::Matrix3Xd & norm_x)
{
    Eigen::VectorXd X_nearest(X.cols());
#pragma omp parallel for
    for(int i = 0; i<X.cols(); i++)
    {
        // std::vector instead of raw new[]/delete[]: exception-safe, no
        // leak if query() throws, and automatically freed per iteration.
        std::vector<int> id(nk);
        std::vector<double> dist(nk);
        kdtree.query(X.col(i).data(), nk, id.data(), dist.data());
        Eigen::VectorXd k_dist = Eigen::Map<Eigen::VectorXd>(dist.data(), nk);
        for(int s = 1; s<nk; s++)
        {
            // Replace the raw distance with |(p_s - p_0) . n_0|, the edge
            // length projected onto the first neighbor's normal.
            k_dist[s] = std::abs((X.col(id[s]) - X.col(id[0])).dot(norm_x.col(id[0])));
        }
        // Skip entry 0: it is the query point itself.
        igl::median(k_dist.tail(nk-1), X_nearest[i]);
    }
    double med;
    igl::median(X_nearest, med);
    return med;
}
/// Weighted rigid registration (Kabsch/Umeyama): returns the rigid transform
/// minimizing sum_i w_i ||T x_i - y_i||^2 for corresponding columns of X, Y.
/// X and Y are de-meaned in place during the computation and restored before
/// returning (hence the non-const references).
template <typename Derived1, typename Derived2, typename Derived3>
AffineNd point_to_point(Eigen::MatrixBase<Derived1>& X,
                        Eigen::MatrixBase<Derived2>& Y,
                        const Eigen::MatrixBase<Derived3>& w) {
    int dim = X.rows();
    /// Normalize weight vector
    Eigen::VectorXd w_normalized = w / w.sum();
    /// De-mean: weighted centroids of both clouds
    Eigen::VectorXd X_mean(dim), Y_mean(dim);
    for (int i = 0; i<dim; ++i) {
        X_mean(i) = (X.row(i).array()*w_normalized.transpose().array()).sum();
        Y_mean(i) = (Y.row(i).array()*w_normalized.transpose().array()).sum();
    }
    X.colwise() -= X_mean;
    Y.colwise() -= Y_mean;
    /// Compute transformation: SVD of the weighted cross-covariance
    AffineNd transformation;
    MatrixXX sigma = X * w_normalized.asDiagonal() * Y.transpose();
    Eigen::JacobiSVD<MatrixXX> svd(sigma, Eigen::ComputeFullU | Eigen::ComputeFullV);
    // If det(U)*det(V) < 0 the optimum would be a reflection; flip the sign
    // of the last singular direction to force a proper rotation.
    if (svd.matrixU().determinant()*svd.matrixV().determinant() < 0.0) {
        VectorN S = VectorN::Ones(dim); S(dim-1) = -1.0;
        transformation.linear() = svd.matrixV()*S.asDiagonal()*svd.matrixU().transpose();
    }
    else {
        transformation.linear() = svd.matrixV()*svd.matrixU().transpose();
    }
    // Translation maps the X centroid onto the Y centroid.
    transformation.translation() = Y_mean - transformation.linear()*X_mean;
    /// Re-apply mean: restore the caller's data
    X.colwise() += X_mean;
    Y.colwise() += Y_mean;
    /// Return transformation
    return transformation;
}
/// One linearized point-to-plane ICP step (small-angle approximation):
/// minimizes sum_i w_i ((x_i - y_i).n_i - u_i)^2 over a rotation/translation
/// increment by assembling 6x6 normal equations (rotation parameters first,
/// then translation) and solving with LDLT.
/// X and Y are de-meaned around the weighted X centroid in place and
/// restored before returning; the centroid shift is folded back into the
/// translation at the end.
template <typename Derived1, typename Derived2, typename Derived3, typename Derived4, typename Derived5>
Eigen::Affine3d point_to_plane(Eigen::MatrixBase<Derived1>& X,
                               Eigen::MatrixBase<Derived2>& Y,
                               const Eigen::MatrixBase<Derived3>& Norm,
                               const Eigen::MatrixBase<Derived4>& w,
                               const Eigen::MatrixBase<Derived5>& u) {
    typedef Eigen::Matrix<double, 6, 6> Matrix66;
    typedef Eigen::Matrix<double, 6, 1> Vector6;
    typedef Eigen::Block<Matrix66, 3, 3> Block33;
    /// Normalize weight vector
    Eigen::VectorXd w_normalized = w / w.sum();
    /// De-mean around the weighted X centroid (improves conditioning)
    Eigen::Vector3d X_mean;
    for (int i = 0; i<3; ++i)
        X_mean(i) = (X.row(i).array()*w_normalized.transpose().array()).sum();
    X.colwise() -= X_mean;
    Y.colwise() -= X_mean;
    /// Prepare LHS and RHS of the 6x6 normal equations
    Matrix66 LHS = Matrix66::Zero();
    Vector6 RHS = Vector6::Zero();
    Block33 TL = LHS.topLeftCorner<3, 3>();      // rotation-rotation block
    Block33 TR = LHS.topRightCorner<3, 3>();     // rotation-translation block
    Block33 BR = LHS.bottomRightCorner<3, 3>();  // translation-translation block
    // C.col(i) = x_i x n_i is the Jacobian of the residual w.r.t. rotation.
    Eigen::MatrixXd C = Eigen::MatrixXd::Zero(3, X.cols());
#pragma omp parallel
    {
#pragma omp for
        for (int i = 0; i<X.cols(); i++) {
            C.col(i) = X.col(i).cross(Norm.col(i));
        }
        // Each section accumulates a disjoint part of LHS/RHS, so the
        // sections can run concurrently without clashing.
#pragma omp sections nowait
        {
#pragma omp section
            for (int i = 0; i<X.cols(); i++) TL.selfadjointView<Eigen::Upper>().rankUpdate(C.col(i), w(i));
#pragma omp section
            for (int i = 0; i<X.cols(); i++) TR += (C.col(i)*Norm.col(i).transpose())*w(i);
#pragma omp section
            for (int i = 0; i<X.cols(); i++) BR.selfadjointView<Eigen::Upper>().rankUpdate(Norm.col(i), w(i));
#pragma omp section
            for (int i = 0; i<C.cols(); i++) {
                // Signed point-to-plane residual (with per-pair offset u_i).
                double dist_to_plane = -((X.col(i) - Y.col(i)).dot(Norm.col(i)) - u(i))*w(i);
                RHS.head<3>() += C.col(i)*dist_to_plane;
                RHS.tail<3>() += Norm.col(i)*dist_to_plane;
            }
        }
    }
    // Only the upper triangle was filled; mirror it before solving.
    LHS = LHS.selfadjointView<Eigen::Upper>();
    /// Compute transformation: solve for [rot angles; translation]
    Eigen::Affine3d transformation;
    Eigen::LDLT<Matrix66> ldlt(LHS);
    RHS = ldlt.solve(RHS);
    // Rebuild a rotation from the (small) solved angles as Rx * Ry * Rz.
    transformation = Eigen::AngleAxisd(RHS(0), Eigen::Vector3d::UnitX()) *
        Eigen::AngleAxisd(RHS(1), Eigen::Vector3d::UnitY()) *
        Eigen::AngleAxisd(RHS(2), Eigen::Vector3d::UnitZ());
    transformation.translation() = RHS.tail<3>();
    /// Apply transformation
    /// Re-apply mean: restore the caller's data
    X.colwise() += X_mean;
    Y.colwise() += X_mean;
    // Fold the de-meaning shift back into the translation.
    transformation.translation() += X_mean - transformation.linear()*X_mean;
    /// Return transformation
    return transformation;
}
/// One Gauss-Newton step of the point-to-plane energy, parameterized in the
/// matrix-logarithm (se(3)) coordinates of the current transform Tk.
/// Builds the residuals r_i = sqrt(w_i) * n_i.dot(R x_i + t - y_i) and their
/// Jacobian with respect to the 6 log-coordinates (using closed-form
/// derivatives of the exponential map), solves the normal equations for the
/// update direction `dir`, and returns g^T d (negative for a descent direction).
/// @param X       source points, one per column
/// @param Y       matched target points, one per column
/// @param norm_y  target normals, one per column
/// @param w       per-pair weights (normalized internally)
/// @param Tk      current 4x4 rigid transform
/// @param[out] dir solved 6-dof update direction in log coordinates
/// @return the directional derivative g^T * dir
template <typename Derived1, typename Derived2, typename Derived3, typename Derived4>
double point_to_plane_gaussnewton(const Eigen::MatrixBase<Derived1>& X,
                                  const Eigen::MatrixBase<Derived2>& Y,
                                  const Eigen::MatrixBase<Derived3>& norm_y,
                                  const Eigen::MatrixBase<Derived4>& w,
                                  Matrix44 Tk, Vector6& dir) {
    typedef Eigen::Matrix<double, 6, 6> Matrix66;
    typedef Eigen::Matrix<double, 12, 6> Matrix126;
    typedef Eigen::Matrix<double, 9, 3> Matrix93;
    typedef Eigen::Block<Matrix126, 9, 3> Block93;
    typedef Eigen::Block<Matrix126, 3, 3> Block33;
    typedef Eigen::Matrix<double, 12, 1> Vector12;
    typedef Eigen::Matrix<double, 9, 1> Vector9;
    typedef Eigen::Matrix<double, 4, 2> Matrix42;
    /// Normalize weight vector
    Eigen::VectorXd w_normalized = w / w.sum();
    /// Prepare LHS and RHS
    Matrix66 LHS = Matrix66::Zero();
    Vector6 RHS = Vector6::Zero();
    // Log coordinates of Tk: (a, b, c) are the rotation part, u the
    // translational part; B is the corresponding skew-symmetric matrix.
    Vector6 log_T = LogToVec(LogMatrix(Tk));
    Matrix33 B = VecToLog(log_T).block(0, 0, 3, 3);
    double a = log_T[0];
    double b = log_T[1];
    double c = log_T[2];
    Matrix33 R = Tk.block(0, 0, 3, 3);
    Vector3 t = Tk.block(0, 3, 3, 1);
    Vector3 u = log_T.tail(3);
    // dbdw / db2dw: derivatives of B and B^2 w.r.t. (a, b, c), stacked as
    // three 3x3 blocks (rows 0-2 for a, 3-5 for b, 6-8 for c).
    Matrix93 dbdw = Matrix93::Zero();
    dbdw(1, 2) = dbdw(5, 0) = dbdw(6, 1) = -1;
    dbdw(2, 1) = dbdw(3, 2) = dbdw(7, 0) = 1;
    Matrix93 db2dw = Matrix93::Zero();
    db2dw(3, 1) = db2dw(4, 0) = db2dw(6, 2) = db2dw(8, 0) = a;
    db2dw(0, 1) = db2dw(1, 0) = db2dw(7, 2) = db2dw(8, 1) = b;
    db2dw(0, 2) = db2dw(2, 0) = db2dw(4, 2) = db2dw(5, 1) = c;
    db2dw(1, 1) = db2dw(2, 2) = -2 * a;
    db2dw(3, 0) = db2dw(5, 2) = -2 * b;
    db2dw(6, 0) = db2dw(7, 1) = -2 * c;
    // Rodrigues coefficients and their theta-derivatives; column 0 feeds the
    // rotation derivative, column 1 the translation derivative.  The Taylor
    // fallback below avoids division by ~0 for very small rotations.
    double theta = std::sqrt(a*a + b*b + c*c);
    double st = sin(theta), ct = cos(theta);
    Matrix42 coeff = Matrix42::Zero();
    if (theta>SAME_THRESHOLD)
    {
        coeff << st / theta, (1 - ct) / (theta*theta),
            (theta*ct - st) / (theta*theta*theta), (theta*st - 2 * (1 - ct)) / pow(theta, 4),
            (1 - ct) / (theta*theta), (theta - st) / pow(theta, 3),
            (theta*st - 2 * (1 - ct)) / pow(theta, 4), (theta*(1 - ct) - 3 * (theta - st)) / pow(theta, 5);
    }
    else
        coeff(0, 0) = 1;
    Matrix93 tempB3;
    tempB3.block<3, 3>(0, 0) = a*B;
    tempB3.block<3, 3>(3, 0) = b*B;
    tempB3.block<3, 3>(6, 0) = c*B;
    Matrix33 B2 = B*B;
    Matrix93 temp2B3;
    temp2B3.block<3, 3>(0, 0) = a*B2;
    temp2B3.block<3, 3>(3, 0) = b*B2;
    temp2B3.block<3, 3>(6, 0) = c*B2;
    // Chain rule: derivative of R = exp(B) and of t w.r.t. the log coords.
    Matrix93 dRdw = coeff(0, 0)*dbdw + coeff(1, 0)*tempB3
        + coeff(2, 0)*db2dw + coeff(3, 0)*temp2B3;
    Vector9 dtdw = coeff(0, 1) * dbdw*u + coeff(1, 1) * tempB3*u
        + coeff(2, 1) * db2dw*u + coeff(3, 1)*temp2B3*u;
    // dtdu is the left-Jacobian V(theta) = I + (1-ct)/th^2 B + (th-st)/th^3 B^2.
    Matrix33 dtdu = Matrix33::Identity() + coeff(2, 0)*B + coeff(2, 1) * B2;
    Eigen::VectorXd rk(X.cols());
    Eigen::MatrixXd Jk(X.cols(), 6);
    // NOTE(review): this `omp for` is not enclosed in a `parallel` region at
    // this call site, so it runs serially unless the caller supplies one —
    // confirm that is the intent.
#pragma omp for
    for (int i = 0; i < X.cols(); i++)
    {
        Vector3 xi = X.col(i);
        Vector3 yi = Y.col(i);
        Vector3 ni = norm_y.col(i);
        double wi = sqrt(w_normalized[i]);
        // Per-point gradient of the weighted residual w.r.t. R and t.
        Matrix33 dedR = wi*ni * xi.transpose();
        Vector3 dedt = wi*ni;
        Vector6 dedx;
        dedx(0) = (dedR.cwiseProduct(dRdw.block(0, 0, 3, 3))).sum()
            + dedt.dot(dtdw.head<3>());
        dedx(1) = (dedR.cwiseProduct(dRdw.block(3, 0, 3, 3))).sum()
            + dedt.dot(dtdw.segment<3>(3));
        dedx(2) = (dedR.cwiseProduct(dRdw.block(6, 0, 3, 3))).sum()
            + dedt.dot(dtdw.tail<3>());
        dedx(3) = dedt.dot(dtdu.col(0));
        dedx(4) = dedt.dot(dtdu.col(1));
        dedx(5) = dedt.dot(dtdu.col(2));
        Jk.row(i) = dedx.transpose();
        rk[i] = wi * ni.dot(R*xi-yi+t);
    }
    // Gauss-Newton normal equations; COD handles rank deficiency gracefully.
    LHS = Jk.transpose() * Jk;
    RHS = -Jk.transpose() * rk;
    Eigen::CompleteOrthogonalDecomposition<Matrix66> cod_(LHS);
    dir = cod_.solve(RHS);
    double gTd = -RHS.dot(dir);
    return gTd;
}
public:
/// Reweighted point-to-point ICP with an optional dynamic Welsch kernel and
/// optional Anderson acceleration (AA) of the fixed-point iteration.
/// @param X            source points, one per column; on exit holds the aligned
///                     source expressed in the target frame (target mean re-added)
/// @param Y            target points, one per column
/// @param source_mean  mean previously subtracted from the source cloud
/// @param target_mean  mean previously subtracted from the target cloud
/// @param par          in/out parameters: inputs (init transform, kernel, AA
///                     settings, stop threshold) and outputs (convergence
///                     energy/MSE, resulting transform, optional log file)
void point_to_point(MatrixNX& X, MatrixNX& Y, VectorN& source_mean,
                    VectorN& target_mean, ICP::Parameters& par){
    /// Build kd-tree on the target cloud for nearest-neighbor queries
    KDtree kdtree(Y);
    /// Buffers
    MatrixNX Q = MatrixNX::Zero(N, X.cols());  // closest target point per source point
    VectorX W = VectorX::Zero(X.cols());       // per-point residual norms (robust weights later)
    AffineNd T;
    if (par.use_init) T.matrix() = par.init_trans;
    else T = AffineNd::Identity();
    MatrixXX To1 = T.matrix();
    MatrixXX To2 = T.matrix();                 // previous transform, for the stopping test
    int nPoints = X.cols();
    //Anderson acceleration state
    AndersonAcceleration accelerator_;
    AffineNd SVD_T = T;                        // last un-accelerated transform (AA fallback)
    double energy = .0, last_energy = std::numeric_limits<double>::max();
    //ground truth point cloud (for MSE reporting only)
    MatrixNX X_gt = X;
    if(par.has_groundtruth)
    {
        VectorN temp_trans = par.gt_trans.col(N).head(N);
        X_gt.colwise() += source_mean;
        X_gt = par.gt_trans.block(0, 0, N, N) * X_gt;
        X_gt.colwise() += temp_trans - target_mean;
    }
    //output parameters
    std::string file_out = par.out_path;
    std::vector<double> times, energys, gt_mses;
    double begin_time, end_time, run_time;
    double gt_mse = 0.0;
    // dynamic Welsch parameters: nu1 shrinks geometrically toward nu2
    double nu1 = 1, nu2 = 1;
    double begin_init = omp_get_wtime();
    //Find initial closest points
#pragma omp parallel for
    for (int i = 0; i<nPoints; ++i) {
        VectorN cur_p = T * X.col(i);
        Q.col(i) = Y.col(kdtree.closest(cur_p.data()));
        W[i] = (cur_p - Q.col(i)).norm();
    }
    if(par.f == ICP::WELSCH)
    {
        //dynamic welsch: nu2 from k-nearest distances of Y with itself,
        //nu1 from the median of the initial residuals
        nu2 = par.nu_end_k * FindKnearestMed(kdtree, Y, 7);
        double med1;
        igl::median(W, med1);
        nu1 = par.nu_begin_k * med1;
    }
    double end_init = omp_get_wtime();
    double init_time = end_init - begin_init;  // NOTE(review): computed but never used
    //AA init
    accelerator_.init(par.anderson_m, (N + 1) * (N + 1), LogMatrix(T.matrix()).data());
    begin_time = omp_get_wtime();
    bool stop1 = false;
    // Outer loop: anneal nu1 toward nu2 (WELSCH); runs once for other kernels.
    while(!stop1)
    {
        /// run ICP
        int icp = 0;
        for (; icp<par.max_icp; ++icp)
        {
            bool accept_aa = false;
            energy = get_energy(par.f, W, nu1);
            if (par.use_AA)
            {
                // Accept the accelerated iterate only if it decreased the
                // energy; otherwise roll back to the plain SVD transform.
                if (energy < last_energy) {
                    last_energy = energy;
                    accept_aa = true;
                }
                else{
                    accelerator_.replace(LogMatrix(SVD_T.matrix()).data());
                    //Re-find the closest points under the rolled-back transform
#pragma omp parallel for
                    for (int i = 0; i<nPoints; ++i) {
                        VectorN cur_p = SVD_T * X.col(i);
                        Q.col(i) = Y.col(kdtree.closest(cur_p.data()));
                        W[i] = (cur_p - Q.col(i)).norm();
                    }
                    last_energy = get_energy(par.f, W, nu1);
                }
            }
            else
                last_energy = energy;
            end_time = omp_get_wtime();
            run_time = end_time - begin_time;
            if(par.has_groundtruth)
            {
                gt_mse = (T*X - X_gt).squaredNorm()/nPoints;
            }
            // save per-iteration results
            energys.push_back(last_energy);
            times.push_back(run_time);
            gt_mses.push_back(gt_mse);
            if (par.print_energy)
                std::cout << "icp iter = " << icp << ", Energy = " << last_energy
                          << ", time = " << run_time << std::endl;
            // Turn residual norms in W into robust weights for the solver.
            robust_weight(par.f, W, nu1);
            // Rotation and translation update (weighted closed-form solve)
            T = point_to_point(X, Q, W);
            //Anderson acceleration in the matrix-log space of T
            SVD_T = T;
            if (par.use_AA)
            {
                AffineMatrixN Trans = (Eigen::Map<const AffineMatrixN>(accelerator_.compute(LogMatrix(T.matrix()).data()).data(), N+1, N+1)).exp();
                T.linear() = Trans.block(0,0,N,N);
                T.translation() = Trans.block(0,N,N,1);
            }
            // Find closest points under the updated transform
#pragma omp parallel for
            for (int i = 0; i<nPoints; ++i) {
                VectorN cur_p = T * X.col(i) ;
                Q.col(i) = Y.col(kdtree.closest(cur_p.data()));
                W[i] = (cur_p - Q.col(i)).norm();
            }
            /// Stopping criterion: change in the transform matrix
            double stop2 = (T.matrix() - To2).norm();
            To2 = T.matrix();
            if(stop2 < par.stop)
            {
                break;
            }
        }
        if(par.f!= ICP::WELSCH)
            stop1 = true;
        else
        {
            // Outer convergence once nu1 has annealed down to nu2.
            stop1 = fabs(nu1 - nu2)<SAME_THRESHOLD? true: false;
            nu1 = nu1*par.nu_alpha > nu2? nu1*par.nu_alpha : nu2;
            if(par.use_AA)
            {
                // Restart AA: the energy landscape changed with the new nu1.
                accelerator_.reset(LogMatrix(T.matrix()).data());
                last_energy = std::numeric_limits<double>::max();
            }
        }
    }
    ///calc convergence energy
    last_energy = get_energy(par.f, W, nu1);
    X = T * X;
    gt_mse = (X-X_gt).squaredNorm()/nPoints;
    // Express the transform in the original (un-centered) frames.
    T.translation() += - T.rotation() * source_mean + target_mean;
    X.colwise() += target_mean;
    ///save convergence result
    par.convergence_energy = last_energy;
    par.convergence_gt_mse = gt_mse;
    par.res_trans = T.matrix();
    ///output
    if (par.print_output)
    {
        std::ofstream out_res(par.out_path);
        if (!out_res.is_open())
        {
            std::cout << "Can't open out file " << par.out_path << std::endl;
        }
        //output time and energy
        out_res.precision(16);
        for (int i = 0; i<times.size(); i++)
        {
            out_res << times[i] << " "<< energys[i] << " " << gt_mses[i] << std::endl;
        }
        out_res.close();
        std::cout << " write res to " << par.out_path << std::endl;
    }
}
/// Reweighted ICP with the point-to-plane metric (closed-form linearized
/// update per iteration, no Anderson acceleration in this variant).
/// @param X            source points (one 3D point per column); on exit holds the
///                     aligned source in the target frame
/// @param Y            target points (one 3D point per column)
/// @param norm_x       source normals; rotated in place by the final transform
/// @param norm_y       target normals (one 3D normal per column)
/// @param source_mean  mean previously subtracted from the source cloud
/// @param target_mean  mean previously subtracted from the target cloud
/// @param par          in/out parameters (kernel, stop threshold, outputs)
// template <typename Derived1, typename Derived2, typename Derived3>
void point_to_plane(Eigen::Matrix3Xd& X,
                    Eigen::Matrix3Xd& Y, Eigen::Matrix3Xd& norm_x, Eigen::Matrix3Xd& norm_y,
                    Eigen::Vector3d& source_mean, Eigen::Vector3d& target_mean,
                    ICP::Parameters &par) {
    /// Build kd-tree on the target cloud
    KDtree kdtree(Y);
    /// Buffers
    Eigen::Matrix3Xd Qp = Eigen::Matrix3Xd::Zero(3, X.cols());  // closest target points
    Eigen::Matrix3Xd Qn = Eigen::Matrix3Xd::Zero(3, X.cols());  // their normals
    Eigen::VectorXd W = Eigen::VectorXd::Zero(X.cols());        // |point-to-plane| residuals
    Eigen::Matrix3Xd ori_X = X;   // untouched copy; X is re-derived from it each iteration
    AffineNd T;
    if (par.use_init) T.matrix() = par.init_trans;
    else T = AffineNd::Identity();
    AffineMatrixN To1 = T.matrix();
    X = T*X;
    // Ground-truth-aligned cloud, used only for MSE reporting.
    Eigen::Matrix3Xd X_gt = X;
    if(par.has_groundtruth)
    {
        Eigen::Vector3d temp_trans = par.gt_trans.block(0, 3, 3, 1);
        X_gt = ori_X;
        X_gt.colwise() += source_mean;
        X_gt = par.gt_trans.block(0, 0, 3, 3) * X_gt;
        X_gt.colwise() += temp_trans - target_mean;
    }
    std::vector<double> times, energys, gt_mses;
    double begin_time, end_time, run_time;
    double gt_mse = 0.0;
    ///dynamic welsch, calc k-nearest points with itself;
    double begin_init = omp_get_wtime();
    //Anderson Acc state (NOTE(review): declared but not used in this variant)
    AndersonAcceleration accelerator_;
    AffineNd LG_T = T;
    double energy = 0.0, prev_res = std::numeric_limits<double>::max(), res = 0.0;
    // Find initial closest points and point-to-plane residuals
#pragma omp parallel for
    for (int i = 0; i<X.cols(); ++i) {
        int id = kdtree.closest(X.col(i).data());
        Qp.col(i) = Y.col(id);
        Qn.col(i) = norm_y.col(id);
        W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
    }
    double end_init = omp_get_wtime();
    double init_time = end_init - begin_init;  // NOTE(review): computed but never used
    begin_time = omp_get_wtime();
    int total_iter = 0;
    double test_total_time = 0.0;
    bool stop1 = false;
    // Outer loop runs exactly once here (stop1 is set unconditionally below);
    // kept for structural symmetry with the other ICP variants.
    while(!stop1)
    {
        /// ICP
        for(int icp=0; icp<par.max_icp; ++icp) {
            total_iter++;
            bool accept_aa = false;
            energy = get_energy(par.f, W, par.p);
            end_time = omp_get_wtime();
            run_time = end_time - begin_time;
            energys.push_back(energy);
            times.push_back(run_time);
            Eigen::VectorXd test_w = (X-Qp).colwise().norm();  // NOTE(review): unused (debug leftover?)
            if(par.has_groundtruth)
            {
                gt_mse = (X - X_gt).squaredNorm()/X.cols();
            }
            gt_mses.push_back(gt_mse);
            /// Compute robust weights from the residuals
            robust_weight(par.f, W, par.p);
            /// Rotation and translation update: one linearized point-to-plane
            /// solve, composed onto the current transform.
            T = point_to_plane(X, Qp, Qn, W, Eigen::VectorXd::Zero(X.cols()))*T;
            /// Find closest points under the updated transform
#pragma omp parallel for
            for(int i=0; i<X.cols(); i++) {
                X.col(i) = T * ori_X.col(i);
                int id = kdtree.closest(X.col(i).data());
                Qp.col(i) = Y.col(id);
                Qn.col(i) = norm_y.col(id);
                W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
            }
            if(par.print_energy)
                std::cout << "icp iter = " << total_iter << ", gt_mse = " << gt_mse
                          << ", energy = " << energy << std::endl;
            /// Stopping criterion: change in the transform matrix
            double stop2 = (T.matrix() - To1).norm();
            To1 = T.matrix();
            if(stop2 < par.stop) break;
        }
        stop1 = true;
    }
    par.res_trans = T.matrix();
    ///calc convergence energy from the final point-to-plane residuals
    W = (Qn.array()*(X - Qp).array()).colwise().sum().abs().transpose();
    energy = get_energy(par.f, W, par.p);
    gt_mse = (X - X_gt).squaredNorm() / X.cols();
    // Express the result in the original (un-centered) frames.
    T.translation().noalias() += -T.rotation()*source_mean + target_mean;
    X.colwise() += target_mean;
    norm_x = T.rotation()*norm_x;
    ///save convergence result (res_trans is intentionally refreshed with the
    ///de-meaned transform, overwriting the value stored above)
    par.convergence_energy = energy;
    par.convergence_gt_mse = gt_mse;
    par.res_trans = T.matrix();
    ///output
    if (par.print_output)
    {
        std::ofstream out_res(par.out_path);
        if (!out_res.is_open())
        {
            std::cout << "Can't open out file " << par.out_path << std::endl;
        }
        ///output time and energy
        out_res.precision(16);
        for (int i = 0; i<total_iter; i++)
        {
            out_res << times[i] << " "<< energys[i] << " " << gt_mses[i] << std::endl;
        }
        out_res.close();
        std::cout << " write res to " << par.out_path << std::endl;
    }
}
/// Reweighted ICP with the point-to-plane metric, solved by Gauss-Newton in
/// se(3) log coordinates, with a dynamic Welsch kernel and optional Anderson
/// acceleration (with an energy-guarded line-search fallback).
/// @param X            source points (one 3D point per column); on exit holds the
///                     aligned source in the target frame
/// @param Y            target points (one 3D point per column)
/// @param norm_x       source normals; rotated in place by the final transform
/// @param norm_y       target normals (one 3D normal per column)
/// @param source_mean  mean previously subtracted from the source cloud
/// @param target_mean  mean previously subtracted from the target cloud
/// @param par          in/out parameters (kernel, AA settings, outputs)
// template <typename Derived1, typename Derived2, typename Derived3>
void point_to_plane_GN(Eigen::Matrix3Xd& X,
                       Eigen::Matrix3Xd& Y, Eigen::Matrix3Xd& norm_x, Eigen::Matrix3Xd& norm_y,
                       Eigen::Vector3d& source_mean, Eigen::Vector3d& target_mean,
                       ICP::Parameters &par) {
    /// Build kd-tree on the target cloud
    KDtree kdtree(Y);
    /// Buffers
    Eigen::Matrix3Xd Qp = Eigen::Matrix3Xd::Zero(3, X.cols());  // closest target points
    Eigen::Matrix3Xd Qn = Eigen::Matrix3Xd::Zero(3, X.cols());  // their normals
    Eigen::VectorXd W = Eigen::VectorXd::Zero(X.cols());        // |point-to-plane| residuals
    Eigen::Matrix3Xd ori_X = X;   // untouched copy; X is re-derived from it each iteration
    AffineNd T;
    if (par.use_init) T.matrix() = par.init_trans;
    else T = AffineNd::Identity();
    AffineMatrixN To1 = T.matrix();
    X = T*X;
    // Ground-truth-aligned cloud, used only for MSE reporting.
    Eigen::Matrix3Xd X_gt = X;
    if(par.has_groundtruth)
    {
        Eigen::Vector3d temp_trans = par.gt_trans.block(0, 3, 3, 1);
        X_gt = ori_X;
        X_gt.colwise() += source_mean;
        X_gt = par.gt_trans.block(0, 0, 3, 3) * X_gt;
        X_gt.colwise() += temp_trans - target_mean;
    }
    std::vector<double> times, energys, gt_mses;
    double begin_time, end_time, run_time;
    // Fix: gt_mse was previously uninitialized; when par.has_groundtruth is
    // false it was pushed into gt_mses and printed without ever being set.
    double gt_mse = 0.0;
    ///dynamic welsch, calc k-nearest points with itself;
    double nu1 = 1, nu2 = 1;
    double begin_init = omp_get_wtime();
    //Anderson Acc state (operates on the 6-vector of log coordinates)
    AndersonAcceleration accelerator_;
    Vector6 LG_T;
    Vector6 Dir;   // Gauss-Newton update direction from the last solve
    //add time test
    double energy = 0.0, prev_energy = std::numeric_limits<double>::max();
    if(par.use_AA)
    {
        Eigen::Matrix4d log_T = LogMatrix(T.matrix());
        LG_T = LogToVec(log_T);
        accelerator_.init(par.anderson_m, 6, LG_T.data());
    }
    // Find initial closest points and point-to-plane residuals
#pragma omp parallel for
    for (int i = 0; i<X.cols(); ++i) {
        int id = kdtree.closest(X.col(i).data());
        Qp.col(i) = Y.col(id);
        Qn.col(i) = norm_y.col(id);
        W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
    }
    if(par.f == ICP::WELSCH)
    {
        // nu1 from the median initial residual, nu2 from target self-distances.
        double med1;
        igl::median(W, med1);
        nu1 =par.nu_begin_k * med1;
        nu2 = par.nu_end_k * FindKnearestNormMed(kdtree, Y, 7, norm_y);
    }
    double end_init = omp_get_wtime();
    double init_time = end_init - begin_init;
    begin_time = omp_get_wtime();
    int total_iter = 0;
    double test_total_time = 0.0;
    bool stop1 = false;
    par.max_icp = 6;
    // Outer loop: anneal nu1 toward nu2, gradually allowing more inner iters.
    while(!stop1)
    {
        par.max_icp = std::min(par.max_icp+1, 10);
        /// ICP
        for(int icp=0; icp<par.max_icp; ++icp) {
            total_iter++;
            int n_linsearch = 0;
            energy = get_energy(par.f, W, nu1);
            if(par.use_AA)
            {
                if(energy < prev_energy)
                {
                    prev_energy = energy;
                }
                else
                {
                    // AA iterate increased the energy: try one full GN step
                    // (alpha = 1 along Dir) and keep whichever state is better.
                    double alpha = 0.0;
                    Vector6 new_t = LG_T;
                    Eigen::VectorXd lowest_W = W;
                    Eigen::Matrix3Xd lowest_Qp = Qp;
                    Eigen::Matrix3Xd lowest_Qn = Qn;
                    Eigen::Affine3d lowest_T = T;
                    n_linsearch++;
                    alpha = 1;
                    new_t = LG_T + alpha * Dir;
                    T.matrix() = VecToLog(new_t).exp();
                    /// Find closest points under the trial transform
#pragma omp parallel for
                    for(int i=0; i<X.cols(); i++) {
                        X.col(i) = T * ori_X.col(i);
                        int id = kdtree.closest(X.col(i).data());
                        Qp.col(i) = Y.col(id);
                        Qn.col(i) = norm_y.col(id);
                        W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
                    }
                    double test_energy = get_energy(par.f, W, nu1);
                    if(test_energy < energy)
                    {
                        // Trial step accepted: restart AA from it.
                        accelerator_.reset(new_t.data());
                        energy = test_energy;
                    }
                    else
                    {
                        // Roll back to the saved state.
                        Qp = lowest_Qp;
                        Qn = lowest_Qn;
                        W = lowest_W;
                        T = lowest_T;
                    }
                    prev_energy = energy;
                }
            }
            else
            {
                prev_energy = energy;
            }
            end_time = omp_get_wtime();
            run_time = end_time - begin_time;
            energys.push_back(prev_energy);
            times.push_back(run_time);
            if(par.has_groundtruth)
            {
                gt_mse = (X - X_gt).squaredNorm()/X.cols();
            }
            gt_mses.push_back(gt_mse);
            /// Compute robust weights from the residuals
            robust_weight(par.f, W, nu1);
            /// Rotation and translation update: one Gauss-Newton step in the
            /// log coordinates, then (optionally) Anderson-accelerate it.
            point_to_plane_gaussnewton(ori_X, Qp, Qn, W, T.matrix(), Dir);
            LG_T = LogToVec(LogMatrix(T.matrix()));
            LG_T += Dir;
            T.matrix() = VecToLog(LG_T).exp();
            // Anderson acceleration
            if(par.use_AA)
            {
                Vector6 AA_t;
                AA_t = accelerator_.compute(LG_T.data());
                T.matrix() = VecToLog(AA_t).exp();
            }
            if(par.print_energy)
                std::cout << "icp iter = " << total_iter << ", gt_mse = " << gt_mse
                          << ", nu1 = " << nu1 << ", acept_aa= " << n_linsearch
                          << ", energy = " << prev_energy << std::endl;
            /// Find closest points under the updated transform
#pragma omp parallel for
            for(int i=0; i<X.cols(); i++) {
                X.col(i) = T * ori_X.col(i);
                int id = kdtree.closest(X.col(i).data());
                Qp.col(i) = Y.col(id);
                Qn.col(i) = norm_y.col(id);
                W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
            }
            /// Stopping criterion: change in the transform matrix
            double stop2 = (T.matrix() - To1).norm();
            To1 = T.matrix();
            if(stop2 < par.stop) break;
        }
        if(par.f == ICP::WELSCH)
        {
            // Outer convergence once nu1 has annealed down to nu2.
            stop1 = fabs(nu1 - nu2)<SAME_THRESHOLD? true: false;
            nu1 = nu1*par.nu_alpha > nu2 ? nu1*par.nu_alpha : nu2;
            if(par.use_AA)
            {
                // Restart AA: the energy landscape changed with the new nu1.
                accelerator_.reset(LogToVec(LogMatrix(T.matrix())).data());
                prev_energy = std::numeric_limits<double>::max();
            }
        }
        else
            stop1 = true;
    }
    par.res_trans = T.matrix();
    ///calc convergence energy from the final point-to-plane residuals
    W = (Qn.array()*(X - Qp).array()).colwise().sum().abs().transpose();
    energy = get_energy(par.f, W, nu1);
    gt_mse = (X - X_gt).squaredNorm() / X.cols();
    // Express the result in the original (un-centered) frames.
    T.translation().noalias() += -T.rotation()*source_mean + target_mean;
    X.colwise() += target_mean;
    norm_x = T.rotation()*norm_x;
    ///save convergence result
    par.convergence_energy = energy;
    par.convergence_gt_mse = gt_mse;
    par.res_trans = T.matrix();
    ///output
    if (par.print_output)
    {
        std::ofstream out_res(par.out_path);
        if (!out_res.is_open())
        {
            std::cout << "Can't open out file " << par.out_path << std::endl;
        }
        ///output time and energy
        out_res.precision(16);
        for (int i = 0; i<total_iter; i++)
        {
            out_res << times[i] << " "<< energys[i] << " " << gt_mses[i] << std::endl;
        }
        out_res.close();
        std::cout << " write res to " << par.out_path << std::endl;
    }
}
};
#endif
|
core_chegst.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zhegst.c, normal z -> c, Fri Sep 28 17:38:23 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_hegst
*
* Reduces a complex Hermitian-definite generalized eigenproblem to standard
* form.
*
* If ITYPE = 1, the problem is A*x = lambda*B*x,
* and A is overwritten by inv(U^H)*A*inv(U) or inv(L)*A*inv(L^H)
*
* If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or
* B*A*x = lambda*x, and A is overwritten by U*A*U^H or L^H*A*L.
*
*******************************************************************************
*
* @param[in] itype
* = 1: compute inv(U^H)*A*inv(U) or inv(L)*A*inv(L^H);
* = 2 or 3: compute U*A*U^H or L^H*A*L.
*
* @param[in] uplo
* If PlasmaUpper, upper triangle of A is stored and B is factored as
* U^H*U;
* If PlasmaLower, lower triangle of A is stored and B is factored as
* L*L^H.
*
* @param[in] n
* The order of the matrices A and B. N >= 0.
*
* @param[in,out] A
* On entry, the Hermitian matrix A. If UPLO = 'U', the leading
* N-by-N upper triangular part of A contains the upper
* triangular part of the matrix A, and the strictly lower
* triangular part of A is not referenced. If UPLO = 'L', the
* leading N-by-N lower triangular part of A contains the lower
* triangular part of the matrix A, and the strictly upper
* triangular part of A is not referenced.
*
* On exit, if INFO = 0, the transformed matrix, stored in the
* same format as A.
*
* @param[in] lda
* The leading dimension of the array A. LDA >= max(1,N).
*
* @param[in,out] B
* The triangular factor from the Cholesky factorization of B,
* as returned by CPOTRF.
*
* @param[in] ldb
* The leading dimension of the array B. LDB >= max(1,N).
*
******************************************************************************/
/* Reduces a complex Hermitian-definite generalized eigenproblem to standard
 * form by forwarding directly to LAPACKE's chegst worker.  See the Doxygen
 * block above for the meaning of itype/uplo and the A/B contents.
 * Returns LAPACKE's info code (0 on success). */
__attribute__((weak))
int plasma_core_chegst(int itype, plasma_enum_t uplo,
                       int n,
                       plasma_complex32_t *A, int lda,
                       plasma_complex32_t *B, int ldb)
{
    return LAPACKE_chegst_work(LAPACK_COL_MAJOR, itype, lapack_const(uplo),
                               n, A, lda, B, ldb);
}
/******************************************************************************/
/* Asynchronous (OpenMP task) wrapper around plasma_core_chegst.
 * Spawns one task that reads B and updates A in place; the depend clauses
 * serialize it against other tasks touching the same tiles.  The body is a
 * no-op if an earlier task in `sequence` already failed. */
void plasma_core_omp_chegst(int itype, plasma_enum_t uplo,
                            int n,
                            plasma_complex32_t *A, int lda,
                            plasma_complex32_t *B, int ldb,
                            plasma_sequence_t *sequence,
                            plasma_request_t *request)
{
    #pragma omp task depend(inout:A[0:lda*n]) \
                     depend(in:B[0:ldb*n])
    {
        // Skip the work (but still satisfy the dependences) on failure.
        if (sequence->status == PlasmaSuccess)
            plasma_core_chegst(itype, uplo,
                               n,
                               A, lda,
                               B, ldb);
    }
}
|
Ant-Colony.c | /*
Author: Makarios Christakis
Description:
Parallel implementation of the ant colony optimization algorithm for the
travelling salesman problem. The criterion by which the algorithm converges
is the %change in the average distance travelled by all the ant agents.
For the parameters below, the algorithm converges after 3 iterations
with:
Min Path Length: 4458074.00
Average Path Length: 4501838.00
Timed using time() on a 7th gen i7, Ubuntu 18.04 machine we get:
real 19m12,759s
user 129m38,110s
sys 0m1,071s
The serial implementation ran for about 80 minutes on the same machine
so we have reduced the runtime by a factor of 4.
*/
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// **********************************************************
// DEFINITIONS
#define N_POINTS 10000 //Number of cities to generate
#define N_AGENTS 8 // Number of ant agents
#define P 0.5 // Pheromone evaporation rate
#define PHEROMONE_INIT_VAL (float)1 // Initial pheromone values
// **********************************************************
// STRUCTS
struct AntAgent {
float pathLength; //distance travelled
int city_flags[N_POINTS]; //available = 1, unavailable = 0
int route[N_POINTS];
int initialCity;
int currentCity;
};
// **********************************************************
// GLOBAL VARS
float cities[N_POINTS][2] = {0};
float minPathLength = 0;
float avgPathLength = 0;
struct AntAgent ants[N_AGENTS];
float pheromones[N_POINTS][N_POINTS];
unsigned int seed = 159852753;
// **********************************************************
// Random unsigned int generator
// Linear-congruential PRNG (glibc-style constants): advances the global
// `seed` in place and returns the new state.
// NOTE(review): this `threadprivate` directive sits at block scope while
// `seed` is declared at file scope; OpenMP requires threadprivate to appear
// in the scope of the variable's declaration — confirm this compiles and
// actually gives each thread its own seed.
unsigned int randUint() {
#pragma omp threadprivate(seed)
    seed = seed * 1103515245 + 12345;
    return seed;
}
// **********************************************************
// Places every city at a uniformly random coordinate in [0, 1000] x [0, 1000].
void initVec() {
    for (int city = 0; city < N_POINTS; city++)
        for (int axis = 0; axis < 2; axis++)
            cities[city][axis] = (float)rand() / RAND_MAX * 1e3;
}
// Resets every pheromone trail to its initial value, rows in parallel.
void initPheromones() {
#pragma omp parallel for
    for (int row = 0; row < N_POINTS; row++)
        for (int col = 0; col < N_POINTS; col++)
            pheromones[row][col] = PHEROMONE_INIT_VAL;
}
// Re-initialises every ant for a new tour: clears the travelled distance,
// marks all cities as available, and drops the ant on a random start city
// (which is immediately marked as visited).
void resetAgents() {
    for (int i = 0; i < N_AGENTS; i++) {
        ants[i].pathLength = 0;
        // Set each flag to exactly 1.  The previous memset(..., 1, ...)
        // filled every *byte* with 0x01, producing ints of 0x01010101 —
        // it only worked because the flags are tested for truthiness.
        for (int c = 0; c < N_POINTS; c++)
            ants[i].city_flags[c] = 1;
        int start = rand() % N_POINTS;
        ants[i].initialCity = start;
        ants[i].currentCity = start;
        ants[i].route[0] = start;
        ants[i].city_flags[start] = 0;
    }
}
// **********************************************************
// Euclidean distance between two city indices on the grid.
float dist(int p1, int p2) {
    const float dx = cities[p1][0] - cities[p2][0];
    const float dy = cities[p1][1] - cities[p2][1];
    const float squared = dx * dx + dy * dy;
    return (float)sqrt(squared);
}
// Make each agent run through all the cities according to the algorithm's rules.
// For every ant (in parallel): at each step, score every unvisited city by
// sqrt(pheromone * 1/distance), draw a roulette-wheel sample over those
// scores, move to the chosen city, and finally close the tour back to the
// starting city.
void releaseAgents() {
#pragma omp parallel for
    for (int i = 0; i < N_AGENTS; i++) {
        float city_probs[N_POINTS] = {0};
        for (int j = 0; j < N_POINTS - 1; j++) {
            int register curr = ants[i].currentCity;
            // Uniform sample in [0, 1); scaled by the score total below.
            float prob = (float)randUint() / __UINT32_MAX__;
            float denominator = 0;
            //First pass: score all available cities.
            // NOTE(review): classic ACO uses pheromone^alpha * (1/d)^beta;
            // sqrt(pheromone / d) here corresponds to alpha = beta = 1/2 —
            // confirm this weighting is intentional.
            for (int k = 0; k < N_POINTS; k++) {
                if (ants[i].city_flags[k]) {
                    float len = dist(curr, k);
                    len = 1.0 / len;
                    float register tmp = sqrt(pheromones[curr][k] * len);
                    /*ants[i].*/ city_probs[k] = tmp;
                    denominator += tmp;
                }
            }
            prob *= denominator;
            float cumulativeProb = 0;
            // Probabilistic choice of the next city to visit (roulette wheel).
            // NOTE(review): if floating-point rounding makes the cumulative
            // sum fall short of `prob`, no city is selected this step and
            // route[j+1] keeps a stale value — confirm this cannot happen or
            // guard the final candidate.
            for (int k = 0; k < N_POINTS; k++) {
                if (ants[i].city_flags[k]) {
                    cumulativeProb += city_probs[k];
                    if (prob < cumulativeProb) {
                        // Move to city k: mark visited, extend path, record route.
                        ants[i].city_flags[k] = 0;
                        ants[i].pathLength += dist(curr, k);
                        ants[i].currentCity = k;
                        ants[i].route[j + 1] = k;
                        break;
                    }
                }
            }
        }
        //printf("agent = %d\tcurrCity = %d\tinitCity = %d\n",i,ants[i].currentCity,ants[i].initialCity);
        // Close the tour: return from the last city to the starting city.
        ants[i].pathLength += dist(ants[i].currentCity, ants[i].initialCity);
    }
}
// Calculates the new values of all the pheromones.
// For every directed edge (i, j): evaporate by (1 - P), then deposit
// pheromone inversely proportional to the summed tour lengths of the ants
// that travelled i -> j.
// NOTE(review): canonical ACO deposits sum_k (1 / L_k) rather than
// 1 / sum_k L_k — confirm the aggregate form is intentional.
void updatePheromones() {
#pragma omp parallel for
    for (int i = 0; i < N_POINTS; i++) {
        for (int j = 0; j < N_POINTS; j++) {
            //For each edge (i, j), sum the tour lengths of all ants whose
            //route visits j immediately after i.
            float sumDist = 0;
            for (int k = 0; k < N_AGENTS; k++) {
                for (int q = 0; q < N_POINTS; q++) {
                    if (ants[k].route[q] == j) {
                        // Fix: when j is the first city of the route (q == 0)
                        // the original read route[-1], an out-of-bounds access.
                        if (q > 0 && ants[k].route[q - 1] == i) {
                            sumDist += ants[k].pathLength;
                        }
                        break;
                    }
                }
            }
            if (sumDist != 0)
                pheromones[i][j] = (1 - P) * pheromones[i][j] + 1.0 / sumDist;
            else
                pheromones[i][j] = (1 - P) * pheromones[i][j];
        }
    }
}
// Driver: build the problem, then iterate reset -> tour -> pheromone update
// until the average tour length changes by less than 1% between iterations.
int main() {
// NOTE(review): threadprivate at block scope — see the matching note at
// randUint(); kept as-is to avoid changing threading semantics here.
#pragma omp threadprivate(seed)
    float prevAvg = 1e9;   // sentinel so the first iteration never converges
    float sum = 0;
    int iter = 1; //iteration number
    initVec();
    initPheromones();
    printf("INITIALIZED EVERYTHING\n");
    do {
        resetAgents();
        releaseAgents();
        updatePheromones();
        // Track the minimum and the sum of all tour lengths this iteration.
        minPathLength = ants[0].pathLength;
        sum = ants[0].pathLength;
        for (int i = 1; i < N_AGENTS; i++) {
            if (ants[i].pathLength < minPathLength) {
                minPathLength = ants[i].pathLength;
            }
            sum += ants[i].pathLength;
        }
        prevAvg = avgPathLength;
        avgPathLength = sum / N_AGENTS;
        iter++;
        // Fix: the original used abs() (integer absolute value) on a float
        // difference, truncating the fractional part before the comparison.
    } while (fabsf(avgPathLength - prevAvg) / prevAvg > 0.01);
    printf("Iterations: %d\tMin Path Length: %.2f\tAverage Path: %.2f\n", iter, minPathLength, avgPathLength);
    return 0;
}
tp3.c |
/////////////////////////////// 8INF854 - ARCHITECTURES PARRALLELES - DEVOIR #3 ////////////////////////////////////////
///////////////////////////// tri tableau MPI - Corentin RAOULT - Adrien Cambillau /////////////////////////////////////
//http://www.cac.cornell.edu/vw/MPIoneSided/exercise.aspx
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h> /* gethostname() — previously called without a declaration */
#include <mpi.h>
/* Block-decomposition helpers (Quinn-style): split n items across p
 * processes as evenly as possible.  Process `id` owns indices
 * [BLOCK_LOW, BLOCK_HIGH], a range of BLOCK_SIZE elements. */
#define BLOCK_LOW(id,p,n) ((id)*(n)/(p))
#define BLOCK_HIGH(id,p,n) \
 (BLOCK_LOW((id)+1,p,n)-1)
#define BLOCK_SIZE(id,p,n) \
 (BLOCK_LOW((id)+1,p,n)-BLOCK_LOW(id,p,n))
/* Owner of element `index`.  Fixed: the original had unbalanced parentheses
 * (it would not compile if used) and grouped the +1 with the product instead
 * of with the index; the correct form is ((p*(index+1)-1)/n). */
#define BLOCK_OWNER(index,p,n) \
 (((p)*((index)+1)-1)/(n))
/* Handle to the distributed array: a local buffer plus its logical length.
 * On rank 0 `tab` doubles as the RMA window buffer all ranks access. */
struct tableau
{
    int * tab;    /* local element storage */
    int taille;   /* number of elements */
};
// Dirty global variables, accessible from everywhere.
MPI_Win win;    /* one-sided RMA window exposing rank 0's array */
MPI_Comm comm;  /* communicator the window lives on (MPI_COMM_WORLD) */
////////////////////// function declarations ////////////////////////////////////////////////////////////////////
int demandeNombre();
void remplirTABrand( struct tableau T);
struct tableau TD_init(int n);
int TD_get(struct tableau T, int i, int *x);
int TD_put(struct tableau T, int i, int *x);
int TD_somme(struct tableau T); // Returns the sum of the elements of T.
void TD_afficher(struct tableau T,int i, int j); // Prints elements i..j-1 (i <= j) of T, in order.
void afficherNomMachine();
void triFusionParallele(int * TAB, int n);
void fusion(int * U, int taille_U, int * V, int taille_V , int * T);
void afficherTAB(int* TAB, int n);
void getPartTab(struct tableau T, int debut, int fin);
///////////////////// MAIN ///////////////////////////////////////////////////////////////////////////////////
/* Distributed merge sort over MPI one-sided communication.
 * Rank 0 owns the array (exposed through an RMA window); ranks 1..4 each
 * sort one quarter, ranks 0-1 merge halves, and rank 0 performs the final
 * merge and prints the result.  NOTE(review): the block arithmetic hard-codes
 * 4 workers, so the program expects to be launched with exactly 5 processes. */
int main (int argc, char **argv)
{
    /* Fix: guard argv[1] — the original dereferenced it unconditionally. */
    if (argc < 2) {
        fprintf(stderr, "usage: %s <taille du tableau>\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]);
    int rank, i,p;
    //double time, max_time;
    //time = -MPI_Wtime();
    //Start up MPI...
    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    comm = MPI_COMM_WORLD;
    struct tableau T;
    if (rank == 0) {
        /* Only rank 0 has a nonzero buffer at start; TD_init also creates
         * its side of the (collective) window. */
        T = TD_init(n);
    }
    else {
        T.tab = calloc(n, sizeof(int));//Initialize all buffers to 0
        /* Fix: taille was never set on ranks != 0, so the bounds checks in
         * TD_get/TD_put read an uninitialized value. */
        T.taille = n;
        /* Others only retrieve, so these windows can be size 0.
         * NOTE(review): the `+5` in the window size looks accidental —
         * confirm; harmless but n*sizeof(int) would suffice. */
        MPI_Win_create(T.tab,n*sizeof(int)+5,sizeof(int),MPI_INFO_NULL,comm,&win);
    }
    MPI_Barrier(MPI_COMM_WORLD);///////////////////////////////barrier///////////////////////////////////////////////////////////////////////////////
    /* Each rank pulls its quarter of the data from rank 0. */
    getPartTab(T,BLOCK_LOW(rank-1,4,n),BLOCK_HIGH(rank-1,4,n)+1);
    MPI_Barrier(MPI_COMM_WORLD);///////////////////////////////barrier///////////////////////////////////////////////////////////////////////////////
    if(rank!=0)
    {
        /* Workers 1..4 each sort their quarter locally. */
        //#pragma omp parallel
        triFusionParallele(T.tab+(rank-1)*(n/4), n/4);
    }
    MPI_Barrier(MPI_COMM_WORLD);///////////////////////////////barrier///////////////////////////////////////////////////////////////////////////////
    /* Push the sorted quarters back into rank 0's window. */
    MPI_Win_fence(MPI_MODE_NOPRECEDE,win);
    if(rank!=0){
        for(i=BLOCK_LOW(rank-1,4,n);i<BLOCK_HIGH(rank-1,4,n)+1;i++)
        {
            //printf("%d ",T.tab[i] );
            TD_put(T, i, &T.tab[i]);
        }
    }
    MPI_Win_fence(MPI_MODE_NOSUCCEED,win);
    getPartTab(T,0,n);
    MPI_Barrier(MPI_COMM_WORLD);///////////////////////////////barrier///////////////////////////////////////////////////////////////////////////////
    /* Ranks 0 and 1 each merge two adjacent quarters into a half. */
    if(rank==0 || rank==1)
    {
        int * U = malloc((n/4+1)*sizeof(int));   /* +1 for fusion's sentinel slot */
        int * V = malloc((n/4+1)*sizeof(int));
        for(i=0; i<n/4;i++)
        {
            U[i]=T.tab[i+rank*(n/2)];
            V[i]=T.tab[i+(n/4)+rank*(n/2)];
        }
        //*(U+n/2)=*(V+n/2)=INT_MAX;
        fusion(U,n/4,V,n/4,T.tab+rank*(n/2));
        //afficherTAB(T.tab, n);
        free(U);
        free(V);
    }
    MPI_Barrier(MPI_COMM_WORLD);///////////////////////////////barrier///////////////////////////////////////////////////////////////////////////////
    /* Push the merged halves back into rank 0's window. */
    MPI_Win_fence(MPI_MODE_NOPRECEDE,win);
    if(rank==0 || rank==1)
    {
        for(i=rank*(n/2);i<(rank+1)*(n/2);i++)
        {
            TD_put(T, i, &T.tab[i]);
        }
    }
    MPI_Win_fence(MPI_MODE_NOSUCCEED,win);
    MPI_Barrier(MPI_COMM_WORLD);///////////////////////////////barrier///////////////////////////////////////////////////////////////////////////////
    getPartTab(T,0,n);
    MPI_Barrier(MPI_COMM_WORLD);///////////////////////////////barrier///////////////////////////////////////////////////////////////////////////////
    /* Rank 0 does the final merge of the two halves. */
    if(rank==0){
        int * leftTab = malloc((n/2)*sizeof(int));
        int * rightTab = malloc((n/2)*sizeof(int));
        for(i=0; i<n/2;i++)
        {
            leftTab[i]=T.tab[i];
            rightTab[i]=T.tab[i+(n/2)];
        }
        fusion(leftTab, n/2, rightTab, n/2, T.tab);
        free(leftTab);
        free(rightTab);
    }
    MPI_Barrier(MPI_COMM_WORLD);///////////////////////////////barrier///////////////////////////////////////////////////////////////////////////////
    MPI_Win_free(&win);
    MPI_Barrier(MPI_COMM_WORLD);///////////////////////////////barrier///////////////////////////////////////////////////////////////////////////////
    if(rank == 0)
    {
        TD_afficher(T,0,n);
        printf("somme = %d\n",TD_somme(T));
    }
    //time += MPI_Wtime();
    //MPI_Reduce (&time, &max_time, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
    //if (!rank)
    //  printf ("tri: processus: %d, secondes: %6.2f \n",p, max_time);
    MPI_Barrier(MPI_COMM_WORLD);///////////////////////////////barrier///////////////////////////////////////////////////////////////////////////////
    free(T.tab);
    //Shut down...
    MPI_Finalize();
    return(0);
}
/////////////////// développement des fonctions /////////////////////////////////////////////////////////////
/* Prompts on stdin until an integer is entered; returns it.
 * Non-numeric input is consumed as a token and reported. */
int demandeNombre()
{
    int i; char buf[128] = {0};
    //keep asking until the input is a valid number
    while(scanf("%d", &i) != 1)
    {
        /* Fixes: pass `buf`, not `&buf` (wrong pointer type for %s), and
         * bound the read to the buffer size to prevent overflow. */
        scanf("%127s", buf);
        printf("Désolé, [%s] n'est pas un nombre, veuillez taper une valeur correcte : ", buf);
    }
    return i;
}
/* Prints this machine's hostname on stdout, or an error message on stderr
 * if gethostname() fails. */
void afficherNomMachine()
{
    char hostname[256];
    if (gethostname(hostname, sizeof(hostname)) != 0) {
        fprintf(stderr, "La fonction gethostname a echoue.\n");
        return;
    }
    printf("%s\n", hostname);
    fflush(stdout);
}
/* Fills T's buffer with pseudo-random values in [0, T.taille),
 * reseeding from the current time. */
void remplirTABrand(struct tableau T)
{
    srand(time(NULL));
    for (int k = 0; k < T.taille; k++)
        T.tab[k] = rand() % T.taille;
}
/* Allocates and randomly fills the distributed array on rank 0, exposes its
 * buffer through the RMA window, prints the initial contents, and returns
 * the handle.
 * NOTE: MPI_Win_create is collective — this call on rank 0 pairs with the
 * MPI_Win_create issued by the other ranks in main(). */
struct tableau TD_init(int n)
{
    struct tableau T;
    T.tab = malloc(n*sizeof(int));
    T.taille=n;
    remplirTABrand(T);
    /* Everyone will retrieve from the buffer on root */
    MPI_Win_create(T.tab,n*sizeof(int),sizeof(int),MPI_INFO_NULL,comm,&win);
    printf("Tableau original\n");fflush(stdout);
    TD_afficher(T,0, T.taille);fflush(stdout);
    return T;
}
/* One-sided fetch of element i from rank 0's window into *x.
 * Returns 1 on success, 0 if i is out of range.  Must be called inside an
 * RMA access epoch (between fences); the value is usable after the epoch. */
int TD_get(struct tableau T, int i, int *x)
{
    if (i >= T.taille)
        return 0;
    MPI_Get(x, 1, MPI_INT, 0, i, 1, MPI_INT, win);
    return 1;
}
/* One-sided store of *x into element i of rank 0's window.
 * Returns 1 on success, 0 if i is out of range.  Must be called inside an
 * RMA access epoch (between fences). */
int TD_put(struct tableau T, int i, int *x)
{
    if (i >= T.taille)
        return 0;
    MPI_Put(x, 1, MPI_INT, 0, i, 1, MPI_INT, win);
    return 1;
}
/* Returns the sum of the elements in T's local buffer. */
int TD_somme(struct tableau T)
{
    int total = 0;
    for (int k = 0; k < T.taille; k++)
        total += T.tab[k];
    return total;
}
/* Prints elements [i, j) of T's local buffer, in order, as "tab: { ... }". */
void TD_afficher(struct tableau T,int i, int j)
{
    printf("tab: {");
    for (int idx = i; idx < j; idx++)
        printf(" %d ", T.tab[idx]);
    printf("}\n");
}
/*
 * Recursive merge sort of TAB[0..n-1] (OpenMP task pragmas were
 * already commented out in the original).
 * Fixes vs. the original:
 *  - buffers get one extra slot because fusion() historically writes
 *    an INT_MAX sentinel at U[taille_U]/V[taille_V] — the original
 *    allocated exactly n/2 ints, a heap buffer overflow;
 *  - odd n is handled: the right half gets n - n/2 elements (the last
 *    element used to be silently dropped when n was odd);
 *  - no allocation/copy work when n < 2 (already sorted);
 *  - malloc results are checked.
 */
void triFusionParallele(int * TAB, int n)
{
    int i;
    int taille_U;
    int taille_V;
    int * U;
    int * V;
    if (n < 2) /* 0- or 1-element arrays are already sorted */
        return;
    taille_U = n / 2;
    taille_V = n - taille_U; /* right half keeps the extra element for odd n */
    U = (int *) malloc((taille_U + 1) * sizeof(int)); /* +1: sentinel room */
    V = (int *) malloc((taille_V + 1) * sizeof(int));
    if (U == NULL || V == NULL)
    {
        free(U);
        free(V);
        return; /* out of memory: leave TAB untouched */
    }
    for (i = 0; i < taille_U; i++)
        U[i] = TAB[i];
    for (i = 0; i < taille_V; i++)
        V[i] = TAB[i + taille_U];
    triFusionParallele(U, taille_U);
    triFusionParallele(V, taille_V);
    fusion(U, taille_U, V, taille_V, TAB);
    free(U);
    free(V);
}
/*
 * Merge the sorted arrays U[0..taille_U-1] and V[0..taille_V-1] into T
 * (T must hold taille_U + taille_V ints).
 * Rewritten without INT_MAX sentinels: the original wrote U[taille_U]
 * and V[taille_V], i.e. one element PAST each buffer — a heap overflow
 * whenever the caller allocates exact-size buffers. The merged output
 * is identical to the original's.
 */
void fusion(int * U, int taille_U, int * V, int taille_V , int * T)
{
    int i = 0, j = 0, k = 0;
    /* take the smaller head while both inputs still have elements */
    while (i < taille_U && j < taille_V)
    {
        if (U[i] < V[j])
            T[k++] = U[i++];
        else
            T[k++] = V[j++];
    }
    /* copy whichever input has a remaining tail */
    while (i < taille_U)
        T[k++] = U[i++];
    while (j < taille_V)
        T[k++] = V[j++];
}
/* Print the n elements of TAB as "TAB : {  [a]  [b]  ... }". */
void afficherTAB(int* TAB, int n)
{
    int j = 0;
    printf("TAB : { ");
    while (j < n)
    {
        printf(" [%d] ", TAB[j]);
        j++;
    }
    printf(" }\n");
}
/* Fetch elements [debut, fin) from rank 0 into the local copy T.tab,
 * bracketed by an MPI fence epoch on the shared window. */
void getPartTab(struct tableau T, int debut, int fin)
{
    int idx;
    MPI_Win_fence(MPI_MODE_NOPRECEDE, win);
    for (idx = debut; idx < fin; idx++)
    {
        TD_get(T, idx, &T.tab[idx]);
    }
    MPI_Win_fence(MPI_MODE_NOSUCCEED, win);
}
|
DRB062-matrixvector2-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it andor
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http:www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISOIEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https:github.comLLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Matrix-vector multiplication: inner level parallelization.
*/
double a[1000][1000], v[1000], v_out[1000];
/*
 * Initialize a[1000][1000], v and v_out.
 * This is DataRaceBench code: the Cetus-generated pragmas and the loop
 * structure are the benchmark itself and are kept verbatim.
 * The inner loop is lastprivate(j), so the two statements after it
 * read j == 1000 (the value from the last iteration).
 */
int init()
{
int i, j, k;
int _ret_val_0;
#pragma cetus private(i, j)
#pragma loop name init#0
#pragma cetus parallel
#pragma omp parallel for private(i, j)
for (i=0; i<1000; i ++ )
{
#pragma cetus lastprivate(j)
#pragma loop name init#0#0
#pragma cetus parallel
#pragma omp parallel for lastprivate(j)
for (j=0; j<1000; j ++ )
{
a[i][j]=((i*j)+0.01);
}
v_out[i]=((i*j)+0.01);
v[i]=((i*j)+0.01);
}
_ret_val_0=0;
return _ret_val_0;
}
/*
 * Matrix-vector multiply: v_out = a * v.
 * Outer loop is parallelized over rows; the inner dot product carries
 * a sum reduction. The nested `omp parallel for` on the inner loop is
 * part of the benchmark (nested parallelism is typically disabled at
 * runtime by default). Pragmas kept verbatim — this is DataRaceBench.
 */
void mv()
{
int i, j;
#pragma cetus private(i, j)
#pragma loop name mv#0
#pragma cetus parallel
#pragma omp parallel for private(i, j)
for (i=0; i<1000; i ++ )
{
/* per-row accumulator; private to each outer iteration */
double sum = 0.0;
#pragma cetus private(j)
#pragma loop name mv#0#0
#pragma cetus reduction(+: sum)
#pragma cetus parallel
#pragma omp parallel for private(j) reduction(+: sum)
for (j=0; j<1000; j ++ )
{
sum+=(a[i][j]*v[j]);
}
v_out[i]=sum;
}
return ;
}
/*
 * Dump the matrix and both vectors to stdout, one value per line
 * (a[i][0..999], then v_out[i], then v[i], for each row i).
 * `k` is declared but unused — kept to preserve the generated code.
 */
int print()
{
int i, j, k;
int _ret_val_0;
#pragma cetus private(i, j)
#pragma loop name print#0
for (i=0; i<1000; i ++ )
{
#pragma cetus private(j)
#pragma loop name print#0#0
for (j=0; j<1000; j ++ )
{
printf("%lf\n", a[i][j]);
}
printf("%lf\n", v_out[i]);
printf("%lf\n", v[i]);
}
_ret_val_0=0;
return _ret_val_0;
}
/* Drive the benchmark: initialize the data, multiply, print results. */
int main()
{
    init();
    mv();
    print();
    return 0;
}
|
window.c | /********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : window.c
* Description : window
*
* + This is part of libaroma, an embedded ui toolkit.
* + 06/04/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_window_c__
#define __libaroma_window_c__
#include <aroma_internal.h>
#include "ui_internal.h"
/* check wm macro */
#define __CHECK_WM(RETVAL) \
if (libaroma_wm()==NULL){ \
ALOGW("window manager uninitialized"); \
return RETVAL; \
}
/*
* Variable : _libaroma_window_measurement_dp
* Type : byte
* Descriptions: default measurement
*/
static byte _libaroma_window_measurement_dp=1;
/*
* Function : libaroma_window_usedp
* Return Value: byte
* Descriptions: use dp for measurement
*/
/*
 * Select the measurement unit: 1 = density-independent pixels,
 * 0 = raw pixels. Any other value leaves the mode untouched, so
 * libaroma_window_usedp(2) acts as a pure getter.
 * Returns the mode now in effect.
 */
byte libaroma_window_usedp(byte isdp){
  switch (isdp){
    case 1:
      _libaroma_window_measurement_dp=1;
      break;
    case 0:
      _libaroma_window_measurement_dp=0;
      break;
    default:
      /* query only — keep current mode */
      break;
  }
  return _libaroma_window_measurement_dp;
} /* End of libaroma_window_usedp */
/*
* Function : libaroma_window_measure_point
* Return Value: int
* Descriptions: mesure point
*/
/* Convert a coordinate to pixels: dp conversion when dp mode is on,
 * otherwise the value is already in pixels. */
int libaroma_window_measure_point(int x){
  return _libaroma_window_measurement_dp ? libaroma_dp(x) : x;
} /* End of libaroma_window_measure_point */
/*
* Function : _libaroma_window_measure_save
* Return Value: void
* Descriptions: save measurement value
*/
/*
 * Store the measured rect of a window and/or control back into its
 * left/top/width/height fields. Either argument may be NULL. In dp
 * mode the pixel-space x/y/w/h values are converted back via
 * libaroma_px; otherwise they are copied verbatim.
 */
void _libaroma_window_measure_save(LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){
  byte use_dp = _libaroma_window_measurement_dp;
  if (win!=NULL){
    win->left   = use_dp ? libaroma_px(win->x) : win->x;
    win->top    = use_dp ? libaroma_px(win->y) : win->y;
    win->width  = use_dp ? libaroma_px(win->w) : win->w;
    win->height = use_dp ? libaroma_px(win->h) : win->h;
  }
  if (ctl!=NULL){
    ctl->left   = use_dp ? libaroma_px(ctl->x) : ctl->x;
    ctl->top    = use_dp ? libaroma_px(ctl->y) : ctl->y;
    ctl->width  = use_dp ? libaroma_px(ctl->w) : ctl->w;
    ctl->height = use_dp ? libaroma_px(ctl->h) : ctl->h;
  }
} /* End of _libaroma_window_measure_save */
/*
* Function : libaroma_window_measure_calculate
* Return Value: int
* Descriptions: calculate measurement
*/
/*
 * Resolve a requested position/size into a concrete value.
 * cv      : pre-converted value, returned for plain (positive) requests
 * pos     : raw request; non-positive values encode LIBAROMA_POS_* /
 *           LIBAROMA_SIZE_* fraction constants
 * max     : parent dimension the fractions refer to
 * is_size : nonzero when resolving a size, zero for a position
 * x       : origin subtracted from POS_*-based sizes
 */
int libaroma_window_measure_calculate(
    int cv, int pos, int max, int is_size, int x){
  if (is_size&&(pos<=0)){
    /* size request: POS_* means "extend up to that fraction point",
       SIZE_* means an absolute fraction of max */
    switch (pos){
      case LIBAROMA_POS_HALF:     return (max/2)-x;
      case LIBAROMA_POS_1P3:      return (max/3)-x;
      case LIBAROMA_POS_2P3:      return (max*2/3)-x;
      case LIBAROMA_POS_1P4:      return (max/4)-x;
      case LIBAROMA_POS_3P4:      return (max*3/4)-x;
      case LIBAROMA_SIZE_FULL:    return max;
      case LIBAROMA_SIZE_HALF:    return max/2;
      case LIBAROMA_SIZE_THIRD:   return max/3;
      case LIBAROMA_SIZE_QUARTER: return max/4;
      default:                    return abs(pos);
    }
  }
  if ((!is_size)&&(pos<0)){
    /* position request: only fraction-of-parent constants apply */
    switch (pos){
      case LIBAROMA_POS_HALF: return max/2;
      case LIBAROMA_POS_1P3:  return max/3;
      case LIBAROMA_POS_2P3:  return max*2/3;
      case LIBAROMA_POS_1P4:  return max/4;
      case LIBAROMA_POS_3P4:  return max*3/4;
      default:                return abs(pos);
    }
  }
  return cv;
} /* End of libaroma_window_measure_calculate */
/*
* Function : libaroma_window_measure_size
* Return Value: byte
* Descriptions: measure window size
*/
/*
 * Measure a top-level window: convert the requested rect (rx/ry/rw/rh)
 * into concrete pixel x/y/w/h, clamp it to the screen, save the result
 * and notify the window with LIBAROMA_MSG_WIN_MEASURED.
 * Returns 1 on success, 0 for NULL or child windows.
 */
byte libaroma_window_measure_size(LIBAROMA_WINDOWP win){
  if (win){
    if (win->parent!=NULL){
      ALOGW("window_resize cannot be used for child window");
      return 0;
    }
    /* first pass: unit conversion of the raw request */
    if (_libaroma_window_measurement_dp){
      win->x = libaroma_dp(win->rx);
      win->y = libaroma_dp(win->ry);
      win->w = libaroma_dp(win->rw);
      win->h = libaroma_dp(win->rh);
    }
    else{
      win->x = win->rx;
      win->y = win->ry;
      win->w = win->rw;
      win->h = win->rh;
    }
    /* ax/ay keep the pre-resolution position (used for absolute-pos math) */
    win->ax=win->x;
    win->ay=win->y;
    /* second pass: resolve POS_*/SIZE_* constants against screen size;
       positions must be resolved before sizes, which depend on x/y */
    win->x=libaroma_window_measure_calculate(
      win->x, win->rx, libaroma_wm()->w, 0, 0
    );
    win->y=libaroma_window_measure_calculate(
      win->y, win->ry, libaroma_wm()->h, 0, 0
    );
    win->w=libaroma_window_measure_calculate(
      win->w, win->rw, libaroma_wm()->w, 1, win->x
    );
    win->h=libaroma_window_measure_calculate(
      win->h, win->rh, libaroma_wm()->h, 1, win->y
    );
    /* clamp so the window never extends past the screen */
    if (win->w+win->x>libaroma_wm()->w){
      win->w = libaroma_wm()->w-win->x;
    }
    if (win->h+win->y>libaroma_wm()->h){
      win->h = libaroma_wm()->h-win->y;
    }
    _libaroma_window_measure_save(win,NULL);
    /* tell the window (and, via its handler, its children) it was measured */
    LIBAROMA_MSG _msg;
    libaroma_window_process_event(win,libaroma_wm_compose(
      &_msg, LIBAROMA_MSG_WIN_MEASURED, NULL, 0, 0)
    );
    return 1;
  }
  return 0;
} /* End of libaroma_window_measure */
/*
* Function : _libaroma_window_ui_thread
* Return Value: byte
* Descriptions: window ui thread
*/
/*
 * Per-frame UI tick: run every child's thread handler and redraw the
 * controls that report a change. Only runs while the window is fully
 * active (active==1; the value 2 is used during show animation).
 * Returns 1 when at least one control was redrawn and the screen
 * needs a sync.
 * NOTE(review): with OpenMP enabled the loop writes need_sync from
 * multiple threads; every write stores 1, so the result is the same
 * either way — confirm this is the intended benign race.
 */
byte _libaroma_window_ui_thread(LIBAROMA_WINDOWP win) {
  int i;
  byte need_sync = 0;
  if (win->active==1){
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
    for (i=0;i<win->childn;i++){
      LIBAROMA_CONTROLP c=win->childs[i];
      if (c->handler->thread!=NULL){
        /* handler returns nonzero when the control wants a redraw */
        if (c->handler->thread(c)){
          if (libaroma_control_draw(c,0)){
            /* mark the control's screen rect dirty (window-relative -> screen) */
            libaroma_wm_updatesync(
              c->x+win->x,
              c->y+win->y,
              c->w,
              c->h,
              0
            );
            need_sync=1;
          }
        }
      }
    }
  }
  return need_sync;
} /* End of _libaroma_window_ui_thread */
/*
* Function : libaroma_window
* Return Value: LIBAROMA_WINDOWP
* Descriptions: new window
*/
/*
 * Create a new top-level window with the given background theme name
 * (may be NULL for the default background) and requested rect, then
 * run the initial measurement pass.
 * Returns the window, or NULL when allocation fails or the window
 * manager is not initialized.
 */
LIBAROMA_WINDOWP libaroma_window(
  char * bg_theme_name,
  int x, int y, int w, int h
){
  __CHECK_WM(NULL);
  LIBAROMA_WINDOWP win = (LIBAROMA_WINDOWP) calloc(sizeof(LIBAROMA_WINDOW),1);
  if (win==NULL){
    ALOGW("libaroma_window alloc window data failed");
    return NULL;
  }
  /* empty theme name by default; copy the requested one when given */
  win->theme_bg[0]=0;
  if (bg_theme_name!=NULL){
    snprintf(win->theme_bg,256,"%s",bg_theme_name);
  }
  win->rx = x;
  win->ry = y;
  win->rw = w;
  win->rh = h;
  win->ui_thread = _libaroma_window_ui_thread;
  libaroma_window_measure_size(win);
  return win;
} /* End of libaroma_window */
/*
* Function : libaroma_window_free
* Return Value: byte
* Descriptions: free window
*/
/*
 * Destroy a window: deactivate it (top-level only), run the handler's
 * prefree hook, free every child control, release both canvases, run
 * the postfree hook, then free the window itself.
 * Returns 1 on success, 0 for NULL or uninitialized WM.
 */
byte libaroma_window_free(
  LIBAROMA_WINDOWP win
){
  __CHECK_WM(0);
  if (win==NULL){
    return 0;
  }
  /* inactivate it */
  if (win->parent==NULL){
    if (libaroma_wm_get_active_window()==win){
      /* detach active window from window manager */
      libaroma_wm_set_active_window(NULL);
    }
    /* let the window and its children stop threads/cleanup state */
    LIBAROMA_MSG _msg;
    libaroma_window_process_event(win,
      libaroma_wm_compose(&_msg, LIBAROMA_MSG_WIN_INACTIVE, NULL, 0, 0));
  }
  /* prefree runs while children are still alive */
  if (win->handler!=NULL){
    if (win->handler->prefree!=NULL){
      win->handler->prefree(win);
    }
  }
  /* delete childs */
  int i;
  if (win->childn>0){
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
    for (i=0;i<win->childn;i++){
      libaroma_control_free(win->childs[i]);
    }
    free(win->childs);
  }
  if (win->bg){
    libaroma_canvas_free(win->bg);
    win->bg=NULL;
  }
  if (win->dc){
    libaroma_canvas_free(win->dc);
    win->dc=NULL;
  }
  /* postfree runs after children and canvases are gone */
  if (win->handler!=NULL){
    if (win->handler->postfree!=NULL){
      win->handler->postfree(win);
    }
  }
  free(win);
  return 1;
} /* End of libaroma_window_free */
/*
* Function : _libaroma_window_updatebg
* Return Value: byte
* Descriptions: update window background
*/
/*
 * (Re)build the window background canvas: delegate to the handler's
 * updatebg when present; otherwise create/reuse a canvas of the
 * window size, fill it with the theme window_bg color, draw the named
 * theme background (if any) and invoke the onupdatebg callback.
 * Child windows without a handler are skipped (parent draws them).
 * Returns 1 when the background is valid, 0 otherwise.
 * Fix: the NULL-arg log message said "window_recalculate" — a
 * copy-paste from the sibling function — and now names this function.
 */
byte _libaroma_window_updatebg(LIBAROMA_WINDOWP win){
  if (win==NULL){
    ALOGW("window_updatebg win is NULL");
    return 0;
  }
  if (win->handler!=NULL){
    if (win->handler->updatebg!=NULL){
      if (win->handler->updatebg(win)){
        if (win->onupdatebg){
          win->onupdatebg(win,win->bg);
        }
        return 1;
      }
      return 0;
    }
  }
  if (win->parent!=NULL){
    return 0;
  }
  int w = win->w;
  int h = win->h;
  /* draw background */
  if (win->bg!=NULL){
    if ((win->bg->w==w)&&(win->bg->h==h)){
      /* not need recreate background */
      return 1;
    }
    /* size changed: drop the old canvas and build a new one */
    libaroma_canvas_free(win->bg);
  }
  win->bg = libaroma_canvas(w,h);
  /* default canvas color */
  libaroma_canvas_setcolor(
    win->bg,
    libaroma_colorget(NULL,win)->window_bg,
    0xff
  );
  /* from theme canvas */
  if (win->theme_bg[0]!=0){
    libaroma_wm_draw_theme(
      win->bg, win->theme_bg,
      0, 0, win->bg->w, win->bg->h,
      NULL
    );
  }
  /* from updatebg callback */
  if (win->onupdatebg!=NULL){
    win->onupdatebg(win,win->bg);
  }
  return 1;
} /* End of _libaroma_window_updatebg */
/*
* Function : _libaroma_window_recalculate
* Return Value: byte
* Descriptions: recalculate client size
*/
/* Refresh the background and redraw, but only when the window is the
 * currently active one; inactive windows are recalculated lazily. */
byte _libaroma_window_recalculate(LIBAROMA_WINDOWP win){
  if (win==NULL){
    ALOGW("window_recalculate win is NULL");
    return 0;
  }
  if (!libaroma_window_isactive(win)){
    return 1;
  }
  _libaroma_window_updatebg(win);
  libaroma_window_invalidate(win, 1);
  return 1;
} /* End of _libaroma_window_recalculate */
/*
* Function : _libaroma_window_ready
* Return Value: byte
* Descriptions: window is ready
*/
/*
 * Prepare a window for display: allocate its drawing canvas from the
 * window manager workspace (full screen when w or h is 0), store the
 * final pixel geometry and trigger a recalculate.
 * Returns 1 on success, 0 when the canvas cannot be allocated.
 * Fix: the NULL-arg log message said "window_resize" — a copy-paste
 * from a sibling function — and now names this function.
 */
byte _libaroma_window_ready(LIBAROMA_WINDOWP win){
  __CHECK_WM(0);
  if (win==NULL){
    ALOGW("window_ready win is NULL");
    return 0;
  }
  int x = win->x;
  int y = win->y;
  int w = win->w;
  int h = win->h;
  /* zero width/height means "use the whole screen" on that axis */
  if (w==0){
    w = libaroma_wm()->w;
    x = 0;
  }
  if (h==0){
    h = libaroma_wm()->h;
    y = 0;
  }
  /* set position */
  if (win->dc!=NULL){
    libaroma_canvas_free(win->dc);
    win->dc=NULL;
  }
  win->dc= libaroma_wm_canvas(x, y, w, h);
  if (win->dc==NULL){
    ALOGW("window_ready cannot allocate workspace drawing canvas");
    return 0;
  }
  if (libaroma_window_isactive(win)){
    libaroma_wm_clean_workspace();
  }
  /* adopt the geometry the WM actually granted */
  win->x = x;
  win->y = y;
  win->w = win->dc->w;
  win->h = win->dc->h;
  _libaroma_window_measure_save(win,NULL);
  _libaroma_window_recalculate(win);
  return 1;
} /* End of _libaroma_window_ready */
/*
* Function : libaroma_window_resize
* Return Value: byte
* Descriptions: resize window
*/
/*
 * Resize a top-level window: store the new requested rect, re-measure
 * it and rebuild the drawing canvas.
 * Returns 1 on success, 0 for NULL/child windows or measure failure.
 */
byte libaroma_window_resize(
  LIBAROMA_WINDOWP win,
  int x, int y, int w, int h
){
  if (win==NULL){
    return 0;
  }
  if (win->parent!=NULL){
    ALOGW("window_resize cannot be used for child window");
    return 0;
  }
  win->rx = x;
  win->ry = y;
  win->rw = w;
  win->rh = h;
  if (!libaroma_window_measure_size(win)){
    return 0;
  }
  return _libaroma_window_ready(win);
} /* End of libaroma_window_resize */
/*
* Function : libaroma_window_isactive
* Return Value: byte
* Descriptions: check if window is active
*/
/* A window counts as active when its top-most ancestor is the window
 * manager's current active window. Returns 1/0. */
byte libaroma_window_isactive(LIBAROMA_WINDOWP win){
  if (win==NULL){
    return 0;
  }
  LIBAROMA_WINDOWP top = win;
  while (top->parent!=NULL){
    top = top->parent;
  }
  if (top==libaroma_wm_get_active_window()){
    return 1;
  }
  return 0;
} /* End of libaroma_window_isactive */
/*
* Function : libaroma_window_add
* Return Value: byte
* Descriptions: add control into window
*/
/*
 * Append ctl to win's child list and bind it to the window. The
 * control is measured first so its pixel rect is valid before the
 * recalculate pass. Returns 1 on success, 0 on invalid arguments or
 * allocation failure.
 */
byte libaroma_window_add(
  LIBAROMA_WINDOWP win,
  LIBAROMA_CONTROLP ctl
){
  __CHECK_WM(0);
  if (win==NULL){
    ALOGW("window_add win is NULL");
    return 0;
  }
  if (ctl==NULL){
    ALOGW("window_add ctl is NULL");
    return 0;
  }
  if (ctl->window != NULL){
    ALOGW("window_add ctl already have window");
    return 0;
  }
  libaroma_window_measure(win, ctl);
  /* grow the child array by one; when the list is empty we pass NULL
     so realloc behaves exactly like malloc */
  LIBAROMA_CONTROLP * grown = (LIBAROMA_CONTROLP *) realloc(
    (win->childn==0)?NULL:win->childs,
    sizeof(LIBAROMA_CONTROLP)*(win->childn+1)
  );
  if (grown==NULL){
    if (win->childn==0){
      ALOGW("window_add malloc failed");
      win->childs=NULL;
    }
    else{
      ALOGW("window_add realloc failed");
    }
    return 0;
  }
  grown[win->childn]=ctl;
  win->childs = grown;
  ctl->window = win;
  win->childn++;
  _libaroma_window_recalculate(win);
  return 1;
} /* End of libaroma_window_add */
/*
* Function : libaroma_window_del
* Return Value: byte
* Descriptions: delete control from window
*/
/*
 * Remove ctl from win's child list (the control itself is NOT freed).
 * The list is rebuilt into a smaller array; the single-child case is
 * handled separately so the array can be freed outright.
 * Returns 1 on success, 0 when ctl does not belong to win.
 */
byte libaroma_window_del(
  LIBAROMA_WINDOWP win,
  LIBAROMA_CONTROLP ctl
){
  __CHECK_WM(0);
  if (ctl==NULL){
    ALOGW("window_del ctl is null");
    return 0;
  }
  if (win==NULL){
    ALOGW("window_del win is null");
    return 0;
  }
  /* ctl must actually be owned by this window */
  if (win != ctl->window){
    return 0;
  }
  if (win->childn<=0){
    ALOGW("window_del window data corrupt doesn't have childs??");
    return 0;
  }
  else if (win->childn==1){
    /* last child: free the whole array instead of shrinking it */
    if (win->childs[0]==ctl){
      ctl->window = NULL;
      win->childn=0;
      free(win->childs);
      win->childs=NULL;
      _libaroma_window_recalculate(win);
      return 1;
    }
    else{
      ALOGW("window_del ctl not found in window");
      return 0;
    }
  }
  LIBAROMA_CONTROLP * newchilds = (LIBAROMA_CONTROLP *)
    malloc(sizeof(LIBAROMA_CONTROLP)*(win->childn-1));
  if (!newchilds){
    ALOGW("window_del malloc temp childs failed");
    return 0;
  }
  int j = 0;
  int i;
  for (i=0;i<win->childn;i++){
    if (win->childs[i]!=ctl){
      /* j reaching childn-1 means every slot was copied without ever
         skipping ctl, i.e. ctl was not in the list after all */
      if (j==win->childn-1){
        /* current ctl not found */
        free(newchilds);
        ALOGW("window_del ctl not found in window");
        return 0;
      }
      newchilds[j++]=win->childs[i];
    }
  }
  free(win->childs);
  win->childs=newchilds;
  win->childn--;
  _libaroma_window_recalculate(win);
  return 1;
} /* End of libaroma_window_del */
/*
* Function : libaroma_window_measure
* Return Value: byte
* Descriptions: measure control size
*/
/*
 * Measure a control against its window: convert the requested rect
 * (rx/ry/rw/rh) into pixel x/y/w/h, resolve POS_*/SIZE_* constants,
 * clamp to the window, enforce the control's minimum size, save the
 * result and send LIBAROMA_MSG_WIN_MEASURED to the control's handler.
 * NOTE(review): returns 0 when the handler has no message callback
 * even though measurement succeeded — callers here ignore the return
 * value, so this looks intentional; confirm before relying on it.
 */
byte libaroma_window_measure(LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){
  if (win&&ctl){
    /* unit conversion of the raw request */
    if (_libaroma_window_measurement_dp){
      ctl->x = libaroma_dp(ctl->rx);
      ctl->y = libaroma_dp(ctl->ry);
      ctl->w = libaroma_dp(ctl->rw);
      ctl->h = libaroma_dp(ctl->rh);
    }
    else{
      ctl->x = ctl->rx;
      ctl->y = ctl->ry;
      ctl->w = ctl->rw;
      ctl->h = ctl->rh;
    }
    /* positions first — the size resolution below depends on x/y */
    ctl->x=libaroma_window_measure_calculate(
      ctl->x, ctl->rx, win->w, 0, 0
    );
    ctl->y=libaroma_window_measure_calculate(
      ctl->y, ctl->ry, win->h, 0, 0
    );
    ctl->w=libaroma_window_measure_calculate(
      ctl->w,ctl->rw, win->w, 1, ctl->x
    );
    ctl->h=libaroma_window_measure_calculate(
      ctl->h,ctl->rh, win->h, 1, ctl->y
    );
    /* clamp to the window, then enforce the control's minimum size
       (minimum wins over the clamp) */
    if (ctl->w+ctl->x>win->w){
      ctl->w = win->w-ctl->x;
    }
    if (ctl->h+ctl->y>win->h){
      ctl->h = win->h-ctl->y;
    }
    if (ctl->w<ctl->minw){
      ctl->w=ctl->minw;
    }
    if (ctl->h<ctl->minh){
      ctl->h=ctl->minh;
    }
    _libaroma_window_measure_save(NULL,ctl);
    if (ctl->handler->message){
      LIBAROMA_MSG _msg;
      ctl->handler->message(ctl, libaroma_wm_compose(
        &_msg, LIBAROMA_MSG_WIN_MEASURED, NULL, 0, 0)
      );
      return 1;
    }
  }
  return 0;
} /* End of libaroma_window_measure */
/*
* Function : libaroma_window_attach
* Return Value: LIBAROMA_CONTROLP
* Descriptions: attach control into window
*/
/*
 * Attach a control to a window, taking ownership of the control:
 * on failure the control is freed and NULL is returned. With a NULL
 * window the control is handed back untouched.
 */
LIBAROMA_CONTROLP libaroma_window_attach(
  LIBAROMA_WINDOWP win,
  LIBAROMA_CONTROLP ctl){
  if (win==NULL){
    /* nothing to attach to — caller keeps the control */
    return ctl;
  }
  if (libaroma_window_add(win,ctl)){
    return ctl;
  }
  ALOGW("window_attach cannot attach into window");
  libaroma_control_free(ctl);
  return NULL;
} /* End of libaroma_window_attach */
/*
* Function : libaroma_window_getid
* Return Value: LIBAROMA_CONTROLP
* Descriptions: get control by id
*/
/* Find a direct child control by id; returns NULL when not found. */
LIBAROMA_CONTROLP libaroma_window_getid(
  LIBAROMA_WINDOWP win, word id){
  __CHECK_WM(NULL);
  if (win==NULL){
    ALOGW("window_control_id win is null");
    return NULL;
  }
  int i = 0;
  while (i<win->childn){
    LIBAROMA_CONTROLP child = win->childs[i];
    if (child->id==id){
      return child;
    }
    i++;
  }
  return NULL; /* not found */
} /* End of libaroma_window_getid */
/*
* Function : libaroma_window_setfocus
* Return Value: LIBAROMA_CONTROLP
* Descriptions: set control focus
*/
/*
 * Set or query keyboard focus.
 * With ctl != NULL: focus that control (must belong to win and have a
 * focus handler); the previously focused control is notified it lost
 * focus. With ctl == NULL: return the current focus, or give focus to
 * the first focusable child.
 * Returns the focused control, or NULL when focusing failed.
 */
LIBAROMA_CONTROLP libaroma_window_setfocus(
  LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){
  if (win==NULL){
    ALOGW("window_setfocus window is null");
    return NULL;
  }
  if (ctl!=NULL){
    /* set */
    if (win!=ctl->window){
      ALOGW("window_setfocus control is not window child");
      return NULL;
    }
    if (ctl->handler->focus!=NULL){
      /* already focused: nothing to do */
      if (win->focused==ctl){
        return ctl;
      }
      /* control accepts focus first, then the old one is un-focused */
      if (ctl->handler->focus(ctl,1)){
        if (win->focused){
          win->focused->handler->focus(win->focused,0);
        }
        win->focused=ctl;
        return ctl;
      }
    }
    return NULL;
  }
  else{
    /* find focus */
    if (win->focused){
      return win->focused;
    }
    /* no current focus: recurse once with the first focusable child */
    int i;
    for (i=0;i<win->childn;i++){
      if (win->childs[i]->handler->focus!=NULL){
        return libaroma_window_setfocus(win,win->childs[i]);
      }
    }
  }
  return NULL;
} /* End of libaroma_window_setfocus */
/*
* Function : libaroma_window_sync
* Return Value: byte
* Descriptions: sync window canvas
*/
/*
 * Push a window-relative rect of the window's canvas to the screen.
 * Delegates to the handler's sync when present; child windows and
 * lock_sync'd windows (e.g. during the show animation) are no-ops.
 * Returns 1 on success/no-op, 0 on error.
 * Fix: the dc-is-null log message said "window_invalidate" — a
 * copy-paste from that function — and now names this function.
 */
byte libaroma_window_sync(LIBAROMA_WINDOWP win, int x, int y, int w, int h){
  __CHECK_WM(0);
  if (win==NULL){
    ALOGW("libaroma_window_sync win is null");
    return 0;
  }
  if (win->handler!=NULL){
    if (win->handler->sync!=NULL){
      return win->handler->sync(win,x,y,w,h);
    }
  }
  if (win->parent!=NULL){
    return 0;
  }
  if (!win->lock_sync){
    if (!libaroma_window_isactive(win)){
      ALOGW("libaroma_window_sync win is not active window");
      return 0;
    }
    if (win->dc==NULL){
      ALOGW("libaroma_window_sync dc is null");
      return 0;
    }
    /* sync workspace (window-relative rect -> screen coordinates) */
    libaroma_wm_sync(win->x+x,win->y+y,w,h);
  }
  return 1;
} /* End of libaroma_window_sync */
/*
* Function : libaroma_window_invalidate
* Return Value: byte
* Descriptions: invalidate window drawing
*/
/*
 * Redraw the whole window: background first, then every child control,
 * then (when sync != 0) push the result to the screen.
 * sync==10 is a special value used during the show animation: it
 * forces the redraw even while lock_sync is set.
 * Returns 1 on success/no-op, 0 on error.
 */
byte libaroma_window_invalidate(LIBAROMA_WINDOWP win, byte sync){
  __CHECK_WM(0);
  if (win==NULL){
    ALOGW("window_invalidate win is null");
    return 0;
  }
  if (win->handler!=NULL){
    if (win->handler->invalidate!=NULL){
      return win->handler->invalidate(win,sync);
    }
  }
  if (win->parent!=NULL){
    return 0;
  }
  if (!libaroma_window_isactive(win)){
    ALOGW("window_invalidate win is not active window");
    return 0;
  }
  if (win->dc==NULL){
    ALOGW("window_invalidate dc is null");
    return 0;
  }
  if ((!win->lock_sync)||(sync==10)){
    /* draw bg */
    libaroma_draw(
      win->dc,
      win->bg,
      0, 0, 1);
    /* draw childs */
    int i;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
    for (i=0;i<win->childn;i++){
      /* draw no sync */
      libaroma_control_draw(win->childs[i], 0);
    }
    /* sync */
    if (sync){
      libaroma_window_sync(win, 0, 0, win->w, win->h);
    }
  }
  return 1;
} /* End of libaroma_window_invalidate */
/*
* Function : libaroma_window_anishow
* Return Value: byte
* Descriptions: show window - animated
*/
/*
 * Show a top-level window, optionally with an entry animation.
 * animation selects a LIBAROMA_WINDOW_SHOW_ANIMATION_* effect;
 * duration is in the units of libaroma_tick() (sub-50 durations and
 * animation==0 fall back to an immediate show).
 * While animating: lock_sync suppresses normal syncs, win->active is
 * set to 2 (so the UI thread stays idle), the window draws into a
 * temporary canvas (tdc) and each frame composes it onto the real
 * workspace canvas (wmc); `back` holds the previous screen content
 * for the slide effects. Afterwards active becomes 1 and
 * LIBAROMA_MSG_WIN_ACTIVE is sent with x=10 — the marker that tells
 * process_event the window was already made ready here.
 * Returns 1 when the window was shown with animation, the immediate
 * show result otherwise, 0 on error.
 */
byte libaroma_window_anishow(
  LIBAROMA_WINDOWP win,
  byte animation,
  int duration){
  __CHECK_WM(0);
  if (!win){
    return 0;
  }
  if (win->parent!=NULL){
    ALOGW("Child window cannot shown directly...");
    return 0;
  }
  /* set initial focus
  libaroma_window_setfocus(win, NULL);
  */
  if ((!animation)||(duration<50)){
    return libaroma_wm_set_active_window(win);
  }
  /* lock and retval */
  byte retval = 0;
  win->lock_sync = 1;
  if (libaroma_wm_set_active_window(win)){
    win->active=2;
    /* draw window into temp canvas */
    LIBAROMA_CANVASP wmc = win->dc;
    LIBAROMA_CANVASP tdc = libaroma_canvas(wmc->w,wmc->h);
    libaroma_draw(tdc,wmc,0,0,0);
    win->dc=tdc; /* switch dc */
    /* keep a copy of what was on screen, for the slide variants */
    LIBAROMA_CANVASP back = libaroma_canvas(wmc->w, wmc->h);
    libaroma_draw(back,wmc,0,0,0);
    /* invalidate now */
    /* sync==10 forces the redraw into tdc despite lock_sync */
    libaroma_window_invalidate(win, 10);
    long start = libaroma_tick();
    int delta = 0;
    while ((delta=libaroma_tick()-start)<duration){
      /* state runs 0..1 over the animation duration */
      float state = ((float) delta)/((float) duration);
      if (state>=1.0){
        break;
      }
      switch (animation){
        case LIBAROMA_WINDOW_SHOW_ANIMATION_FADE:
          {
            /* blend the new window over the old content with rising opacity */
            float swift_out_state = libaroma_cubic_bezier_swiftout(state);
            float bstate = 255.0 * swift_out_state;
            byte bbstate = (byte) round(bstate);
            libaroma_draw_opacity(
              wmc, win->dc,0,0,0,bbstate
            );
            //libaroma_window_sync(win, 0, 0, win->w, win->h);
            libaroma_wm_sync(win->x,win->y,win->w,win->h);
          }
          break;
        case LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_TOP:
        case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_TOP:
          {
            /* window slides in from the bottom edge; SLIDE_* also keeps
               the old content visible above it, PAGE_* only syncs the
               newly revealed strip */
            float swift_out_state = libaroma_cubic_bezier_swiftout(state);
            int y = win->h - (swift_out_state * win->h);
            int h = win->h - y;
            if (h>0){
              if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_TOP){
                if (h<win->h){
                  libaroma_draw_ex(
                    wmc,
                    back,
                    0, 0, 0, win->h - (win->h - h), win->w, win->h-h,
                    0, 0xff
                  );
                }
              }
              libaroma_draw_ex(
                wmc,
                win->dc,
                0, y, 0, 0, win->w, h,
                0, 0xff
              );
              if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_TOP){
                libaroma_wm_sync(win->x,win->y,win->w,win->h);
              }
              else{
                libaroma_wm_sync(win->x,win->y+y,win->w, h);
              }
            }
          }
          break;
        case LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_BOTTOM:
        case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_BOTTOM:
          {
            /* window slides in from the top edge */
            float swift_out_state = libaroma_cubic_bezier_swiftout(state);
            int y = 0 - (win->h - (swift_out_state * win->h));
            int h = win->h + y;
            if (h>0){
              if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_BOTTOM){
                if (h<win->h){
                  libaroma_draw_ex(
                    wmc,
                    back,
                    0, h, 0, 0, win->w, win->h-h,
                    0, 0xff
                  );
                }
              }
              libaroma_draw_ex(
                wmc,
                win->dc,
                0, 0, 0, win->h-h, win->w, h,
                0, 0xff
              );
              if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_BOTTOM){
                libaroma_wm_sync(win->x,win->y,win->w,win->h);
              }
              else{
                libaroma_wm_sync(win->x,win->y,win->w,h);
              }
            }
          }
          break;
        case LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_LEFT:
        case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_LEFT:
          {
            /* window slides in from the right edge */
            float swift_out_state = libaroma_cubic_bezier_swiftout(state);
            int x = win->w - (swift_out_state * win->w);
            int w = win->w - x;
            if (w>0){
              if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_LEFT){
                if (w<win->w){
                  libaroma_draw_ex(
                    wmc,
                    back,
                    0, 0, win->w - (win->w - w), 0, win->w - w, win->h,
                    0, 0xff
                  );
                }
              }
              libaroma_draw_ex(
                wmc,
                win->dc,
                x, 0, 0, 0, w, win->h,
                0, 0xff
              );
              if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_LEFT){
                libaroma_wm_sync(win->x,win->y,win->w,win->h);
              }
              else{
                libaroma_wm_sync(win->x+x,win->y,w, win->h);
              }
            }
          }
          break;
        case LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_RIGHT:
        case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_RIGHT:
          {
            /* window slides in from the left edge */
            float swift_out_state = libaroma_cubic_bezier_swiftout(state);
            int x = 0 - (win->w - (swift_out_state * win->w));
            int w = win->w + x;
            if (w>0){
              if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_RIGHT){
                if (w<win->w){
                  libaroma_draw_ex(
                    wmc,
                    back,
                    w, 0, 0, 0, win->w - w, win->h,
                    0, 0xff
                  );
                }
              }
              libaroma_draw_ex(
                wmc,
                win->dc,
                0, 0, win->w-w, 0, w, win->h,
                0, 0xff
              );
              if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_RIGHT){
                libaroma_wm_sync(win->x,win->y,win->w,win->h);
              }
              else{
                libaroma_wm_sync(win->x,win->y,w, win->h);
              }
            }
          }
          break;
        default:
          /* invalid animation */
          /* zeroing start makes the while condition fail next round */
          start=0;
          break;
      }
    }
    retval = 1;
    /* final frame: copy the fully drawn window back, restore real dc */
    libaroma_draw(wmc,win->dc,0,0,0);
    win->dc=wmc; /* switch dc back */
    /* cleanup */
    libaroma_canvas_free(back);
    libaroma_canvas_free(tdc);
  }
  win->lock_sync = 0;
  /* sync view now */
  if (retval){
    win->active=1;
    // libaroma_window_sync(win, 0, 0, win->w, win->h);
    libaroma_wm_sync(win->x,win->y,win->w,win->h);
    /* send activate */
    /* x=10 tells process_event to skip the _ready() pass */
    LIBAROMA_MSG _msg;
    libaroma_window_process_event(win,libaroma_wm_compose(
      &_msg, LIBAROMA_MSG_WIN_ACTIVE, NULL, 10, 0)
    );
  }
  return retval;
} /* End of libaroma_window_show */
/*
* Function : libaroma_window_calculate_pos
* Return Value: void
* Descriptions: calculate screen position to window/control position
*/
/*
 * Translate a screen coordinate (*x,*y) into window/control-local
 * space: subtract the window origin (win, or ctl's owner window when
 * win is NULL) and, when a control is given, the control origin too.
 */
void libaroma_window_calculate_pos(
  LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl,
  int * x, int * y
){
  LIBAROMA_WINDOWP ref = win;
  if ((ref==NULL)&&(ctl!=NULL)){
    ref = ctl->window;
  }
  if (ref!=NULL){
    *x -= ref->x;
    *y -= ref->y;
  }
  if (ctl!=NULL){
    *x -= ctl->x;
    *y -= ctl->y;
  }
  /*
  *x-=libaroma_wm()->x;
  *y-=libaroma_wm()->y;
  */
} /* End of libaroma_window_calculate_pos */
/*
* Function : libaroma_window_calculate_pos_abs
* Return Value: void
* Descriptions: calculate absolute screen position to top window position
*/
/*
 * Translate an absolute screen coordinate into top-window space by
 * subtracting the control offset (when given) and then each
 * ancestor's ax/ay up the parent chain.
 */
void libaroma_window_calculate_pos_abs(
  LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl,
  int * x, int * y
){
  if (ctl!=NULL){
    *x -= ctl->x;
    *y -= ctl->y;
    win = ctl->window;
  }
  for (; win!=NULL; win=win->parent){
    *x -= win->ax;
    *y -= win->ay;
  }
} /* End of libaroma_window_calculate_pos_abs */
/*
* Function : _libaroma_window_is_inside
* Return Value: byte
* Descriptions: check position coordinate
*/
/* Return 1 when window-local point (x,y) falls inside ctl's rect
 * (left/top edges inclusive, right/bottom exclusive), else 0. */
byte _libaroma_window_is_inside(LIBAROMA_CONTROLP ctl, int x, int y) {
  int inside_x = (x >= ctl->x) && (x < ctl->x + ctl->w);
  int inside_y = (y >= ctl->y) && (y < ctl->y + ctl->h);
  return (inside_x && inside_y) ? 1 : 0;
} /* End of _libaroma_window_is_inside */
/*
* Function : libaroma_window_post_command
* Return Value: byte
* Descriptions: post direct command
*/
/* Post a LIBAROMA_MSG_WIN_DIRECTMSG carrying cmd to the message queue. */
byte libaroma_window_post_command(dword cmd){
  return libaroma_msg_post(
    LIBAROMA_MSG_WIN_DIRECTMSG, 0, 0, (int) cmd, 0, NULL
  );
} /* End of libaroma_window_post_command */
/*
* Function : libaroma_window_post_command_ex
* Return Value: byte
* Descriptions: post direct command extended
*/
/* Extended direct-command post: also carries state, key, y and a data
 * pointer alongside the command. */
byte libaroma_window_post_command_ex(dword cmd,
    byte state, int key, int y, voidp d){
  return libaroma_msg_post(
    LIBAROMA_MSG_WIN_DIRECTMSG, state, key, (int) cmd, y, d
  );
} /* End of libaroma_window_post_command_ex */
/*
* Function : libaroma_window_process_event
* Return Value: dword
* Descriptions: process message
*/
/*
 * Dispatch a window-manager message to a top-level window: lifecycle
 * (active/inactive/resize/measured), invalidation, direct commands and
 * touch routing to the child under the pointer. The handler's
 * message_hooker gets first refusal on every message.
 * Returns the handler/command result, 0 otherwise.
 */
dword libaroma_window_process_event(LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg){
  __CHECK_WM(0);
  if (win==NULL){
    ALOGW("window_event win is null");
    return 0;
  }
  if (win->parent!=NULL){
    ALOGW("window_event cannot used for child window...");
    return 0;
  }
  dword ret = 0;
  /* hooker may consume the message entirely */
  if (win->handler){
    if (win->handler->message_hooker){
      if (win->handler->message_hooker(win,msg,&ret)){
        return ret;
      }
    }
  }
  switch (msg->msg){
    case LIBAROMA_MSG_WIN_ACTIVE:
      {
        /* set current window size */
        win->focused=NULL;
        win->touched=NULL;
        /* x==10 marks activation from anishow, which already ran
           _libaroma_window_ready() itself */
        if (msg->x!=10){
          _libaroma_window_ready(win);
        }
        if ((!win->lock_sync)||(msg->x==10)){
          if ((!win->active)||(msg->x==10)){
            int i;
            win->active=1;
            /* signal child */
            for (i=0;i<win->childn;i++){
              if (win->childs[i]->handler->message){
                win->childs[i]->handler->message(win->childs[i], msg);
              }
            }
          }
        }
      }
      break;
    case LIBAROMA_MSG_WIN_RESIZE:
      {
        /* rebuild canvas, then forward to every child */
        int i;
        _libaroma_window_ready(win);
        for (i=0;i<win->childn;i++){
          if (win->childs[i]->handler->message){
            win->childs[i]->handler->message(win->childs[i], msg);
          }
        }
      }
      break;
    case LIBAROMA_MSG_WIN_INACTIVE:
      {
        if (win->active){
          /* stop thread manager */
          win->active=0;
          /* send inactive message to child */
          int i;
          for (i=0;i<win->childn;i++){
            if (win->childs[i]->handler->message){
              win->childs[i]->handler->message(win->childs[i], msg);
            }
          }
          win->focused=NULL;
          win->touched=NULL;
        }
      }
      break;
    case LIBAROMA_MSG_WIN_MEASURED:
      {
        /* remeasured all childs */
        int i;
        for (i=0;i<win->childn;i++){
          libaroma_window_measure(win,win->childs[i]);
        }
      }
      break;
    case LIBAROMA_MSG_WIN_DIRECTMSG:
      {
        /* direct command: the payload is returned to the caller */
        return (dword) msg->x;
      }
      break;
    case LIBAROMA_MSG_WIN_INVALIDATE:
      {
        libaroma_window_invalidate(win, 1);
      }
      break;
    case LIBAROMA_MSG_TOUCH:
      {
        /* touch handler */
        if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){
          /* hit-test in window-local coordinates; first match wins */
          win->touched = NULL;
          int x = msg->x;
          int y = msg->y;
          libaroma_window_calculate_pos(win,NULL,&x,&y);
          int i;
          for (i=0;i<win->childn;i++){
            if (_libaroma_window_is_inside(win->childs[i],x,y)){
              win->touched = win->childs[i];
              break;
            }
          }
          if (win->touched!=NULL){
            if (win->touched->handler->message){
              ret=win->touched->handler->message(win->touched, msg);
            }
          }
        }
        else if (win->touched!=NULL){
          /* MOVE/UP go to the control captured on DOWN */
          if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){
            if (win->touched->handler->message){
              ret=win->touched->handler->message(win->touched, msg);
            }
          }
          else if (msg->state==LIBAROMA_HID_EV_STATE_UP){
            if (win->touched->handler->message){
              ret=win->touched->handler->message(win->touched, msg);
            }
            win->touched=NULL;
          }
        }
      }
      break;
  }
  return ret;
} /* End of libaroma_window_process_event */
/*
 * Function    : libaroma_window_pool
 * Return Value: dword
 * Descriptions: Fetch one pending window-manager message and dispatch it to
 *               the given root window. When msg is NULL a stack-local
 *               message buffer is used instead. Returns the dispatch result,
 *               or 0 when no message was pending or the window is invalid.
 */
dword libaroma_window_pool(
    LIBAROMA_WINDOWP win,
    LIBAROMA_MSGP msg){
  if (win==NULL){
    return 0;
  }
  if (win->parent!=NULL){
    /* only root windows can be pooled */
    ALOGW("cannot pool child window...");
    return 0;
  }
  /* fall back to a local buffer when the caller did not supply one */
  LIBAROMA_MSG local_msg;
  LIBAROMA_MSGP target = msg ? msg : &local_msg;
  if (!libaroma_wm_getmessage(target)){
    return 0;
  }
  return libaroma_window_process_event(win, target);
} /* End of libaroma_window_pool */
#undef __CHECK_WM
#endif /* __libaroma_window_c__ */
|
stream.c | // Copyright 2009-2018 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2018, NTESS
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.
#include <stdio.h>
#include <stdlib.h>
/*
 * Simple STREAM-like triad benchmark: c[i] = 2.0*a[i] + 1.5*b[i],
 * parallelized with OpenMP, followed by a serial checksum reduction.
 * Returns 0 on success, EXIT_FAILURE if an allocation fails.
 */
int main(int argc, char* argv[]) {
    const int LENGTH = 2000;

    printf("Allocating arrays of size %d elements.\n", LENGTH);
    double* a = (double*) malloc(sizeof(double) * LENGTH);
    double* b = (double*) malloc(sizeof(double) * LENGTH);
    double* c = (double*) malloc(sizeof(double) * LENGTH);

    /* Fix: the original never checked the allocations before writing. */
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "Allocation failed.\n");
        free(a);
        free(b);
        free(c);
        return EXIT_FAILURE;
    }
    printf("Done allocating arrays.\n");

    int i;
    for (i = 0; i < LENGTH; ++i) {
        a[i] = i;
        b[i] = LENGTH - i;
        c[i] = 0;
    }

    /* Fix: "Perfoming" typo in the progress message. */
    printf("Performing the fast_c compute loop...\n");

    #pragma omp parallel for
    for (i = 0; i < LENGTH; ++i) {
        //printf("issuing a write to: %llu (fast_c)\n", ((unsigned long long int) &fast_c[i]));
        c[i] = 2.0 * a[i] + 1.5 * b[i];
    }

    /* checksum so the compute loop cannot be optimized away */
    double sum = 0;
    for (i = 0; i < LENGTH; ++i) {
        sum += c[i];
    }

    printf("Sum of arrays is: %f\n", sum);
    printf("Freeing arrays...\n");
    free(a);
    free(b);
    free(c);
    printf("Done.\n");
    return 0; /* fix: main previously fell off the end without a return */
}
|
key_recovery.c | /********************************************************************
* Practical key-recovery attack against FlexAEAD-64
* Variant attack applicable to FlexAE
*
* Written in 2020 by Gaëtan Leurent <gaetan.leurent@inria.fr>
*
* To the extent possible under law, the author(s) have dedicated all
* copyright and related and neighboring rights to this software to
* the public domain worldwide. This software is distributed without
* any warranty.
*
* http://creativecommons.org/publicdomain/zero/1.0/
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
// Directory for temporary file (8TB needed)
#define DIR "/tmp/"
#include <x86intrin.h>
# include <linux/version.h>
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
#if (__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 25)
#include <sys/random.h>
#else
#include <linux/random.h>
#include <sys/syscall.h>
ssize_t getrandom(void *buf, size_t buflen, unsigned int flags) {
return syscall(SYS_getrandom, buf, buflen, flags);
}
#endif
#else
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
ssize_t getrandom(void *buf, size_t buflen, unsigned int flags) {
int fd = open("/dev/urandom", O_RDONLY);
ssize_t ret = read(fd, buf, buflen);
close(fd);
return ret;
}
#endif
#include "encrypt.h"
/* Cipher context passed (by value) to crypto_aead_encrypt. The field layout
 * is assumed to mirror the reference FlexAEADv1 implementation in encrypt.c
 * exactly — verify against that file before changing anything here. */
struct FlexAEADv1 {
    unsigned char subkeys[BLOCKSIZE * 8]; /* 8 round subkeys (K0A..K3B), printed in main() */
    unsigned char counter[BLOCKSIZE];
    unsigned char checksum[BLOCKSIZE];
    unsigned char state[BLOCKSIZE];
    unsigned char sn[BLOCKSIZE];
    unsigned long long nRounds;
    unsigned long long nBytes;
};
int crypto_aead_encrypt(
unsigned char *c,unsigned long long *clen,
const unsigned char *m,unsigned long long mlen,
const unsigned char *ad,unsigned long long adlen,
const unsigned char *nsec,
const unsigned char *npub,
struct FlexAEADv1 flexaeadv1
);
void FlexAEADv1_init(struct FlexAEADv1 * self, unsigned char *key );
// Master key
uint8_t Master_K[KEYSIZE];
struct FlexAEADv1 flexAEAD;
#define DATA (1ULL<<28)
#define DATA2 (1ULL<<38)
#define SQRT2 362/256
typedef struct {
uint64_t C;
uint64_t N:63, t:1;
} __attribute__((packed)) data_t;
/* void print_diff_pair (data_t a, data_t b); */
int test_K2A (uint64_t K);
int test_K2B3A (uint64_t K, uint64_t S0, uint64_t S1);
int filter_diff_phase1(uint64_t delta);
typedef int (*callback_t)(uint64_t);
uint64_t recover_state (uint64_t S0, uint64_t S1, callback_t filter_diff, callback_t test_state);
/* qsort-style comparator: orders data_t records by ascending ciphertext
 * value C. Returns <0, 0 or >0 in the usual strcmp convention. */
int compare_data (const void *a, const void *b) {
    const uint64_t ca = ((const data_t *)a)->C;
    const uint64_t cb = ((const data_t *)b)->C;
    if (ca == cb)
        return 0;
    return (ca < cb) ? -1 : 1;
}
void sort_data (const data_t *d, uint64_t N);
void sort_data_mask (const data_t *d, uint64_t N, uint64_t mask);
void sort_data_large (const data_t *d, uint64_t N);
/* Expand a 32-bit counter into the first 8 nonce bytes: nibble i of n is
 * placed in the HIGH nibble of byte i; low nibbles stay zero. */
void make_nonce(uint8_t N[BLOCKSIZE], uint32_t n) {
    for (int byte_idx = 0; byte_idx < 8; byte_idx++) {
        uint8_t nibble = (uint8_t)((n >> (4 * byte_idx)) & 0xf);
        N[byte_idx] = (uint8_t)(nibble << 4);
    }
}
static uint8_t AES_SBOX[] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
#define Sbox(x) AES_SBOX[x]
/* Apply the AES S-box independently to each of the four bytes of x. */
uint32_t SBOX(uint32_t x) {
    uint32_t out = 0;
    for (int shift = 0; shift < 32; shift += 8)
        out |= (uint32_t)Sbox((x >> shift) & 0xff) << shift;
    return out;
}
static uint8_t AES_SBOXI[] = {
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
};
#define SboxI(x) AES_SBOXI[x]
/* Apply the inverse AES S-box independently to each byte of x;
 * exact inverse of SBOX(). */
uint32_t SBOXI(uint32_t x) {
    uint32_t out = 0;
    for (int shift = 0; shift < 32; shift += 8)
        out |= (uint32_t)SboxI((x >> shift) & 0xff) << shift;
    return out;
}
uint64_t round_function(uint64_t x);
uint64_t inverse_round_function(uint64_t x);
// Partial keys recovered
uint64_t K2A;
uint64_t K2B3A;
uint64_t K0B;
uint64_t K0A;
uint64_t K2B;
uint64_t K3B;
uint64_t K2A3B;
/*
 * Attack driver: recovers the FlexAEAD-64 subkeys K2A, K2B3A and K2A3B in
 * three collision-based phases. The master key is only used through the
 * encryption oracle crypto_aead_encrypt; the true subkeys are printed at
 * startup so the recovered values can be checked by eye.
 * Uses GCC nested functions as recover_state() callbacks.
 */
int main() {
  uint64_t C[(2*BLOCKSIZE+TAGSIZE)/8];
  unsigned long long clen;
  uint8_t N[BLOCKSIZE] = {0};

  /* NOTE(review): rand() is unseeded, so every run attacks the same
   * pseudo-random master key — presumably intentional for reproducibility. */
  for (int i=0; i<KEYSIZE; i++)
    Master_K[i] = rand();

  // Compute subkeys
  FlexAEADv1_init(&flexAEAD, Master_K);

  /* Print the true subkeys (ground truth for the recovery below). */
  /* printf ("K3A: "); */
  /* for (int i=0; i<BLOCKSIZE; i++) */
  /*   printf ("%02x", flexAEAD.subkeys[6*BLOCKSIZE+i]); */
  /* printf ("\n"); */
  printf ("K2A: ");
  for (int i=0; i<BLOCKSIZE; i++)
    printf ("%02x", flexAEAD.subkeys[4*BLOCKSIZE+i]);
  printf ("\n");
  printf ("K2B3A: ");
  for (int i=0; i<BLOCKSIZE; i++)
    printf ("%02x", flexAEAD.subkeys[5*BLOCKSIZE+i]^flexAEAD.subkeys[6*BLOCKSIZE+i]);
  printf ("\n");
  printf ("K0B: ");
  for (int i=0; i<BLOCKSIZE; i++)
    printf ("%02x", flexAEAD.subkeys[1*BLOCKSIZE+i]);
  printf ("\n");
  printf ("K0A: ");
  for (int i=0; i<BLOCKSIZE; i++)
    printf ("%02x", flexAEAD.subkeys[0*BLOCKSIZE+i]);
  printf ("\n");
  printf ("K2B: ");
  for (int i=0; i<BLOCKSIZE; i++)
    printf ("%02x", flexAEAD.subkeys[5*BLOCKSIZE+i]);
  printf ("\n");
  printf ("K3B: ");
  for (int i=0; i<BLOCKSIZE; i++)
    printf ("%02x", flexAEAD.subkeys[7*BLOCKSIZE+i]);
  printf ("\n");
  printf ("K2A3B: ");
  for (int i=0; i<BLOCKSIZE; i++)
    printf ("%02x", flexAEAD.subkeys[4*BLOCKSIZE+i]^flexAEAD.subkeys[7*BLOCKSIZE+i]);
  printf ("\n");
  fflush(stdout);

  // Hash table
  data_t *data = malloc(2*DATA*sizeof(data_t));
  assert(data);

  /* ---------- Phase 1: recover K2A from nonce-structure collisions ------- */
  printf ("Generate phase 1 data...");
  fflush(stdout);
  // Encrypt zero message with structure of nonces
#pragma omp parallel for private(C) private(clen) firstprivate(N)
  for (unsigned n=0; n<DATA; n++) {
    make_nonce(N, n);
    uint8_t M[2*BLOCKSIZE] = {0};
    crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N, flexAEAD);
    assert(clen <= sizeof(C));
    /* record both ciphertext blocks, tagged (t) with their block index */
    data[2*n  ] = (data_t){C: C[0], N: n, t: 0};
    data[2*n+1] = (data_t){C: C[1], N: n, t: 1};
  }
  printf ("Done\n");
  fflush(stdout);

  // Look for collisions
  // qsort(data, 2*DATA, sizeof(data_t), compare_data);
  sort_data(data, 2*DATA);
  int n=0;
  for (unsigned i=1; i<2*DATA; i++)
    if (data[i].C == data[i-1].C) n++;
  printf ("Found %i collisions\n", n);
  fflush(stdout);
  for (unsigned i=1; i<2*DATA; i++) {
    if (data[i].C == data[i-1].C) {
      /* a useful collision is always between block 0 and block 1 */
      assert(data[i].t != data[i-1].t);
      // print_diff_pair(data[i-1], data[i]);
      uint8_t N0[BLOCKSIZE];
      uint8_t N1[BLOCKSIZE];
      make_nonce(N0, data[i-1].N);
      make_nonce(N1, data[i  ].N);
      /* repack the nonces as big-endian 64-bit states */
      uint64_t S0 = 0;
      uint64_t S1 = 0;
      for (int i=0; i<BLOCKSIZE; i++) {
        S0 = (S0<<8)^N0[i];
        S1 = (S1<<8)^N1[i];
      }
      /* GCC nested function: candidate state -> K2A candidate check */
      int test_key(uint64_t S) {
        return test_K2A(S^S0);
      }
      if (recover_state(S0, S1, filter_diff_phase1, test_key))
        break;
    }
  }
  if (!K2A) {
    printf ("Failed to recover K2A\n");
    exit(0);
  } else {
    printf ("Recovered K2A = %016llx\n", (unsigned long long)K2A);
    fflush(stdout);
  }

  /* ---------- Phase 2: recover K2B3A using chosen internal states -------- */
  printf ("Generate phase 2 data...");
  fflush(stdout);
  // Generate structure of nonces
#pragma omp parallel for private(C) private(clen) firstprivate(N)
  for (unsigned n=0; n<DATA*SQRT2; n++) {
    uint64_t S = _pdep_u64(n, 0xf0f0f0f0f0f0f0f0LL);
    uint8_t M[BLOCKSIZE] = {n, 0, 0, 0, n};
    /* knowing K2A lets us pick nonces that produce a chosen state S */
    for (int i=0; i<5; i++) {
      S = inverse_round_function(S);
    }
    S ^= K2A;
    uint8_t N[BLOCKSIZE];
    for (int i=0; i<BLOCKSIZE; i++) {
      N[i] = S>>(56-8*i);
    }
    crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N, flexAEAD);
    assert(clen <= sizeof(C));
    data[n] = (data_t){C: C[1], N: n, t: 0};
  }
  printf ("Done\n");
  fflush(stdout);

  // Look for collisions
  // qsort(data, 2*DATA, sizeof(data_t), compare_data);
  sort_data(data, DATA*SQRT2);
  n=0;
  for (unsigned i=1; i<DATA*SQRT2; i++)
    if (data[i].C == data[i-1].C) n++;
  printf ("Found %i collisions\n", n);
  fflush(stdout);
  for (unsigned i=1; i<DATA*SQRT2; i++) {
    if (data[i].C == data[i-1].C) {
      /* for (int z=0; z<2; z++) { */
      /*   uint64_t S = _pdep_u64(data[i-z].N, 0xf0f0f0f0f0f0f0f0LL); */
      /*   uint8_t M[BLOCKSIZE] = {data[i-z].N}; */
      /*   for (int i=0; i<5; i++) { */
      /*     S = inverse_round_function(S); */
      /*   } */
      /*   S ^= K2A; */
      /*   uint8_t N[BLOCKSIZE]; */
      /*   for (int i=0; i<BLOCKSIZE; i++) { */
      /*     N[i] = S>>(56-8*i); */
      /*   } */
      /*   flexAEAD_dbg = 1; */
      /*   crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N, flexAEAD); */
      /*   flexAEAD_dbg = 0; */
      /* } */
      uint64_t S0 = _pdep_u64(data[i-1].N, 0xf0f0f0f0f0f0f0f0LL);
      uint64_t S1 = _pdep_u64(data[i  ].N, 0xf0f0f0f0f0f0f0f0LL);
      /* accept only differences consistent with the nonce difference */
      int filter_diff(uint64_t delta) {
        if ((delta & 0x00ffffff00ffffffLL) == 0) {
          uint64_t d = _pext_u64(S0^S1, 0xf0f0);
          d = (d<<56) | (d<<24);
          if (delta == d)
            return 1;
        }
        return 0;
      }
      int test_state(uint64_t S) {
        return test_K2B3A(S^S0, S0, S1);
      }
      if (recover_state(S0, S1, filter_diff, test_state))
        break;
    }
  }
  if (!K2B3A) {
    printf ("Failed to recover K2B3A\n");
    exit(0);
  } else {
    printf ("Recovered K2B3A = %016llx\n", (unsigned long long)K2B3A);
    fflush(stdout);
  }

  // X0,Y0
  free(data);

  /* ---------- Phase 3 setup: DATA2 = 2^38 records no longer fit in RAM,
   * so back the table with a scratch file and mmap it. ------------------- */
  {
    // Allocate with mmap using scratch file
    int fd;
    fd = open(DIR "/mmap.tmp", O_RDWR | O_CREAT | O_TRUNC, 0644);
    // fd = open(DIR "/mmap.tmp", O_RDWR, 0644);
    /* NOTE(review): open() signals failure with -1, not 0 — this check
     * misses real errors (and fd 0 would be a false positive). */
    if (!fd) {
      perror("open failed");
      exit(-1);
    }
#if 0
    int err = ftruncate(fd, sizeof(data_t)*DATA2);
    if (err) {
      perror("ftruncate failed");
      exit(-1);
    }
#else
#define BUFSIZE (1ULL<<30)
    /* pre-write zeros so the mapping is fully backed on disk */
    void *scratch = calloc(BUFSIZE, 1);
    for (unsigned i=0; i<sizeof(data_t)*(DATA2+BUFSIZE-1)/BUFSIZE; i++) {
      int r = write(fd, scratch, BUFSIZE);
      if (r != BUFSIZE) {
        perror("mmap failed");
        exit(-1);
      }
    }
    free(scratch);
#endif
    data = mmap(NULL, sizeof(data_t)*DATA2, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    /* NOTE(review): mmap() returns MAP_FAILED ((void*)-1) on error, never
     * NULL — this error test can never fire. */
    if (!data) {
      perror("mmap failed");
      exit(-1);
    }
    madvise(data, sizeof(data_t)*DATA2, MADV_SEQUENTIAL);
  }

  printf ("Generate phase 3 data...");
  fflush(stdout);

  // Identify nonce with no carries
  uint64_t X0 = 0;
  {
    uint64_t C0[TAGSIZE/8];
    unsigned long long clen;
    uint8_t N[BLOCKSIZE] = {42};
    uint8_t M[0];
    /* reference tag for an empty message */
    crypto_aead_encrypt((uint8_t*)C0, &clen, M, sizeof(M), NULL, 0, NULL, N, flexAEAD);
    for (int n=0; n<64; n++) {
      uint64_t X;
      int r = getrandom(&X, sizeof(X), 0);
      assert(r == sizeof(X));
      /* derive the nonce that produces internal state X */
      uint64_t S = X;
      for (int i=0; i<5; i++)
        S = inverse_round_function(S);
      S ^= K2A;
      uint8_t N[BLOCKSIZE];
      for (int i=0; i<BLOCKSIZE; i++) {
        N[i] = S>>(56-8*i);
      }
      /* craft two AD blocks that cancel iff no counter carry occurred */
      uint8_t M[2*BLOCKSIZE] = {0};
      S = X^K2B3A^0x0100000001000000LL;
      for (int i=0; i<5; i++)
        S = round_function(S);
      for (int i=0; i<BLOCKSIZE; i++) {
        M[i] = S>>(56-8*i);
      }
      S = X^K2B3A^0x0200000002000000LL;
      for (int i=0; i<5; i++)
        S = round_function(S);
      for (int i=0; i<BLOCKSIZE; i++) {
        M[BLOCKSIZE+i] = S>>(56-8*i);
      }
      uint64_t C[TAGSIZE/8];
      crypto_aead_encrypt((uint8_t*)C, &clen, NULL, 0, M, sizeof(M), NULL, N, flexAEAD);
      assert(clen <= sizeof(C));
      if (C0[0] == C[0]) {
        X0 = X; /* found a state whose counter increments carry-free */
        break;
      }
    }
  }
  if (!X0) {
    printf ("Failed to locate carry-less nonce\n");
    exit(0);
  }

  // Generate structure of Si
#pragma omp parallel for private(C) private(clen) firstprivate(N)
  for (uint64_t n=0; n<DATA2; n++) {
    uint64_t delta = _pdep_u64(n, 0x00ffffff00ffffffLL);
    // Build (N,S) pairs without touching carries
    uint64_t S = X0^delta;
    for (int i=0; i<5; i++)
      S = inverse_round_function(S);
    S ^= K2A;
    uint8_t N[BLOCKSIZE];
    for (int i=0; i<BLOCKSIZE; i++) {
      N[i] = S>>(56-8*i);
    }
    uint8_t M[2*BLOCKSIZE] = {0};
    S = X0^delta^K2B3A^0x0100000001000000LL;
    for (int i=0; i<5; i++)
      S = round_function(S);
    /* 0xdeadbeef*n randomizes the pair so cross-query collisions are
     * meaningful; the second block additionally differs by the target
     * difference 0xf0000000f0000000 */
    S ^= 0xdeadbeefLL*n;
    for (int i=0; i<BLOCKSIZE; i++) {
      M[i] = S>>(56-8*i);
    }
    S = X0^delta^K2B3A^0x0200000002000000LL;
    for (int i=0; i<5; i++)
      S = round_function(S);
    S ^= 0xdeadbeefLL*n;
    S ^= 0xf0000000f0000000LL;
    for (int i=0; i<BLOCKSIZE; i++) {
      M[BLOCKSIZE+i] = S>>(56-8*i);
    }
    crypto_aead_encrypt((uint8_t*)C, &clen, NULL, 0, M, sizeof(M), NULL, N, flexAEAD);
    assert(clen <= sizeof(C));
    data[n] = (data_t){C: __builtin_bswap64(C[0]), N: n, t: 0};
  }
  printf ("Done\n");
  fflush(stdout);

  // Look for collisions
  // qsort(data, DATA2, sizeof(data_t), compare_data);
  time_t now;
  time(&now);
  printf("Starting sort at: %s\n", ctime(&now));
  fflush(stdout);
  sort_data_large(data, DATA2);
  time(&now);
  printf("Finished sort at: %s\n", ctime(&now));
  fflush(stdout);
  unsigned n_coll=0;
  for (uint64_t i=1; i<DATA2; i++)
    if (data[i].C == data[i-1].C) n_coll++;
  printf ("Found %i collisions\n", n_coll);
  fflush(stdout);
  /* copy colliding pairs out of the mmap so the big table can be scanned
   * repeatedly from cheap memory */
  data_t *coll = malloc(2*n_coll*sizeof(data_t));
  n_coll = 0;
  for (uint64_t i=1; i<DATA2; i++) {
    if (data[i].C == data[i-1].C) {
      coll[2*n_coll  ] = data[i-1];
      coll[2*n_coll+1] = data[i];
      n_coll++;
    }
  }
  {
    /* phase 3 accepts every difference; filtering happens in test_state */
    int filter_diff(uint64_t delta) {
      return 1;
    }
    int test_state(uint64_t X) {
      // Try all collisions
      for (unsigned i=0; i<n_coll; i++) {
        uint64_t S0 = 0xdeadbeefLL*coll[2*i].N;
        uint64_t S1 = (0xdeadbeefLL*coll[2*i].N) ^ 0xf0000000f0000000LL;
        uint64_t T0 = 0xdeadbeefLL*coll[2*i+1].N;
        uint64_t T1 = (0xdeadbeefLL*coll[2*i+1].N) ^ 0xf0000000f0000000LL;
        uint64_t K = S0 ^ X;
        S0 ^= K;
        S1 ^= K;
        T0 ^= K;
        T1 ^= K;
        for (int i=0; i<5; i++) {
          S0 = round_function(S0);
          S1 = round_function(S1);
          T0 = round_function(T0);
          T1 = round_function(T1);
        }
        /* a consistent key candidate makes both pairs share a difference */
        if ((S0^S1) == (T0^T1)) {
          K2A3B = K;
          printf ("K2A3B candidate: %016llx\n", (unsigned long long)K2A3B);
          fflush(stdout);
          // return 1;
        }
      }
      return 0;
    }
    recover_state(0, 0xf0000000f0000000LL, filter_diff, test_state);
  }
  if (!K2A3B) {
    printf ("Failed to recover K2A3B\n");
    exit(0);
  }
}
/* One PFK round on a 64-bit state: interleave the nibbles of the two
 * 32-bit halves via PDEP, then mix the halves with three S-box steps. */
uint64_t round_function(uint64_t x) {
    uint64_t shuffled = _pdep_u64(x >> 32, 0xf0f0f0f0f0f0f0f0)
                      | _pdep_u64(x, 0x0f0f0f0f0f0f0f0f);
    uint32_t lo = (uint32_t)shuffled;
    uint32_t hi = (uint32_t)(shuffled >> 32);
    lo = SBOX(lo);
    hi = SBOX(hi ^ lo);
    lo = SBOX(lo ^ hi);
    return ((uint64_t)hi << 32) | lo;
}
/* Exact inverse of round_function(): undo the three S-box mixing steps,
 * then de-interleave the nibbles via PEXT. */
uint64_t inverse_round_function(uint64_t x) {
    uint32_t lo = (uint32_t)x;
    uint32_t hi = (uint32_t)(x >> 32);
    lo = SBOXI(lo) ^ hi;
    hi = SBOXI(hi) ^ lo;
    lo = SBOXI(lo);
    uint64_t merged = ((uint64_t)hi << 32) | lo;
    return _pext_u64(merged, 0x0f0f0f0f0f0f0f0f)
         | (_pext_u64(merged, 0xf0f0f0f0f0f0f0f0) << 32);
}
/* Phase-1 output-difference filter. Accepts delta iff:
 *  - only bytes 3 and 7 are active (mask 0xff000000ff000000),
 *  - bit 0x01 is set in both active bytes,
 *  - (delta>>24) + 0x0100000001 has Hamming weight exactly 2. */
int filter_diff_phase1(uint64_t delta) {
    if ((delta & 0x00ffffff00ffffffLL) != 0)
        return 0;
    if ((delta & 0x0100000001000000LL) != 0x0100000001000000LL)
        return 0;
    return __builtin_popcountll((delta >> 24) + 0x0100000001) == 2;
}
/* Oracle check for a K2A candidate: derive two nonces that, if K is the
 * real K2A, lead to internal states differing by 0x0100000001000000 and
 * therefore to swapped/colliding ciphertext blocks. Tries two related state
 * pairs (n = 0, 1). On success stores K in the global K2A and returns 1. */
int test_K2A (uint64_t K) {
  for (int n=0; n<2; n++) {
    // Build pair that should collide
    uint64_t S0 = 0x0000000000000000LL ^ (n<<24);
    uint64_t S1 = 0x0100000001000000LL ^ (n<<24);
    /* walk the chosen states back through 5 rounds to get the raw nonces */
    for (int i=0; i<5; i++) {
      S0 = inverse_round_function(S0);
      S1 = inverse_round_function(S1);
    }
    S0 ^= K;
    S1 ^= K;
    uint8_t N0[BLOCKSIZE];
    uint8_t N1[BLOCKSIZE];
    for (int i=0; i<BLOCKSIZE; i++) {
      N0[i] = S0>>(56-8*i);
      N1[i] = S1>>(56-8*i);
    }
    uint8_t M[2*BLOCKSIZE] = {0};
    uint64_t C0[(2*BLOCKSIZE+TAGSIZE)/8];
    uint64_t C1[(2*BLOCKSIZE+TAGSIZE)/8];
    unsigned long long clen;
    crypto_aead_encrypt((uint8_t*)C0, &clen, M, sizeof(M), NULL, 0, NULL, N0, flexAEAD);
    assert(clen <= sizeof(C0));
    crypto_aead_encrypt((uint8_t*)C1, &clen, M, sizeof(M), NULL, 0, NULL, N1, flexAEAD);
    assert(clen <= sizeof(C1));
    /* a correct K2A makes block 0 of one query equal block 1 of the other */
    if (C0[0] == C1[1] || C0[1] == C1[0]) {
      K2A = K;
      return 1;
    }
  }
  return 0;
}
/* Query the oracle with one crafted (nonce, message) pair per internal
 * state: nonces are chosen (via K2A) to produce states X1 and X0, plaintexts
 * are Y0 and Y1 run forward 5 rounds. Returns 1 iff the second ciphertext
 * blocks collide, i.e. the guessed key material is consistent. */
int test_pair_K2B3A (uint64_t X0, uint64_t X1, uint64_t Y0, uint64_t Y1) {
  // Build pair that should collide
  uint64_t S0 = X1;
  uint64_t S1 = X0;
  /* invert 5 rounds to find the nonces producing states X1 and X0 */
  for (int i=0; i<5; i++) {
    S0 = inverse_round_function(S0);
    S1 = inverse_round_function(S1);
  }
  S0 ^= K2A;
  S1 ^= K2A;
  uint8_t N0[BLOCKSIZE];
  uint8_t N1[BLOCKSIZE];
  for (int i=0; i<BLOCKSIZE; i++) {
    N0[i] = S0>>(56-8*i);
    N1[i] = S1>>(56-8*i);
  }
  /* run Y0/Y1 forward 5 rounds to build the matching plaintext blocks */
  S0 = Y0;
  S1 = Y1;
  for (int i=0; i<5; i++) {
    S0 = round_function(S0);
    S1 = round_function(S1);
  }
  uint8_t M0[BLOCKSIZE];
  uint8_t M1[BLOCKSIZE];
  for (int i=0; i<BLOCKSIZE; i++) {
    M0[i] = S0>>(56-8*i);
    M1[i] = S1>>(56-8*i);
  }
  uint64_t C0[(BLOCKSIZE+TAGSIZE)/8];
  uint64_t C1[(BLOCKSIZE+TAGSIZE)/8];
  unsigned long long clen;
  crypto_aead_encrypt((uint8_t*)C0, &clen, M0, sizeof(M0), NULL, 0, NULL, N0, flexAEAD);
  assert(clen <= sizeof(C0));
  crypto_aead_encrypt((uint8_t*)C1, &clen, M1, sizeof(M1), NULL, 0, NULL, N1, flexAEAD);
  assert(clen <= sizeof(C1));
  if (C0[1] == C1[1]) {
    // printf ("## %08x %08x\n", C0[1], C1[1]);
    return 1;
  }
  return 0;
}
/* Verify a K2B3A candidate K (X0, X1 are the internal states of the
 * colliding queries), then resolve the counter-carry ambiguity in bytes 3
 * and 7: the collision determines K only up to a "plus-one" carry pattern,
 * so progressively wider carry masks are probed against the oracle.
 * On success stores the corrected value in the global K2B3A and returns 1. */
int test_K2B3A (uint64_t K, uint64_t X0, uint64_t X1) {
  static int n = 0; /* candidate counter (diagnostic only) */
  n++;
  uint64_t KD = 0;  /* carry-correction delta to XOR into K */
  uint64_t d = 0x0012345600789abcLL; /* arbitrary probe difference */
  if (test_pair_K2B3A(X0, X1^d, X0^K, X1^K^d)) {
    /* printf ("Candidate key: %016llx\n", K); */
    // Clean up carries from plus-one
    if (test_pair_K2B3A(X0, X0^(2ULL<<24), X0^K, X0^K^(2ULL<<24))) {
      // Ok, no carry
      KD ^= 1ULL<<24;
    } else {
      /* widen the carry mask one bit at a time until the oracle matches */
      for (uint64_t mask=3; mask<0x100; mask=2*mask+1) {
        if (test_pair_K2B3A(X0, X0^(1ULL<<24), X0^K, X0^K^(mask<<24))) {
          KD ^= mask<<24;
          break;
        }
      }
    }
    /* same carry clean-up for the high byte (bit 56) */
    if (test_pair_K2B3A(X0, X0^(2ULL<<56), X0^K, X0^K^(2ULL<<56))) {
      // Ok, no carry
      KD ^= 1ULL<<56;
    } else {
      for (uint64_t mask=3; mask<0x100; mask=2*mask+1) {
        if (test_pair_K2B3A(X0, X0^(1ULL<<56), X0^K, X0^K^(mask<<56))) {
          KD ^= mask<<56;
          break;
        }
      }
    }
    /* both carry bytes must have been resolved; otherwise reject */
    if ((KD&0xff00000000000000LL) == 0 || (KD&0x00000000ff000000LL) == 0) {
      return 0;
    }
    K2B3A = K^KD;
    /* printf ("Cleaned-up : %016llx [n=%i]\n", (unsigned long long)(K^KD), n); */
    return 1;
  } else {
    return 0;
  }
}
// Recover internal state from difference,
// assuming differential path is followed
// Callbacks:
//  - filter_diff to test output difference
//  - test_state is called on each candidate
//
// Guesses the unknown state 16 bits at a time (one nibble "superbox" per
// guess), pruning guesses whose one/two-round difference leaves the expected
// truncated differential path. Survivors are run 3 more rounds forward,
// filtered through filter_diff, unwound 5 rounds back to the pre-round state
// and handed to test_state. Returns nonzero iff test_state accepted one.
uint64_t recover_state (uint64_t S0, uint64_t S1, callback_t filter_diff, callback_t test_state) {
  printf("Trying to recover key from pair (%016llx %016llx)\n", (unsigned long long)S0, (unsigned long long)S1);
  int ret = 0;
  // first superbox
#pragma omp parallel for schedule(dynamic)
  for (uint32_t k1=0; k1 < 0x10000; k1++) {
    uint64_t T0 = S0;
    uint64_t T1 = S1;
    T0 ^= _pdep_u64(k1, 0xf000f000f000f000LL);
    T1 ^= _pdep_u64(k1, 0xf000f000f000f000LL);
    T0 = round_function(T0);
    T1 = round_function(T1);
    /* keep k1 only if the difference stays inside one nibble column */
    if ( (((T0^T1) & 0x0f0000000f000000LL) == 0) ||
         (((T0^T1) & 0xf0000000f0000000LL) == 0) ) {
      // second superbox
      for (uint32_t k2=0; k2 < 0x10000; k2++) {
        T0 = S0;
        T1 = S1;
        T0 ^= _pdep_u64(k1, 0xf000f000f000f000LL);
        T1 ^= _pdep_u64(k1, 0xf000f000f000f000LL);
        T0 ^= _pdep_u64(k2, 0x00f000f000f000f0LL);
        T1 ^= _pdep_u64(k2, 0x00f000f000f000f0LL);
        T0 = round_function(T0);
        T1 = round_function(T1);
        if ( (((T0^T1) & 0x0fff0fff0fff0fffLL) == 0) ||
             (((T0^T1) & 0xf0fff0fff0fff0ffLL) == 0) ) {
          T0 = round_function(T0);
          T1 = round_function(T1);
          uint64_t mask = 0;
          /* decide which nibble positions are already fixed by the path */
          if ( (((T0^T1) & 0x0fffffff0fffffffLL) == 0) ||
               (((T0^T1) & 0xff0fffffff0fffffLL) == 0) )
            mask = 0xffff0f0fffff0f0fLL;
          if ( (((T0^T1) & 0xf0fffffff0ffffffLL) == 0) ||
               (((T0^T1) & 0xfff0fffffff0ffffLL) == 0) )
            mask = 0xfffff0f0fffff0f0LL;
          if (mask) {
            int n = 0;
            // printf ("### %04x %04x\n", k1, k2);
            // Guess additional bytes
            for (uint32_t Y=0; Y<0x10000; Y++) {
              uint64_t U0 = T0 & mask;
              uint64_t U1 = T1 & mask;
              U0 |= _pdep_u64(Y, ~mask);
              U1 |= _pdep_u64(Y, ~mask);
              U0 = round_function(U0);
              U1 = round_function(U1);
              U0 = round_function(U0);
              U1 = round_function(U1);
              if ( (((U0^U1) & 0x0fffffff0fffffffLL) == 0) ||
                   (((U0^U1) & 0xffff0fffffff0fffLL) == 0) ) {
                // Guess final bytes
                for (uint32_t Z=0; Z<0x10000; Z++) {
                  n++;
                  U0 = T0 & 0xffff0000ffff0000LL;
                  U1 = T1 & 0xffff0000ffff0000LL;
                  U0 |= _pdep_u64(Y, ~mask);
                  U1 |= _pdep_u64(Y, ~mask);
                  U0 |= _pdep_u64(Z, (~mask)^0x0000ffff0000ffffLL);
                  U1 |= _pdep_u64(Z, (~mask)^0x0000ffff0000ffffLL);
                  U0 = round_function(U0);
                  U1 = round_function(U1);
                  U0 = round_function(U0);
                  U1 = round_function(U1);
                  U0 = round_function(U0);
                  U1 = round_function(U1);
                  uint64_t delta = U0^U1;
                  if (filter_diff(delta)) {
                    /* unwind all 5 rounds back to the pre-round state */
                    U0 = inverse_round_function(U0);
                    U1 = inverse_round_function(U1);
                    U0 = inverse_round_function(U0);
                    U1 = inverse_round_function(U1);
                    U0 = inverse_round_function(U0);
                    U1 = inverse_round_function(U1);
                    U0 = inverse_round_function(U0);
                    U1 = inverse_round_function(U1);
                    U0 = inverse_round_function(U0);
                    U1 = inverse_round_function(U1);
                    /* both guesses must imply the same key material */
                    assert((S0^U0) == (S1^U1));
                    /* printf ("Candidate key: %016llx [delta:%016llx] [%04x %04x]\n", */
                    /*         (unsigned long long)(S0^U0), (unsigned long long)delta, k1, k2); */
                    if (test_state(U0)) {
                      // printf ("Recovered key? %016llx [delta:%016llx]\n", (unsigned long long)(S0^U0), (unsigned long long)delta);
#pragma omp critical
                      {
                        ret=1;
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  return ret;
}
/* Print the XOR difference of two BLOCKSIZE-byte states, one nibble pair
 * per byte: low nibble first, then high nibble. */
void print_diff_state (uint8_t S0[BLOCKSIZE], uint8_t S1[BLOCKSIZE]) {
    for (int i = 0; i < BLOCKSIZE; i++) {
        uint8_t d = S0[i] ^ S1[i];
        printf(" %01x%01x", d & 0xf, d >> 4);
    }
    printf("\n");
}
/* FlexAEAD direct shuffle layer: interleave the nibbles of the two block
 * halves in place. Byte i of the first half and byte i of the second half
 * combine into output bytes 2i (high nibbles) and 2i+1 (low nibbles).
 * `state` is caller-provided scratch of at least blocklen bytes; the
 * result is copied back into `block`. */
inline void dirShuffleLayer( unsigned char * block, unsigned long long blocklen, unsigned char * state )
{
    const unsigned long long half = blocklen / 2;
    for (unsigned long long i = 0; i < half; i++) {
        unsigned char first  = block[i];
        unsigned char second = block[i + half];
        state[2*i]     = (unsigned char)((first & 0xf0) | ((second & 0xf0) >> 4));
        state[2*i + 1] = (unsigned char)(((first & 0x0f) << 4) | (second & 0x0f));
    }
    memcpy(block, state, blocklen);
}
/* void print_diff_pair (data_t a, data_t b) { */
/* uint8_t N0[BLOCKSIZE] = {0}; */
/* uint8_t N1[BLOCKSIZE] = {0}; */
/* uint8_t M[2*BLOCKSIZE] = {0}; */
/* uint64_t C[(2*BLOCKSIZE+TAGSIZE)/8]; */
/* unsigned long long clen; */
/* make_nonce(N0, a.N); */
/* make_nonce(N1, b.N); */
/* flexAEAD_dbg = 1; */
/* crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N0, flexAEAD); */
/* crypto_aead_encrypt((uint8_t*)C, &clen, M, sizeof(M), NULL, 0, NULL, N1, flexAEAD); */
/* flexAEAD_dbg = 0; */
/* } */
|
Crivo_Openmp.c | #include <math.h>
#include <omp.h>
#include "lib/Lista.c"
lista_t* crivo(int nThreads, int MAX);
/*
Compilar: gcc -Wall Crivo_Openmp.c -o Crivo_Openmp -fopenmp -lm;
Entrada: int nThread, int FIM;
nThread é o númerod e threads a serem utilizadas;
FIM é o número limite a ser verificado os números primos de 2 até FIM;
*/
// Entry point: parses <nThreads> <FIM> from argv, runs the parallel sieve
// and prints the count of primes in [2, FIM].
int main(int argc, char *argv[]) {
    if(argc != 3) {
        /* KRED/KRESET look like ANSI color macros — presumably defined in
         * lib/Lista.c; confirm there. */
        printf(KRED "\tErro: Numero incorreto de argumentos.\n" KRESET);
        return 0;
    }
    // Initialize parameters from the command line
    int nThreads = atoi(argv[1]);
    int FIM = atoi(argv[2]);
    lista_t* primos = NULL;
    omp_set_num_threads(nThreads);
    primos = crivo(nThreads, FIM);
    /* To print the primes themselves, uncomment the line below */
    //exibe(primos);
    /* NOTE(review): the format string ends with "Tempo(s): " but no elapsed
     * time is ever measured or printed here — presumably appended by an
     * external timing wrapper; confirm. */
    printf("\t\tQuantidade: %d\t\t\tTempo(s): ", quantidade(primos));
    return 0;
}
/*
 * Parallel sieve (trial division by smaller primes) using OpenMP.
 * Recursively computes the primes up to sqrt(MAX), then tests every odd
 * candidate in (sqrt(MAX), MAX] against that base list in parallel.
 * Returns a lista_t* holding all primes in [2, MAX].
 *
 * Fix: the base case used to insert both 2 and 3 unconditionally, so
 * crivo(n, 2) wrongly reported 3 as prime; each value is now inserted
 * only when it is actually <= MAX. Final results for MAX >= 3 are
 * unchanged.
 */
lista_t* crivo(int nThreads, int MAX) {
    lista_t* primos = NULL;
    primos = criaLista(primos);
    lista_t* p;
    if(MAX < 5){
        /* Base case: list the primes <= MAX directly */
        if(MAX >= 2) insereInicio(primos,2);
        if(MAX >= 3) insereInicio(primos,3);
        return primos;
    }
    int flag = 1, i, MIN = (int) sqrt(MAX);
    /* Primes up to sqrt(MAX) suffice to test every candidate <= MAX */
    primos = crivo(nThreads, MIN);
    MIN++;
    if(MIN%2==0) MIN++; /* start at the first odd candidate > sqrt(MAX) */
    #pragma omp parallel private (p)
    {
        /* Each thread accumulates its primes in a private list and merges
         * it into the shared result once, inside a critical section. */
        node_t* node = NULL;
        p = NULL;
        p = criaLista(p);
        #pragma omp for private (node, i, flag) schedule (dynamic)
        for(i = MIN; i <= MAX; i+=2){
            flag = 1;
            node = primos->primeiro;
            do{
                if(i % node->valor == 0){
                    flag = 0; /* composite: divisible by a smaller prime */
                    break;
                }
                node = node->proximo;
            } while (node != NULL);
            if(flag) {
                insereInicio(p, i);
            }
        }
        #pragma omp critical
        {
            uniListas(primos,p);
        }
    }
    return primos;
}
|
singleModificado.c | #include <stdio.h>
#include <omp.h>
/*
 * OpenMP `single` demo: one thread of the team reads `a` from stdin, then
 * the whole team fills b[] with it inside a worksharing loop; the results
 * are printed afterwards.
 */
int main() {
    /* Fix: `a` is initialized so a failed scanf no longer leaves it
     * uninitialized (reading it was undefined behavior). */
    int n = 9, i, a = 0, b[n];
    for (i = 0; i < n; i++) b[i] = -1;
    #pragma omp parallel
    {
        #pragma omp single
        {
            printf("Introduce valor de inicialización a: ");
            /* Fix: the original ignored the scanf result; keep the 0
             * default when input is missing or malformed. */
            if (scanf("%d", &a) != 1) {
                a = 0;
            }
            printf("Single ejecutada por el thread %d\n",
                   omp_get_thread_num());
        } /* implicit barrier: every thread sees the final value of `a` */
        #pragma omp for
        for (i = 0; i < n; i++)
            b[i] = a;
    }
    /* NOTE: this `single` sits OUTSIDE the parallel region, so it is
     * executed by the initial thread alone and omp_get_thread_num()
     * always prints 0 here. */
    #pragma omp single
    {
        printf("Dentro de la región parallel:\n");
        printf("Single ejecutada por el thread %d\n", omp_get_thread_num());
        for (i = 0; i < n; i++) printf("b[%d] = %d\t", i, b[i]);
        printf("\n");
    }
    return 0;
}
|
convolution_1x1_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack4_msa(bottom_im2col, top_blob, kernel, _bias, opt);
}
// 1x1 stride-2 convolution, pack4, MSA. A stride-2 1x1 conv equals a
// stride-1 1x1 conv on a 2x-subsampled input, so gather every other pixel
// into a shrunken blob and reuse the stride-1 sgemm path above.
static void conv1x1s2_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // Floats to skip at the end of each output row: the tail of the current
    // input row plus one full skipped row (vertical stride 2), x4 for pack4.
    const int tailstep = (w - 2 * outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* r0 = bottom_blob.channel(p);
        float* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // copy one pack4 element, then skip the next input column
                v4f32 _val = (v4f32)__msa_ld_w(r0, 0);
                __msa_st_w((v4i32)_val, outptr, 0);

                r0 += 4 * 2;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack4_msa(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
GB_binop__ne_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_int16)
// A*D function (colscale): GB (_AxD__ne_int16)
// D*A function (rowscale): GB (_DxB__ne_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_int16)
// C=scalar+B GB (_bind1st__ne_int16)
// C=scalar+B' GB (_bind1st_tran__ne_int16)
// C=A+scalar GB (_bind2nd__ne_int16)
// C=A'+scalar GB (_bind2nd_tran__ne_int16)
// C type: bool
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
// The GB_* macros below specialize the generic GraphBLAS templates for the
// NE_INT16 operator: A and B hold int16_t, C holds bool, and the operator
// computes cij = (aij != bij).
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (fix: removed a stray trailing line-continuation backslash that spliced
// the following comment line into this macro definition)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// (fix: removed a stray trailing line-continuation backslash, as above)
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// direct access to an entry of C
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT16 || GxB_NO_NE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NE is not in that list, so the generator disabled this variant ("#if 0")
// and emitted no name for it; the code below is intentionally dead.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// "+" is generic notation: here cij = (aij != bij). The loop itself lives in
// the shared template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// The accumulation kernel is disabled ("#if 0") for this operator, so the
// function reports GrB_SUCCESS without touching C, or GrB_NO_VALUE when the
// whole NE_INT16 operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// As with C+=B above, the scalar-accumulate kernel is disabled ("#if 0") for
// NE_INT16; the function is effectively a successful no-op unless GB_DISABLE
// compiles the operator out entirely.
GrB_Info GB (_Cdense_accumb__ne_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Each entry of A is combined with the matching diagonal entry of D via the
// NE operator; results (bool) are written to C's value array Cx by the
// shared colscale template.
GrB_Info GB (_AxD__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Mirror of _AxD above: diagonal entries of D are combined with entries of B
// via the NE operator; the shared rowscale template does the work.
GrB_Info GB (_DxB__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// "+" here is the NE_INT16 operator. When is_eWiseUnion is set, typed copies
// of the alpha/beta scalars are prepared for the template's union handling.
GrB_Info GB (_AaddB__ne_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
// typed copies of the eWiseUnion scalars
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Applies cij = (aij != bij) where A and B intersect; the sparse/hypersparse
// result is assembled by the shared emult meta template.
GrB_Info GB (_AemultB_08__ne_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 0 for NE (x != y equals y != x), so only the unflipped
// template branch below is compiled in; the GB_BINOP_FLIP branch is kept by
// the generator for non-commutative operators.
GrB_Info GB (_AemultB_02__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Masked eWiseMult driven by the sparse mask M; the shared template iterates
// over M's entries and applies cij = (aij != bij).
GrB_Info GB (_AemultB_04__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap-result variant of eWiseMult for the NE_INT16 operator; all work is
// delegated to the shared bitmap template.
GrB_Info GB (_AemultB_bitmap__ne_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = (x != Bx [p]) for every entry present in B, where x is a
// scalar bound as the first operand.
GrB_Info GB (_bind1st__ne_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int16_t *Bx = (int16_t *) Bx_input ;
    const int16_t x = (*((int16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only positions present in B's bitmap get a result
        if (GBB (Bb, p))
        {
            const int16_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x != bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = (Ax [p] != y) for every entry present in A, where y is a
// scalar bound as the second operand.
GrB_Info GB (_bind2nd__ne_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    const int16_t y = (*((int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only positions present in A's bitmap get a result
        if (GBB (Ab, p))
        {
            const int16_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij != y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// The transpose template expands GB_CAST_OP above; the bound scalar x is the
// first operand of the NE comparison.
GrB_Info GB (_bind1st_tran__ne_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for code that follows (same int16_t here, since A and B
// share the same type for NE_INT16)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// The transpose template expands GB_CAST_OP above; the bound scalar y is the
// second operand of the NE comparison.
GrB_Info GB (_bind2nd_tran__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
oyranos_cmm_oyra_image_ppm.c | /** @file oyranos_cmm_oyra_image.c
*
* Oyranos is an open source Color Management System
*
* @par Copyright:
* 2008-2015 (C) Kai-Uwe Behrmann
*
* @brief modules for Oyranos
* @internal
* @author Kai-Uwe Behrmann <ku.b@gmx.de>
* @par License:
* new BSD <http://www.opensource.org/licenses/BSD-3-Clause>
* @since 2008/10/07
*/
#include "oyCMMapi4_s.h"
#include "oyCMMapi4_s_.h"
#include "oyCMMapi7_s.h"
#include "oyCMMapi7_s_.h"
#include "oyCMMapiFilters_s.h"
#include "oyCMMui_s_.h"
#include "oyConnectorImaging_s_.h"
#include "oyFilterNode_s_.h" /* for oyFilterNode_TextToInfo_ */
#include "oyRectangle_s_.h"
#include "oyranos_config_internal.h"
#include "oyranos_cmm.h"
#include "oyranos_cmm_oyra.h"
#include "oyranos_generic.h" /* oy_connector_imaging_static_object */
#include "oyranos_helper.h"
#include "oyranos_icc.h"
#include "oyranos_i18n.h"
#include "oyranos_io.h"
#include "oyranos_definitions.h"
#include "oyranos_string.h"
#include "oyranos_texts.h"
#include <math.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
/* 16 bit storage type used for PFM half-precision samples (the 'H'/'h'
 * formats handled below); only raw bit patterns are moved in this type */
typedef uint16_t half;
/* forward declaration: scan the next white-space separated word; see the
 * definition near the PNM reader below */
int wread ( unsigned char *data, /* read a word */
size_t pos,
size_t max,
size_t *start,
size_t *length );
/** Validate options for the write_ppm filter.
 *
 *  Only requires that a "filename" option is present in @em validate.
 *  Sets *result to 0 on success, 1 otherwise, and returns no corrected
 *  option set (always 0). */
oyOptions_s* oyraFilter_ImageOutputPPMValidateOptions
( oyFilterCore_s * filter,
oyOptions_s * validate,
int statical OY_UNUSED,
uint32_t * result )
{
uint32_t error = !filter;
#if 0
/* dead code kept by upstream: it references an undeclared "node" and old
 * struct members, so it would not compile if the "#if 0" were removed */
oyDATATYPE_e data_type = 0;
int planar, channels;
oyImage_s * image = 0;
if(!error)
filter = node->filter;
if(!error)
error = filter->type_ != oyOBJECT_FILTER_S;
if(!error)
{
if(filter->image_ && filter->image_->layout_)
{
data_type = oyToDataType_m( filter->image_->layout_[0] );
if(!(data_type == oyUINT8 ||
data_type == oyUINT16 ||
data_type == oyFLOAT ||
data_type == oyDOUBLE
))
error = 1;
planar = oyToPlanar_m( filter->image_->layout_[0] );
if(!error && planar)
error = 1;
channels = oyToChannels_m( filter->image_->layout_[0] );
if(!error && channels > 4)
error = 1;
}
}
#endif
if(!error)
error = !oyOptions_FindString( validate, "filename", 0 );
*result = error;
return 0;
}
/** @func oyraFilterPlug_ImageOutputPPMWrite
 * @brief implement oyCMMFilter_GetNext_f()
 *
 * Runs the upstream graph via oyFilterNode_Run(), then writes the socket's
 * image to the file named by the node's "filename" option through
 * oyImage_WritePPM().
 *
 * @version Oyranos: 0.3.1
 * @since 2008/10/07 (Oyranos: 0.1.8)
 * @date 2011/05/12
 */
int oyraFilterPlug_ImageOutputPPMWrite (
oyFilterPlug_s * requestor_plug,
oyPixelAccess_s * ticket )
{
oyFilterSocket_s * socket;
oyFilterNode_s * node = 0;
oyOptions_s * node_opts = 0;
int result = 0;
const char * filename = 0;
FILE * fp = 0;
/* NOTE(review): socket, node, node_opts and image_output are obtained via
 * *_Get*() calls but never released in this function - confirm against
 * Oyranos reference-counting rules */
socket = oyFilterPlug_GetSocket( requestor_plug );
node = oyFilterSocket_GetNode( socket );
node_opts = oyFilterNode_GetOptions( node, 0 );
/* to reuse the requestor_plug is an exception for the starting request */
if(node)
result = oyFilterNode_Run( node, requestor_plug, ticket );
else
result = 1;
/* result <= 0 appears to mean success; only then look up the target file */
if(result <= 0)
filename = oyOptions_FindString( node_opts, "filename", 0 );
/* probe that the file is writable; the actual write happens below */
if(filename)
fp = fopen( filename, "wb" );
if(fp)
{
oyImage_s *image_output = (oyImage_s*)oyFilterSocket_GetData( socket );
const char * comment = oyOptions_FindString( node_opts, "comment", NULL );
fclose (fp); fp = 0;
/* without a "comment" option, fall back to the node's relatives text */
result = oyImage_WritePPM( image_output, filename,
comment ? comment :
oyFilterNode_GetRelatives( node ) );
}
/* NOTE(review): if fopen() fails, the earlier result is returned
 * unchanged, so a failed write may be reported as success - verify */
return result;
}
/* default option template for the write_ppm filter: an XML snippet that
 * declares the supported "filename" and "comment" options inside the
 * module's registration path */
const char ppm_write_extra_options[] = {
"\n\
<" OY_TOP_SHARED ">\n\
<" OY_DOMAIN_INTERNAL ">\n\
<" OY_TYPE_STD ">\n\
<" "file_write" ">\n\
<filename></filename>\n\
<comment></comment>\n\
</" "file_write" ">\n\
</" OY_TYPE_STD ">\n\
</" OY_DOMAIN_INTERNAL ">\n\
</" OY_TOP_SHARED ">\n"
};
/** @func oyraPPMwriteUiGet
 *  @brief implement oyCMMuiGet_f(); provide the filter's XFORMS layout
 *
 *  The write_ppm filter exposes no extra UI, so an empty, zero terminated
 *  string is returned in memory from the caller supplied allocator.
 *
 *  @param[in]  module         unused
 *  @param[in]  opts           unused
 *  @param[in]  flags          unused
 *  @param[out] xforms_layout  receives the newly allocated empty string
 *  @param[in]  allocateFunc   allocator used for the result
 *  @return                    0 on success, 1 on allocation failure
 */
int oyraPPMwriteUiGet ( oyCMMapiFilter_s * module OY_UNUSED,
oyOptions_s * opts OY_UNUSED,
int flags OY_UNUSED,
char ** xforms_layout,
oyAlloc_f allocateFunc )
{
char * text = (char*)allocateFunc(5);
/* guard against a failing allocator instead of dereferencing NULL */
if(!text)
return 1;
text[0] = 0;
*xforms_layout = text;
return 0;
}
/* zero terminated list of pixel data types the PPM filters can exchange;
 * NOTE(review): the connectors below state data_types_n = 4 while five
 * types are listed here - confirm which count is intended */
oyDATATYPE_e oyra_image_ppm_data_types[6] = {oyUINT8, oyUINT16, oyHALF,
oyFLOAT, oyDOUBLE, 0};
/* description of the output (socket) connector of the write_ppm filter:
 * which image data layouts it can provide to connected plugs */
oyConnectorImaging_s_ oyra_imageOutputPPM_connector_out = {
oyOBJECT_CONNECTOR_IMAGING_S,0,0,
(oyObject_s)&oy_connector_imaging_static_object,
oyCMMgetImageConnectorSocketText, /* getText */
oy_image_connector_texts, /* texts */
"//" OY_TYPE_STD "/image.data", /* connector_type */
oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */
0, /* is_plug == oyFilterPlug_s */
oyra_image_ppm_data_types,
4, /* data_types_n; elements in data_types array; NOTE(review): the array lists 5 types - confirm */
-1, /* max_color_offset */
1, /* min_channels_count; */
32, /* max_channels_count; */
1, /* min_color_count; */
32, /* max_color_count; */
0, /* can_planar; can read separated channels */
1, /* can_interwoven; can read continuous channels */
0, /* can_swap; can swap color channels (BGR)*/
0, /* can_swap_bytes; non host byte order */
0, /* can_revert; revert 1 -> 0 and 0 -> 1 */
1, /* can_premultiplied_alpha; */
1, /* can_nonpremultiplied_alpha; */
0, /* can_subpixel; understand subpixel order */
0, /* oyCHANNELTYPE_e * channel_types; */
0, /* count in channel_types */
1, /* id; relative to oyFilter_s, e.g. 1 */
0 /* is_mandatory; mandatory flag */
};
/* zero terminated socket list of the write_ppm filter */
oyConnectorImaging_s_ * oyra_imageOutputPPM_connectors_socket[2] =
{ &oyra_imageOutputPPM_connector_out, 0 };
/* description of the input (plug) connector of the write_ppm filter:
 * which image data layouts it accepts from an upstream socket */
oyConnectorImaging_s_ oyra_imageOutputPPM_connector_in = {
oyOBJECT_CONNECTOR_IMAGING_S,0,0,
(oyObject_s)&oy_connector_imaging_static_object,
oyCMMgetImageConnectorPlugText, /* getText */
oy_image_connector_texts, /* texts */
"//" OY_TYPE_STD "/image.data", /* connector_type */
oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */
1, /* is_plug == oyFilterPlug_s */
oyra_image_ppm_data_types,
4, /* data_types_n; elements in data_types array; NOTE(review): the array lists 5 types - confirm */
-1, /* max_color_offset */
1, /* min_channels_count; */
4, /* max_channels_count; */
1, /* min_color_count; */
4, /* max_color_count; */
0, /* can_planar; can read separated channels */
1, /* can_interwoven; can read continuous channels */
0, /* can_swap; can swap color channels (BGR)*/
0, /* can_swap_bytes; non host byte order */
0, /* can_revert; revert 1 -> 0 and 0 -> 1 */
1, /* can_premultiplied_alpha; */
1, /* can_nonpremultiplied_alpha; */
0, /* can_subpixel; understand subpixel order */
0, /* oyCHANNELTYPE_e * channel_types; */
0, /* count in channel_types */
2, /* id; relative to oyFilter_s, e.g. 1 */
0 /* is_mandatory; mandatory flag */
};
/* zero terminated plug list of the write_ppm filter */
oyConnectorImaging_s_ * oyra_imageOutputPPM_connectors_plug[2] =
{ &oyra_imageOutputPPM_connector_in, 0 };
/**
 *  This function implements oyCMMGetText_f.
 *
 *  Answers the "name", "category" and "help" texts of the write_ppm filter
 *  in the requested verbosity (nick / name / description).
 *
 *  @version Oyranos: 0.1.10
 *  @since 2009/12/22 (Oyranos: 0.1.10)
 *  @date 2009/12/22
 */
const char * oyraApi4ImageWriteUiGetText (
const char * select,
oyNAME_e type,
oyStruct_s * context OY_UNUSED )
{
  static char * category = 0;

  if(strcmp(select,"name") == 0)
  {
    return (type == oyNAME_NICK) ? "write_ppm"
         : (type == oyNAME_NAME) ? _("Image[write_ppm]")
                                 : _("Write PPM Image Filter Object");
  }

  if(strcmp(select,"category") == 0)
  {
    /* build the translated category path once and cache it */
    if(!category)
    {
      STRING_ADD( category, _("Files") );
      STRING_ADD( category, _("/") );
      STRING_ADD( category, _("Write PPM") );
    }
    if(type == oyNAME_NICK)
      return "category";
    /* both oyNAME_NAME and the long form share the cached string */
    return category;
  }

  if(strcmp(select,"help") == 0)
  {
    return (type == oyNAME_NICK) ? "help"
         : (type == oyNAME_NAME) ? _("Option \"filename\", a valid filename")
                                 : _("The Option \"filename\" should contain a valid filename to write the ppm data into. A existing file will be overwritten without notice.");
  }

  return 0;
}
/* selector names answered by oyraApi4ImageWriteUiGetText(), zero terminated */
const char * oyra_api4_image_write_ppm_ui_texts[] = {"name", "category", "help", 0};
/** @brief oyra oyCMMapi4_s::ui implementation
 *
 * The UI for filter write ppm.
 *
 * @version Oyranos: 0.1.10
 * @since 2009/09/09 (Oyranos: 0.1.10)
 * @date 2009/12/22
 */
oyCMMui_s_ oyra_api4_image_write_ppm_ui = {
oyOBJECT_CMM_DATA_TYPES_S, /**< oyOBJECT_e type; NOTE(review): unusual type tag for an UI struct - confirm against upstream */
0,0,0, /* unused oyStruct_s fields; keep to zero */
CMM_VERSION, /**< int32_t version[3] */
CMM_API_VERSION, /**< int32_t module_api[3] */
oyraFilter_ImageOutputPPMValidateOptions, /* oyCMMFilter_ValidateOptions_f */
oyraWidgetEvent, /* oyWidgetEvent_f */
"Files/Write PPM", /* category */
ppm_write_extra_options, /* const char * options */
oyraPPMwriteUiGet, /* oyCMMuiGet_f oyCMMuiGet */
oyraApi4ImageWriteUiGetText, /* oyCMMGetText_f getText */
oyra_api4_image_write_ppm_ui_texts, /* const char ** texts */
(oyCMMapiFilter_s*)&oyra_api4_image_write_ppm /* oyCMMapiFilter_s*parent */
};
/** @brief oyra oyCMMapi4_s implementation
 *
 * A filter writing a PPM image.
 *
 * @par Options:
 * - "filename" - the file name to write to
 *
 * @version Oyranos: 0.1.8
 * @since 2008/10/07 (Oyranos: 0.1.8)
 * @date 2008/10/07
 */
oyCMMapi4_s_ oyra_api4_image_write_ppm = {
oyOBJECT_CMM_API4_S, /* oyStruct_s::type oyOBJECT_CMM_API4_S */
0,0,0, /* unused oyStruct_s fields; keep to zero */
(oyCMMapi_s*) & oyra_api7_image_write_ppm, /* oyCMMapi_s * next; links to the matching api7 below */
oyraCMMInit, /* oyCMMInit_f */
oyraCMMMessageFuncSet, /* oyCMMMessageFuncSet_f */
/* registration */
OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD "/file_write.write_ppm._CPU._" CMM_NICK,
CMM_VERSION, /* int32_t version[3] */
CMM_API_VERSION, /**< int32_t module_api[3] */
0, /* id_; keep empty */
0, /* api5_; keep empty */
0, /* runtime_context */
(oyCMMFilterNode_ContextToMem_f)oyFilterNode_TextToInfo_, /* oyCMMFilterNode_ContextToMem_f */
0, /* oyCMMFilterNode_GetText_f oyCMMFilterNode_GetText */
{0}, /* char context_type[8] */
(oyCMMui_s_*)&oyra_api4_image_write_ppm_ui /**< oyCMMui_s *ui */
};
/* static capability key=value list of the write_ppm oyCMMapi7_s below;
 * zero terminated */
char * oyra_api7_image_output_ppm_properties[] =
{
"file=write", /* file read|write */
"image=pixel", /* image type, pixel/vector/font */
"layers=1", /* layer count, one for plain images */
"icc=0", /* image type ICC profile support */
"ext=ppm,pnm,pbm,pgm,pfm", /* supported extensions */
0
};
/** @brief oyra oyCMMapi7_s implementation
 *
 * A filter writing a PPM image.
 *
 * @par Options:
 * - "filename" - the file name to write to
 *
 * @version Oyranos: 0.1.8
 * @since 2008/10/07 (Oyranos: 0.1.8)
 * @date 2008/10/07
 */
oyCMMapi7_s_ oyra_api7_image_write_ppm = {
oyOBJECT_CMM_API7_S, /* oyStruct_s::type oyOBJECT_CMM_API7_S */
0,0,0, /* unused oyStruct_s fields; keep to zero */
(oyCMMapi_s*) & oyra_api4_image_input_ppm, /* oyCMMapi_s * next; links to the read_ppm api4 */
oyraCMMInit, /* oyCMMInit_f */
oyraCMMMessageFuncSet, /* oyCMMMessageFuncSet_f */
/* registration */
OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD "/file_write.write_ppm._CPU._" CMM_NICK,
CMM_VERSION, /* int32_t version[3] */
CMM_API_VERSION, /**< int32_t module_api[3] */
0, /* id_; keep empty */
0, /* api5_; keep empty */
0, /* runtime_context */
oyraFilterPlug_ImageOutputPPMWrite, /* oyCMMFilterPlug_Run_f; does the actual write */
{0}, /* char data_type[8] */
(oyConnector_s**) oyra_imageOutputPPM_connectors_plug, /* plugs */
1, /* plugs_n */
0, /* plugs_last_add */
(oyConnector_s**) oyra_imageOutputPPM_connectors_socket, /* sockets */
1, /* sockets_n */
0, /* sockets_last_add */
oyra_api7_image_output_ppm_properties /* char * properties */
};
/* ---------------------------------------------------------------------------*/
/** Validate options for the PPM input filter.
 *
 *  A "filename" option must be present in @em validate.
 *
 *  @param[in]  filter    the filter core; a NULL filter counts as an error
 *  @param[in]  validate  options searched for "filename"
 *  @param[in]  statical  unused
 *  @param[out] result    0 on success, 1 on failure
 *  @return               0 (no corrected option set is provided)
 */
oyOptions_s* oyraFilter_ImageInputPPMValidateOptions
( oyFilterCore_s * filter,
oyOptions_s * validate,
int statical OY_UNUSED,
uint32_t * result )
{
  uint32_t error = 1;

  if(filter)
    error = oyOptions_FindString( validate, "filename", 0 ) ? 0 : 1;

  *result = error;
  return 0;
}
/** @func wread
 *  @brief scan the next white-space separated token in a memory buffer
 *
 *  Skips leading white space starting at @em pos, then advances to the end
 *  of the token. Both outputs are always written - previously they were
 *  left untouched on the max <= 1 early return, and callers compute
 *  end - start from them.
 *
 *  @param[in]  data   the buffer to scan
 *  @param[in]  pos    offset to start scanning at
 *  @param[in]  max    exclusive upper bound of valid offsets
 *  @param[out] start  first position of the token
 *  @param[out] end    position one past the token (== max if unterminated)
 *  @return            1 if the token ends in white space, 0 otherwise
 */
int wread ( unsigned char* data, size_t pos, size_t max, size_t *start, size_t *end )
{
  int end_found = 0;

  /* define the outputs even on the early return below */
  *start = *end = pos;

  if( max <= 1 ) return 0;

  /* skip leading white space */
  while(pos < max && isspace( data[pos] )) ++pos;
  *start = pos;

  /* advance to the first white space after the token */
  while(pos < max)
  {
    if( isspace( data[pos] ) )
    {
      end_found = 1;
      break;
    }
    ++pos;
  }

  *end = pos;
  return end_found;
}
/** @func oyraFilterPlug_ImageInputPPMRun
* @brief implement oyCMMFilter_GetNext_f()
*
* @version Oyranos: 0.1.10
* @since 2009/02/18 (Oyranos: 0.1.10)
* @date 2009/02/18
*/
int oyraFilterPlug_ImageInputPPMRun (
oyFilterPlug_s * requestor_plug,
oyPixelAccess_s * ticket )
{
oyFilterSocket_s * socket = 0;
oyStruct_s * socket_data = 0;
oyFilterNode_s * node = 0;
oyOptions_s * tags = 0;
int error = 0;
const char * filename = 0;
FILE * fp = 0;
oyDATATYPE_e data_type = oyUINT8;
oyPROFILE_e profile_type = oyEDITING_RGB;
oyProfile_s * prof = 0;
oyImage_s * image_in = 0,
* output_image = 0;
oyPixel_t pixel_type = 0;
int fsize = 0;
size_t fpos = 0;
uint8_t * data = 0, * buf = 0;
size_t mem_n = 0; /* needed memory in bytes */
int info_good = 1;
int32_t icc_profile_flags = 0;
int type = 0; /* PNM type */
int width = 0;
int height = 0;
int spp = 0; /* samples per pixel */
int byteps = 1; /* byte per sample */
double maxval = 0;
size_t start, end;
if(requestor_plug->type_ == oyOBJECT_FILTER_PLUG_S)
{
socket = oyFilterPlug_GetSocket( requestor_plug );
socket_data = oyFilterSocket_GetData( socket );
}
/* passing through the data reading */
if(requestor_plug->type_ == oyOBJECT_FILTER_PLUG_S &&
socket_data)
{
error = oyraFilterPlug_ImageRootRun( requestor_plug, ticket );
return error;
} else if(requestor_plug->type_ == oyOBJECT_FILTER_SOCKET_S)
{
/* To open the a image here seems not so straight forward.
* Still the plug-in should be prepared to initialise the image data before
* normal processing occurs.
*/
socket = oyFilterSocket_Copy( (oyFilterSocket_s*)requestor_plug, 0 );
requestor_plug = 0;
}
node = oyFilterSocket_GetNode( socket );
if(error <= 0)
{
oyOptions_s * opts = oyFilterNode_GetOptions( node ,0 );
filename = oyOptions_FindString( opts, "filename", 0 );
oyOptions_FindInt( opts, "icc_profile_flags", 0, &icc_profile_flags );
oyOptions_Release( &opts );
}
if(filename)
fp = fopen( filename, "rm" );
if(!fp)
{
oyra_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_ " could not open: %s",
OY_DBG_ARGS_, oyNoEmptyString_m_( filename ) );
return 1;
}
fseek(fp,0L,SEEK_END);
fsize = ftell(fp);
rewind(fp);
oyAllocHelper_m_( data, uint8_t, fsize, 0, fclose(fp); return 1);
fpos = fread( data, sizeof(uint8_t), fsize, fp );
if( fpos < (size_t)fsize ) {
oyra_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_ " could not read: %s %d %d",
OY_DBG_ARGS_, oyNoEmptyString_m_( filename ), fsize, (int)fpos );
oyFree_m_( data )
fclose (fp);
return FALSE;
}
fpos = 0;
fclose (fp);
fp = NULL;
/* parse Infos */
if(data[fpos] == 'P')
{
if(isdigit(data[++fpos])) {
char tmp[2] = {0, 0};
tmp[0] = data[fpos];
type = atoi(tmp);
} else
if (!isspace(data[fpos]))
{
if(data[fpos] == 'F') /* PFM rgb */
type = -6;
else if (data[fpos] == 'f') /* PFM gray */
type = -5;
else if(data[fpos] == 'H') /* PFM Half rgb */
type = -9;
else if (data[fpos] == 'h') /* PFM Half gray */
type = -8;
else
info_good = 0;
}
else
info_good = 0;
}
fpos++;
/* parse variables */
{
int in_c = 0; /* within comment */
int v_read = 0; /* number of variables allready read */
int v_need = 3; /* number of needed variable; start with three */
int l_end = 0; /* line end position */
int l_pos = 0; /* line position */
int l_rdg = 1; /* line reading */
char * tupltype = NULL; /* ICC profile internal color space */
int tupl = 0;
if(type == 1 || type == 4)
v_need = 2;
if(type == 7) /* pam */
v_need = 12;
while(v_read < v_need && info_good)
{
l_pos = l_end = fpos;
l_rdg = 1;
/* read line */
while(fpos < (size_t)fsize && l_rdg)
{
if(data[fpos-1] == '\n' && data[fpos] == '#')
{
in_c = 1;
l_end = fpos-1;
} else if(data[fpos] == 10 || data[fpos] == 13) { /* line break */
l_rdg = 0;
} else if(data[fpos] != 0) {
if(!in_c)
++l_end;
} else {
l_rdg = 0;
}
if(!l_rdg) {
in_c = 0;
}
++fpos;
}
/* lockup color space */
if(fpos - l_pos > 0)
{
if(fpos - l_pos >= 14 && memcmp(&data[l_pos],"# COLORSPACE: ", 14) == 0)
{
char * t = oyAllocateFunc_(fpos - l_pos + 1);
if(t)
{
memcpy( t, &data[l_pos+14], fpos - l_pos - 15 );
t[fpos - l_pos - 15] = 0;
prof = oyProfile_FromName(t, icc_profile_flags, NULL);
if(prof)
{
if(oy_debug)
oyra_msg( oyMSG_DBG, (oyStruct_s*)node,
OY_DBG_FORMAT_ "found ICC: %s",
OY_DBG_ARGS_, oyNoEmptyString_m_( t ) );
} else
oyra_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_ "could not find ICC: %s",
OY_DBG_ARGS_, oyNoEmptyString_m_( t ) );
oyDeAllocateFunc_(t);
}
}
}
if(!prof && getenv("COLORSPACE"))
{
const char * t = getenv("COLORSPACE");
prof = oyProfile_FromName(t, icc_profile_flags, NULL);
if(!prof)
oyra_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_ "could not find \"COLORSPACE\" from environment variable: %s",
OY_DBG_ARGS_, oyNoEmptyString_m_( t ) );
}
/* parse line */
while(info_good &&
v_read < v_need &&
l_pos < l_end)
{
if( info_good )
{
double var = -2;
char var_s[64];
int l = 0;
wread ( data, l_pos, l_end, &start, &end );
l = end - start;
if ( l < 63 )
{
memcpy(var_s, &data[start], l);
var_s[l] = 0;
oyStringToDouble(var_s, &var);
# ifdef DEBUG_
fprintf(stderr, "var = \"%s\" %d\n",var_s, l);
# endif
}
l_pos = end + 1;
if(type == 7)
{
if(height == -1)
height = (int)var;
if(width == -1)
width = (int)var;
if(spp == -1)
spp = (int)var;
if(maxval == -0.5)
maxval = var;
if(tupl == -1)
{
tupl = 1;
tupltype = oyStringCopy(var_s, oyAllocateFunc_);
}
if(strcmp(var_s, "HEIGHT") == 0)
height = -1; /* expecting the next token is the val */
if(strcmp(var_s, "WIDTH") == 0)
width = -1;
if(strcmp(var_s, "DEPTH") == 0)
spp = -1;
if(strcmp(var_s, "MAXVAL") == 0)
maxval = -0.5;
if(strcmp(var_s, "TUPLTYPE") == 0)
tupl = -1;
if(strcmp(var_s, "ENDHDR") == 0)
v_need = v_read;
}
else
{
if (!var)
info_good = 0;
if(v_read == 0)
width = (int)var;
else if(v_read == 1)
height = (int)var;
else if(v_read == 2)
maxval = var;
}
++v_read;
}
}
}
if(tupltype && !prof)
{
const char * colorspace = "rgbi";
if(strcmp(tupltype, "GRAY") == 0 ||
strcmp(tupltype, "GRAY_ALPHA") == 0)
colorspace = "grayi";
if(strcmp(tupltype, "RGB") == 0 ||
strcmp(tupltype, "RGB_ALPHA") == 0)
colorspace = "rgbi";
if(strcmp(tupltype, "CMYK") == 0 ||
strcmp(tupltype, "CMYK_ALPHA") == 0)
colorspace = "cmyki";
prof = oyProfile_FromName( colorspace, icc_profile_flags, NULL );
if(!prof)
oyra_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_ "could not find \"COLORSPACE\" from environment variable: %s",
OY_DBG_ARGS_, oyNoEmptyString_m_( tupltype ) );
oyFree_m_(tupltype)
}
}
if(strstr(strrchr(filename, '.')+1, "raw"))
{
const char * t;
info_good = 1;
t = getenv("RAW_WIDTH");
if(t)
width = atoi(t);
else
info_good = 0;
t = getenv("RAW_HEIGHT");
if(t)
height = atoi(t);
else
info_good = 0;
t = getenv("RAW_TYPE");
if(t)
type = atoi(t);
else
info_good = 0;
fpos = 0;
t = getenv("RAW_MAXVAL");
if(t)
maxval = atoi(t);
else
info_good = 0;
if(info_good == 0)
oyra_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_ "need RAW_WIDTH, RAW_HEIGHT, RAW_TYPE and RAW_MAXVAL environment variables",
OY_DBG_ARGS_ );
}
if(info_good)
switch(type) {
case 1:
case 4:
data_type = oyUINT8;
spp = 1;
info_good = 0;
break;
case 2:
case 5:
if(maxval <= 255)
{
data_type = oyUINT8;
byteps = 1;
} else if (maxval <= 65535) {
data_type = oyUINT16;
byteps = 2;
}
spp = 1;
break;
case 3:
case 6:
if(maxval <= 255)
{
data_type = oyUINT8;
byteps = 1;
} else if (maxval <= 65535) {
data_type = oyUINT16;
byteps = 2;
}
spp = 3;
break;
case -5:
data_type = oyFLOAT;
byteps = 4;
spp = 1;
break;
case -6:
byteps = 4;
spp = 3;
data_type = oyFLOAT;
break;
case -8:
data_type = oyHALF;
byteps = 2;
spp = 1;
break;
case -9:
byteps = 2;
spp = 3;
data_type = oyHALF;
break;
case 7: /* pam */
if (maxval == 1.0 || maxval == -1.0)
{
byteps = 4;
data_type = oyFLOAT;
} else if(maxval <= 255) {
byteps = 1;
data_type = oyUINT8;
} else if (maxval <= 65535) {
byteps = 2;
data_type = oyUINT16;
}
break;
default:
info_good = 0;
}
switch(spp)
{
case 1:
profile_type = oyASSUMED_GRAY;
break;
case 2:
profile_type = oyASSUMED_GRAY;
break;
case 3:
profile_type = oyASSUMED_RGB;
break;
case 4:
profile_type = oyASSUMED_RGB;
break;
}
if( !info_good )
{
oyra_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_ "failed to get info of %s",
OY_DBG_ARGS_, oyNoEmptyString_m_( filename ));
oyFree_m_( data )
return FALSE;
}
/* check if the file can hold the expected data (for raw only) */
mem_n = width*height*byteps*spp;
if(type == 5 || type == 6 || type == -5 || type == -6 || type == -8 || type == -9 || type == 7)
{
if (mem_n > fsize-fpos)
{
oyra_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_ "\n storage size of %s is too small: %d",
OY_DBG_ARGS_, oyNoEmptyString_m_( filename ),
(int)mem_n-fsize-fpos );
oyFree_m_( data )
return FALSE;
}
} else
{
if (type == 2 || type == 3) {
oyra_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_ "\n %s contains ascii data, which are not handled by this pnm reader",
OY_DBG_ARGS_, oyNoEmptyString_m_( filename ));
} else if (type == 1 || type == 4) {
oyra_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_ "\n %s contains bitmap data, which are not handled by this pnm reader",
OY_DBG_ARGS_, oyNoEmptyString_m_( filename ) );
}
oyFree_m_( data )
return FALSE;
}
oyAllocHelper_m_( buf, uint8_t, mem_n, 0, oyFree_m_( data ); return 1);
DBG_NUM2_S("allocate image data: 0x%x size: %d ", (int)(intptr_t)
buf, mem_n );
/* the following code is almost completely taken from ku.b's ppm CP plug-in */
{
int h, j_h = 0, p, n_samples = 0, n_bytes = 0;
int byte_swap = 0;
unsigned char *d_8 = 0;
unsigned char *src = &data[fpos];
uint16_t *d_16;
half *d_f16;
float *d_f;
int adapt = 0;
if(oyBigEndian())
{
if( maxval < 0 &&
(byteps == 2 || byteps == 4) )
byte_swap = 1;
} else
{
if( maxval > 0 &&
(byteps == 2 || byteps == 4) )
byte_swap = 1;
}
maxval = fabs(maxval);
for(h = 0; h < height; ++h)
{
n_samples = 1 * width * spp;
n_bytes = n_samples * byteps;
d_8 = buf;
d_16 = (uint16_t*)buf;
d_f16= (half*)buf;
d_f = (float*)buf;
/* TODO 1 bit raw and ascii */
if (type == 1 || type == 4) {
/* TODO ascii */
} else if (type == 2 || type == 3) {
/* raw and floats */
} else if (type == 5 || type == 6 ||
type == -5 || type == -6 ||
type == -8 || type == -9 ||
type == 7 )
{
if(byteps == 1) {
d_8 = &src[ h * width * spp * byteps ];
} else if(byteps == 2) {
d_f16 = d_16 = (uint16_t*)& src[ h * width * spp * byteps ];
} else if(byteps == 4) {
d_f = (float*)&src[ h * width * spp * byteps ];
}
memcpy (&buf[ h * width * spp * byteps ],
&src[ (j_h + h) * width * spp * byteps ],
1 * width * spp * byteps);
}
/* normalise and byteswap */
if( byte_swap )
{
unsigned char *c_buf = &buf[ h * width * spp * byteps ];
char tmp;
adapt |= 1;
if (byteps == 2) { /* 16 bit */
#pragma omp parallel for private(tmp)
for (p = 0; p < n_bytes; p += 2)
{
tmp = c_buf[p];
c_buf[p] = c_buf[p+1];
c_buf[p+1] = tmp;
}
} else if (byteps == 4) { /* float */
#pragma omp parallel for private(tmp)
for (p = 0; p < n_bytes; p += 4)
{
tmp = c_buf[p];
c_buf[p] = c_buf[p+3];
c_buf[p+3] = tmp;
tmp = c_buf[p+1];
c_buf[p+1] = c_buf[p+2];
c_buf[p+2] = tmp;
}
}
}
if (byteps == 1 && maxval < 255) { /* 8 bit */
adapt |= 2;
#pragma omp parallel for
for (p = 0; p < n_samples; ++p)
d_8[p] = (d_8[p] * 255) / maxval;
} else if (byteps == 2 && maxval != 1.0 &&
(type == -8 || type == -9)) { /* half float */
adapt |= 2;
#pragma omp parallel for
for (p = 0; p < n_samples; ++p)
d_f16[p] = d_f16[p] * maxval;
} else if (byteps == 2 && maxval < 65535 &&
type != -8 && type != -9) {/* 16 bit */
adapt |= 2;
#pragma omp parallel for
for (p = 0; p < n_samples; ++p)
d_16 [p] = (d_16[p] * 65535) / maxval;
} else if (byteps == 4 && maxval != 1.0) { /* float */
adapt |= 2;
#pragma omp parallel for
for (p = 0; p < n_samples; ++p)
d_f[p] = d_f[p] * maxval;
}
}
if((adapt & 1) && oy_debug)
oyra_msg( oyMSG_DBG, (oyStruct_s*)node, OY_DBG_FORMAT_
"going to swap bytes %d %d", OY_DBG_ARGS_, byteps, n_bytes );
if((adapt & 2) && oy_debug)
oyra_msg( oyMSG_DBG, (oyStruct_s*)node,
OY_DBG_FORMAT_ "going to adapt intensity %g %d", OY_DBG_ARGS_, maxval, n_samples );
}
pixel_type = oyChannels_m(spp) | oyDataType_m(data_type);
if(!prof)
prof = oyProfile_FromStd( profile_type, icc_profile_flags, 0 );
image_in = oyImage_Create( width, height, buf, pixel_type, prof, 0 );
if (!image_in)
{
oyra_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_ "PNM can't create a new image\n%dx%d %d",
OY_DBG_ARGS_, width, height, pixel_type );
oyFree_m_ (data)
return FALSE;
}
tags = oyImage_GetTags( image_in );
error = oyOptions_SetFromString( &tags,
"//" OY_TYPE_STD "/file_read.input_ppm"
"/filename",
filename, OY_CREATE_NEW );
oyOptions_Release( &tags );
if(error <= 0)
{
oyFilterSocket_SetData( socket, (oyStruct_s*)image_in );
}
if(ticket)
output_image = oyPixelAccess_GetOutputImage( ticket );
if(ticket &&
output_image &&
oyImage_GetWidth( output_image ) == 0 &&
oyImage_GetHeight( output_image ) == 0)
{
oyImage_SetCritical( output_image, oyImage_GetPixelLayout( image_in,
oyLAYOUT ),
0,0,
oyImage_GetWidth( image_in ),
oyImage_GetHeight( image_in ) );
}
oyImage_Release( &image_in );
oyImage_Release( &output_image );
oyFilterNode_Release( &node );
oyFilterSocket_Release( &socket );
oyFree_m_ (data)
/* return an error to cause the graph to retry */
return 1;
}
/** XFORMS options skeleton for the PPM file reader: declares the single
 *  "filename" option nested inside the module's registration path.
 *  NOTE: the backslash continuations are inside one string literal, so the
 *  line layout below is part of the emitted XML text. */
const char ppm_read_extra_options[] = {
"\n\
<" OY_TOP_SHARED ">\n\
<" OY_DOMAIN_INTERNAL ">\n\
<" OY_TYPE_STD ">\n\
<" "file_read" ">\n\
<filename></filename>\n\
</" "file_read" ">\n\
</" OY_TYPE_STD ">\n\
</" OY_DOMAIN_INTERNAL ">\n\
</" OY_TOP_SHARED ">\n"
};
/** oyraPPMreadUiGet
 *  @brief   provide the XFORMS UI fragment for the PPM reader options
 *
 *  Implements oyCMMuiGet_f. The reader has no dynamic UI, so an empty
 *  string is returned in xforms_layout.
 *
 *  @param[in]     module         unused
 *  @param[in]     opts           unused
 *  @param[in]     flags          unused
 *  @param[out]    xforms_layout  receives an allocated empty string
 *  @param[in]     allocateFunc   allocator used for the returned string
 *  @return                       0 on success, 1 on allocation failure
 */
int oyraPPMreadUiGet ( oyCMMapiFilter_s * module OY_UNUSED,
                       oyOptions_s * opts OY_UNUSED,
                       int flags OY_UNUSED,
                       char ** xforms_layout,
                       oyAlloc_f allocateFunc )
{
  /* small fixed buffer; only the terminating zero is written */
  char * text = (char*)allocateFunc(5);
  if(!text)
    return 1; /* previously dereferenced a failed allocation */
  text[0] = 0;
  *xforms_layout = text;
  return 0;
}
/** Imaging connector (socket) description for the PPM reader filter:
 *  one "//toolkit/image.data" socket carrying 1..4 interwoven channels in
 *  the data types listed in oyra_image_ppm_data_types. */
oyConnectorImaging_s_ oyra_imageInputPPM_connector = {
  oyOBJECT_CONNECTOR_IMAGING_S,0,0,
  (oyObject_s)&oy_connector_imaging_static_object,
  oyCMMgetImageConnectorSocketText, /* getText */
  oy_image_connector_texts, /* texts */
  "//" OY_TYPE_STD "/image.data", /* connector_type */
  oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */
  0, /* is_plug == oyFilterPlug_s; 0 means this is a socket */
  oyra_image_ppm_data_types,
  4, /* data_types_n; elements in data_types array */
  -1, /* max_color_offset */
  1, /* min_channels_count; */
  4, /* max_channels_count; */
  1, /* min_color_count; */
  4, /* max_color_count; */
  0, /* can_planar; can read separated channels */
  1, /* can_interwoven; can read continuous channels */
  0, /* can_swap; can swap color channels (BGR)*/
  0, /* can_swap_bytes; non host byte order */
  0, /* can_revert; revert 1 -> 0 and 0 -> 1 */
  1, /* can_premultiplied_alpha; */
  1, /* can_nonpremultiplied_alpha; */
  0, /* can_subpixel; understand subpixel order */
  0, /* oyCHANNELTYPE_e * channel_types; */
  0, /* count in channel_types */
  1, /* id; relative to oyFilter_s, e.g. 1 */
  0 /* is_mandatory; mandatory flag */
};
/* NULL terminated socket list for the PPM reader (exactly one socket). */
oyConnectorImaging_s_ * oyra_imageInputPPM_connectors[2] =
{ &oyra_imageInputPPM_connector, 0 };
/**
 *  This function implements oyCMMGetText_f.
 *
 *  Supplies the "name", "category" and "help" UI strings of the PPM
 *  reader filter at the requested verbosity (oyNAME_e).
 *
 *  @param[in]  select   one of "name", "category", "help"
 *  @param[in]  type     oyNAME_NICK / oyNAME_NAME / oyNAME_DESCRIPTION
 *  @param[in]  context  unused
 *  @return              static string, or 0 for an unknown selector
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/12/22 (Oyranos: 0.1.10)
 *  @date    2009/12/22
 */
const char * oyraApi4ImageInputUiGetText (
                                       const char * select,
                                       oyNAME_e type,
                                       oyStruct_s * context OY_UNUSED )
{
  /* lazily built once, cached for the process lifetime
   * NOTE(review): the build is not thread safe and the string is never
   * freed -- matches the pattern of other oyra getText functions. */
  static char * category = 0;
  if(strcmp(select,"name") == 0)
  {
    if(type == oyNAME_NICK)
      return "input_ppm";
    else if(type == oyNAME_NAME)
      return _("Image[input_ppm]");
    else
      return _("Input PPM Image Filter Object");
  }
  else if(strcmp(select,"category") == 0)
  {
    if(!category)
    {
      STRING_ADD( category, _("Files") );
      STRING_ADD( category, _("/") );
      STRING_ADD( category, _("Read PPM") );
    }
    if(type == oyNAME_NICK)
      return "category";
    else if(type == oyNAME_NAME)
      return category;
    else
      return category;
  }
  else if(strcmp(select,"help") == 0)
  {
    if(type == oyNAME_NICK)
      return "help";
    else if(type == oyNAME_NAME)
      /* typo fix: "a existing" -> "an existing" */
      return _("Option \"filename\", a valid filename of an existing PPM image");
    else
      /* typo fix: "a error will occure" -> "an error will occur" */
      return _("The Option \"filename\" should contain a valid filename to read the ppm data from. If the file does not exist, an error will occur.\nThe oyEDITING_RGB ICC profile is attached.");
  }
  return 0;
}
/* Selectors understood by oyraApi4ImageInputUiGetText(); NULL terminated. */
const char * oyra_api4_image_input_ppm_ui_texts[] = {"name", "category", "help", 0};
/** @brief oyra oyCMMapi4_s::ui implementation
 *
 *  The UI for filter input ppm.
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/09/09 (Oyranos: 0.1.10)
 *  @date    2009/09/09
 */
oyCMMui_s_ oyra_api4_ui_image_input_ppm = {
  oyOBJECT_CMM_DATA_TYPES_S, /**< oyOBJECT_e type; NOTE(review): a UI-specific tag might be expected here -- confirm against oyCMMui_s_ */
  0,0,0, /* unused oyStruct_s fields; keep to zero */
  CMM_VERSION, /**< int32_t version[3] */
  CMM_API_VERSION, /**< int32_t module_api[3] */
  oyraFilter_ImageInputPPMValidateOptions, /* oyCMMFilter_ValidateOptions_f */
  oyraWidgetEvent, /* oyWidgetEvent_f */
  "Files/Read PPM", /* category */
  ppm_read_extra_options, /* const char * options */
  oyraPPMreadUiGet, /* oyCMMuiGet_f oyCMMuiGet */
  oyraApi4ImageInputUiGetText, /* oyCMMGetText_f getText */
  oyra_api4_image_input_ppm_ui_texts, /* const char ** texts */
  (oyCMMapiFilter_s*)&oyra_api4_ui_image_input_ppm /* oyCMMapiFilter_s*parent; NOTE(review): self-reference -- parent usually points at the owning api4 struct (oyra_api4_image_input_ppm); confirm */
};
/** @brief oyra oyCMMapi4_s implementation
 *
 *  A filter for reading a PPM image.
 *
 *  @par Options:
 *  - "filename" - the file name to read from
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/02/18 (Oyranos: 0.1.10)
 *  @date    2009/02/18
 */
oyCMMapi4_s_ oyra_api4_image_input_ppm = {
  oyOBJECT_CMM_API4_S, /* oyStruct_s::type oyOBJECT_CMM_API4_S */
  0,0,0, /* unused oyStruct_s fields; keep to zero */
  (oyCMMapi_s*) & oyra_api7_image_input_ppm, /* oyCMMapi_s * next */
  oyraCMMInit, /* oyCMMInit_f */
  oyraCMMMessageFuncSet, /* oyCMMMessageFuncSet_f */
  /* registration string; must match the api7 sibling below */
  OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD "/file_read.input_ppm._CPU._" CMM_NICK,
  CMM_VERSION, /* int32_t version[3] */
  CMM_API_VERSION, /**< int32_t module_api[3] */
  0, /* id_; keep empty */
  0, /* api5_; keep empty */
  0, /* runtime_context */
  (oyCMMFilterNode_ContextToMem_f)oyFilterNode_TextToInfo_, /* oyCMMFilterNode_ContextToMem_f */
  0, /* oyCMMFilterNode_GetText_f oyCMMFilterNode_GetText */
  {0}, /* char context_type[8] */
  (oyCMMui_s_*)&oyra_api4_ui_image_input_ppm /**< oyCMMui_s *ui */
};
/* Capability strings announced by the api7 PPM reader, NULL terminated. */
char * oyra_api7_image_input_ppm_properties[] =
{
  "file=read", /* file read|write */
  "image=pixel", /* image type, pixel/vector/font */
  "layers=1", /* layer count, one for plain images */
  "icc=1", /* image type ICC profile support */
  "ext=pam,ppm,pnm,pbm,pgm,pfm,raw", /* supported extensions */
  0
};
/** @brief oyra oyCMMapi7_s implementation
 *
 *  A filter reading a PPM image.
 *
 *  @par Options:
 *  - "filename" - the file name to read from
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/02/18 (Oyranos: 0.1.10)
 *  @date    2009/02/18
 */
oyCMMapi7_s_ oyra_api7_image_input_ppm = {
  oyOBJECT_CMM_API7_S, /* oyStruct_s::type oyOBJECT_CMM_API7_S */
  0,0,0, /* unused oyStruct_s fields; keep to zero */
  (oyCMMapi_s*) & oyra_api4_image_load, /* oyCMMapi_s * next; defined elsewhere in this module */
  oyraCMMInit, /* oyCMMInit_f */
  oyraCMMMessageFuncSet, /* oyCMMMessageFuncSet_f */
  /* registration string; must match the api4 sibling above */
  OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD "/file_read.input_ppm._CPU._" CMM_NICK,
  CMM_VERSION, /* int32_t version[3] */
  CMM_API_VERSION, /**< int32_t module_api[3] */
  0, /* id_; keep empty */
  0, /* api5_; keep empty */
  0, /* runtime_context */
  oyraFilterPlug_ImageInputPPMRun, /* oyCMMFilterPlug_Run_f; the reader entry point */
  {0}, /* char data_type[8] */
  0, /* plugs; a pure source filter has no input plugs */
  0, /* plugs_n */
  0, /* plugs_last_add */
  (oyConnector_s**) oyra_imageInputPPM_connectors, /* sockets */
  1, /* sockets_n */
  0, /* sockets_last_add */
  oyra_api7_image_input_ppm_properties /* char ** properties */
};
|
GB_unop__isinf_bool_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isinf_bool_fc64)
// op(A') function: GB (_unop_tran__isinf_bool_fc64)
// C type: bool
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = GB_cisinf (aij)
// A (input) matrix entry type
#define GB_ATYPE \
    GxB_FC64_t

// C (output) matrix entry type
#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

// access the p-th output entry
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_cisinf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = (aij) ;

// cij = op (aij): combined load, cast and apply (used by the transpose template)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = (aij) ; \
    Cx [pC] = GB_cisinf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the isinf unary operator to every entry of A
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__isinf_bool_fc64)
(
    bool *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Entries whose bitmap bit is clear are left untouched.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                GxB_FC64_t zk = Ax [k] ;
                Cx [k] = GB_cisinf (zk) ;
            }
        }
    }
    else
    {
        // dense case: apply the operator to all anz entries
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            GxB_FC64_t zk = Ax [k] ;
            Cx [k] = GB_cisinf (zk) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__isinf_bool_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // per-task workspace, managed by the caller -- see GB_unop_transpose.c
    const int64_t *restrict A_slice, // partition of A over the tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The whole loop body comes from the shared transpose template; the
    // per-entry work is the GB_CAST_OP macro defined above.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
5446.c |
/*
* Compile using the command:
* `cc 27Stencil.c -o oa -fopenmp -lm`
*/
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENACC
#include <openacc.h>
#endif
#define DEFAULT_DATASIZE 1048576 /* Default datasize. */
#define DEFAULT_REPS 10 /* Default repetitions. */
#define CONF95 1.96
#define ITERATIONS 10
#define FAC (1./26)
#define TOLERANCE 1.0e-15
/* Benchmark configuration and result storage shared across this file.
 * The value -1 means "not given on the command line; init() applies the
 * DEFAULT_* value". */
extern int reps; /* Repetitions. */
extern double *times; /* Array to store results in. */
extern int flag; /* Flag to set CPU or GPU invocation. */
extern unsigned int datasize; /* Datasize passed to benchmark functions. */
unsigned int datasize = -1; /* Datasize for tests in bytes (-1 == unset). */
int reps = -1; /* Repetitions (-1 == unset). */
double *times; /* Array of doubles storing the benchmark times in microseconds. */
double testtime; /* The average test time in microseconds for reps runs. */
double testsd; /* The standard deviation in the test time in microseconds for reps runs. */
int flag = 0; /* 0 indicates CPU. */
/*
* Function prototypes for common functions.
*/
void init(int argc, char **argv);
void finalisetest(char *);
void finalise(void);
void benchmark(char *, double (*test)(void));
void print_results(char *, double, double);
/* Forward Declarations of utility functions*/
double max_diff(double *, double *, int);
void wul();
/*
 * Print the command line synopsis for this benchmark to stdout.
 * argv[0] is used as the program name.
 */
void usage(char *argv[]) {
    const char *prog = argv[0];
    fprintf(stdout,
            "Usage: %s \n"
            "\t--reps <repetitions> (default %d)\n"
            "\t--datasize <datasize> (default %d bytes)\n",
            prog, DEFAULT_REPS, DEFAULT_DATASIZE);
}
/*
 * Parse --reps and --datasize from the command line into the global
 * configuration variables. Prints usage and exits on a malformed value,
 * a missing value, or an unknown option; -h prints usage and exits 0.
 */
void parse_args(int argc, char *argv[]) {
    int arg;
    for (arg = 1; arg < argc; arg++) {
        if (strcmp(argv[arg], "--reps") == 0) {
            /* bug fix: previously `atoi(argv[++arg])` read past the end of
             * argv when --reps was the last argument */
            if (arg + 1 >= argc) {
                printf("Missing value for --reps\n");
                usage(argv);
                exit(EXIT_FAILURE);
            }
            reps = atoi(argv[++arg]);
            if (reps == 0) {
                printf("Invalid integer:--reps: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--datasize") == 0) {
            if (arg + 1 >= argc) {
                printf("Missing value for --datasize\n");
                usage(argv);
                exit(EXIT_FAILURE);
            }
            datasize = atoi(argv[++arg]);
            if (datasize == 0) {
                printf("Invalid integer:--datasize: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "-h") == 0) {
            usage(argv);
            exit(EXIT_SUCCESS);
        } else {
            printf("Invalid parameters: %s\n", argv[arg]);
            usage(argv);
            exit(EXIT_FAILURE);
        }
    }
}
/*
 * Compute mean and standard deviation of the global `times` array,
 * skipping entries equal to 0 (the error marker written by benchmark()).
 *
 * mtp: out, mean time over the successful repetitions
 * sdp: out, standard deviation over the successful repetitions
 */
void stats(double *mtp, double *sdp) {
    double meantime, totaltime, sumsq, mintime, maxtime, sd;
    int i, good_reps;

    mintime = 1.0e10;
    maxtime = 0.;
    totaltime = 0.;
    good_reps = 0;

    for (i = 0; i < reps; i++) {
        /* Skip entries where times is 0, this indicates an error occurred */
        if (times[i] != 0) {
            mintime = (mintime < times[i]) ? mintime : times[i];
            maxtime = (maxtime > times[i]) ? maxtime : times[i];
            totaltime += times[i];
            good_reps++;
        }
    }

    /* bug fix: if every repetition failed, the divisions below produced
     * NaN/inf; report zeros instead */
    if (good_reps == 0) {
        *mtp = 0.;
        *sdp = 0.;
        return;
    }

    meantime = totaltime / good_reps;
    sumsq = 0;
    for (i = 0; i < reps; i++) {
        if (times[i] != 0) {
            sumsq += (times[i] - meantime) * (times[i] - meantime);
        }
    }
    sd = sqrt(sumsq / good_reps);

    *mtp = meantime;
    *sdp = sd;
}
/*
 * Print the mean test time (converted to microseconds) to stdout.
 * The compiler identifier and the name/testsd parameters are kept for
 * the full report format, which is currently disabled.
 * If you use a compiler which sets a different preprocessor flag
 * you may wish to add it here.
 */
void print_results(char *name, double testtime, double testsd) {
    char compiler[20];

    /* Default compiler identifier, then override for known toolchains. */
    strcpy(compiler, "COMPILER");
#ifdef __PGI
    strcpy(compiler, "PGI");
#endif
#ifdef __HMPP
    strcpy(compiler, "CAPS");
#endif

    /* Full report format (disabled):
     * printf("%s %s %d %f %f\n", compiler, name, datasize,
     *        testtime*1e6, CONF95*testsd*1e6); */
    printf("%f\n", testtime * 1e6);
}
/*
 * Initialise the benchmark: parse command line arguments, fall back to the
 * DEFAULT_* values for anything unset, and allocate the global `times`
 * array (one slot per repetition). Exits on allocation failure.
 */
void init(int argc, char **argv)
{
    parse_args(argc, argv);

    if (reps == -1) {
        reps = DEFAULT_REPS;
    }
    if (datasize == (unsigned int)-1) {
        datasize = DEFAULT_DATASIZE;
    }

    times = (double *)malloc((reps) * sizeof(double));
    /* bug fix: the malloc result was previously used unchecked */
    if (times == NULL) {
        printf("Unable to allocate memory for timing results.\n");
        exit(EXIT_FAILURE);
    }

    /*
    #ifdef __PGI
      acc_init(acc_device_nvidia);
      // printf("PGI INIT\n");
    #endif
    #ifdef __HMPP
      int a[5] = {1,2,3,4,5};
    #pragma acc data copyin(a[0:5])
      {}
    #endif
    #ifdef _CRAYC
      int a[5] = {1,2,3,4,5};
    #pragma acc data copyin(a[0:5])
      {}
    #endif
    */
}
/*
 * Release the global timing storage. The pointer is reset so that a stray
 * later use fails fast instead of touching freed memory, and a second call
 * to finalise() becomes a harmless no-op (free(NULL) is defined).
 */
void finalise(void) {
    free(times);
    times = NULL;
}
/*
* This function runs the benchmark specified.
*/
void benchmark(char *name, double (*test)(void))
{
int i = 0;
double tmp = 0;
for (i=0; i<reps; i++) {
tmp = test();
if (tmp == -10000){
printf("Memory allocation failure in %s\n", name);
times[i] = 0;
}
else if (tmp == -11000){
printf("CPU/GPU mismatch in %s\n", name);
times[i] = 0;
}
else{
times[i] = tmp;
}
}
stats(&testtime, &testsd);
//printf("in benchmark\n");
print_results(name, testtime, testsd);
//printf("printed result\n");
}
/*
 * 27-point stencil benchmark kernel.
 *
 * Sizes a sz^3 grid (including a one-point halo) so that two double grids
 * fit in the global `datasize` byte budget, fills the interior with random
 * values, then runs ITERATIONS stencil sweeps twice: once on the host and
 * once inside an OpenACC data region. Returns the wall time of the
 * accelerated region in seconds, -10000 on allocation failure, or -11000
 * if the two results differ by more than TOLERANCE.
 */
double stencil()
{
    extern unsigned int datasize;
    /* two sz^3 grids of doubles must fit into datasize bytes */
    int sz = cbrt((datasize/sizeof(double))/2);
    int i, j, k, iter;
    int n = sz-2; /* interior edge length */
    double fac = FAC;
    double t1, t2;
    double md;
    //printf("size = %d\n", sz);
    /* Work buffers, with halos */
    double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);
    if(a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL){
        /* Something went wrong in the memory allocation here, fail gracefully */
        /* NOTE(review): buffers that did allocate are leaked on this path */
        return(-10000);
    }
    /* initialize input array a0 */
    /* zero all of array (including halos) */
    //printf("size = %d\n", sz);
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0[i*sz*sz+j*sz+k] = 0.0;
                //printf("%d\t", (i*sz*sz+j*sz+k));
            }
        }
    }
    //printf("\n");
    //int size_of_a0 = sizeof(a0) / sizeof(*a0);
    //printf("size of a0 = %d\n", size_of_a0);
    /* use random numbers to fill interior */
    for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
            for (k = 1; k < n+1; k++) {
                a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX);
            }
        }
    }
    /* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */
    /* save initial input array for later GPU run */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
            }
        }
    }
    //printf("Host computation\n");
    /* run main computation on host: each sweep writes the scaled sum of the
     * 26 neighbours of every interior point into a1 */
    for (iter = 0; iter < ITERATIONS; iter++) {
        for (i = 1; i < n+1; i++) {
            for (j = 1; j < n+1; j++) {
                for (k = 1; k < n+1; k++) {
                    a1[i*sz*sz+j*sz+k] = (
                        a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                        a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                        a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                        a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                        a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                        a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                        a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                        a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                        a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                        a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                        a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                        a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                        a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
                    ) * fac;
                }
            }
        }
        /* copy the sweep result back into a0 for the next iteration */
        for (i = 1; i < n+1; i++) {
            for (j = 1; j < n+1; j++) {
                for (k = 1; k < n+1; k++) {
                    a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
                }
            }
        }
    } /* end iteration loop */
    /* save result */
    /* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
                // printf("%lf\t", a0[i*sz*sz+j*sz+k]);
            }
        }
    }
    //int size = sizeof(host_result)/sizeof(host_result[0]);
    //for(i = 0; i < size; i++) {
    //   printf("%lf\t", host_result[i]);
    //}
    //printf("\n");
    /* copy initial array back to a0 */
    /* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
            }
        }
    }
    //printf("Starting acc pragma code\n");
    /* timed, "accelerated" repeat of the same computation */
    t1 = omp_get_wtime();
#pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
    {
        for (iter = 0; iter < ITERATIONS; iter++) {
            /* NOTE(review): three NESTED `omp parallel for` regions, with
             * collapse(2) applied to loop nests whose inner loop carries
             * another pragma, look non-conforming per the OpenMP spec and
             * are presumably leftovers from porting the acc kernels --
             * confirm intent before touching. */
#pragma omp parallel for schedule(dynamic, 16) collapse(2) num_threads(28)
            for (i = 1; i < n+1; i++) {
#pragma omp parallel for schedule(dynamic, 16) collapse(2) num_threads(28)
                for (j = 1; j < n+1; j++) {
#pragma omp parallel for schedule(dynamic, 16) num_threads(28)
                    for (k = 1; k < n+1; k++) {
                        a1[i*sz*sz+j*sz+k] = (
                            a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                            a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                            a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                            a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                            a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                            a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                            a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                            a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                            a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                            a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                            a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                            a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                            a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
                        ) * fac;
                    }
                }
            }
            /* device-side copy-back of the sweep result */
#pragma acc parallel loop
            for (i = 1; i < n+1; i++) {
#pragma acc loop
                for (j = 1; j < n+1; j++) {
#pragma acc loop
                    for (k = 1; k < n+1; k++) {
                        a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
                    }
                }
            }
        } /* end iteration loop */
    } /* end data region */
    /* NOTE(review): the data region exit above already synchronises the
     * copy(a0) transfer; this wait is redundant but harmless */
#pragma acc wait
    t2 = omp_get_wtime();
    memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz);
    md = max_diff(&host_result[0],&device_result[0], sz);
    /* Free malloc'd memory to prevent leaks */
    free(a0);
    free(a0_init);
    free(a1);
    free(host_result);
    free(device_result);
    //printf("md: %lf \t tolerance: %lf", md, TOLERANCE);
    if (md < TOLERANCE ){
        //printf ("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE);
        return(t2 - t1);
    }
    else{
        // printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md);
        return(-11000);
    }
}
/* Utility Functions */
/*
 * Return the largest absolute element-wise difference between the two
 * sz^3 grids, comparing interior points only (the one-point halo on each
 * face is ignored).
 */
double max_diff(double *array1, double *array2, int sz)
{
    double largest = 0.0;
    int n = sz - 2; /* interior edge length */
    int i, j, k;

    for (i = 1; i <= n; i++) {
        for (j = 1; j <= n; j++) {
            for (k = 1; k <= n; k++) {
                int idx = i*sz*sz + j*sz + k;
                double d = fabs(array1[idx] - array2[idx]);
                if (d > largest)
                    largest = d;
            }
        }
    }
    return largest;
}
/*
 * Warm-up: run a trivial kernel so device start-up cost is not charged to
 * the first benchmarked run.
 * It is more portable than acc_init().
 */
void wul(){
    int data = 8192;
    double *arr_a = (double *)malloc(sizeof(double) * data);
    double *arr_b = (double *)malloc(sizeof(double) * data);
    int i = 0;
    if (arr_a==NULL||arr_b==NULL) {
        /* bug fix: previously execution fell through after the message and
         * dereferenced the NULL buffer; release what was allocated and bail */
        printf("Unable to allocate memory in wul.\n");
        free(arr_a); /* free(NULL) is a no-op */
        free(arr_b);
        return;
    }
    for (i=0;i<data;i++){
        arr_a[i] = (double) (rand()/(1.0+RAND_MAX));
    }
#pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
    {
#pragma acc parallel loop
        for (i=0;i<data;i++){
            arr_b[i] = arr_a[i] * 2;
        }
    }
    if (arr_a[0] < 0){
        printf("Error in WUL\n");
        /*
         * This should never be called as rands should be in the range (0,1].
         * This stops clever optimizers.
         */
    }
    free(arr_a);
    free(arr_b);
}
/*
 * Entry point: parse arguments, wake the device, run the 27-point stencil
 * benchmark once per configured repetition, and print the mean time.
 */
int main(int argc, char **argv) {
    char testName[32];
    //printf("compiler name datasize testtime*1e6 CONF95*testsd*1e6\n");
    /* Initialise storage for test results & parse input arguments. */
    init(argc, argv);
    /* Ensure device is awake. */
    wul();
    sprintf(testName, "27S");
    benchmark(testName, &stencil);
    /* Print results & free results storage */
    finalise();
    return EXIT_SUCCESS;
}
|
batchnorm_tpp.h | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Kirill Voronin (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <libxsmm_sync.h>
#include <libxsmm_intrinsics_x86.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
#define BITS_PER_CHAR (8)
/* Fusion variants: which extra elementwise operations are folded into the
 * batchnorm kernels. The "WITH_MASK" variants additionally emit a packed
 * ReLU bitmask (one bit per element, see BITS_PER_CHAR) for the backward pass. */
typedef enum my_bn_fuse {
MY_BN_FUSE_NONE = 0,                  /* plain batchnorm */
MY_BN_FUSE_RELU = 1,                  /* batchnorm + ReLU */
MY_BN_FUSE_ELTWISE = 2,               /* batchnorm + residual (eltwise) add */
MY_BN_FUSE_ELTWISE_RELU = 3,          /* batchnorm + residual add + ReLU */
MY_BN_FUSE_RELU_WITH_MASK = 4,        /* ReLU, also materializing the bitmask */
MY_BN_FUSE_ELTWISE_RELU_WITH_MASK = 5 /* residual add + ReLU + bitmask */
} my_bn_fuse;
/* Selects how much work the exec functions perform per call. */
typedef enum my_bn_norm_type {
MY_BN_FULL_NORM = 0, /* stats + normalize for fwd, all grads for bwd */
MY_BN_SCALE_ONLY = 1 /* normalize only for fwd, only input grad for bwd */
} my_bn_norm_type;
/* Forward batchnorm configuration: problem sizes, JITted TPP kernels and the
 * fused normalization equation. Created by setup_my_bn_fwd() and consumed by
 * the my_bn_fwd_exec_* functions. */
typedef struct my_bn_fwd_config {
libxsmm_blasint N;                /* batch size */
libxsmm_blasint C;                /* total channels */
libxsmm_blasint H;                /* spatial height */
libxsmm_blasint W;                /* spatial width */
libxsmm_blasint bc;               /* channel block size (C = CP * bc) */
libxsmm_blasint CP;               /* number of channel blocks */
libxsmm_blasint num_HW_blocks;    /* number of blocks the flattened HW plane is split into */
libxsmm_blasint threads;          /* number of threads sharing the barrier */
size_t scratch_size;              /* bytes of scratch required by the exec functions */
libxsmm_datatype datatype_in;     /* input precision */
libxsmm_datatype datatype_out;    /* output precision */
libxsmm_datatype datatype_comp;   /* compute precision */
libxsmm_barrier* barrier;         /* thread barrier created in setup */
libxsmm_matrix_eqn_function func10;           /* fused normalize(+eltwise+relu) equation */
libxsmm_meltwfunction_unary reduce_HW_kernel; /* reduces X and X^2 over one HW block */
libxsmm_meltwfunction_unary all_zero_kernel;  /* zero-initializes a [bc] accumulator */
libxsmm_meltwfunction_binary helper_add_kernel; /* adds two [bc] vectors */
my_bn_fuse fuse_type;             /* which ops are fused into the equation */
} my_bn_fwd_config;
/* Backward batchnorm configuration: problem sizes, JITted TPP kernels and the
 * gradient equations (dgamma, dbeta, din). Created by setup_my_bn_bwd(). */
typedef struct my_bn_bwd_config {
libxsmm_blasint N;                /* batch size */
libxsmm_blasint C;                /* total channels */
libxsmm_blasint H;                /* spatial height */
libxsmm_blasint W;                /* spatial width */
libxsmm_blasint bc;               /* channel block size (C = CP * bc) */
libxsmm_blasint CP;               /* number of channel blocks */
libxsmm_blasint num_HW_blocks;    /* number of blocks the flattened HW plane is split into */
libxsmm_blasint threads;          /* number of threads sharing the barrier */
size_t scratch_size;              /* bytes of scratch required by the exec functions */
libxsmm_datatype datatype_in;     /* input precision */
libxsmm_datatype datatype_out;    /* output precision */
libxsmm_datatype datatype_comp;   /* compute precision */
libxsmm_barrier* barrier;         /* thread barrier created in setup */
libxsmm_matrix_eqn_function dgamma_func;      /* gamma-gradient reduction equation */
libxsmm_matrix_eqn_function dbeta_func;       /* beta-gradient reduction equation */
libxsmm_matrix_eqn_function din_func;         /* input-gradient equation */
libxsmm_meltwfunction_unary all_zero_kernel;  /* zero-initializes a [bc] accumulator */
libxsmm_meltwfunction_binary helper_add_kernel; /* adds two [bc] vectors */
libxsmm_meltwfunction_unary helper_copy_kernel; /* copies a [bc] vector */
libxsmm_meltwfunction_unary inv_relu_kernel;  /* applies the ReLU mask to dout (fused ReLU variants) */
libxsmm_meltwfunction_unary ewise_copy_kernel; /* copies dout into the residual gradient (eltwise variants) */
my_bn_fuse fuse_type;             /* which ops are fused into the equations */
} my_bn_bwd_config;
/* Builds the forward batchnorm configuration: validates the channel blocking,
 * creates the thread barrier, JITs the elementwise TPP kernels and the fused
 * normalization matrix equation, and computes the scratch-space requirement.
 *
 * N/C/H/W     tensor dimensions; bc: channel block size (assumes C % bc == 0)
 * threads     number of threads that will share the barrier
 * fuse_type   which elementwise ops (ReLU / residual add / mask) are fused
 * datatype_*  input/output/compute precisions
 * Returns the fully initialized config; exits the process on JIT failure.
 * (Fuse-type comparisons use the MY_BN_FUSE_* enum names, consistent with
 * the exec functions.) */
my_bn_fwd_config setup_my_bn_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint H, libxsmm_blasint W, libxsmm_blasint bc,
                                 libxsmm_blasint threads, my_bn_fuse fuse_type,
                                 libxsmm_datatype datatype_in, libxsmm_datatype datatype_out, libxsmm_datatype datatype_comp ) {
  my_bn_fwd_config res;
  size_t sum_N_offset, sumsq_N_offset;
  libxsmm_meltw_unary_shape unary_shape;
  libxsmm_meltw_binary_shape binary_shape;
  libxsmm_bitfield unary_flags;
  libxsmm_bitfield binary_flags;
  libxsmm_bitfield ternary_flags;
  libxsmm_blasint ldo = bc;
  libxsmm_blasint ld = bc;
  libxsmm_blasint tmp_ld, tmp_ld2;
  libxsmm_blasint my_eqn10;
  libxsmm_meqn_arg_shape eqn_out_arg_shape;
  libxsmm_meqn_arg_shape arg_shape[128];
  libxsmm_matrix_arg_attributes arg_singular_attr;
  libxsmm_matrix_eqn_arg_metadata arg_metadata[128];
  libxsmm_matrix_eqn_op_metadata op_metadata[128];
  arg_singular_attr.type = LIBXSMM_MATRIX_ARG_TYPE_SINGULAR;
  memset( &res, 0, sizeof(res));
  /* setting up some handle values */
  res.N = N;
  res.C = C;
  res.H = H;
  res.W = W;
  res.bc = bc;
  res.CP = res.C / res.bc;
  /* NOTE(review): the HW plane is split into max(H, W) blocks, i.e. each
   * block is H*W/max(H,W) rows of the flattened [H*W, bc] tensor — assumes
   * H*W is divisible by max(H, W); confirm this is the intended blocking. */
  res.num_HW_blocks = (res.H > res.W ? res.H : res.W );
  res.threads = threads;
  res.fuse_type = fuse_type;
  /* when masking is on, bc must be divisible by 8 for compressing mask into char array (otherwise strides are wrong for relumask) */
  if ( (res.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) && (res.bc % BITS_PER_CHAR != 0)) {
    fprintf( stderr, "bc = %d is not divisible by BITS_PER_CHAR = %d. Bailing...!\n", (int)res.bc, BITS_PER_CHAR);
    exit(-1);
  }
  res.datatype_in = datatype_in;
  res.datatype_out = datatype_out;
  res.datatype_comp = datatype_comp;
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  /* TPP creation */
  memset( &unary_shape, 0, sizeof(libxsmm_meltw_unary_shape));
  memset( &binary_shape, 0, sizeof(libxsmm_meltw_binary_shape));
  /* Eltwise TPP: zero-initialization of a [bc] accumulator */
  unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE;
  unary_shape = libxsmm_create_meltw_unary_shape(res.bc, 1, res.bc, ldo, res.datatype_comp, res.datatype_comp, res.datatype_comp);
  res.all_zero_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_XOR, unary_shape, unary_flags);
  if ( res.all_zero_kernel == NULL) {
    fprintf( stderr, "JIT for TPP fwd all_zero_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* Eltwise TPP: [bc] + [bc] accumulator add */
  binary_shape = libxsmm_create_meltw_binary_shape(res.bc, 1, ldo, ldo, ldo, res.datatype_comp, res.datatype_comp, res.datatype_comp);
  binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE;
  res.helper_add_kernel = libxsmm_dispatch_meltw_binary_v2(LIBXSMM_MELTW_TYPE_BINARY_ADD, binary_shape, binary_flags);
  if ( res.helper_add_kernel == NULL) {
    fprintf( stderr, "JIT for TPP fwd helper_add_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* TPP for reducing X and X2 in HW: [HW/num_HW_blocks, bc] -> [2 * bc] */
  tmp_ld = bc;
  unary_shape = libxsmm_create_meltw_unary_shape(res.bc, res.H*res.W / res.num_HW_blocks, ld, tmp_ld, res.datatype_in, res.datatype_comp, res.datatype_comp);
  unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS;
  res.reduce_HW_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_X2_OP_ADD, unary_shape, unary_flags);
  if ( res.reduce_HW_kernel == NULL) {
    fprintf( stderr, "JIT for TPP fwd reduce_HW_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* TPP equation for scaling (ops are pushed outermost-first, args in
   * tree order — do not reorder these calls) */
  ld = bc;
  tmp_ld = 1;
  tmp_ld2 = 1;
  my_eqn10 = libxsmm_matrix_eqn_create(); /* y = relu ( ( (s*x + b)*gamma + beta ) + inp_add) */
  /* optional outermost ReLU (emits a bitmask for the *_WITH_MASK variants) */
  if (res.fuse_type == MY_BN_FUSE_RELU || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU ||
      res.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
    unary_flags = ( (res.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) ? LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT : LIBXSMM_MELTW_FLAG_UNARY_NONE);
    op_metadata[3].eqn_idx = my_eqn10;
    op_metadata[3].op_arg_pos = -1;
    libxsmm_matrix_eqn_push_back_unary_op_v2(op_metadata[3], LIBXSMM_MELTW_TYPE_UNARY_RELU, res.datatype_out, unary_flags);
    if (res.datatype_out == LIBXSMM_DATATYPE_BF16)
      libxsmm_matrix_eqn_push_back_unary_op_v2(op_metadata[3], LIBXSMM_MELTW_TYPE_UNARY_IDENTITY, res.datatype_out, LIBXSMM_MELTW_FLAG_UNARY_NONE);
  }
  /* optional eltwise residual add */
  if (res.fuse_type == MY_BN_FUSE_ELTWISE || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
    binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE;
    op_metadata[2].eqn_idx = my_eqn10;
    op_metadata[2].op_arg_pos = -1;
    libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[2], LIBXSMM_MELTW_TYPE_BINARY_ADD, res.datatype_comp, binary_flags);
  }
  /* outer MULADD: (...)*gamma + beta, gamma/beta broadcast over columns */
  ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_0 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT;
  op_metadata[0].eqn_idx = my_eqn10;
  op_metadata[0].op_arg_pos = -1;
  libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, res.datatype_comp, ternary_flags);
  arg_metadata[3].eqn_idx = my_eqn10;
  arg_metadata[3].in_arg_pos = 3;
  arg_shape[3].m = res.bc;                      /* gamma = [bc] */
  arg_shape[3].n = 1;
  arg_shape[3].ld = tmp_ld2;
  arg_shape[3].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[3], arg_shape[3], arg_singular_attr);
  /* inner MULADD: s*x + b, s/b broadcast over columns */
  ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_0 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT;
  op_metadata[1].eqn_idx = my_eqn10;
  op_metadata[1].op_arg_pos = -1;
  libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, res.datatype_comp, ternary_flags);
  arg_metadata[1].eqn_idx = my_eqn10;
  arg_metadata[1].in_arg_pos = 1;
  arg_shape[1].m = res.bc;                      /* s = [bc] */
  arg_shape[1].n = 1;
  arg_shape[1].ld = tmp_ld;
  arg_shape[1].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr);
  arg_metadata[0].eqn_idx = my_eqn10;
  arg_metadata[0].in_arg_pos = 0;
  arg_shape[0].m = res.bc;                      /* x = [HW, bc] */
  arg_shape[0].n = res.H*res.W /res.num_HW_blocks;
  arg_shape[0].ld = ld;
  arg_shape[0].type = res.datatype_in;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr);
  arg_metadata[2].eqn_idx = my_eqn10;
  arg_metadata[2].in_arg_pos = 2;
  arg_shape[2].m = res.bc;                      /* b = [bc] */
  arg_shape[2].n = 1;
  arg_shape[2].ld = tmp_ld;
  arg_shape[2].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[2], arg_shape[2], arg_singular_attr);
  arg_metadata[4].eqn_idx = my_eqn10;
  arg_metadata[4].in_arg_pos = 4;
  arg_shape[4].m = res.bc;                      /* beta = [bc] */
  arg_shape[4].n = 1;
  arg_shape[4].ld = tmp_ld2;
  arg_shape[4].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[4], arg_shape[4], arg_singular_attr);
  if (res.fuse_type == MY_BN_FUSE_ELTWISE || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
    arg_metadata[5].eqn_idx = my_eqn10;
    arg_metadata[5].in_arg_pos = 5;
    arg_shape[5].m = res.bc;                    /* inp_add = [HW, bc] */
    arg_shape[5].n = res.H*res.W / res.num_HW_blocks;
    arg_shape[5].ld = ld;
    arg_shape[5].type = res.datatype_in;
    libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[5], arg_shape[5], arg_singular_attr);
  }
  eqn_out_arg_shape.m = res.bc;                 /* y = [HW, bc] */
  eqn_out_arg_shape.n = res.H*res.W / res.num_HW_blocks;
  eqn_out_arg_shape.ld = ld;
  eqn_out_arg_shape.type = res.datatype_out;
  /* libxsmm_matrix_eqn_tree_print( my_eqn10 ); */
  /* libxsmm_matrix_eqn_rpn_print ( my_eqn10 ); */
  res.func10 = libxsmm_dispatch_matrix_eqn_v2( my_eqn10, eqn_out_arg_shape );
  if ( res.func10 == NULL) {
    fprintf( stderr, "JIT for TPP fwd func10 (eqn10) failed. Bailing...!\n");
    exit(-1);
  }
  /* init scratch: [sum_X_X2 | per-(cp,n) sums | per-(cp,n) sums of squares],
   * each region aligned to 64 bytes (layout must match my_bn_fwd_exec_*) */
  sum_N_offset = LIBXSMM_UP2(res.CP * 2 * res.bc, 64);
  sumsq_N_offset = LIBXSMM_UP2(sum_N_offset + res.CP * res.N * res.bc, 64);
  res.scratch_size = sizeof(float) * ( sumsq_N_offset /*sum_X_X2 + sum_N */ + LIBXSMM_UP2((size_t)res.CP * (size_t)res.N * (size_t)res.bc, 64) /* sumsq_N */ );
  return res;
}
/* Builds the backward batchnorm configuration: validates the channel
 * blocking, creates the thread barrier, JITs the helper TPP kernels and the
 * three gradient matrix equations (dgamma, dbeta, din), and computes the
 * scratch-space requirement.
 *
 * N/C/H/W     tensor dimensions; bc: channel block size (assumes C % bc == 0)
 * threads     number of threads that will share the barrier
 * fuse_type   which elementwise ops (ReLU / residual add / mask) were fused in fwd
 * datatype_*  input/output/compute precisions
 * Returns the fully initialized config; exits the process on JIT failure.
 * (Fixes from review: error messages now say "bwd" instead of "fwd";
 * fuse-type comparisons use the MY_BN_FUSE_* enum names; scratch-size
 * products are computed in size_t as in setup_my_bn_fwd.) */
my_bn_bwd_config setup_my_bn_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint H, libxsmm_blasint W, libxsmm_blasint bc,
                                 libxsmm_blasint threads, my_bn_fuse fuse_type,
                                 libxsmm_datatype datatype_in, libxsmm_datatype datatype_out, libxsmm_datatype datatype_comp ) {
  my_bn_bwd_config res;
  libxsmm_meltw_unary_shape unary_shape;
  libxsmm_meltw_binary_shape binary_shape;
  libxsmm_bitfield unary_flags;
  libxsmm_bitfield binary_flags;
  libxsmm_bitfield ternary_flags;
  libxsmm_meqn_arg_shape eqn_out_arg_shape;
  libxsmm_meqn_arg_shape arg_shape[128];
  libxsmm_matrix_arg_attributes arg_singular_attr;
  libxsmm_matrix_eqn_arg_metadata arg_metadata[128];
  libxsmm_matrix_eqn_op_metadata op_metadata[128];
  size_t dbeta_N_offset;
  libxsmm_blasint ldo = bc;
  libxsmm_blasint ld = bc;
  libxsmm_blasint tmp_ld2;
  libxsmm_blasint my_eqn11, my_eqn12, my_eqn16;
  arg_singular_attr.type = LIBXSMM_MATRIX_ARG_TYPE_SINGULAR;
  memset( &res, 0, sizeof(res));
  /* setting up some handle values */
  res.N = N;
  res.C = C;
  res.H = H;
  res.W = W;
  res.bc = bc;
  res.CP = res.C / res.bc;
  /* NOTE(review): same HW blocking as the fwd setup — assumes H*W is
   * divisible by max(H, W); confirm this is the intended blocking. */
  res.num_HW_blocks = (res.H > res.W ? res.H : res.W );
  res.threads = threads;
  res.fuse_type = fuse_type;
  /* when masking is on, bc must be divisible by 8 for compressing mask into char array (otherwise strides are wrong for relumask) */
  if ( (res.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) && (res.bc % BITS_PER_CHAR != 0)) {
    fprintf( stderr, "bc = %d is not divisible by BITS_PER_CHAR = %d. Bailing...!\n", (int)res.bc, BITS_PER_CHAR);
    exit(-1);
  }
  res.datatype_in = datatype_in;
  res.datatype_out = datatype_out;
  res.datatype_comp = datatype_comp;
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  /* TPP creation */
  memset( &unary_shape, 0, sizeof(libxsmm_meltw_unary_shape));
  memset( &binary_shape, 0, sizeof(libxsmm_meltw_binary_shape));
  /* Eltwise TPP: zero-initialization of a [bc] accumulator */
  unary_shape = libxsmm_create_meltw_unary_shape(res.bc, 1, res.bc, ldo, res.datatype_comp, res.datatype_comp, res.datatype_comp);
  unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE;
  res.all_zero_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_XOR, unary_shape, unary_flags);
  if ( res.all_zero_kernel == NULL) {
    fprintf( stderr, "JIT for TPP bwd all_zero_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* Eltwise TPP: [bc] vector copy */
  unary_shape = libxsmm_create_meltw_unary_shape(res.bc, 1, ldo, ldo, res.datatype_comp, res.datatype_comp, res.datatype_comp);
  unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE;
  res.helper_copy_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_IDENTITY, unary_shape, unary_flags);
  if ( res.helper_copy_kernel == NULL) {
    fprintf( stderr, "JIT for TPP bwd helper_copy_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* Eltwise TPP: [bc] + [bc] accumulator add */
  binary_shape = libxsmm_create_meltw_binary_shape(res.bc, 1, ldo, ldo, ldo, res.datatype_comp, res.datatype_comp, res.datatype_comp);
  binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE;
  res.helper_add_kernel = libxsmm_dispatch_meltw_binary_v2(LIBXSMM_MELTW_TYPE_BINARY_ADD, binary_shape, binary_flags);
  if ( res.helper_add_kernel == NULL) {
    fprintf( stderr, "JIT for TPP bwd helper_add_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* Inverse ReLU: zeroes dout where the fwd ReLU clamped (mask variants use the stored bitmask) */
  if (res.fuse_type == MY_BN_FUSE_RELU || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU ||
      res.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
    unary_shape = libxsmm_create_meltw_unary_shape(res.bc, res.H*res.W / res.num_HW_blocks, ldo, ldo, res.datatype_in, res.datatype_out, res.datatype_comp);
    unary_flags = ( (res.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) ? LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT : LIBXSMM_MELTW_FLAG_UNARY_NONE);
    res.inv_relu_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_RELU_INV, unary_shape, unary_flags);
    if ( res.inv_relu_kernel == NULL ) {
      fprintf( stderr, "JIT for TPP bwd inv_relu_kernel failed. Bailing...!\n");
      exit(-1);
    }
  }
  /* Copy of dout into the residual-branch gradient for the eltwise variants */
  if (res.fuse_type == MY_BN_FUSE_ELTWISE || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU || res.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
    unary_shape = libxsmm_create_meltw_unary_shape(res.bc, res.H*res.W / res.num_HW_blocks, ldo, ldo, res.datatype_in, res.datatype_out, res.datatype_comp);
    unary_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE;
    res.ewise_copy_kernel = libxsmm_dispatch_meltw_unary_v2(LIBXSMM_MELTW_TYPE_UNARY_IDENTITY, unary_shape, unary_flags);
    if ( res.ewise_copy_kernel == NULL) {
      fprintf( stderr, "JIT for TPP bwd ewise_copy_kernel failed. Bailing...!\n");
      exit(-1);
    }
  }
  /* TPP equations for dgamma, dbeta and din (ops pushed outermost-first,
   * args in tree order — do not reorder these calls) */
  ld = bc;
  tmp_ld2 = 1;
  /* dgamma function */
  my_eqn11 = libxsmm_matrix_eqn_create();       /* dgamma = ((inp *a + b) * dout) + dgamma */
  binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE;
  op_metadata[0].eqn_idx = my_eqn11;
  op_metadata[0].op_arg_pos = -1;
  libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_BINARY_ADD, res.datatype_comp, binary_flags);
  unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS;
  op_metadata[1].eqn_idx = my_eqn11;
  op_metadata[1].op_arg_pos = -1;
  libxsmm_matrix_eqn_push_back_unary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, res.datatype_comp, unary_flags);
  binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE;
  op_metadata[2].eqn_idx = my_eqn11;
  op_metadata[2].op_arg_pos = -1;
  libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[2], LIBXSMM_MELTW_TYPE_BINARY_MUL, res.datatype_comp, binary_flags);
  ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT;
  op_metadata[3].eqn_idx = my_eqn11;
  op_metadata[3].op_arg_pos = -1;
  libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[3], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, res.datatype_comp, ternary_flags);
  arg_metadata[0].eqn_idx = my_eqn11;
  arg_metadata[0].in_arg_pos = 0;
  arg_shape[0].m = res.bc;                      /* inp [HW, bc] */
  arg_shape[0].n = res.H*res.W /res.num_HW_blocks;
  arg_shape[0].ld = ld;
  arg_shape[0].type = res.datatype_in;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr);
  arg_metadata[1].eqn_idx = my_eqn11;
  arg_metadata[1].in_arg_pos = 1;
  arg_shape[1].m = res.bc;                      /* a [bc] */
  arg_shape[1].n = 1;
  arg_shape[1].ld = tmp_ld2;
  arg_shape[1].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr);
  arg_metadata[2].eqn_idx = my_eqn11;
  arg_metadata[2].in_arg_pos = 2;
  arg_shape[2].m = res.bc;                      /* b [bc] */
  arg_shape[2].n = 1;
  arg_shape[2].ld = tmp_ld2;
  arg_shape[2].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[2], arg_shape[2], arg_singular_attr);
  arg_metadata[3].eqn_idx = my_eqn11;
  arg_metadata[3].in_arg_pos = 3;
  arg_shape[3].m = res.bc;                      /* dout [HW, bc] */
  arg_shape[3].n = res.H*res.W/res.num_HW_blocks;
  arg_shape[3].ld = ld;
  arg_shape[3].type = res.datatype_out;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[3], arg_shape[3], arg_singular_attr);
  arg_metadata[4].eqn_idx = my_eqn11;
  arg_metadata[4].in_arg_pos = 4;
  arg_shape[4].m = res.bc;                      /* dgamma [bc] */
  arg_shape[4].n = 1;
  arg_shape[4].ld = tmp_ld2;
  arg_shape[4].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[4], arg_shape[4], arg_singular_attr);
  eqn_out_arg_shape.m = res.bc;                 /* dgamma [bc] */
  eqn_out_arg_shape.n = 1;
  eqn_out_arg_shape.ld = tmp_ld2;
  eqn_out_arg_shape.type = res.datatype_comp;
  /* libxsmm_matrix_eqn_tree_print( my_eqn11 ); */
  /* libxsmm_matrix_eqn_rpn_print ( my_eqn11 ); */
  res.dgamma_func = libxsmm_dispatch_matrix_eqn_v2( my_eqn11, eqn_out_arg_shape );
  if ( res.dgamma_func == NULL) {
    fprintf( stderr, "JIT for TPP bwd dgamma_func (eqn11) failed. Bailing...!\n");
    exit(-1);
  }
  /* dbeta function */
  my_eqn12 = libxsmm_matrix_eqn_create();       /* dbeta [bc] = dout [HW, bc] + dbeta [bc] */
  binary_flags = LIBXSMM_MELTW_FLAG_BINARY_NONE;
  op_metadata[0].eqn_idx = my_eqn12;
  op_metadata[0].op_arg_pos = -1;
  libxsmm_matrix_eqn_push_back_binary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_BINARY_ADD, res.datatype_comp, binary_flags); /* dbeta_tmp [HW, bc] */
  unary_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS;
  op_metadata[1].eqn_idx = my_eqn12;
  op_metadata[1].op_arg_pos = -1;
  libxsmm_matrix_eqn_push_back_unary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, res.datatype_comp, unary_flags); /* [HW, bc] -> [bc] */
  arg_metadata[0].eqn_idx = my_eqn12;
  arg_metadata[0].in_arg_pos = 3;
  arg_shape[0].m = res.bc;                      /* dout [HW, bc] */
  arg_shape[0].n = res.H*res.W /res.num_HW_blocks;
  arg_shape[0].ld = ld;
  arg_shape[0].type = res.datatype_out;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr);
  arg_metadata[1].eqn_idx = my_eqn12;
  arg_metadata[1].in_arg_pos = 5;
  arg_shape[1].m = res.bc;                      /* dbeta [bc] */
  arg_shape[1].n = 1;
  arg_shape[1].ld = tmp_ld2;
  arg_shape[1].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr);
  eqn_out_arg_shape.m = res.bc;                 /* dbeta [bc] */
  eqn_out_arg_shape.n = 1;
  eqn_out_arg_shape.ld = tmp_ld2;
  eqn_out_arg_shape.type = res.datatype_comp;
  /* libxsmm_matrix_eqn_tree_print( my_eqn12 ); */
  /* libxsmm_matrix_eqn_rpn_print ( my_eqn12 ); */
  res.dbeta_func = libxsmm_dispatch_matrix_eqn_v2( my_eqn12, eqn_out_arg_shape );
  if ( res.dbeta_func == NULL) {
    fprintf( stderr, "JIT for TPP bwd dbeta_func (eqn12) failed. Bailing...!\n");
    exit(-1);
  }
  /* din = gamma_ptr[v] * brstd_ptr[v] * recp_nhw * (nhw*del_output_ptr[v] - (del_beta_ptr[v] + (input_ptr[v] - bmean_ptr[v]) * del_gamma_ptr[v] * brstd_ptr[v])) */
  /* din = gamma_ptr[v] * brstd_ptr[v] *del_output_ptr[v] - gamma_ptr[v] * brstd_ptr[v] * recp_nhw * (del_beta_ptr[v] + (input_ptr[v] - bmean_ptr[v]) * del_gamma_ptr[v] * brstd_ptr[v])) */
  /* din = gamma_ptr[v] * brstd_ptr[v] *del_output_ptr[v] - gamma_ptr[v] * brstd_ptr[v] * recp_nhw * del_beta_ptr[v] + gamma_ptr[v] * brstd_ptr[v] * recp_nhw * (input_ptr[v] - bmean_ptr[v]) * del_gamma_ptr[v] * brstd_ptr[v]) */
  /* din = a * del_output_ptr[v] + b * input_ptr[v] + c */
  /* a = gamma_ptr[bc] * brstd_ptr[bc] */
  /* b = gamma_ptr[bc] * del_gamma_ptr[v] * brstd_ptr[bc] * brstd_ptr[bc] * recp_nhw */
  /* c = -gamma_ptr[bc] * brstd_ptr[bc] * recp_nhw * del_beta_ptr[bc] + gamma_ptr[bc] * brstd_ptr[bc] * recp_nhw * bmean_ptr[bc] * del_gamma_ptr[bc] * brstd_ptr[bc]) */
  /* din long equation */
  my_eqn16 = libxsmm_matrix_eqn_create();       /* din = a * dout + (b * inp + c) */
  ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_0 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT;
  op_metadata[0].eqn_idx = my_eqn16;
  op_metadata[0].op_arg_pos = -1;
  libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[0], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, res.datatype_comp, ternary_flags);
  arg_metadata[0].eqn_idx = my_eqn16;
  arg_metadata[0].in_arg_pos = 1;
  arg_shape[0].m = res.bc;                      /* a [bc] */
  arg_shape[0].n = 1;
  arg_shape[0].ld = tmp_ld2;
  arg_shape[0].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[0], arg_shape[0], arg_singular_attr);
  arg_metadata[1].eqn_idx = my_eqn16;
  arg_metadata[1].in_arg_pos = 3;
  arg_shape[1].m = res.bc;                      /* dout [HW, bc] */
  arg_shape[1].n = res.H*res.W /res.num_HW_blocks;
  arg_shape[1].ld = ld;
  arg_shape[1].type = res.datatype_out;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[1], arg_shape[1], arg_singular_attr);
  ternary_flags = LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT;
  op_metadata[1].eqn_idx = my_eqn16;
  op_metadata[1].op_arg_pos = -1;
  libxsmm_matrix_eqn_push_back_ternary_op_v2(op_metadata[1], LIBXSMM_MELTW_TYPE_TERNARY_MULADD, res.datatype_comp, ternary_flags);
  arg_metadata[2].eqn_idx = my_eqn16;
  arg_metadata[2].in_arg_pos = 0;
  arg_shape[2].m = res.bc;                      /* inp [HW, bc] */
  arg_shape[2].n = res.H*res.W /res.num_HW_blocks;
  arg_shape[2].ld = ld;
  arg_shape[2].type = res.datatype_in;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[2], arg_shape[2], arg_singular_attr);
  arg_metadata[3].eqn_idx = my_eqn16;
  arg_metadata[3].in_arg_pos = 2;
  arg_shape[3].m = res.bc;                      /* b [bc] */
  arg_shape[3].n = 1;
  arg_shape[3].ld = tmp_ld2;
  arg_shape[3].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[3], arg_shape[3], arg_singular_attr);
  arg_metadata[4].eqn_idx = my_eqn16;
  arg_metadata[4].in_arg_pos = 7;
  arg_shape[4].m = res.bc;                      /* c [bc] */
  arg_shape[4].n = 1;
  arg_shape[4].ld = tmp_ld2;
  arg_shape[4].type = res.datatype_comp;
  libxsmm_matrix_eqn_push_back_arg_v2(arg_metadata[4], arg_shape[4], arg_singular_attr);
  eqn_out_arg_shape.m = res.bc;                 /* din [HW, bc] */
  eqn_out_arg_shape.n = res.H*res.W/res.num_HW_blocks;
  eqn_out_arg_shape.ld = ld;
  eqn_out_arg_shape.type = res.datatype_out;
  /* libxsmm_matrix_eqn_tree_print( my_eqn16 ); */
  /* libxsmm_matrix_eqn_rpn_print ( my_eqn16 ); */
  res.din_func = libxsmm_dispatch_matrix_eqn_v2( my_eqn16, eqn_out_arg_shape );
  if ( res.din_func == NULL) {
    fprintf( stderr, "JIT for TPP bwd din_func (eqn16) failed. Bailing...!\n");
    exit(-1);
  }
  /* init scratch: [per-(cp,n) dbeta | per-(cp,n) dgamma], 64-byte aligned */
  dbeta_N_offset = LIBXSMM_UP2((size_t)res.CP * (size_t)res.N * (size_t)res.bc, 64);
  res.scratch_size = sizeof(float) * ( dbeta_N_offset /* dbeta_N*/ + LIBXSMM_UP2((size_t)res.CP * (size_t)res.N * (size_t)res.bc, 64) /*dgamma_N */ );
  return res;
}
/* Releases resources owned by a forward batchnorm config. Currently only the
 * thread barrier is freed; the JITted kernels have no destroy API yet. */
void destroy_my_bn_fwd(my_bn_fwd_config* cfg) {
libxsmm_barrier_destroy(cfg->barrier);
/* when/if libxsmm_matrix_eqn_destroy gets added, destructors for equations should go here */
}
/* Releases resources owned by a backward batchnorm config. Currently only the
 * thread barrier is freed; the JITted kernels have no destroy API yet. */
void destroy_my_bn_bwd(my_bn_bwd_config* cfg) {
libxsmm_barrier_destroy(cfg->barrier);
/* when/if libxsmm_matrix_eqn_destroy gets added, destructors for equations should go here */
}
/* Thread-parallel FP32 forward batchnorm.
 *
 * Layouts: pinp/pinp_add/pout are [N, CP, HW, bc]; pgamma/pbeta/mean/var are
 * [CP, bc]; prelumask is [N, CP, HW, bc/8] (packed bits, mask fuse variants).
 * scratch must be at least cfg.scratch_size bytes and laid out exactly as
 * computed in setup_my_bn_fwd. Each of cfg.threads threads must call this
 * with its own my_tid; the barriers below synchronize them.
 *
 * With norm_type == MY_BN_FULL_NORM, mean/var are computed here (outputs);
 * with MY_BN_SCALE_ONLY they are read-only inputs and only the normalization
 * is applied. */
void my_bn_fwd_exec_f32( my_bn_fwd_config cfg, const float *pinp, const float *pinp_add, const float *pgamma, const float *pbeta, float *mean, float *var, float *pout,
unsigned char *prelumask, float eps, int start_tid, int my_tid, void *scratch, my_bn_norm_type norm_type ) {
const libxsmm_blasint N = cfg.N;
const libxsmm_blasint CP = cfg.CP;
const libxsmm_blasint HW = cfg.H * cfg.W;
const libxsmm_blasint bc = cfg.bc;
const libxsmm_blasint num_HW_blocks = cfg.num_HW_blocks;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks that could be run in parallel for 1d blocking */
/* Question: each thread should take a number of full (of length CP chunks) or can we really do a partial split here? */
const libxsmm_blasint work_dN = CP * N;
/* compute chunk size */
const libxsmm_blasint chunksize_dN = (work_dN % cfg.threads == 0) ?
(work_dN / cfg.threads) : ((work_dN / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin_dN = (ltid * chunksize_dN < work_dN) ? (ltid * chunksize_dN) : work_dN;
const libxsmm_blasint thr_end_dN = ((ltid + 1) * chunksize_dN < work_dN) ? ((ltid + 1) * chunksize_dN) : work_dN;
/* number of tasks that could be run in parallel for 1d blocking */
/* Question: each thread should take a number of full (of length CP chunks) or can we really do a partial split here? */
const libxsmm_blasint work_C = CP;
/* compute chunk size */
const libxsmm_blasint chunksize_C = (work_C % cfg.threads == 0) ?
(work_C / cfg.threads) : ((work_C / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin_C = (ltid * chunksize_C < work_C) ? (ltid * chunksize_C) : work_C;
const libxsmm_blasint thr_end_C = ((ltid + 1) * chunksize_C < work_C) ? ((ltid + 1) * chunksize_C) : work_C;
/* lazy barrier init */
libxsmm_barrier_init(cfg.barrier, ltid);
LIBXSMM_VLA_DECL(4, const float, inp, pinp, CP, HW, bc); /* [N, CP, HW, bc] */
LIBXSMM_VLA_DECL(4, float, out, pout, CP, HW, bc); /* [N, CP, HW, bc] */
LIBXSMM_VLA_DECL(2, const float, gamma, pgamma, bc); /* [CP, bc] */
LIBXSMM_VLA_DECL(2, const float, beta, pbeta, bc); /* [CP, bc] */
/* NOTE: these VLA views deliberately shadow the mean/var parameters */
LIBXSMM_VLA_DECL(2, float, mean, mean, bc); /* [CP, bc] */
LIBXSMM_VLA_DECL(2, float, var, var, bc); /* [CP, bc] */
LIBXSMM_VLA_DECL(4, const float, inp_add, pinp_add, CP, HW, bc); /* [N, CP, HW, bc] */
/* NOTE(review): alpha appears unused in this function — presumably kept
 * for parity with a leaky-ReLU variant; confirm before removing. */
float alpha = 0.0f;
LIBXSMM_VLA_DECL(4, unsigned char, relumask, prelumask, CP, HW, bc/BITS_PER_CHAR); /* [N, CP, HW, bc/BITS_PER_CHAR] */
const float scale = 1.0f /((float)N * HW); /* 1/(N*H*W), for E[X] and E[X^2] */
/* scratch views; offsets must match the layout from setup_my_bn_fwd */
LIBXSMM_VLA_DECL(3, float, sum_X_X2, ((float*)scratch), CP, bc); /* [2, CP, bc] */
LIBXSMM_ASSUME_ALIGNED(sum_X_X2_, 64);
const libxsmm_blasint sum_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + CP * 2 * bc), 64) - ((uintptr_t)(scratch))) / sizeof(float);
LIBXSMM_VLA_DECL(3, float, sum_N, ((float*)scratch) + sum_N_offset, N, bc); /* [CP, N, bc] */
LIBXSMM_ASSUME_ALIGNED(sum_N_, 64);
const libxsmm_blasint sumsq_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + sum_N_offset + CP * N * bc), 64) - ((uintptr_t)(scratch))) / sizeof(float);
LIBXSMM_VLA_DECL(3, float, sumsq_N, ((float*)scratch) + sumsq_N_offset, N, bc); /* [CP, N, bc] */
LIBXSMM_ASSUME_ALIGNED(sumsq_N_, 64);
libxsmm_meltw_unary_param all_zero_param;
libxsmm_meltw_binary_param add_param;
libxsmm_meltw_unary_param reduce_HW_param;
/* NOTE(review): all_relu_param is zero-initialized but never passed to a
 * kernel here — confirm whether it is dead or needed by a fused variant. */
libxsmm_meltw_unary_param all_relu_param;
libxsmm_matrix_arg arg_array[6];
libxsmm_matrix_eqn_param eqn_param;
memset( &all_zero_param, 0, sizeof(all_zero_param));
memset( &add_param, 0, sizeof(add_param));
memset( &reduce_HW_param, 0, sizeof(reduce_HW_param));
memset( &all_relu_param, 0, sizeof(all_relu_param));
memset( &eqn_param, 0, sizeof(eqn_param));
/* per-channel normalization coefficients: y = s*x + b before gamma/beta */
LIBXSMM_ALIGNED(float s[bc], 64);
LIBXSMM_ALIGNED(float b[bc], 64);
int n, cp;
int cpxnt;
if (norm_type == MY_BN_FULL_NORM) {
/* Pass 1: each thread accumulates per-(cp, n) sums of X and X^2 over HW */
for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) {
n = cpxnt%N;
cp = cpxnt/N;
int hwb;
float *sum_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, sum_N, cp, n, 0, N, bc);
float *sumsq_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, sumsq_N, cp, n, 0, N, bc);
all_zero_param.out.primary = sum_ncp_ptr;
cfg.all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = sumsq_ncp_ptr;
cfg.all_zero_kernel(&all_zero_param);
/* #pragma omp simd */
/* for (int cb = 0; cb < bc; cb++) { */
/* sum_ncp_ptr[cb] = 0.0f; */
/* sumsq_ncp_ptr[cb] = 0.0f; */
/* } */
LIBXSMM_ALIGNED(float lcl_sum_X_X2[2*bc], 64);
reduce_HW_param.out.primary = lcl_sum_X_X2; /* [2*bc] */
for(hwb=0; hwb < num_HW_blocks; hwb++){
reduce_HW_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
cfg.reduce_HW_kernel(&reduce_HW_param); /* [HW, bc] -----> [2 * bc] */
add_param.in0.primary = sum_ncp_ptr;
add_param.in1.primary = lcl_sum_X_X2;
add_param.out.primary = sum_ncp_ptr;
cfg.helper_add_kernel(&add_param);
add_param.in0.primary = sumsq_ncp_ptr;
add_param.in1.primary = &lcl_sum_X_X2[bc];
add_param.out.primary = sumsq_ncp_ptr;
cfg.helper_add_kernel(&add_param);
/* #pragma omp simd */
/* for (int cb = 0; cb < bc; cb++) { */
/* sum_ncp_ptr[cb] += lcl_sum_X_X2[cb]; */
/* sumsq_ncp_ptr[cb] += lcl_sum_X_X2[bc + cb]; */
/* } */
}
}
/* all per-(cp, n) partial sums must be written before the N-reduction */
libxsmm_barrier_wait(cfg.barrier, ltid);
/* Pass 2: reduce over N per channel block, then derive mean and variance */
for ( cp = thr_begin_C; cp < thr_end_C; ++cp ) {
all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, 0, CP, bc);
cfg.all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, 0, CP, bc);
cfg.all_zero_kernel(&all_zero_param);
/* #pragma omp simd */
/* for (int cb = 0; cb < bc; cb++) { */
/* sum_X_X2[cp*bc + cb] = 0.0f; */
/* sum_X_X2[CP*bc + (cp*bc + cb)] = 0.0f; */
/* } */
int cb, ni;
for(ni = 0; ni < N; ni++){
add_param.in0.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, 0, CP, bc);
add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, sum_N, cp, ni, 0, N, bc);
add_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, 0, CP, bc);
cfg.helper_add_kernel(&add_param);
add_param.in0.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, 0, CP, bc);
add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, sumsq_N, cp, ni, 0, N, bc);
add_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, 0, CP, bc);
cfg.helper_add_kernel(&add_param);
/* #pragma omp simd */
/* for (int cb = 0; cb < bc; cb++) { */
/* sum_X_X2[cp*bc + cb] += sum_N[cp*N*bc + n*bc + cb]; */
/* sum_X_X2[CP*bc + (cp*bc + cb)] += sumsq_N[cp*N*bc + n*bc + cb]; */
/* } */
}
/* mean = E[X], var = E[X^2] - E[X]^2 */
for(cb = 0; cb < bc; cb++){
mean[cp*bc + cb] = (LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, cb, CP, bc)) * scale; /* E[X] */
var[cp*bc + cb] = ((LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, cb, CP, bc)) * scale) - (mean[cp*bc + cb]*mean[cp*bc + cb]);
}
}
/* mean/var must be complete before any thread starts normalizing */
libxsmm_barrier_wait(cfg.barrier, ltid);
} /* mean and var computation are for the full norm only */
/* Pass 3: apply y = relu( ((s*x + b)*gamma + beta) + inp_add ) per block */
for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) {
n = cpxnt%N;
cp = cpxnt/N;
int hwb, cb;
for(cb = 0; cb < bc; cb++){
float lvar = LIBXSMM_VLA_ACCESS(2, var, cp, cb, bc);
float lmean = LIBXSMM_VLA_ACCESS(2, mean, cp, cb, bc);
s[cb] = 1.0f / ((float)sqrt(lvar + eps)); /* s = 1/sqrt(var(X) + eps) [bc] */
b[cb] = -1 * lmean * s[cb]; /* b = -E[X]/sqrt(var(X) + eps) [bc] */
/* s[cb] = 1.0f / ((float)sqrt(var[cp*bc + cb] + eps)); */ /* s = 1/sqrt(var(X) + eps) [bc] */
/* b[cb] = -1 * mean[cp*bc + cb] * s[cb]; */ /* b = -E[X]/sqrt(var(X) + eps) [bc] */
}
/* argument slots must match the in_arg_pos values used in setup_my_bn_fwd */
arg_array[1].primary = s; /* [bc] */
arg_array[2].primary = b; /* [bc] */
arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, bc); /* [bc] */
arg_array[4].primary = (void*)&LIBXSMM_VLA_ACCESS(2, beta, cp, 0, bc); /* [bc] */
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW, bc] */
eqn_param.inputs = arg_array;
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW,bc] */
if (cfg.fuse_type == MY_BN_FUSE_ELTWISE || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
arg_array[5].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp_add, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW, bc] */
}
if (cfg.fuse_type == MY_BN_FUSE_RELU || cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
/* secondary output carries the packed ReLU bitmask for the mask variants */
eqn_param.output.secondary = ((cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) ?
(void*)&LIBXSMM_VLA_ACCESS(4, relumask, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, (bc/BITS_PER_CHAR)) : NULL );
}
cfg.func10(&eqn_param); /* Normalization equation + relu + eltwise -> y = relu( ((s*x + b)*gamma + beta) + inp_add) */
}
}
/* all threads finish before callers may reuse scratch or read outputs */
libxsmm_barrier_wait(cfg.barrier, ltid);
}
/* Batch-normalization forward pass for bf16 activations with fp32 statistics
 * and parameters.
 *
 * Layouts: activations (pinp, pinp_add, pout) are [N, CP, HW, bc]; the
 * per-channel tensors (pgamma, pbeta, mean, var) are [CP, bc]; the ReLU
 * bitmask (prelumask) is [N, CP, HW, bc/BITS_PER_CHAR].
 *
 * Threading: each of cfg.threads threads owns a contiguous chunk of the N*CP
 * tensor blocks; the per-channel statistics finalization is chunked over CP.
 * Barriers separate the scratch-based reduction stages.
 *
 * When norm_type == MY_BN_FULL_NORM, mean and var are computed here from the
 * input via a two-stage sum/sum-of-squares reduction through the scratch
 * buffer (training); otherwise the caller-provided mean/var are used as-is
 * (e.g. inference with running statistics).
 *
 * Note: the unused locals `alpha` and `all_relu_param` of the original code
 * were removed; they are only needed by the backward (inverse-ReLU) path.
 */
void my_bn_fwd_exec_bf16( my_bn_fwd_config cfg, const libxsmm_bfloat16 *pinp, const libxsmm_bfloat16 *pinp_add,
                          const float *pgamma, const float *pbeta, float *mean, float *var, libxsmm_bfloat16 *pout, unsigned char *prelumask,
                          float eps, int start_tid, int my_tid, void *scratch, my_bn_norm_type norm_type ) {
  const libxsmm_blasint N = cfg.N;
  const libxsmm_blasint CP = cfg.CP;
  const libxsmm_blasint HW = cfg.H * cfg.W;
  const libxsmm_blasint bc = cfg.bc;
  const libxsmm_blasint num_HW_blocks = cfg.num_HW_blocks;

  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;

  /* number of tasks that could be run in parallel for 1d blocking over N*CP */
  const libxsmm_blasint work_dN = CP * N;
  /* compute chunk size */
  const libxsmm_blasint chunksize_dN = (work_dN % cfg.threads == 0) ?
    (work_dN / cfg.threads) : ((work_dN / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin_dN = (ltid * chunksize_dN < work_dN) ? (ltid * chunksize_dN) : work_dN;
  const libxsmm_blasint thr_end_dN = ((ltid + 1) * chunksize_dN < work_dN) ? ((ltid + 1) * chunksize_dN) : work_dN;

  /* number of tasks that could be run in parallel for 1d blocking over CP */
  const libxsmm_blasint work_C = CP;
  /* compute chunk size */
  const libxsmm_blasint chunksize_C = (work_C % cfg.threads == 0) ?
    (work_C / cfg.threads) : ((work_C / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin_C = (ltid * chunksize_C < work_C) ? (ltid * chunksize_C) : work_C;
  const libxsmm_blasint thr_end_C = ((ltid + 1) * chunksize_C < work_C) ? ((ltid + 1) * chunksize_C) : work_C;

  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, ltid);

  LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, inp, pinp, CP, HW, bc);         /* [N, CP, HW, bc] */
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, out, pout, CP, HW, bc);               /* [N, CP, HW, bc] */
  LIBXSMM_VLA_DECL(2, const float, gamma, pgamma, bc);                        /* [CP, bc] */
  LIBXSMM_VLA_DECL(2, const float, beta, pbeta, bc);                          /* [CP, bc] */
  LIBXSMM_VLA_DECL(2, float, mean, mean, bc);                                 /* [CP, bc] */
  LIBXSMM_VLA_DECL(2, float, var, var, bc);                                   /* [CP, bc] */
  LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, inp_add, pinp_add, CP, HW, bc); /* [N, CP, HW, bc] */
  LIBXSMM_VLA_DECL(4, unsigned char, relumask, prelumask, CP, HW, bc/BITS_PER_CHAR); /* [N, CP, HW, bc/BITS_PER_CHAR] */

  libxsmm_meltw_binary_param add_param;
  libxsmm_matrix_arg arg_array[6];
  libxsmm_matrix_eqn_param eqn_param;
  memset( &add_param, 0, sizeof(add_param));
  memset( &eqn_param, 0, sizeof(eqn_param));

  /* per-channel coefficients of the normalization x*s + b (filled below) */
  LIBXSMM_ALIGNED(float s[bc], 64);
  LIBXSMM_ALIGNED(float b[bc], 64);
  int n, cp;
  int cpxnt;

  if (norm_type == MY_BN_FULL_NORM) {
    const float scale = 1.0f /((float)N * HW);
    /* scratch layout: sum_X_X2 = [2, CP, bc] (final per-channel sums),
     * then 64B-aligned sum_N / sumsq_N = [CP, N, bc] (per-(cp,n) partials) */
    LIBXSMM_VLA_DECL(3, float, sum_X_X2, ((float*)scratch), CP, bc);  /* [2, CP, bc] */
    LIBXSMM_ASSUME_ALIGNED(sum_X_X2_, 64);
    const libxsmm_blasint sum_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + CP * 2 * bc), 64) - ((uintptr_t)(scratch))) / sizeof(float);
    LIBXSMM_VLA_DECL(3, float, sum_N, ((float*)scratch) + sum_N_offset, N, bc);  /* [CP, N, bc] */
    LIBXSMM_ASSUME_ALIGNED(sum_N_, 64);
    const libxsmm_blasint sumsq_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + sum_N_offset + CP * N * bc), 64) - ((uintptr_t)(scratch))) / sizeof(float);
    LIBXSMM_VLA_DECL(3, float, sumsq_N, ((float*)scratch) + sumsq_N_offset, N, bc);  /* [CP, N, bc] */
    LIBXSMM_ASSUME_ALIGNED(sumsq_N_, 64);

    libxsmm_meltw_unary_param all_zero_param;
    libxsmm_meltw_unary_param reduce_HW_param;
    memset( &all_zero_param, 0, sizeof(all_zero_param));
    memset( &reduce_HW_param, 0, sizeof(reduce_HW_param));

    /* stage 1: each (cp, n) block accumulates sum(X) and sum(X^2) over HW */
    for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) {
      n = cpxnt%N;
      cp = cpxnt/N;
      int hwb;
      float *sum_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, sum_N, cp, n, 0, N, bc);
      float *sumsq_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, sumsq_N, cp, n, 0, N, bc);
      /* zero the per-(cp,n) accumulators */
      all_zero_param.out.primary = sum_ncp_ptr;
      cfg.all_zero_kernel(&all_zero_param);
      all_zero_param.out.primary = sumsq_ncp_ptr;
      cfg.all_zero_kernel(&all_zero_param);
      LIBXSMM_ALIGNED(float lcl_sum_X_X2[2*bc], 64);
      reduce_HW_param.out.primary = lcl_sum_X_X2;  /* [2*bc]: sum(X) then sum(X^2) */
      for (hwb = 0; hwb < num_HW_blocks; hwb++) {
        reduce_HW_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
        cfg.reduce_HW_kernel(&reduce_HW_param); /* [HW/num_HW_blocks, bc] -----> [2 * bc] */
        /* sum_ncp += lcl_sum_X_X2[0:bc] */
        add_param.in0.primary = sum_ncp_ptr;
        add_param.in1.primary = lcl_sum_X_X2;
        add_param.out.primary = sum_ncp_ptr;
        cfg.helper_add_kernel(&add_param);
        /* sumsq_ncp += lcl_sum_X_X2[bc:2*bc] */
        add_param.in0.primary = sumsq_ncp_ptr;
        add_param.in1.primary = &lcl_sum_X_X2[bc];
        add_param.out.primary = sumsq_ncp_ptr;
        cfg.helper_add_kernel(&add_param);
      }
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);

    /* stage 2: reduce the per-n partials over N and finalize mean/var */
    for ( cp = thr_begin_C; cp < thr_end_C; ++cp ) {
      all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, 0, CP, bc);
      cfg.all_zero_kernel(&all_zero_param);
      all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, 0, CP, bc);
      cfg.all_zero_kernel(&all_zero_param);
      int cb, ni;
      for (ni = 0; ni < N; ni++) {
        add_param.in0.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, 0, CP, bc);
        add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, sum_N, cp, ni, 0, N, bc);
        add_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, 0, CP, bc);
        cfg.helper_add_kernel(&add_param);
        add_param.in0.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, 0, CP, bc);
        add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, sumsq_N, cp, ni, 0, N, bc);
        add_param.out.primary = &LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, 0, CP, bc);
        cfg.helper_add_kernel(&add_param);
      }
      for (cb = 0; cb < bc; cb++) {
        mean[cp*bc + cb] = (LIBXSMM_VLA_ACCESS(3, sum_X_X2, 0, cp, cb, CP, bc)) * scale; /* E[X] */
        /* Var(X) = E[X^2] - E[X]^2 */
        var[cp*bc + cb] = ((LIBXSMM_VLA_ACCESS(3, sum_X_X2, 1, cp, cb, CP, bc)) * scale) - (mean[cp*bc + cb]*mean[cp*bc + cb]);
      }
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);
  } /* mean and var computation are for the full norm only */

  /* apply pass: y = relu( ((x*s + b)*gamma + beta) (+ inp_add) ) */
  for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) {
    n = cpxnt%N;
    cp = cpxnt/N;
    int hwb, cb;
    for (cb = 0; cb < bc; cb++) {
      float lvar = LIBXSMM_VLA_ACCESS(2, var, cp, cb, bc);
      float lmean = LIBXSMM_VLA_ACCESS(2, mean, cp, cb, bc);
      s[cb] = 1.0f / ((float)sqrt(lvar + eps)); /* s = 1/sqrt(var(X) + eps)   [bc] */
      b[cb] = -1 * lmean * s[cb];               /* b = -E[X]/sqrt(var(X)+eps) [bc] */
    }
    arg_array[1].primary = s; /* [bc] */
    arg_array[2].primary = b; /* [bc] */
    arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, bc); /* [bc] */
    arg_array[4].primary = (void*)&LIBXSMM_VLA_ACCESS(2, beta, cp, 0, bc);  /* [bc] */
    for (hwb = 0; hwb < num_HW_blocks; hwb++) {
      arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW, bc] */
      eqn_param.inputs = arg_array;
      eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW, bc] */
      if (cfg.fuse_type == MY_BN_FUSE_ELTWISE || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
        arg_array[5].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp_add, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW, bc] */
      }
      if (cfg.fuse_type == MY_BN_FUSE_RELU || cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
        /* the ReLU bitmask is only materialized for the *_WITH_MASK variants */
        eqn_param.output.secondary = ((cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) ?
          (void*)&LIBXSMM_VLA_ACCESS(4, relumask, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, (bc/BITS_PER_CHAR)) : NULL );
      }
      cfg.func10(&eqn_param); /* Normalization equation + relu + eltwise -> y = relu( ((s*x + b)*gamma + beta) + inp_add) */
    }
  }
  libxsmm_barrier_wait(cfg.barrier, ltid);
}
/* Batch-normalization backward pass (fp32).
 *
 * Computes the input gradient din = dout*a + inp*b + c with per-channel
 * coefficients; for norm_type == MY_BN_FULL_NORM it also computes
 * dgamma/dbeta via a two-stage reduction through the caller-provided scratch
 * (per-(cp,n) partials in dgamma_N/dbeta_N, then a reduction over N).
 * Fused variants first invert the forward ReLU on dout in place (using the
 * stored bitmask where available) and/or copy dout into din_add for the
 * eltwise shortcut.
 *
 * Layouts match the forward pass: activations/gradients [N, CP, HW, bc],
 * per-channel tensors [CP, bc], relumask [N, CP, HW, bc/BITS_PER_CHAR].
 *
 * Fixes vs. the previous revision: the mojibake "©_param" (a corrupted
 * "&copy_param") is repaired, and the relumask access now uses
 * bc/BITS_PER_CHAR consistently with its declaration instead of bc/8.
 */
void my_bn_bwd_exec_f32( my_bn_bwd_config cfg, float *pdout, const float *pinp, const float *mean, const float *var, const float *pgamma, const unsigned char *prelumask,
                         float *pdin, float *pdin_add, float *pdgamma, float *pdbeta, float eps,
                         int start_tid, int my_tid, void *scratch, my_bn_norm_type norm_type) {
  const libxsmm_blasint N = cfg.N;
  const libxsmm_blasint CP = cfg.CP;
  const libxsmm_blasint HW = cfg.H * cfg.W;
  const libxsmm_blasint bc = cfg.bc;
  const libxsmm_blasint num_HW_blocks = cfg.num_HW_blocks;

  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;

  /* number of tasks that could be run in parallel for 1d blocking over N*CP */
  const libxsmm_blasint work_dN = N * CP;
  /* compute chunk size */
  const libxsmm_blasint chunksize_dN = (work_dN % cfg.threads == 0) ?
    (work_dN / cfg.threads) : ((work_dN / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin_dN = ( ltid * chunksize_dN < work_dN) ? ( ltid * chunksize_dN) : work_dN;
  const libxsmm_blasint thr_end_dN = ((ltid + 1) * chunksize_dN < work_dN) ? ((ltid + 1) * chunksize_dN) : work_dN;

  /* number of tasks that could be run in parallel for 1d blocking over CP */
  const libxsmm_blasint work_C = CP;
  /* compute chunk size */
  const libxsmm_blasint chunksize_C = (work_C % cfg.threads == 0) ?
    (work_C / cfg.threads) : ((work_C / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin_C = ( ltid * chunksize_C < work_C) ? ( ltid * chunksize_C) : work_C;
  const libxsmm_blasint thr_end_C = ((ltid + 1) * chunksize_C < work_C) ? ((ltid + 1) * chunksize_C) : work_C;

  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, ltid);

  const float scale = 1.0f / ((float)N*HW); /* averaging factor 1/(N*HW) */
  LIBXSMM_VLA_DECL(4, float, din, pdin, CP, HW, bc);           /* [N, CP, HW, bc] */
  LIBXSMM_VLA_DECL(4, const float, inp, pinp, CP, HW, bc);     /* [N, CP, HW, bc] */
  LIBXSMM_VLA_DECL(4, float, dout, pdout, CP, HW, bc);         /* [N, CP, HW, bc] */
  LIBXSMM_VLA_DECL(2, const float, gamma, pgamma, bc);         /* [CP, bc] */
  LIBXSMM_VLA_DECL(2, const float, mean, mean, bc);            /* [CP, bc] */
  LIBXSMM_VLA_DECL(2, const float, var, var, bc);              /* [CP, bc] */
  LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, bc);             /* [CP, bc] */
  LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, bc);               /* [CP, bc] */
  LIBXSMM_VLA_DECL(4, float, din_add, pdin_add, CP, HW, bc);   /* [N, CP, HW, bc] */
  float alpha = 0.0f; /* negative-side slope for the inverse ReLU (0 => plain ReLU backward) */
  LIBXSMM_VLA_DECL(4, const unsigned char, relumask, prelumask, CP, HW, bc/BITS_PER_CHAR); /* [N, CP, HW, bc/BITS_PER_CHAR] */

  /* scratch layout: dgamma_N = [CP, N, bc], then 64B-aligned dbeta_N = [CP, N, bc] */
  const libxsmm_blasint dbeta_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + CP * N * bc), 64) - ((uintptr_t)(scratch))) / sizeof(float);
  LIBXSMM_VLA_DECL(3, float, dgamma_N, ((float*)scratch), N, bc);                 /* [CP, N, bc] */
  LIBXSMM_ASSUME_ALIGNED(dgamma_N_, 64);
  LIBXSMM_VLA_DECL(3, float, dbeta_N, ((float*)scratch) + dbeta_N_offset, N, bc); /* [CP, N, bc] */
  LIBXSMM_ASSUME_ALIGNED(dbeta_N_, 64);

  libxsmm_meltw_unary_param all_zero_param;
  libxsmm_meltw_binary_param add_param;
  libxsmm_meltw_unary_param copy_param;
  libxsmm_meltw_unary_param all_relu_param;
  libxsmm_meltw_unary_param ewise_copy_param;
  memset( &all_zero_param, 0, sizeof(all_zero_param));
  memset( &add_param, 0, sizeof(add_param));
  memset( &copy_param, 0, sizeof(copy_param));
  memset( &all_relu_param, 0, sizeof(all_relu_param));
  memset( &ewise_copy_param, 0, sizeof(ewise_copy_param));

  libxsmm_matrix_arg arg_array[8];
  libxsmm_matrix_eqn_param eqn_param;
  memset( &eqn_param, 0, sizeof(eqn_param));

  /* per-channel coefficients; could also get moved into the scratch but left
   * on the private stack as these are small */
  LIBXSMM_ALIGNED(float a[bc], 64);
  LIBXSMM_ALIGNED(float b[bc], 64);
  LIBXSMM_ALIGNED(float c[bc], 64);
  int n, cp;
  int cpxnt;

  if (norm_type == MY_BN_FULL_NORM) {
    /* stage 1: per-(cp,n) partial dgamma/dbeta */
    for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) {
      n = cpxnt%N;
      cp = cpxnt/N;
      int hwb, cb;
      LIBXSMM_ALIGNED(float lcl_dgamma_ptr[bc], 64);
      LIBXSMM_ALIGNED(float lcl_dbeta_ptr[bc], 64);
      float *dgamma_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, dgamma_N, cp, n, 0, N, bc);
      float *dbeta_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, dbeta_N, cp, n, 0, N, bc);
      /* zero the local accumulators */
      all_zero_param.out.primary = lcl_dgamma_ptr;
      cfg.all_zero_kernel(&all_zero_param);
      all_zero_param.out.primary = lcl_dbeta_ptr;
      cfg.all_zero_kernel(&all_zero_param);
      for (cb = 0; cb < bc; cb++) {
        float lvar = LIBXSMM_VLA_ACCESS(2, var, cp, cb, bc);
        float lmean = LIBXSMM_VLA_ACCESS(2, mean, cp, cb, bc);
        a[cb] = 1.0f / ((float)sqrt(lvar + eps)); /* 1/stddev */
        b[cb] = -a[cb] * lmean;                   /* -mean/stddev */
      }
      arg_array[1].primary = a;
      arg_array[2].primary = b;
      arg_array[4].primary = lcl_dgamma_ptr;
      arg_array[5].primary = lcl_dbeta_ptr;
      arg_array[6].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, bc);
      for (hwb = 0; hwb < num_HW_blocks; hwb++) {
        if (cfg.fuse_type == MY_BN_FUSE_ELTWISE ||
            cfg.fuse_type == MY_BN_FUSE_RELU || cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
          if (cfg.fuse_type == MY_BN_FUSE_RELU || cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
            /* invert the forward ReLU on dout in place (bitmask only for *_WITH_MASK) */
            all_relu_param.op.primary = (void*)(&alpha);
            all_relu_param.in.primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW,bc] */
            all_relu_param.in.secondary = ((cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) ?
              (void*)&LIBXSMM_VLA_ACCESS(4, relumask, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc/BITS_PER_CHAR)
              : NULL );
            all_relu_param.out.primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW,bc] */
            cfg.inv_relu_kernel(&all_relu_param);
          } /* ReLU/mask */
          if (cfg.fuse_type == MY_BN_FUSE_ELTWISE || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
            /* eltwise shortcut: din_add = dout */
            ewise_copy_param.in.primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
            ewise_copy_param.out.primary = &LIBXSMM_VLA_ACCESS(4, din_add, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
            cfg.ewise_copy_kernel(&ewise_copy_param);
          } /* Eltwise */
        }
        arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
        arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
        eqn_param.inputs = arg_array;
        eqn_param.output.primary = lcl_dgamma_ptr;
        cfg.dgamma_func(&eqn_param); /* dgamma += (a * inp + b) * dout */
        eqn_param.output.primary = lcl_dbeta_ptr;
        cfg.dbeta_func(&eqn_param);  /* dbeta += dout */
      }
      /* publish the local partials into the scratch for the stage-2 reduction */
      copy_param.in.primary = lcl_dgamma_ptr;
      copy_param.out.primary = dgamma_ncp_ptr;
      cfg.helper_copy_kernel(&copy_param);
      copy_param.in.primary = lcl_dbeta_ptr;
      copy_param.out.primary = dbeta_ncp_ptr;
      cfg.helper_copy_kernel(&copy_param);
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);

    /* stage 2: reduce the per-n partials over N into dgamma/dbeta */
    for ( cp = thr_begin_C; cp < thr_end_C; ++cp ) {
      all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, bc);
      cfg.all_zero_kernel(&all_zero_param);
      all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, bc);
      cfg.all_zero_kernel(&all_zero_param);
      int ni;
      for (ni = 0; ni < N; ni++) {
        add_param.in0.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, bc);
        add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, dgamma_N, cp, ni, 0, N, bc);
        add_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, bc);
        cfg.helper_add_kernel(&add_param);
        add_param.in0.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, bc);
        add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, dbeta_N, cp, ni, 0, N, bc);
        add_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, bc);
        cfg.helper_add_kernel(&add_param);
      }
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);
  } /* this is only computed in case of full backward (norm_type ~ 0) */

  /* apply pass: din = dout * a + inp * b + c, per-channel coefficients */
  for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) {
    n = cpxnt%N;
    cp = cpxnt/N;
    int hwb, cb;
    for (cb = 0; cb < bc; cb++) {
      float lgamma = LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, bc);
      float ldgamma = LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, bc);
      float lvar = LIBXSMM_VLA_ACCESS(2, var, cp, cb, bc);
      float lmean = LIBXSMM_VLA_ACCESS(2, mean, cp, cb, bc);
      float ldbeta = LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, bc);
      a[cb] = lgamma / ((float)sqrt(lvar + eps)); /* a = gamma * brstd */
      b[cb] = -a[cb] * scale * ldgamma / ((float)sqrt(lvar + eps)); /* b = gamma * brstd^2 * dgamma * recp_nhw */
      c[cb] = -b[cb] * lmean - a[cb] * scale * ldbeta ; /* c = -b*mean - gamma * brstd * recp_nhw * dbeta */
    }
    arg_array[1].primary = a;
    arg_array[2].primary = b;
    arg_array[6].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, bc);
    arg_array[7].primary = c;
    for (hwb = 0; hwb < num_HW_blocks; hwb++) {
      arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
      arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
      eqn_param.inputs = arg_array;
      eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
      cfg.din_func(&eqn_param); /* din = dout * a + b * inp + c */
    }
  }
  libxsmm_barrier_wait(cfg.barrier, ltid);
}
/* Batch-normalization backward pass for bf16 activations/gradients with fp32
 * statistics and parameter gradients.
 *
 * Mirrors my_bn_bwd_exec_f32: computes din = dout*a + inp*b + c with
 * per-channel coefficients; for norm_type == MY_BN_FULL_NORM it also computes
 * dgamma/dbeta via a two-stage reduction through the caller-provided scratch.
 * Fused variants first invert the forward ReLU on dout in place and/or copy
 * dout into din_add for the eltwise shortcut.
 *
 * Layouts: activations/gradients [N, CP, HW, bc], per-channel tensors
 * [CP, bc], relumask [N, CP, HW, bc/BITS_PER_CHAR].
 *
 * Fixes vs. the previous revision: the mojibake "©_param" (a corrupted
 * "&copy_param") is repaired, the relumask access uses bc/BITS_PER_CHAR
 * consistently with its declaration instead of bc/8, and a stray double
 * semicolon is removed.
 */
void my_bn_bwd_exec_bf16( my_bn_bwd_config cfg, libxsmm_bfloat16 *pdout, const libxsmm_bfloat16 *pinp, const float *mean, const float *var, const float *pgamma, const unsigned char *prelumask,
                          libxsmm_bfloat16 *pdin, libxsmm_bfloat16 *pdin_add, float *pdgamma, float *pdbeta, float eps,
                          int start_tid, int my_tid, void *scratch, my_bn_norm_type norm_type) {
  const libxsmm_blasint N = cfg.N;
  const libxsmm_blasint CP = cfg.CP;
  const libxsmm_blasint HW = cfg.H * cfg.W;
  const libxsmm_blasint bc = cfg.bc;
  const libxsmm_blasint num_HW_blocks = cfg.num_HW_blocks;

  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;

  /* number of tasks that could be run in parallel for 1d blocking over N*CP */
  const libxsmm_blasint work_dN = N * CP;
  /* compute chunk size */
  const libxsmm_blasint chunksize_dN = (work_dN % cfg.threads == 0) ?
    (work_dN / cfg.threads) : ((work_dN / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin_dN = ( ltid * chunksize_dN < work_dN) ? ( ltid * chunksize_dN) : work_dN;
  const libxsmm_blasint thr_end_dN = ((ltid + 1) * chunksize_dN < work_dN) ? ((ltid + 1) * chunksize_dN) : work_dN;

  /* number of tasks that could be run in parallel for 1d blocking over CP */
  const libxsmm_blasint work_C = CP;
  /* compute chunk size */
  const libxsmm_blasint chunksize_C = (work_C % cfg.threads == 0) ?
    (work_C / cfg.threads) : ((work_C / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin_C = ( ltid * chunksize_C < work_C) ? ( ltid * chunksize_C) : work_C;
  const libxsmm_blasint thr_end_C = ((ltid + 1) * chunksize_C < work_C) ? ((ltid + 1) * chunksize_C) : work_C;

  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, ltid);

  const float scale = 1.0f / ((float)N*HW); /* averaging factor 1/(N*HW) */
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, din, pdin, CP, HW, bc);             /* [N, CP, HW, bc] */
  LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, inp, pinp, CP, HW, bc);       /* [N, CP, HW, bc] */
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dout, pdout, CP, HW, bc);           /* [N, CP, HW, bc] */
  LIBXSMM_VLA_DECL(2, const float, gamma, pgamma, bc);                      /* [CP, bc] */
  LIBXSMM_VLA_DECL(2, const float, mean, mean, bc);                         /* [CP, bc] */
  LIBXSMM_VLA_DECL(2, const float, var, var, bc);                           /* [CP, bc] */
  LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, bc);                          /* [CP, bc] */
  LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, bc);                            /* [CP, bc] */
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, din_add, pdin_add, CP, HW, bc);     /* [N, CP, HW, bc] */
  float alpha = 0.0f; /* negative-side slope for the inverse ReLU (0 => plain ReLU backward) */
  LIBXSMM_VLA_DECL(4, const unsigned char, relumask, prelumask, CP, HW, bc/BITS_PER_CHAR); /* [N, CP, HW, bc/BITS_PER_CHAR] */

  libxsmm_matrix_arg arg_array[8];
  libxsmm_matrix_eqn_param eqn_param;
  memset( &eqn_param, 0, sizeof(eqn_param));

  /* per-channel coefficients; could also get moved into the scratch but left
   * on the private stack as these are small */
  LIBXSMM_ALIGNED(float a[bc], 64);
  LIBXSMM_ALIGNED(float b[bc], 64);
  LIBXSMM_ALIGNED(float c[bc], 64);
  int n, cp;
  int cpxnt;

  if (norm_type == MY_BN_FULL_NORM) {
    /* scratch layout: dgamma_N = [CP, N, bc], then 64B-aligned dbeta_N = [CP, N, bc] */
    const libxsmm_blasint dbeta_N_offset = (LIBXSMM_UP2((uintptr_t)(((float*)scratch) + CP * N * bc), 64) - ((uintptr_t)(scratch))) / sizeof(float);
    LIBXSMM_VLA_DECL(3, float, dgamma_N, ((float*)scratch), N, bc);                 /* [CP, N, bc] */
    LIBXSMM_VLA_DECL(3, float, dbeta_N, ((float*)scratch) + dbeta_N_offset, N, bc); /* [CP, N, bc] */
    LIBXSMM_ASSUME_ALIGNED(dgamma_N_, 64);
    LIBXSMM_ASSUME_ALIGNED(dbeta_N_, 64);

    libxsmm_meltw_unary_param all_zero_param;
    libxsmm_meltw_binary_param add_param;
    libxsmm_meltw_unary_param copy_param;
    libxsmm_meltw_unary_param all_relu_param;
    libxsmm_meltw_unary_param ewise_copy_param;
    memset( &all_zero_param, 0, sizeof(all_zero_param));
    memset( &add_param, 0, sizeof(add_param));
    memset( &copy_param, 0, sizeof(copy_param));
    memset( &all_relu_param, 0, sizeof(all_relu_param));
    memset( &ewise_copy_param, 0, sizeof(ewise_copy_param));

    /* stage 1: per-(cp,n) partial dgamma/dbeta */
    for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) {
      n = cpxnt%N;
      cp = cpxnt/N;
      int hwb, cb;
      LIBXSMM_ALIGNED(float lcl_dgamma_ptr[bc], 64);
      LIBXSMM_ALIGNED(float lcl_dbeta_ptr[bc], 64);
      float *dgamma_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, dgamma_N, cp, n, 0, N, bc);
      float *dbeta_ncp_ptr = &LIBXSMM_VLA_ACCESS(3, dbeta_N, cp, n, 0, N, bc);
      /* zero the local accumulators */
      all_zero_param.out.primary = lcl_dgamma_ptr;
      cfg.all_zero_kernel(&all_zero_param);
      all_zero_param.out.primary = lcl_dbeta_ptr;
      cfg.all_zero_kernel(&all_zero_param);
      for (cb = 0; cb < bc; cb++) {
        float lvar = LIBXSMM_VLA_ACCESS(2, var, cp, cb, bc);
        float lmean = LIBXSMM_VLA_ACCESS(2, mean, cp, cb, bc);
        a[cb] = 1.0f / ((float)sqrt(lvar + eps)); /* 1/stddev */
        b[cb] = -a[cb] * lmean;                   /* -mean/stddev */
      }
      arg_array[1].primary = a;
      arg_array[2].primary = b;
      arg_array[4].primary = lcl_dgamma_ptr;
      arg_array[5].primary = lcl_dbeta_ptr;
      arg_array[6].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, bc);
      for (hwb = 0; hwb < num_HW_blocks; hwb++) {
        if (cfg.fuse_type == MY_BN_FUSE_ELTWISE ||
            cfg.fuse_type == MY_BN_FUSE_RELU || cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
          if (cfg.fuse_type == MY_BN_FUSE_RELU || cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
            /* invert the forward ReLU on dout in place (bitmask only for *_WITH_MASK) */
            all_relu_param.op.primary = (void*)(&alpha);
            all_relu_param.in.primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW,bc] */
            all_relu_param.in.secondary = ((cfg.fuse_type == MY_BN_FUSE_RELU_WITH_MASK || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) ?
              (void*)&LIBXSMM_VLA_ACCESS(4, relumask, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc/BITS_PER_CHAR)
              : NULL );
            all_relu_param.out.primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc); /* [HW,bc] */
            cfg.inv_relu_kernel(&all_relu_param);
          } /* ReLU/mask */
          if (cfg.fuse_type == MY_BN_FUSE_ELTWISE || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU || cfg.fuse_type == MY_BN_FUSE_ELTWISE_RELU_WITH_MASK) {
            /* eltwise shortcut: din_add = dout */
            ewise_copy_param.in.primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
            ewise_copy_param.out.primary = &LIBXSMM_VLA_ACCESS(4, din_add, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
            cfg.ewise_copy_kernel(&ewise_copy_param);
          } /* Eltwise */
        }
        arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
        arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
        eqn_param.inputs = arg_array;
        eqn_param.output.primary = lcl_dgamma_ptr;
        cfg.dgamma_func(&eqn_param); /* dgamma += (a * inp + b) * dout */
        eqn_param.output.primary = lcl_dbeta_ptr;
        cfg.dbeta_func(&eqn_param);  /* dbeta += dout */
      }
      /* publish the local partials into the scratch for the stage-2 reduction */
      copy_param.in.primary = lcl_dgamma_ptr;
      copy_param.out.primary = dgamma_ncp_ptr;
      cfg.helper_copy_kernel(&copy_param);
      copy_param.in.primary = lcl_dbeta_ptr;
      copy_param.out.primary = dbeta_ncp_ptr;
      cfg.helper_copy_kernel(&copy_param);
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);

    /* stage 2: reduce the per-n partials over N into dgamma/dbeta */
    for ( cp = thr_begin_C; cp < thr_end_C; ++cp ) {
      all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, bc);
      cfg.all_zero_kernel(&all_zero_param);
      all_zero_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, bc);
      cfg.all_zero_kernel(&all_zero_param);
      int ni;
      for (ni = 0; ni < N; ni++) {
        add_param.in0.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, bc);
        add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, dgamma_N, cp, ni, 0, N, bc);
        add_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, bc);
        cfg.helper_add_kernel(&add_param);
        add_param.in0.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, bc);
        add_param.in1.primary = &LIBXSMM_VLA_ACCESS(3, dbeta_N, cp, ni, 0, N, bc);
        add_param.out.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, bc);
        cfg.helper_add_kernel(&add_param);
      }
    }
    libxsmm_barrier_wait(cfg.barrier, ltid);
  } /* this is only computed in case of full backward (norm_type ~ 0) */

  /* apply pass: din = dout * a + inp * b + c, per-channel coefficients */
  for ( cpxnt = thr_begin_dN; cpxnt < thr_end_dN; ++cpxnt ) {
    n = cpxnt%N;
    cp = cpxnt/N;
    int hwb, cb;
    for (cb = 0; cb < bc; cb++) {
      float lgamma = LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, bc);
      float ldgamma = LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, bc);
      float lvar = LIBXSMM_VLA_ACCESS(2, var, cp, cb, bc);
      float lmean = LIBXSMM_VLA_ACCESS(2, mean, cp, cb, bc);
      float ldbeta = LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, bc);
      a[cb] = lgamma / ((float)sqrt(lvar + eps)); /* a = gamma * brstd */
      b[cb] = -a[cb] * scale * ldgamma / ((float)sqrt(lvar + eps)); /* b = gamma * brstd^2 * dgamma * recp_nhw */
      c[cb] = -b[cb] * lmean - a[cb] * scale * ldbeta ; /* c = -b*mean - gamma * brstd * recp_nhw * dbeta */
    }
    arg_array[1].primary = a;
    arg_array[2].primary = b;
    arg_array[6].primary = (void*)&LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, bc);
    arg_array[7].primary = c;
    for (hwb = 0; hwb < num_HW_blocks; hwb++) {
      arg_array[0].primary = (void*)&LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
      arg_array[3].primary = (void*)&LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
      eqn_param.output.primary = (void*)&LIBXSMM_VLA_ACCESS(4, din, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, bc);
      eqn_param.inputs = arg_array;
      cfg.din_func(&eqn_param); /* din = dout * a + b * inp + c */
    }
  }
  libxsmm_barrier_wait(cfg.barrier, ltid);
}
|
dataset.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <string>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
* \brief This class stores meta (non-feature) data for the training data,
* e.g. labels, weights, initial scores, and query-level information.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, per-record weights, optional.
* 3. Query Boundaries, necessary for lambdarank.
*    The documents of the i-th query are in [ query_boundaries[i], query_boundaries[i+1] ).
* 4. Query Weights, calculated automatically from weights and query_boundaries (if both exist);
*    the weight of the i-th query is sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1] - 1]) / (query_boundaries[i+1] - query_boundaries[i]).
* 5. Initial score, optional. If present, the model will boost from this score, otherwise it will start from 0.
*/
class Metadata {
public:
/*!
* \brief Null constructor
*/
Metadata();
/*!
* \brief Initialization will load query-level information, since it is needed for sampling data
* \param data_filename Filename of the data
* \param initscore_file Filename of the initial score
*/
void Init(const char* data_filename, const char* initscore_file);
/*!
* \brief init as subset
* \param metadata Filename of data
* \param used_indices
* \param num_used_indices
*/
void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
/*!
* \brief Initial with binary memory
* \param memory Pointer to memory
*/
void LoadFromMemory(const void* memory);
/*! \brief Destructor */
~Metadata();
/*!
* \brief Initial work, will allocate space for label, weight(if exists) and query(if exists)
* \param num_data Number of training data
* \param weight_idx Index of weight column, < 0 means doesn't exists
* \param query_idx Index of query id column, < 0 means doesn't exists
*/
void Init(data_size_t num_data, int weight_idx, int query_idx);
/*!
* \brief Partition label by used indices
* \param used_indices Indices of local used
*/
void PartitionLabel(const std::vector<data_size_t>& used_indices);
/*!
* \brief Partition meta data according to local used indices if need
* \param num_all_data Number of total training data, including other machines' data on parallel learning
* \param used_data_indices Indices of local used training data
*/
void CheckOrPartition(data_size_t num_all_data,
const std::vector<data_size_t>& used_data_indices);
void SetLabel(const label_t* label, data_size_t len);
void SetWeights(const label_t* weights, data_size_t len);
void SetQuery(const data_size_t* query, data_size_t len);
/*!
* \brief Set initial scores
* \param init_score Initial scores, this class will manage memory for init_score.
*/
void SetInitScore(const double* init_score, data_size_t len);
/*!
* \brief Save binary data to file
* \param file File want to write
*/
void SaveBinaryToFile(const VirtualFileWriter* writer) const;
/*!
* \brief Get sizes in byte of this object
*/
size_t SizesInByte() const;
/*!
* \brief Get pointer of label
* \return Pointer of label
*/
inline const label_t* label() const { return label_.data(); }
/*!
* \brief Set label for one record
* \param idx Index of this record
* \param value Label value of this record
*/
inline void SetLabelAt(data_size_t idx, label_t value) {
label_[idx] = value;
}
/*!
* \brief Set Weight for one record
* \param idx Index of this record
* \param value Weight value of this record
*/
inline void SetWeightAt(data_size_t idx, label_t value) {
weights_[idx] = value;
}
/*!
* \brief Set Query Id for one record
* \param idx Index of this record
* \param value Query Id value of this record
*/
inline void SetQueryAt(data_size_t idx, data_size_t value) {
queries_[idx] = static_cast<data_size_t>(value);
}
/*!
* \brief Get weights, if not exists, will return nullptr
* \return Pointer of weights
*/
inline const label_t* weights() const {
if (!weights_.empty()) {
return weights_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get data boundaries on queries, if not exists, will return nullptr
* we assume data will order by query,
* the interval of [query_boundaris[i], query_boundaris[i+1])
* is the data indices for query i.
* \return Pointer of data boundaries on queries
*/
inline const data_size_t* query_boundaries() const {
if (!query_boundaries_.empty()) {
return query_boundaries_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get Number of queries
* \return Number of queries
*/
inline data_size_t num_queries() const { return num_queries_; }
/*!
* \brief Get weights for queries, if not exists, will return nullptr
* \return Pointer of weights for queries
*/
inline const label_t* query_weights() const {
if (!query_weights_.empty()) {
return query_weights_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get initial scores, if not exists, will return nullptr
* \return Pointer of initial scores
*/
inline const double* init_score() const {
if (!init_score_.empty()) {
return init_score_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get size of initial scores
*/
inline int64_t num_init_score() const { return num_init_score_; }
/*! \brief Disable copy */
Metadata& operator=(const Metadata&) = delete;
/*! \brief Disable copy */
Metadata(const Metadata&) = delete;
private:
/*! \brief Load initial scores from file */
void LoadInitialScore(const char* initscore_file);
/*! \brief Load wights from file */
void LoadWeights();
/*! \brief Load query boundaries from file */
void LoadQueryBoundaries();
/*! \brief Load query wights */
void LoadQueryWeights();
/*! \brief Filename of current data */
std::string data_filename_;
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Number of weights, used to check correct weight file */
data_size_t num_weights_;
/*! \brief Label data */
std::vector<label_t> label_;
/*! \brief Weights data */
std::vector<label_t> weights_;
/*! \brief Query boundaries */
std::vector<data_size_t> query_boundaries_;
/*! \brief Query weights */
std::vector<label_t> query_weights_;
/*! \brief Number of querys */
data_size_t num_queries_;
/*! \brief Number of Initial score, used to check correct weight file */
int64_t num_init_score_;
/*! \brief Initial score */
std::vector<double> init_score_;
/*! \brief Queries data */
std::vector<data_size_t> queries_;
/*! \brief mutex for threading safe call */
std::mutex mutex_;
bool weight_load_from_file_;
bool query_load_from_file_;
bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
public:
/*! \brief virtual destructor */
virtual ~Parser() {}
/*!
* \brief Parse one line with label
* \param str One line record, string format, should end with '\0'
* \param out_features Output columns, store in (column_idx, values)
* \param out_label Label will store to this if exists
*/
virtual void ParseOneLine(const char* str,
std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
/*! \brief Get the number of features in the data */
virtual int NumFeatures() const = 0;
/*!
* \brief Create an object of parser, will auto choose the format depending on the file
* \param filename One Filename of data
* \param header True if the data file contains a header line
* \param num_features Pass num_features of this data file if you know, <=0 means don't know
* \param label_idx index of label column
* \return Object of parser
*/
static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};
/*! \brief The main class of data set,
* which are used to training or validation
*/
class Dataset {
public:
friend DatasetLoader;
LIGHTGBM_EXPORT Dataset();
LIGHTGBM_EXPORT Dataset(data_size_t num_data);
void Construct(
std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
int num_total_features,
const std::vector<std::vector<double>>& forced_bins,
int** sample_non_zero_indices,
const int* num_per_col,
int num_sample_col,
size_t total_sample_cnt,
const Config& io_config);
/*! \brief Destructor */
LIGHTGBM_EXPORT ~Dataset();
/*! \brief Check that two datasets have the same feature layout and bin mappers */
LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
if (num_features_ != other.num_features_) {
return false;
}
if (num_total_features_ != other.num_total_features_) {
return false;
}
if (label_idx_ != other.label_idx_) {
return false;
}
for (int i = 0; i < num_features_; ++i) {
if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
return false;
}
}
return true;
}
/*! \brief Push one dense row; values for unused features are silently skipped */
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
if (is_finish_load_) { return; }
for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
int feature_idx = used_feature_map_[i];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
}
}
}
/*! \brief Push one sparse row given as (column_idx, value) pairs */
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
if (is_finish_load_) { return; }
for (auto& inner_data : feature_values) {
if (inner_data.first >= num_total_features_) { continue; }
int feature_idx = used_feature_map_[inner_data.first];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
}
}
}
/*! \brief Push a single value addressed directly by (group, sub_feature) */
inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
}
/*! \brief Map an inner (used) feature index back to the original column index */
inline int RealFeatureIndex(int fidx) const {
return real_feature_idx_[fidx];
}
/*! \brief Map an original column index to the inner feature index (< 0 if unused) */
inline int InnerFeatureIndex(int col_idx) const {
return used_feature_map_[col_idx];
}
inline int Feature2Group(int feature_idx) const {
return feature2group_[feature_idx];
}
// NOTE(review): "Feture" is a typo kept for API compatibility.
inline int Feture2SubFeature(int feature_idx) const {
return feature2subfeature_[feature_idx];
}
inline uint64_t GroupBinBoundary(int group_idx) const {
return group_bin_boundaries_[group_idx];
}
inline uint64_t NumTotalBin() const {
return group_bin_boundaries_.back();
}
/*! \brief Original column indices that are mapped to a used feature */
inline std::vector<int> ValidFeatureIndices() const {
std::vector<int> ret;
for (int i = 0; i < num_total_features_; ++i) {
if (used_feature_map_[i] >= 0) {
ret.push_back(i);
}
}
return ret;
}
void ReSize(data_size_t num_data);
void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
LIGHTGBM_EXPORT void FinishLoad();
LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
LIGHTGBM_EXPORT bool GetInt8Field(const char* field_name, data_size_t* out_len, const int8_t** out_ptr);
/*!
* \brief Save current dataset into binary file, will save to "filename.bin"
*/
LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename);
LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices, data_size_t num_data,
int leaf_idx,
std::vector<std::unique_ptr<OrderedBin>>* ordered_bins,
const score_t* gradients, const score_t* hessians,
score_t* ordered_gradients, score_t* ordered_hessians,
bool is_constant_hessian,
HistogramBinEntry* histogram_data) const;
void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
HistogramBinEntry* data) const;
/*! \brief Split data indices on a feature threshold, delegating to the feature's group */
inline data_size_t Split(int feature,
const uint32_t* threshold, int num_threshold, bool default_left,
data_size_t* data_indices, data_size_t num_data,
data_size_t* lte_indices, data_size_t* gt_indices) const {
const int group = feature2group_[feature];
const int sub_feature = feature2subfeature_[feature];
return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
}
inline int SubFeatureBinOffset(int i) const {
const int sub_feature = feature2subfeature_[i];
if (sub_feature == 0) {
return 1;
} else {
return 0;
}
}
inline int FeatureNumBin(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
}
/*! \brief Monotone constraint of a feature; 0 when no constraints are set */
inline int8_t FeatureMonotone(int i) const {
if (monotone_types_.empty()) {
return 0;
} else {
return monotone_types_[i];
}
}
// NOTE(review): "Penalte" is a typo kept for API compatibility.
inline double FeaturePenalte(int i) const {
if (feature_penalty_.empty()) {
return 1;
} else {
return feature_penalty_[i];
}
}
bool HasMonotone() const {
if (monotone_types_.empty()) {
return false;
} else {
for (size_t i = 0; i < monotone_types_.size(); ++i) {
if (monotone_types_[i] != 0) {
return true;
}
}
return false;
}
}
inline int FeatureGroupNumBin(int group) const {
return feature_groups_[group]->num_total_bin_;
}
inline const BinMapper* FeatureBinMapper(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature].get();
}
inline const Bin* FeatureBin(int i) const {
const int group = feature2group_[i];
return feature_groups_[group]->bin_data_.get();
}
inline const Bin* FeatureGroupBin(int group) const {
return feature_groups_[group]->bin_data_.get();
}
inline bool FeatureGroupIsSparse(int group) const {
return feature_groups_[group]->is_sparse_;
}
inline BinIterator* FeatureIterator(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->SubFeatureIterator(sub_feature);
}
inline BinIterator* FeatureGroupIterator(int group) const {
return feature_groups_[group]->FeatureGroupIterator();
}
inline double RealThreshold(int i, uint32_t threshold) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
}
// given a real threshold, find the closest threshold bin
inline uint32_t BinThreshold(int i, double threshold_double) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
}
/*! \brief Build one OrderedBin per feature group, in parallel */
inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
ordered_bins->resize(num_groups_);
OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
for (int i = 0; i < num_groups_; ++i) {
OMP_LOOP_EX_BEGIN();
ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
}
/*!
* \brief Get meta data pointer
* \return Pointer of meta data
*/
inline const Metadata& metadata() const { return metadata_; }
/*! \brief Get Number of used features */
inline int num_features() const { return num_features_; }
/*! \brief Get Number of feature groups */
inline int num_feature_groups() const { return num_groups_;}
/*! \brief Get Number of total features */
inline int num_total_features() const { return num_total_features_; }
/*! \brief Get the index of label column */
inline int label_idx() const { return label_idx_; }
/*! \brief Get names of current data set */
inline const std::vector<std::string>& feature_names() const { return feature_names_; }
inline void set_feature_names(const std::vector<std::string>& feature_names) {
if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
Log::Fatal("Size of feature_names error, should equal with total number of features");
}
feature_names_ = std::vector<std::string>(feature_names);
// replace ' ' in feature_names with '_'
bool spaceInFeatureName = false;
for (auto& feature_name : feature_names_) {
// check ascii
if (!Common::CheckASCII(feature_name)) {
Log::Fatal("Do not support non-ASCII characters in feature name.");
}
// check json
if (!Common::CheckAllowedJSON(feature_name)) {
Log::Fatal("Do not support special JSON characters in feature name.");
}
if (feature_name.find(' ') != std::string::npos) {
spaceInFeatureName = true;
std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
}
}
if (spaceInFeatureName) {
Log::Warning("Find whitespaces in feature_names, replace with underlines");
}
}
/*! \brief Bin description ("none" for unused columns) per original column */
inline std::vector<std::string> feature_infos() const {
std::vector<std::string> bufs;
for (int i = 0; i < num_total_features_; i++) {
int fidx = used_feature_map_[i];
if (fidx == -1) {
bufs.push_back("none");
} else {
const auto bin_mapper = FeatureBinMapper(fidx);
bufs.push_back(bin_mapper->bin_info());
}
}
return bufs;
}
void ResetConfig(const char* parameters);
/*! \brief Get Number of data */
inline data_size_t num_data() const { return num_data_; }
/*! \brief Disable copy */
Dataset& operator=(const Dataset&) = delete;
/*! \brief Disable copy */
Dataset(const Dataset&) = delete;
void addFeaturesFrom(Dataset* other);
private:
std::string data_filename_;
/*! \brief Store used features */
std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
/*! \brief Mapper from real feature index to used index*/
std::vector<int> used_feature_map_;
/*! \brief Number of used features*/
int num_features_;
/*! \brief Number of total features*/
int num_total_features_;
/*! \brief Number of total data*/
data_size_t num_data_;
/*! \brief Store some label level data*/
Metadata metadata_;
/*! \brief index of label column */
int label_idx_ = 0;
/*! \brief Threshold for treating a feature as a sparse feature */
double sparse_threshold_;
/*! \brief store feature names */
std::vector<std::string> feature_names_;
/*! \brief token used to recognize the binary file format */
static const char* binary_file_token;
int num_groups_;
std::vector<int> real_feature_idx_;
std::vector<int> feature2group_;
std::vector<int> feature2subfeature_;
std::vector<uint64_t> group_bin_boundaries_;
std::vector<int> group_feature_start_;
std::vector<int> group_feature_cnt_;
std::vector<int8_t> monotone_types_;
std::vector<double> feature_penalty_;
bool is_finish_load_;
int max_bin_;
std::vector<int32_t> max_bin_by_feature_;
std::vector<std::vector<double>> forced_bin_bounds_;
int bin_construct_sample_cnt_;
int min_data_in_bin_;
bool use_missing_;
bool zero_as_missing_;
};
} // namespace LightGBM
#endif // LightGBM_DATA_H_
|
DigraphTemplate.h | ///////////////////////////////////////////////////////////////////////////////
// SOFTWARE COPYRIGHT NOTICE AGREEMENT //
// This software and its documentation are copyright (2015) by the //
// Broad Institute. All rights are reserved. This software is supplied //
// without any warranty or guaranteed support whatsoever. The Broad //
// Institute is not responsible for its use, misuse, or functionality. //
///////////////////////////////////////////////////////////////////////////////
// This file contains some template functions from Digraph.h. They are here in
// a separate file so that these functions do not have to be inlined, thereby
// allowing for reduction of compilation time and executable size (in principle).
//
// See Digraph.h for notes about usage of this file.
// In particular, do not include this file to resolve link errors.
// The digraph-derived template classes are explicitly instantiated in a single
// module for each template parameter. This is typically in the .cc file
// associated with the .h file that defines the template parameter. So, for
// example, all the explicit instantiations of methods of digraphE<KmerPath>
// are declared in KmerPath.cc. To resolve link errors, find the right place to
// explicitly instantiate the missing method, and add it to the list of explicit
// instantiations at the end of that file.
#ifndef DIGRAPH_TEMPLATE_H
#define DIGRAPH_TEMPLATE_H
// MakeDepend: library OMP
// MakeDepend: cflags OMP_FLAGS
#include "CoreTools.h"
#include "Equiv.h"
#include "FeudalMimic.h"
#include "Set.h"
#include "VecUtilities.h"
#include "graph/Digraph.h"
#include <cstddef>
// EdgesBoundedBy: starting from v = to_right[e1] and w = to_left[e2], walk the
// graph in both directions, never traversing e1 or e2 themselves, and return
// e1, e2 plus every other edge reached along the way. 'verts' is used as a
// work queue; 'vertsx'/'edgesx' are sets for O(log n) membership tests.
template<class E> vec<int> digraphE<E>:: EdgesBoundedBy( const int e1, const int e2,
const vec<int>& to_left, const vec<int>& to_right ) const
{ int v = to_right[e1], w = to_left[e2];
vec<int> edges, verts;
set<int> edgesx, vertsx;
edges.push_back( e1, e2 );
edgesx.insert(e1), edgesx.insert(e2);
verts.push_back( v, w );
vertsx.insert(v), vertsx.insert(w);
for ( int i = 0; i < verts.isize( ); i++ )
{ int x = verts[i];
for ( int j = 0; j < To(x).isize( ); j++ )
{ int y = To(x)[j];
if ( Member( vertsx, y ) ) continue;
int e = EdgeObjectIndexByIndexTo( x, j );
if ( e == e1 || e == e2 ) continue;
verts.push_back(y);
vertsx.insert(y);
if ( Member( edgesx, e ) ) continue;
edges.push_back(e);
edgesx.insert(e); }
for ( int j = 0; j < From(x).isize( ); j++ )
{ int y = From(x)[j];
if ( Member( vertsx, y ) ) continue;
int e = EdgeObjectIndexByIndexFrom( x, j );
if ( e == e1 || e == e2 ) continue;
verts.push_back(y);
vertsx.insert(y);
if ( Member( edgesx, e ) ) continue;
edges.push_back(e);
edgesx.insert(e); } }
return edges; }
// Collect the indices of all edges that leave source vertices, i.e. vertices
// with no incoming edges. The result replaces the contents of v.
template<class E> void digraphE<E>::InitialEdges( vec<int>& v ) const
{
     v.clear( );
     for ( int vert = 0; vert < N( ); vert++ )
     {
          if ( !To(vert).empty( ) ) continue;
          for ( int k = 0; k < From(vert).isize( ); k++ )
               v.push_back( EdgeObjectIndexByIndexFrom( vert, k ) );
     }
}
// Collect the indices of all edges that enter sink vertices, i.e. vertices
// with no outgoing edges. The result replaces the contents of v.
template<class E> void digraphE<E>::TerminalEdges( vec<int>& v ) const
{
     v.clear( );
     for ( int vert = 0; vert < N( ); vert++ )
     {
          if ( !From(vert).empty( ) ) continue;
          for ( int k = 0; k < To(vert).isize( ); k++ )
               v.push_back( EdgeObjectIndexByIndexTo( vert, k ) );
     }
}
// COMPLETE_SUBGRAPH_EDGES: build the subgraph of g induced by the edge set
// 'ed' (which must be unique and sorted). The new vertices are the distinct
// endpoints of those edges, renumbered by their sorted position; edge i of the
// new graph is a copy of g's edge ed[i].
template<class E> digraphE<E>::digraphE(
const ConstructorType2 constructor_type, const digraphE& g, const vec<int>& ed,
const vec<int>& to_left, const vec<int>& to_right )
{ ForceAssertEq( (int) constructor_type, (int) COMPLETE_SUBGRAPH_EDGES );
ForceAssert( ed.UniqueOrdered( ) );
edges_.resize( ed.size( ) );
for ( int i = 0; i < ed.isize( ); i++ )
edges_[i] = g.EdgeObject( ed[i] );
vec<int> verts;
for ( int i = 0; i < ed.isize( ); i++ )
verts.push_back( to_left[ ed[i] ], to_right[ ed[i] ] );
UniqueSort(verts);
int N = verts.size( );
from_.resize(N), to_.resize(N);
from_edge_obj_.resize(N), to_edge_obj_.resize(N);
for ( int i = 0; i < ed.isize( ); i++ )
{ int e = ed[i];
int v = to_left[e], w = to_right[e];
// New vertex ids are the sorted positions of the old endpoints.
int iv = BinPosition( verts, v ), iw = BinPosition( verts, w );
from_[iv].push_back(iw);
from_edge_obj_[iv].push_back(i);
to_[iw].push_back(iv);
to_edge_obj_[iw].push_back(i); } }
// COMPLETE_SUBGRAPH: initialize this graph as the subgraph of g induced by the
// vertex list v. Vertex i of the new graph corresponds to g's vertex v[i];
// every edge of g joining two listed vertices is copied. Adjacency lists are
// sorted in sync with their edge ids at the end.
// Fix: the second pass declared an unused local (i2 = vindex[p2]) — removed.
template<class E> void digraphE<E>::Initialize(
const ConstructorType1 constructor_type, const digraphE& g, const vec<int>& v )
{ ForceAssertEq( (int) constructor_type, (int) COMPLETE_SUBGRAPH );
from_.resize( v.size( ) ), to_.resize( v.size( ) );
from_edge_obj_.resize( v.size( ) ), to_edge_obj_.resize( v.size( ) );
int edgecount = 0;
// Sort v, remembering original positions, so membership tests are binary searches.
vec<int> vsorted(v), vindex( v.size( ), vec<int>::IDENTITY );
SortSync( vsorted, vindex );
// Pass 1: record connectivity and count the edges to be copied.
for ( int i = 0; i < v.isize( ); i++ )
{ int x = v[i];
for ( int j = 0; j < g.From(x).isize( ); j++ )
{ int y = g.From(x)[j];
int p2 = BinPosition( vsorted, y );
if ( p2 < 0 ) continue;
int i2 = vindex[p2];
from_[i].push_back(i2);
to_[i2].push_back(i);
from_edge_obj_[i].push_back(edgecount);
to_edge_obj_[i2].push_back(edgecount);
++edgecount; } }
// Pass 2: copy the edge objects in the same traversal order as pass 1.
edges_.reserve(edgecount);
for ( int i = 0; i < v.isize( ); i++ )
{ int x = v[i];
for ( int j = 0; j < g.From(x).isize( ); j++ )
{ int y = g.From(x)[j];
if ( BinPosition( vsorted, y ) < 0 ) continue;
edges_.push_back( g.EdgeObjectByIndexFrom( x, j ) ); } }
for ( int i = 0; i < v.isize( ); i++ )
{ SortSync( from_[i], from_edge_obj_[i] );
SortSync( to_[i], to_edge_obj_[i] ); } }
// Construct the complete subgraph of g induced by the vertex list v; all the
// work is delegated to Initialize.
template<class E> digraphE<E>::digraphE(
     const ConstructorType1 constructor_type, const digraphE& g, const vec<int>& v )
{
     Initialize( constructor_type, g, v );
}
// Return, sorted and deduplicated, every edge incident upon any vertex that is
// connected (ignoring edge direction) to a vertex in v.
template<class E> vec<int> digraphE<E>::EdgesConnectedTo( const vec<int>& v ) const
{
     vec<int> verts = VerticesConnectedTo(v);
     vec<int> result;
     for ( int i = 0; i < verts.isize( ); i++ )
     {
          int x = verts[i];
          for ( int j = 0; j < From(x).isize( ); j++ )
               result.push_back( EdgeObjectIndexByIndexFrom( x, j ) );
          for ( int j = 0; j < To(x).isize( ); j++ )
               result.push_back( EdgeObjectIndexByIndexTo( x, j ) );
     }
     UniqueSort(result);
     return result;
}
// Return the subgraph induced by the vertex list v: vertex i of the result
// corresponds to this graph's vertex v[i], and every edge joining two listed
// vertices is copied. Adjacency lists of the result are sorted in sync with
// their edge ids.
// Fix: the second pass declared an unused local (i2 = vindex[p2]) — removed.
template<class E> digraphE<E> digraphE<E>::Subgraph( const vec<int>& v ) const
{ digraphE result;
result.from_.resize( v.size( ) );
result.to_.resize( v.size( ) );
result.from_edge_obj_.resize( v.size( ) );
result.to_edge_obj_.resize( v.size( ) );
int edgecount = 0;
// Sort v, remembering original positions, for binary-search membership tests.
vec<int> vsorted(v), vindex( v.size( ), vec<int>::IDENTITY );
SortSync( vsorted, vindex );
// Pass 1: record connectivity and count the edges to be copied.
for ( int i = 0; i < v.isize( ); i++ )
{ int x = v[i];
for ( int j = 0; j < From(x).isize( ); j++ )
{ int y = From(x)[j];
int p2 = BinPosition( vsorted, y );
if ( p2 < 0 ) continue;
int i2 = vindex[p2];
result.from_[i].push_back(i2);
result.to_[i2].push_back(i);
result.from_edge_obj_[i].push_back(edgecount);
result.to_edge_obj_[i2].push_back(edgecount);
++edgecount; } }
// Pass 2: copy the edge objects in the same traversal order as pass 1.
result.edges_.reserve(edgecount);
for ( int i = 0; i < v.isize( ); i++ )
{ int x = v[i];
for ( int j = 0; j < From(x).isize( ); j++ )
{ int y = From(x)[j];
if ( BinPosition( vsorted, y ) < 0 ) continue;
result.edges_.push_back( EdgeObjectByIndexFrom( x, j ) ); } }
for ( int i = 0; i < v.isize( ); i++ )
{ SortSync( result.from_[i], result.from_edge_obj_[i] );
SortSync( result.to_[i], result.to_edge_obj_[i] ); }
return result; }
// FROM_SUBS: build a disjoint union of subgraphs, one per entry of C. Each
// C[i] is a list of edge ids in g; edge objects are copied in that order.
// Pass 1 only counts vertices so the adjacency vectors can be sized; pass 2
// fills them in. Vertices are NOT shared between components, even if the same
// vertex of g appears in two entries of C. Note the inner 'nedges' shadows the
// outer edge total on purpose: it restarts the running edge offset per pass.
template<class E> digraphE<E>::digraphE( const ConstructorName cname,
const digraphE& g, const vec< vec<int> >& C )
{ ForceAssert( cname == FROM_SUBS );
int nedges = 0;
for ( int i = 0; i < C.isize( ); i++ )
nedges += C[i].size( );
edges_.reserve(nedges);
vec<int> to_left, to_right;
g.ToLeft(to_left), g.ToRight(to_right);
for ( int i = 0; i < C.isize( ); i++ )
{ for ( int j = 0; j < C[i].isize( ); j++ )
edges_.push_back( g.EdgeObject( C[i][j] ) ); }
for ( int pass = 1; pass <= 2; pass++ )
{ int nverts = 0, nedges = 0;
for ( int i = 0; i < C.isize( ); i++ )
{ vec<int> verts;
for ( int j = 0; j < C[i].isize( ); j++ )
verts.push_back( to_left[ C[i][j] ], to_right[ C[i][j] ] );
UniqueSort(verts);
if ( pass == 2 )
{ for ( int j = 0; j < C[i].isize( ); j++ )
{ int v = BinPosition( verts, to_left[ C[i][j] ] );
int w = BinPosition( verts, to_right[ C[i][j] ] );
from_[ nverts + v ].push_back( nverts + w );
to_[ nverts + w ].push_back( nverts + v );
from_edge_obj_[ nverts + v ].push_back(nedges + j);
to_edge_obj_[ nverts + w ].push_back(nedges + j); } }
nverts += verts.size( );
nedges += C[i].size( ); }
if ( pass == 1 )
{ from_.resize(nverts), to_.resize(nverts);
from_edge_obj_.resize(nverts), to_edge_obj_.resize(nverts); } }
for ( int v = 0; v < N( ); v++ )
{ SortSync( from_[v], from_edge_obj_[v] );
SortSync( to_[v], to_edge_obj_[v] ); } }
// Construct the nth connected component of g (components ordered as by
// OrbitRepsAlt) as a standalone graph.
template<class E> digraphE<E>::digraphE( const digraphE& g, int n )
{
     equiv_rel comp;
     g.ComponentRelation(comp);
     vec<int> reps;
     comp.OrbitRepsAlt(reps);
     ForceAssertLt( n, reps.isize( ) );
     vec<int> orbit;
     comp.Orbit( reps[n], orbit );
     *this = g.Subgraph(orbit);
}
// Size the graph for n vertices and n edge slots, with no connectivity yet.
template<class E> void digraphE<E>::Initialize( const int n )
{
     edges_.resize(n);
     from_.resize(n), to_.resize(n);
     from_edge_obj_.resize(n), to_edge_obj_.resize(n);
}
// Build a graph from bare edge objects, grouping edges by the equivalence
// relation e: all edges in orbit i become parallel edges running from a
// private vertex 2*i to its partner 2*i+1.
// Fix: removed the unused local 'ne' (dead store of edges.size()).
template<class F> void digraphE<F>::EdgeEquivConstructor(
const vec<F>& edges, const equiv_rel& e )
{ edges_ = edges;
vec<int> reps;
e.OrbitReps(reps);
// Two fresh vertices per orbit.
int nv = 2 * reps.isize( );
to_edge_obj_.resize(nv);
from_edge_obj_.resize(nv);
to_.resize(nv);
from_.resize(nv);
for ( int i = 0; i < reps.isize( ); i++ )
{ vec<int> o;
e.Orbit( reps[i], o );
for ( int j = 0; j < o.isize( ); j++ )
{ from_[ 2*i ].push_back( 2*i + 1 );
from_edge_obj_[ 2*i ].push_back( o[j] );
to_[ 2*i + 1 ].push_back( 2*i );
to_edge_obj_[ 2*i + 1 ].push_back( o[j] ); } } }
// Construct from edge objects grouped by the equivalence relation e; see
// EdgeEquivConstructor for the resulting layout.
template<class F> digraphE<F>::digraphE( const vec<F>& edges, const equiv_rel& e )
{
     EdgeEquivConstructor( edges, e );
}
// Build a graph from bare edge objects. EDGES_SEPARATE gives each edge its own
// private vertex pair (edge i runs 2*i -> 2*i+1); EDGES_IN_LINE chains all
// edges into a single path on ne+1 vertices (edge i runs i -> i+1). Any other
// constructor type aborts.
template<class F> digraphE<F>::digraphE( const vec<F>& edges,
const ConstructorBehavior constructor_type )
{ edges_ = edges;
int ne = edges.size( );
int nv = ( constructor_type == EDGES_SEPARATE ? ne * 2 : ne + 1 );
to_edge_obj_.resize(nv);
from_edge_obj_.resize(nv);
to_.resize(nv);
from_.resize(nv);
if ( constructor_type == EDGES_SEPARATE )
{ for ( int i = 0; i < ne; i++ )
{ from_[ 2*i ].push_back( 2*i + 1 );
from_edge_obj_[ 2*i ].push_back(i);
to_[ 2*i + 1 ].push_back( 2*i );
to_edge_obj_[ 2*i + 1 ].push_back(i); } }
else if ( constructor_type == EDGES_IN_LINE )
{ for ( int i = 0; i <= ne; i++ )
{ if ( i < ne )
{ from_[i].push_back(i+1);
from_edge_obj_[i].push_back(i); }
if ( i > 0 )
{ to_[i].push_back(i-1);
to_edge_obj_[i].push_back(i-1); } } }
else ForceAssert( 0 == 1 ); }
// Mark used[e] = True for every edge object index e referenced by some
// vertex's incoming-edge list; all other entries are False.
template<class F> void digraphE<F>::Used( vec<Bool>& used ) const
{
     used.resize_and_set( edges_.size( ), False );
     for ( int v = 0; v < N( ); v++ )
          for ( int j = 0; j < to_edge_obj_[v].isize( ); j++ )
               used[ to_edge_obj_[v][j] ] = True;
}
// Count how many edge objects are currently referenced by the graph.
template<class F> int digraphE<F>::UsedCount( ) const
{
     vec<Bool> flags;
     Used(flags);
     return Sum(flags);
}
// JoinEdges: vertex x must have exactly one incoming and one outgoing edge
// (diagnostics are printed before the assertion fires if not). Both edges are
// detached from x and from its neighbors, and replaced by the single new edge
// e running directly from x's predecessor to its successor. The old edge
// objects appear to be left in edges_ — callers elsewhere in this file follow
// up with RemoveDeadEdgeObjects( ).
template<class F> void digraphE<F>::JoinEdges( int x, const F& e )
{ if ( from_[x].size( ) != 1 || to_[x].size( ) != 1 )
{ cout << "Problem in JoinEdges.\n";
PRINT(x);
cout << "edges in = " << printSeq( ToEdgeObj(x) ) << endl;
cout << "edges out = " << printSeq( FromEdgeObj(x) ) << endl; }
ForceAssert( from_[x].size( ) == 1 && to_[x].size( ) == 1 );
int v = to_[x][0], w = from_[x][0];
// Disallowed only when x is a self-loop in both directions (v == x == w).
ForceAssert( x != v || x != w );
from_[x].clear( ), from_edge_obj_[x].clear( );
to_[x].clear( ), to_edge_obj_[x].clear( );
// Remove x from its predecessor's outgoing lists.
for ( int i = 0; i < from_[v].isize( ); i++ )
{ if ( from_[v][i] == x )
{ from_[v].erase( from_[v].begin( ) + i );
from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + i );
break; } }
// Remove x from its successor's incoming lists.
for ( int i = 0; i < to_[w].isize( ); i++ )
{ if ( to_[w][i] == x )
{ to_[w].erase( to_[w].begin( ) + i );
to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + i );
break; } }
AddEdge( v, w, e ); }
// Merge the two edges at any vertex having exactly one predecessor and one
// successor (and which is not its own successor), then drop vertices left
// with no incident edges.
template<class F> void digraphE<F>::RemoveUnneededVertices( )
{
     for ( int v = 0; v < N( ); v++ )
     {
          if ( From(v).size( ) != 1 || To(v).size( ) != 1 ) continue;
          if ( From(v)[0] == v ) continue;
          F joined = EdgeObjectByIndexTo( v, 0 );
          joined.append( EdgeObjectByIndexFrom( v, 0 ) );
          JoinEdges( v, joined );
     }
     RemoveEdgelessVertices( );
}
// Input is a set of vertices v. Each v must be located at the opening of
// a bubble, with exactly two edges that lead to the same successor w:
// _-_
// --> v w -->
// -_-
template<class E> void digraphE<E>::PopBubbles( const vec<int> & bubble_vs )
{
     // Each listed vertex must have exactly two outgoing edges, both to the
     // same successor; we delete one of the pair per vertex.
     vec<int> doomed;
     doomed.reserve( bubble_vs.size() );
     for ( int i = 0; i < bubble_vs.isize(); i++ )
     {
          const int v = bubble_vs[i];
          ForceAssertEq( from_[v].size(), 2u );
          ForceAssertEq( from_[v][0], from_[v][1] );
          // Arbitrarily keep the lower-indexed edge, delete the other.
          doomed.push_back( from_edge_obj_[v][1] );
     }
     DeleteEdges( doomed );
     // Merging: if v had one predecessor and/or w one successor, the surviving
     // bubble edge fuses with the edge leading into/out of the bubble.
     RemoveUnneededVertices( );
     // Purge edge objects that are no longer referenced by the graph.
     RemoveDeadEdgeObjects( );
}
// Input is a set of vertices v. Each v must be located at the opening of
// a bubble, with two or more edges that lead to the same successor w:
// _-_
// - -
// --> v ----- w -->
// _ _
// -_-
template<class E> void digraphE<E>::PopHyperBubbles( const vec<int> & bubble_vs )
{
     // Each listed vertex must have two or more outgoing edges, all to the
     // same successor; we delete all but the first.
     vec<int> doomed;
     doomed.reserve( bubble_vs.size() );
     for ( int i = 0; i < bubble_vs.isize(); i++ )
     {
          const int v = bubble_vs[i];
          ForceAssertGe( from_[v].size(), 2u );
          ForceAssertEq( Min(from_[v]), Max(from_[v]) );
          // Arbitrarily keep the lowest-indexed edge, delete the rest.
          for ( size_t ib = 1; ib < from_edge_obj_[v].size(); ib++ )
               doomed.push_back( from_edge_obj_[v][ib] );
     }
     DeleteEdges( doomed );
     // Merging: if v had one predecessor and/or w one successor, the surviving
     // bubble edge fuses with the edge leading into/out of the bubble.
     RemoveUnneededVertices( );
     // Purge edge objects that are no longer referenced by the graph.
     RemoveDeadEdgeObjects( );
}
// Remove the listed vertices — each of which must have no incident edges —
// and renumber the remaining vertices to keep ids dense; all adjacency lists
// are rewritten in terms of the new ids.
template<class E> void digraphE<E>::RemoveEdgelessVertices(
const vec<int>& to_remove )
{ vec<Bool> remove( N( ), False );
for ( int i = 0; i < to_remove.isize( ); i++ )
remove[ to_remove[i] ] = True;
vec<int> new_vertex_id( N( ), -1 );
int id = 0;
for ( int i = 0; i < N( ); i++ )
{ if ( remove[i] )
{ ForceAssert( from_[i].empty( ) );
ForceAssert( to_[i].empty( ) ); }
else
{ new_vertex_id[i] = id;
++id; } }
EraseIf( from_, remove ), EraseIf( from_edge_obj_, remove );
EraseIf( to_, remove ), EraseIf( to_edge_obj_, remove );
// Remap surviving adjacency entries onto the compacted vertex ids.
for ( int i = 0; i < N( ); i++ )
{ for ( int j = 0; j < from_[i].isize( ); j++ )
from_[i][j] = new_vertex_id[ from_[i][j] ];
for ( int j = 0; j < to_[i].isize( ); j++ )
to_[i][j] = new_vertex_id[ to_[i][j] ]; } }
// Remove every vertex that has no incident edges at all.
template<class E> void digraphE<E>::RemoveEdgelessVertices( )
{
     vec<int> lonely;
     for ( int v = 0; v < N( ); v++ )
          if ( from_[v].empty( ) && to_[v].empty( ) ) lonely.push_back(v);
     RemoveEdgelessVertices(lonely);
}
// digraphV variant: remove every vertex with no incident edges, erase its
// vertex object, and renumber the survivors densely.
template<class V> void digraphV<V>::RemoveEdgelessVertices( )
{ vec<int> new_vertex_id( N( ), -1 );
int id = 0;
vec<Bool> remove( N( ), False );
for ( int i = 0; i < N( ); i++ )
{ if ( from_[i].empty( ) && to_[i].empty( ) ) remove[i] = True;
else
{ new_vertex_id[i] = id;
++id; } }
EraseIf( from_, remove ), EraseIf( to_, remove ), EraseIf( verts_, remove );
// Remap surviving adjacency entries onto the compacted vertex ids.
for ( int i = 0; i < N( ); i++ )
{ for ( int j = 0; j < from_[i].isize( ); j++ )
from_[i][j] = new_vertex_id[ from_[i][j] ];
for ( int j = 0; j < to_[i].isize( ); j++ )
to_[i][j] = new_vertex_id[ to_[i][j] ]; } }
// Flip the direction of every edge by exchanging each vertex's incoming and
// outgoing adjacency data.
template<class E> void digraphE<E>::Reverse( )
{
     for ( int v = 0; v < N( ); v++ )
     {
          swap( from_[v], to_[v] );
          swap( from_edge_obj_[v], to_edge_obj_[v] );
     }
}
// Reverse every edge within the connected component containing vertex x. The
// component is found by joining the endpoints of all edges in an equivalence
// relation and taking x's orbit.
template<class E> void digraphE<E>::ReverseComponent( int x )
{ equiv_rel e( N( ) );
for ( int v = 0; v < N( ); v++ )
{ for ( int i = 0; i < from_[v].isize( ); i++ )
{ int w = from_[v][i];
e.Join( v, w ); } }
vec<int> o;
e.Orbit( x, o );
for ( int j = 0; j < o.isize( ); j++ )
{ int i = o[j];
swap( from_[i], to_[i] );
swap( from_edge_obj_[i], to_edge_obj_[i] ); } }
// Renumber the vertices according to new_order. order_new is built as the
// inverse permutation of new_order; the adjacency vectors are permuted with
// it (presumably PermuteVec moves element i to position order_new[i] — TODO
// confirm against its definition), adjacency entries are rewritten with the
// new ids, and each vertex's lists are re-sorted in sync with their edge ids.
template<class E> void digraphE<E>::ReorderVertices( const vec<int>& new_order )
{ ForceAssertEq( new_order.isize( ), N( ) );
vec<int> order_new( N( ) );
for ( int i = 0; i < N( ); i++ )
order_new[ new_order[i] ] = i;
PermuteVec( from_, order_new );
PermuteVec( from_edge_obj_, order_new );
PermuteVec( to_, order_new );
PermuteVec( to_edge_obj_, order_new );
for ( int v = 0; v < N( ); v++ )
{ for ( int j = 0; j < from_[v].isize( ); j++ )
from_[v][j] = order_new[ from_[v][j] ];
for ( int j = 0; j < to_[v].isize( ); j++ )
to_[v][j] = order_new[ to_[v][j] ];
SortSync( from_[v], from_edge_obj_[v] );
SortSync( to_[v], to_edge_obj_[v] ); } }
// Reorder the connected components of the graph.  new_order is a permutation
// of the component representatives (found by scanning vertices in order);
// the vertices of each component are kept together, in their orbit order,
// and the resulting vertex permutation is applied via ReorderVertices.
template<class E> void digraphE<E>::ReorderComponents( const vec<int>& new_order )
{ equiv_rel e( N( ) );
for ( int v = 0; v < N( ); v++ )
{ for ( int i = 0; i < from_[v].isize( ); i++ )
{ int w = from_[v][i];
e.Join( v, w ); } }
vec<int> reps;
for ( int v = 0; v < N( ); v++ )
if ( e.Representative(v) ) reps.push_back(v);
ForceAssertEq( new_order.size( ), reps.size( ) );
// Concatenate the orbits in the requested component order.
vec<int> new_vertex_order;
for ( int i = 0; i < reps.isize( ); i++ )
{ int v = reps[ new_order[i] ];
vec<int> o;
e.Orbit( v, o );
new_vertex_order.append(o); }
ReorderVertices(new_vertex_order); }
// Report the edge ids of each connected component: one sorted,
// duplicate-free list of edge ids per component.
template<class E> void
digraphE<E>::ComponentEdges( vec< vec<edge_t> >& edges ) const
{
    vec<vec<int> > comp_verts;
    Components( comp_verts );
    const int ncomp = comp_verts.isize( );
    edges.clear( );
    edges.resize( ncomp );
    for ( int c = 0; c < ncomp; c++ )
    {
        // Gather the outgoing edges of every vertex in the component,
        // then sort and deduplicate.
        for ( int k = 0; k < comp_verts[c].isize( ); k++ )
            edges[c].append( FromEdgeObj( comp_verts[c][k] ) );
        UniqueSort( edges[c] );
    }
}
// Append a disjoint copy of D to this graph.  D's vertex ids are shifted by
// this graph's prior vertex count and its edge-object ids by the prior edge
// count, so the two graphs remain separate components.
template<class F> void digraphE<F>::Append( const digraphE<F>& D )
{ int nedges = edges_.size( );
edges_.append( D.edges_ );
int nvertices = from_.size( );
from_.append( D.from_ );
to_.append( D.to_ );
from_edge_obj_.append( D.from_edge_obj_ );
to_edge_obj_.append( D.to_edge_obj_ );
// Offset the ids stored inside the newly appended adjacency lists.
for ( int i = nvertices; i < N( ); i++ )
{ for ( int j = 0; j < from_[i].isize( ); j++ )
{ from_[i][j] += nvertices;
from_edge_obj_[i][j] += nedges; }
for ( int j = 0; j < to_[i].isize( ); j++ )
{ to_[i][j] += nvertices;
to_edge_obj_[i][j] += nedges; } } }
// Split the j-th outgoing edge of v, say v --e--> w, into a two-edge path
// v --e1--> n --e2--> w, where n is a brand-new vertex appended to the graph.
// The new edge objects get ids ne and ne+1 (ne = prior edge count); the old
// edge object stays in edges_ but becomes unreferenced.
template<class F> void digraphE<F>::SplitEdge( int v, int j, const F& e1, const F& e2 )
{
    int n = N( );                  // id of the new middle vertex
    int ne = edges_.size( );       // e1 gets id ne, e2 gets id ne+1
    edges_.push_back( e1, e2 );
    int w = from_[v][j];
    int we = from_edge_obj_[v][j]; // edge-object id of the edge being split
    int i = InputFromOutputTo( v, j );
    // Detach the old edge from both endpoints.
    from_[v].erase( from_[v].begin( ) + j );
    from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + j );
    to_[w].erase( to_[w].begin( ) + i );
    to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + i );
    // Wire v --e1--> n on v's side.
    from_[v].push_back(n), from_edge_obj_[v].push_back(ne);
    // Create the new vertex n with in-edge e1 from v and out-edge e2 to w.
    vec<int> nfrom, nto;
    vec<int> nfrom_edge_obj, nto_edge_obj;
    nfrom.push_back(w), nfrom_edge_obj.push_back(ne+1);
    nto.push_back(v), nto_edge_obj.push_back(ne);
    from_.push_back(nfrom), to_.push_back(nto);
    from_edge_obj_.push_back(nfrom_edge_obj);
    to_edge_obj_.push_back(nto_edge_obj);
    // Defensive cleanup: if an entry for the split edge (v, we) somehow still
    // remains in w's in-list, remove it.  BUG FIX: the original code erased
    // from the graph-level vectors to_ / to_edge_obj_ here, which would have
    // deleted an entire vertex's adjacency list (and shifted all later
    // vertices' lists), corrupting the graph; erase from w's own lists.
    for ( int u = 0; u < to_[w].isize( ); u++ )
    {
        if ( to_[w][u] == v && we == to_edge_obj_[w][u] )
        {
            to_[w].erase( to_[w].begin( ) + u );
            to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + u );
            break;
        }
    }
    // Wire n --e2--> w on w's side.
    to_[w].push_back(n), to_edge_obj_[w].push_back(ne+1);
}
// Replace the two edge-disjoint subpaths a and b by the graph c.  EE and FF
// map the vertices of a and b, respectively, onto vertices of c; both maps
// must be duplicate-free, start at c's vertex 0 and end at c's last vertex.
// The edges of a and b are deleted, c is appended, the path vertices are
// transferred onto their images in c, and c-vertices that receive the same
// source vertex from both paths are identified.
template<class F> void digraphE<F>::Glue( const EmbeddedSubPath<F>& a,
const EmbeddedSubPath<F>& b, const vec<int>& EE, const vec<int>& FF,
const digraphE<F>& c )
{
// Sanity check.
ForceAssertGe( a.NVertices( ), 2 ); ForceAssertGe( b.NVertices( ), 2 );
ForceAssert( !HasSharedEdge(a, b) );
ForceAssertEq( EE.isize( ), a.NVertices( ) );
ForceAssertEq( FF.isize( ), b.NVertices( ) );
vec<int> Esort = EE, Fsort = FF;
Sort(Esort), Sort(Fsort);
ForceAssert( Esort.UniqueOrdered( ) );
ForceAssert( Fsort.UniqueOrdered( ) );
ForceAssertEq( EE.front( ), 0 ); ForceAssertEq( EE.back( ), c.N( ) - 1 );
ForceAssertEq( FF.front( ), 0 ); ForceAssertEq( FF.back( ), c.N( ) - 1 );
// Delete edges appearing in a and b.
for ( int i = 0; i < a.NVertices( ) - 1; i++ )
{ int v = a.Vertex(i), w = a.Vertex(i+1);
int e = a.EdgeObjectIndexAbs(i);
int ef = EdgeObjectIndexToFromIndex( v, e );
int et = InputFromOutputTo( v, ef );
from_[v].erase( from_[v].begin( ) + ef );
from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + ef );
to_[w].erase( to_[w].begin( ) + et );
to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + et ); }
for ( int i = 0; i < b.NVertices( ) - 1; i++ )
{ int v = b.Vertex(i), w = b.Vertex(i+1);
int e = b.EdgeObjectIndexAbs(i);
int ef = EdgeObjectIndexToFromIndex( v, e );
int et = InputFromOutputTo( v, ef );
from_[v].erase( from_[v].begin( ) + ef );
from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + ef );
to_[w].erase( to_[w].begin( ) + et );
to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + et ); }
// Attach c.
int nvertices = N( );
Append(c);
for ( int i = 0; i < a.NVertices( ); i++ )
TransferEdges( a.Vertex(i), EE[i] + nvertices );
for ( int i = 0; i < b.NVertices( ); i++ )
TransferEdges( b.Vertex(i), FF[i] + nvertices );
// If data implies that some vertices in c should be identified, do so.
vec< vec<int> > sources( c.N( ) );
for ( int i = 0; i < a.NVertices( ); i++ )
sources[ EE[i] ].push_back( a.Vertex(i) );
for ( int i = 0; i < b.NVertices( ); i++ )
sources[ FF[i] ].push_back( b.Vertex(i) );
for ( int i = 0; i < c.N( ); i++ )
Sort( sources[i] );
for ( int i1 = 0; i1 < c.N( ); i1++ )
{ for ( int i2 = i1 + 1; i2 < c.N( ); i2++ )
{ if ( Meet( sources[i1], sources[i2] ) )
TransferEdges( i1 + nvertices, i2 + nvertices ); } } }
// Move the edges incident to v so they are incident to w instead (v != w
// required).  If enter_only is True, only edges entering v are transferred;
// v keeps its outgoing edges.  Self-loops at v become self-loops at w
// (unless enter_only).  Adjacency lists are kept sorted via SortSync.
template<class E> void digraphE<E>::TransferEdges( int v, int w,
const Bool enter_only )
{ ForceAssert( v != w );
// Change edges v --> v to edges w --> w.
if ( !enter_only )
{
vec<Bool> remove_from_v;
remove_from_v.resize_and_set( from_[v].size( ), False );
for ( int i = 0; i < from_[v].isize( ); i++ )
{ if ( from_[v][i] == v )
{ from_[w].push_back(w);
from_edge_obj_[w].push_back( from_edge_obj_[v][i] );
to_[w].push_back(w);
to_edge_obj_[w].push_back( from_edge_obj_[v][i] );
remove_from_v[i] = True;
int j = InputFromOutputTo( v, i );
to_[v].erase( to_[v].begin( ) + j );
to_edge_obj_[v].erase( to_edge_obj_[v].begin( ) + j ); } }
EraseIf( from_[v], remove_from_v );
EraseIf( from_edge_obj_[v], remove_from_v );
SortSync( from_[w], from_edge_obj_[w] );
SortSync( to_[w], to_edge_obj_[w] );
}
// Change edges u --> v to edges u --> w.
for ( int i = 0; i < to_[v].isize( ); i++ )
{ int u = to_[v][i];
int j = InputToOutputFrom( v, i );
from_[u][j] = w;
SortSync( from_[u], from_edge_obj_[u] ); }
// Change edges v --> x to edges w --> x.
// if ( !enter_only )
{ for ( int i = 0; i < from_[v].isize( ); i++ )
{ int x = from_[v][i];
int j = InputFromOutputTo( v, i );
if ( !enter_only ) to_[x][j] = w;
// NOTE(review): in the enter_only case to_[x][j] should already be v
// (it records the source of an edge leaving v), so this assignment
// looks like a no-op kept for symmetry — confirm before changing.
else to_[x][j] = v;
SortSync( to_[x], to_edge_obj_[x] ); } }
// Do the rest.
if ( !enter_only )
{ from_[w].append( from_[v] );
from_edge_obj_[w].append( from_edge_obj_[v] ); }
SortSync( from_[w], from_edge_obj_[w] );
to_[w].append( to_[v] );
to_edge_obj_[w].append( to_edge_obj_[v] );
SortSync( to_[w], to_edge_obj_[w] );
to_[v].clear( ), to_edge_obj_[v].clear( );
if ( !enter_only ) { from_[v].clear( ), from_edge_obj_[v].clear( ); } }
// For each vertex v, remove parallel edges v --> w whose edge objects compare
// equal, keeping the first of each equal group.  from_[v] is scanned in runs
// of equal destination (this relies on equal destinations being contiguous,
// as maintained by the SortSync calls elsewhere in this file); all pairs in
// a run are compared by edge-object equality.
template<class E> void digraphE<E>::RemoveDuplicateEdges( )
{ for ( int v = 0; v < N( ); v++ )
{ vec<Bool> remove;
remove.resize_and_set( from_[v].size( ), False );
for ( int j = 0; j < from_[v].isize( ); j++ )
{ int k;
// [j, k) is the run of entries sharing the same destination.
for ( k = j + 1; k < from_[v].isize( ); k++ )
if ( from_[v][k] != from_[v][j] ) break;
for ( int u1 = j; u1 < k; u1++ )
{ if ( remove[u1] ) continue;
for ( int u2 = u1 + 1; u2 < k; u2++ )
{ if ( edges_[ from_edge_obj_[v][u1] ]
== edges_[ from_edge_obj_[v][u2] ] )
{ remove[u2] = True; } } }
j = k - 1; }
// Erase each doomed edge from its destination's in-list first, then
// compact v's out-lists in one pass.
for ( int i = 0; i < remove.isize( ); i++ )
{ if ( remove[i] )
{ int w = from_[v][i];
int j = InputFromOutputTo( v, i );
to_[w].erase( to_[w].begin( ) + j );
to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + j ); } }
EraseIf( from_[v], remove );
EraseIf( from_edge_obj_[v], remove ); } }
// Remove all edges incident to v, in both directions, including self-loops.
// For each neighbor w != v the matching entry in w's lists is erased; v's own
// lists (which also cover self-loops, hence the v == w skips) are cleared
// wholesale at the end.
template<class E> void digraphE<E>::DeleteEdgesAtVertex( int v )
{ for ( int i = 0; i < from_[v].isize( ); i++ )
{ int w = from_[v][i];
int j = InputFromOutputTo( v, i );
if ( v == w ) continue;
to_[w].erase( to_[w].begin( ) + j );
to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + j ); }
for ( int i = 0; i < to_[v].isize( ); i++ )
{ int w = to_[v][i];
int j = InputToOutputFrom( v, i );
if ( v == w ) continue;
from_[w].erase( from_[w].begin( ) + j );
from_edge_obj_[w].erase( from_edge_obj_[w].begin( ) + j ); }
from_[v].clear( ), from_edge_obj_[v].clear( );
to_[v].clear( ), to_edge_obj_[v].clear( ); }
// Compact edges_ by discarding edge objects no longer referenced by any
// adjacency list (as reported by Used).  All stored edge-object ids are
// remapped.  Returns the translation from old edge id to new edge id, with
// -1 for removed edges.
template<class E> vec<int> digraphE<E>::RemoveDeadEdgeObjects( )
{ vec<Bool> used;
Used(used);
int count = 0;
vec<int> to_new_id( edges_.size( ), -1 );
// Slide surviving edge objects down in place, recording new ids.
for ( int i = 0; i < edges_.isize( ); i++ )
{ if ( used[i] )
{ if ( count != i ) edges_[count] = edges_[i];
to_new_id[i] = count;
++count; } }
edges_.resize(count);
// Remap the edge ids held in every adjacency list.
for ( int v = 0; v < N( ); v++ )
{ for ( int i = 0; i < from_[v].isize( ); i++ )
from_edge_obj_[v][i] = to_new_id[ from_edge_obj_[v][i] ];
for ( int i = 0; i < to_[v].isize( ); i++ )
to_edge_obj_[v][i] = to_new_id[ to_edge_obj_[v][i] ]; }
return to_new_id;
}
// Validate the internal invariants of the edge-labeled graph: the underlying
// digraph is valid, the parallel edge-object vectors have matching sizes,
// every stored edge id is in range, and every edge recorded in from_ has the
// mirror record in to_ (and vice versa).  On failure DIGRAPH_INVALID reports
// the problem and, per its contract, honors the exit flag.
template<class E> Bool digraphE<E>::TestValid( const Bool exit ) const
{ if ( !digraph(*this).TestValid( ) ) return False;
if ( from_edge_obj_.size( ) != to_edge_obj_.size( ) )
DIGRAPH_INVALID( "sizes of from_edge_obj_ and to_edge_obj_ are different", exit );
if ( from_.size( ) != from_edge_obj_.size( ) )
DIGRAPH_INVALID( "sizes of from_ and from_edge_obj_ are different", exit );
for ( int v = 0; v < N( ); v++ )
{ if ( from_[v].size( ) != from_edge_obj_[v].size( ) )
{ DIGRAPH_INVALID( "sizes of from_[" << v << "] and "
<< "from_edge_obj_[" << v << "] are different", exit ); } }
for ( int v = 0; v < N( ); v++ )
{ if ( to_[v].size( ) != to_edge_obj_[v].size( ) )
{ DIGRAPH_INVALID( "sizes of to_[" << v << "] and "
<< "to_edge_obj_[" << v << "] are different", exit ); } }
// Every (v, w, edge) in from_ must appear as (w, v, edge) in to_.
for ( int v = 0; v < N( ); v++ )
{ for ( int j = 0; j < from_[v].isize( ); j++ )
{ int w = from_[v][j];
int ei = from_edge_obj_[v][j];
if ( ei < 0 || ei >= EdgeObjectCount( ) )
DIGRAPH_INVALID( "Illegal from_edge_obj value.", exit );
Bool found = False;
for ( int r = 0; r < to_[w].isize( ); r++ )
if ( to_[w][r] == v && to_edge_obj_[w][r] == ei ) found = True;
if ( !found )
{ DIGRAPH_INVALID( "There is an edge from " << v << " to " << w
<< " in from_[" << v
<< "], but not in to_[" << w << "].", exit ); } } }
// And symmetrically for to_.
for ( int v = 0; v < N( ); v++ )
{ for ( int j = 0; j < to_[v].isize( ); j++ )
{ int w = to_[v][j];
int ei = to_edge_obj_[v][j];
if ( ei < 0 || ei >= EdgeObjectCount( ) )
DIGRAPH_INVALID( "Illegal to_edge_obj value.", exit );
Bool found = False;
for ( int r = 0; r < from_[w].isize( ); r++ )
{ if ( from_[w][r] == v && from_edge_obj_[w][r] == ei )
found = True; }
if ( !found )
{ DIGRAPH_INVALID( "There is an edge from " << v << " to " << w
<< " in to_[" << v << "], but not in from_[" << w
<< "].", exit ); } } }
return True; }
// Initialize the graph from explicit adjacency and edge-object-index lists.
// Checks that the parallel lists have consistent sizes, that every stored
// edge id is in range, and that each edge object is referenced exactly once
// (or at most once when allow_unused_edges is set).
template<class F>
void digraphE<F>::Initialize( const vec< vec<int> >& from, const vec< vec<int> >& to,
const vec<F>& edges, const vec< vec<int> >& to_edge_obj,
const vec< vec<int> >& from_edge_obj, const Bool allow_unused_edges )
{ digraph::Initialize( from, to );
edges_ = edges;
to_edge_obj_ = to_edge_obj;
from_edge_obj_ = from_edge_obj;
int N = from.size( );
ForceAssertEq( N, to_edge_obj.isize( ) );
ForceAssertEq( N, from_edge_obj.isize( ) );
// used[o] counts how many to_edge_obj entries reference edge o.
vec<int> used( edges.size( ), 0 );
for ( int i = 0; i < N; i++ )
{ ForceAssertEq( to_edge_obj[i].size( ), to[i].size( ) );
ForceAssertEq( from_edge_obj[i].size( ), from[i].size( ) );
for ( int j = 0; j < to_edge_obj[i].isize( ); j++ )
{ int o = to_edge_obj[i][j];
ForceAssertGe( o, 0 );
ForceAssertLt( o, edges.isize( ) );
++used[o];
int w = i, v = to_[i][j];
int wf = BinPosition( from[v], w );
// The following assert won't do what we want if there are multiple
// edges between two given vertices (in which case wf doesn't
// make sense).
// ForceAssertEq( o, from_edge_obj[v][wf] );
} }
// Report any edge used the wrong number of times before asserting.
for ( int i = 0; i < used.isize( ); i++ )
{ if ( used[i] > 1 || ( !allow_unused_edges && used[i] == 0 ) )
{ cout << "Edge " << i << " is used " << used[i]
<< " times, whereas it should be used exactly once.\n"; }
if (allow_unused_edges) ForceAssertLe( used[i], 1 );
else ForceAssertEq( used[i], 1 ); } }
// Construct from explicit adjacency and edge-object-index lists; all
// validation is delegated to Initialize.
template<class F>
digraphE<F>::digraphE( const vec< vec<int> >& from, const vec< vec<int> >& to,
const vec<F>& edges, const vec< vec<int> >& to_edge_obj,
const vec< vec<int> >& from_edge_obj, const Bool allow_unused_edges )
: digraph(from, to) // redundant with initialize?
{ Initialize( from, to, edges, to_edge_obj, from_edge_obj,
allow_unused_edges ); }
// Initialize a vertex-labeled digraph: adjacency lists plus exactly one
// vertex object per vertex.
template<class V>
void digraphV<V>::Initialize( const vec< vec<int> >& from, const vec< vec<int> >& to,
const vec<V>& verts )
{ digraph::Initialize( from, to );
verts_ = verts;
ForceAssertEq( N( ), verts.isize( ) ); }
// Construct a vertex-labeled digraph; validation is done in Initialize.
template<class V>
digraphV<V>::digraphV( const vec< vec<int> >& from, const vec< vec<int> >& to,
const vec<V>& verts )
: digraph(from, to) // redundant with initialize?
{ Initialize( from, to, verts ); }
// Initialize a digraph with both vertex and edge labels: the edge-labeled
// part is built by digraphE<E>::Initialize, then one vertex object per
// vertex is attached.
template<class V, class E>
void digraphVE<V,E>::Initialize( const vec< vec<int> >& from,
const vec< vec<int> >& to, const vec<V>& verts, const vec<E>& edges,
const vec< vec<int> >& to_edge_obj, const vec< vec<int> >& from_edge_obj )
{ digraphE<E>::Initialize( from, to, edges, to_edge_obj, from_edge_obj );
verts_ = verts;
ForceAssertEq( from.size( ), verts.size( ) ); }
// Construct a vertex- and edge-labeled digraph; validation is in Initialize.
template<class V, class E>
digraphVE<V,E>::digraphVE( const vec< vec<int> >& from,
const vec< vec<int> >& to, const vec<V>& verts, const vec<E>& edges,
const vec< vec<int> >& to_edge_obj, const vec< vec<int> >& from_edge_obj )
: digraphE<E>( from, to, edges, to_edge_obj, from_edge_obj ) // redundant??
{ Initialize( from, to, verts, edges, to_edge_obj, from_edge_obj ); }
// Construct by attaching vertex objects to an existing edge-labeled graph;
// requires exactly one vertex object per vertex of G.
template<class V, class E>
digraphVE<V,E>::digraphVE( const digraphE<E>& G, const vec<V>& verts )
: digraphE<E>(G)
{ verts_ = verts;
ForceAssertEq( G.N( ), verts.isize( ) ); }
// Test whether (vertices, edges) forms a complete subgraph closure: every
// edge incident to a listed vertex must itself be listed, and so must its
// other endpoint.  Both inputs must be sorted and duplicate-free.
template<class E> Bool digraphE<E>::IsComplete(
const vec<int>& vertices, const vec<int>& edges ) const
{
    ForceAssert( vertices.UniqueOrdered( ) );
    ForceAssert( edges.UniqueOrdered( ) );
    for ( int i = 0; i < vertices.isize( ); i++ )
    {
        const int v = vertices[i];
        // Check the outgoing side of v.
        for ( int k = 0; k < From(v).isize( ); k++ )
        {
            if ( !BinMember( vertices, From(v)[k] ) ) return False;
            if ( !BinMember( edges, EdgeObjectIndexByIndexFrom( v, k ) ) )
                return False;
        }
        // Check the incoming side of v.
        for ( int k = 0; k < To(v).isize( ); k++ )
        {
            if ( !BinMember( vertices, To(v)[k] ) ) return False;
            if ( !BinMember( edges, EdgeObjectIndexByIndexTo( v, k ) ) )
                return False;
        }
    }
    return True;
}
// Build the equivalence relation on edge ids generated by joining every
// (incoming, outgoing) edge pair that meets at a common vertex.  Edges
// flagged in exclude (when nonempty) are skipped entirely.
template<class E> void digraphE<E>::DualComponentRelation(
equiv_rel& e, const vec<Bool>& exclude ) const
{
    e.Initialize( EdgeObjectCount( ) );
    for ( int v = 0; v < N( ); v++ )
    {
        for ( int ji = 0; ji < To(v).isize( ); ji++ )
        {
            const int ein = EdgeObjectIndexByIndexTo( v, ji );
            if ( exclude.nonempty( ) && exclude[ein] ) continue;
            for ( int jo = 0; jo < From(v).isize( ); jo++ )
            {
                const int eout = EdgeObjectIndexByIndexFrom( v, jo );
                if ( exclude.nonempty( ) && exclude[eout] ) continue;
                e.Join( ein, eout );
            }
        }
    }
}
// Build this graph from g by collapsing each orbit of the vertex
// equivalence relation e to a single vertex.  Edge objects are copied
// unchanged; edges whose endpoints never appear in g's to-lists (left/right
// vertex still -1) are skipped and remain unreferenced in edges_.
template<class E> void digraphE<E>::Initialize(
const digraphE& g, const equiv_rel& e )
{ edges_ = g.edges_;
vec<int> reps;
e.OrbitRepsAlt(reps);
int nreps = reps.size( );
// to_reps maps each original vertex to the index of its orbit.
vec<int> to_reps( g.N( ) );
for ( int i = 0; i < nreps; i++ )
{ vec<int> o;
e.Orbit( reps[i], o );
for ( int j = 0; j < o.isize( ); j++ )
to_reps[ o[j] ] = i; }
from_.resize(nreps), to_.resize(nreps);
from_edge_obj_.resize(nreps), to_edge_obj_.resize(nreps);
// Record each edge's endpoints as seen from g's to-lists.
int nedges = g.EdgeObjectCount( );
vec<int> to_left_vertex(nedges, -1), to_right_vertex(nedges, -1);
for ( int w = 0; w < g.N( ); w++ )
{ for ( int j = 0; j < g.To(w).isize( ); j++ )
{ int m = g.EdgeObjectIndexByIndexTo( w, j );
int v = g.To(w)[j];
to_left_vertex[m] = v, to_right_vertex[m] = w; } }
// Re-wire every edge between the collapsed endpoint orbits.
for ( int m = 0; m < nedges; m++ )
{ if ( to_left_vertex[m] < 0 || to_right_vertex[m] < 0 ) continue;
int v = to_reps[ to_left_vertex[m] ];
int w = to_reps[ to_right_vertex[m] ];
from_[v].push_back(w), to_[w].push_back(v);
from_edge_obj_[v].push_back(m), to_edge_obj_[w].push_back(m); }
for ( int v = 0; v < N( ); v++ )
{ SortSync( from_[v], from_edge_obj_[v] );
SortSync( to_[v], to_edge_obj_[v] ); } }
// Construct by collapsing g's vertices along the equivalence relation e.
// (edges_ is set both here and inside Initialize; harmless redundancy.)
template<class E> digraphE<E>::digraphE(
const digraphE& g, const equiv_rel& e ) : edges_( g.edges_ )
{ Initialize( g, e ); }
// Construct the disjoint union of the given graphs.
template<class E> digraphE<E>::digraphE( const vec<digraphE>& g )
{ Initialize(g); }
// Build the disjoint union of the given graphs by appending them in order;
// Append shifts vertex and edge ids as needed.
template<class E> void digraphE<E>::Initialize( const vec<digraphE>& g )
{ for ( int i = 0; i < g.isize( ); i++ )
Append( g[i] ); }
// Build the disjoint union of the graphs g, then identify vertices as
// directed by joins: each entry ((gi, vi), (gj, vj)) glues vertex vi of
// graph gi to vertex vj of graph gj.
// NOTE(review): start[0] = 0 is written unconditionally, so this assumes g
// is nonempty — confirm callers guarantee that.
template<class F> void digraphE<F>::Initialize( const vec<digraphE>& g,
const vec< pair< pair<int,int>, pair<int,int> > >& joins )
{ digraphE<F> G(g);
equiv_rel e( G.N( ) );
// start[i] = offset of graph i's vertices within the union G.
vec<int> start( g.isize( ) );
start[0] = 0;
for ( int i = 1; i < g.isize( ); i++ )
start[i] = start[i-1] + g[i-1].N( );
for ( int i = 0; i < joins.isize( ); i++ )
{ int v = start[ joins[i].first.first ] + joins[i].first.second;
int w = start[ joins[i].second.first ] + joins[i].second.second;
e.Join( v, w ); }
Initialize( G, e ); }
// Construct the union of g with vertices identified according to joins.
template<class F> digraphE<F>::digraphE( const vec<digraphE>& g,
const vec< pair< pair<int,int>, pair<int,int> > >& joins )
{ Initialize( g, joins ); }
// Build an edge-labeled graph from g in which every vertex v of g becomes an
// edge (labeled edges[v]).  Each g-vertex v gets two endpoint tokens 2v
// (tail) and 2v+1 (head); g's edges v -> w glue head(v) to tail(w), and the
// orbits of that relation become the vertices of the new graph.
template<class F> void digraphE<F>::Initialize( const digraph& g, const vec<F>& edges ){
int nedges = g.N();
ForceAssertEq( nedges, edges.isize() );
equiv_rel e( 2*nedges );
for ( int v = 0; v < nedges; v++ ){
for ( size_t j = 0; j < g.From(v).size( ); j++ ){
int w = g.From(v)[j];
e.Join( 2*v + 1, 2*w );
}
}
vec<int> reps;
e.OrbitRepsAlt(reps);
int N = reps.size( );
vec< vec<int> > from(N), to(N);
vec< vec<int> > from_edge_obj(N), to_edge_obj(N);
// Wire edge i from the orbit of its tail (2i) to the orbit of its head
// (2i+1).
for ( int i = 0; i < edges.isize( ); i++ ){
int x = BinPosition( reps, e.ClassId( 2*i ) );
int y = BinPosition( reps, e.ClassId( 2*i + 1 ) );
from[x].push_back(y), to[y].push_back(x);
from_edge_obj[x].push_back(i), to_edge_obj[y].push_back(i);
}
for ( int i = 0; i < N; i++ ){
SortSync( from[i], from_edge_obj[i] );
SortSync( to[i], to_edge_obj[i] );
}
Initialize( from, to, edges, to_edge_obj, from_edge_obj );
}
// Construct the edge-per-vertex transformation of g (see Initialize above).
template<class F> digraphE<F>::digraphE( const digraph& g, const vec<F>& edges ){
Initialize( g, edges );
}
// Construct the edge-per-vertex transformation of g with identity integer
// labels 0..N-1.
// NOTE(review): the label vector is vec<int> but Initialize takes vec<E>, so
// this presumably only instantiates for E = int (or int-convertible E) —
// confirm before reusing with other edge types.
template<class E> digraphE<E>::digraphE( const digraph& g ){
vec<int> edges( g.N(), vec<int>::IDENTITY );
Initialize( g, edges );
}
// Return True iff w can be reached from v along a directed path whose total
// length (sum of edge objects) is at most d.  Works a best-first relaxation:
// "unprocessed" holds tentative (vertex, distance) pairs, "processed" the
// best distance settled so far; an entry is re-opened if a shorter distance
// appears.  Paths longer than d are pruned immediately.
// NOTE(review): the pruning assumes edge objects are nonnegative lengths —
// confirm for the instantiating type F.
template<class F> Bool digraphE<F>::ThisClose( int v, int w, F d ) const
{ if ( d < 0 ) return False;
if ( v == w ) return True;
set< pair<int,F> > unprocessed, processed;
unprocessed.insert( make_pair( v, 0 ) );
while( !unprocessed.empty( ) )
{ int x = unprocessed.begin( )->first;
F dx = unprocessed.begin( )->second;
// Look up any settled entry for x (set is ordered by vertex first).
typename set< pair<int,F> >::iterator u
= processed.lower_bound( make_pair( x, 0 ) );
unprocessed.erase( unprocessed.begin( ) );
if ( u != processed.end( ) && u->first == x )
{ if ( u->second <= dx ) continue;
processed.erase(u); }
processed.insert( make_pair( x, dx ) );
for ( int j = 0; j < From(x).isize( ); j++ )
{ int y = From(x)[j];
F dy = dx + EdgeObjectByIndexFrom( x, j );
if ( dy > d ) continue;
if ( y == w ) return True;
// Skip y if it is already settled or queued with a distance <= dy.
typename set< pair<int,F> >::iterator p
= processed.lower_bound( make_pair( y, 0 ) );
if ( p != processed.end( ) && p->first == y )
{ if ( p->second <= dy ) continue;
processed.erase(p); }
typename set< pair<int,F> >::iterator u
= unprocessed.lower_bound( make_pair( y, 0 ) );
if ( u != unprocessed.end( ) && u->first == y )
{ if ( u->second <= dy ) continue;
unprocessed.erase(u); }
unprocessed.insert( make_pair( y, dy ) ); } }
return False; }
// For every edge id e, record its source vertex: to_left[e] = v where the
// edge leaves v.
template<class E> void digraphE<E>::ToLeft( vec<int>& to_left ) const
{
    to_left.resize( EdgeObjectCount( ) );
    for ( int v = 0; v < N( ); v++ )
        for ( int j = 0; j < From(v).isize( ); j++ )
            to_left[ EdgeObjectIndexByIndexFrom( v, j ) ] = v;
}
// For every edge id e, record its target vertex: to_right[e] = v where the
// edge enters v.
template<class E> void digraphE<E>::ToRight( vec<int>& to_right ) const
{
    to_right.resize( EdgeObjectCount( ) );
    for ( int v = 0; v < N( ); v++ )
        for ( int j = 0; j < To(v).isize( ); j++ )
            to_right[ EdgeObjectIndexByIndexTo( v, j ) ] = v;
}
// Compute all vertices reachable from the vertex set v, together with the
// minimum path length (sum of edge objects) from v to each; results are
// returned in from_v ordered by vertex id.  Same relaxation scheme as
// ThisClose: "check" holds tentative (vertex, distance) pairs and "fromv"
// the best distance found so far, with entries re-opened when a shorter
// distance appears.
template<class F>
void digraphE<F>::GetSuccessors( const vec<int>& v, vec< pair<int,F> >& from_v )
{    set< pair<int,F> > check, fromv;
     for ( int i = 0; i < v.isize( ); i++ )
          check.insert( make_pair( v[i], 0 ) );
     while( !check.empty( ) )
     {    int x = check.begin( )->first;
          F dx = check.begin( )->second;
          typename set< pair<int,F> >::iterator u
               = fromv.lower_bound( make_pair( x, 0 ) );
          check.erase( check.begin( ) );
          if ( u != fromv.end( ) && u->first == x )
          {    // BUG FIX: compare the recorded distance with the candidate
               // distance dx, not with the vertex id x (cf. the analogous
               // test in ThisClose).  The old comparison could wrongly keep
               // or discard distances depending on the vertex number.
               if ( u->second <= dx ) continue;
               fromv.erase(u);    }
          fromv.insert( make_pair( x, dx ) );
          for ( int i = 0; i < From(x).isize( ); i++ )
          {    int y = From(x)[i];
               F dy = dx + EdgeObjectByIndexFrom( x, i );
               // Skip y if it is already queued or settled at distance <= dy.
               typename set< pair<int,F> >::iterator a
                    = check.lower_bound( make_pair( y, 0 ) );
               if ( a != check.end( ) && a->first == y )
               {    if ( a->second <= dy ) continue;
                    check.erase(a);    }
               typename set< pair<int,F> >::iterator b
                    = fromv.lower_bound( make_pair( y, 0 ) );
               if ( b != fromv.end( ) && b->first == y )
               {    if ( b->second <= dy ) continue;
                    fromv.erase(b);    }
               check.insert( make_pair( y, dy ) );    }    }
     from_v.clear( );
     for ( typename set< pair<int,F> >::iterator i = fromv.begin( );
          i != fromv.end( ); ++i )
     {    from_v.push_back(*i);    }    }
// Emit one DOT edge statement "v -> w [<attrs>" for edge ei of a digraphE.
// Color, width and label are chosen from the edge's drawing length: gray
// below tiny_top, black below 1 kb, red below 10 kb, magenta/bold above; an
// explicit per-edge color overrides.  The attribute list is deliberately
// left open — the caller appends extra attributes and the closing "];".
template<class F>
void PrintEdge( const int v, const int w, const int ei, const vec<double>& lengths,
const vec<Bool>* dashed, const vec<String>* edge_color, const int tiny_top,
const typename digraphE<F>::edge_label_info eli, ostream& out )
{
// NOTE(review): despite the next comment, wd is emitted below as the
// "minlen" attribute — confirm which of the two is stale.
float wd = 0.1; // this value not used
String color, label;
Bool bold = False;
Bool is_dashed = False;
if ( dashed != NULL ) is_dashed = (*dashed)[ei];
double len = lengths[ei];
if ( len < tiny_top )
{ color = "gray";
if ( v == w ) label = ToString( len, 0 );
wd = 1.0; }
else if ( len >= tiny_top && len < 1000.0 )
{ color = "black";
wd = 2.0; }
else if ( len >= 1000.0 && len < 10000.0 )
{ color = "red";
wd = 4.0;
label = ToString( len/1000.0, 1 ) + " kb"; }
else
{ color = "magenta";
bold = True;
wd = 8.0;
label = ToString( len/1000.0, 0 ) + " kb"; }
if ( edge_color != NULL && (*edge_color)[ei] != "" ) color = (*edge_color)[ei];
out << v << " -> " << w << " [minlen=" << wd << ",color=" << color;
if ( color == "brown" ) out << ",penwidth=4";
if (is_dashed) out << ",style=dashed";
else if (bold) out << ",style=bold";
// Edge naming: explicit names win over numeric/alpha ids; any length label
// computed above is appended in parentheses.
if ( eli.edge_id_names != NULL )
{ if ( label == "" ) label = (*eli.edge_id_names)[ei];
else
{ label = (*eli.edge_id_names)[ei] + " (" + label + ")"; } }
else if ( eli.label_edges )
{ if ( label == "" )
label = ( eli.edge_labels_base_alpha ? BaseAlpha(ei) : ToString(ei) );
else
{ label = ( eli.edge_labels_base_alpha ? BaseAlpha(ei)
: ToString(ei) ) + " (" + label + ")"; } }
if ( eli.label_edges_extra ) label += " " + (*eli.label_edges_extra)[ei];
if ( label != "" ) out << ",label=\"" << label << "\""; }
// digraphEX counterpart of PrintEdge above (identical logic; only the
// edge_label_info type differs).  Emits "v -> w [<attrs>" and leaves the
// attribute list open for the caller to finish with "];".
template<class F>
void PrintEdge2( const int v, const int w, const int ei, const vec<double>& lengths,
const vec<Bool>* dashed, const vec<String>* edge_color, const int tiny_top,
const typename digraphEX<F>::edge_label_info eli, ostream& out )
{
// NOTE(review): despite the next comment, wd is emitted below as the
// "minlen" attribute — confirm which of the two is stale.
float wd = 0.1; // this value not used
String color, label;
Bool bold = False;
Bool is_dashed = False;
if ( dashed != NULL ) is_dashed = (*dashed)[ei];
double len = lengths[ei];
if ( len < tiny_top )
{ color = "gray";
if ( v == w ) label = ToString( len, 0 );
wd = 1.0; }
else if ( len >= tiny_top && len < 1000.0 )
{ color = "black";
wd = 2.0; }
else if ( len >= 1000.0 && len < 10000.0 )
{ color = "red";
wd = 4.0;
label = ToString( len/1000.0, 1 ) + " kb"; }
else
{ color = "magenta";
bold = True;
wd = 8.0;
label = ToString( len/1000.0, 0 ) + " kb"; }
if ( edge_color != NULL && (*edge_color)[ei] != "" ) color = (*edge_color)[ei];
out << v << " -> " << w << " [minlen=" << wd << ",color=" << color;
if ( color == "brown" ) out << ",penwidth=4";
if (is_dashed) out << ",style=dashed";
else if (bold) out << ",style=bold";
// Edge naming: explicit names win over numeric/alpha ids; any length label
// computed above is appended in parentheses.
if ( eli.edge_id_names != NULL )
{ if ( label == "" ) label = (*eli.edge_id_names)[ei];
else
{ label = (*eli.edge_id_names)[ei] + " (" + label + ")"; } }
else if ( eli.label_edges )
{ if ( label == "" )
label = ( eli.edge_labels_base_alpha ? BaseAlpha(ei) : ToString(ei) );
else
{ label = ( eli.edge_labels_base_alpha ? BaseAlpha(ei)
: ToString(ei) ) + " (" + label + ")"; } }
if ( eli.label_edges_extra ) label += " " + (*eli.label_edges_extra)[ei];
if ( label != "" ) out << ",label=\"" << label << "\""; }
// Heuristically find the "leftmost" vertex of the component o for DOT
// labeling: vertices touching only invisible edges are dropped, tentative
// x-positions are propagated across edges (target = source + edge length)
// until no placed/unplaced pair remains, and the vertex with the minimum
// position wins.
// NOTE(review): pos[0]/placed[0] are written unconditionally, so this
// assumes at least one visible vertex in o — callers appear to check
// vis_count first; confirm.
template<class E> void FindLeftMostVertex( const digraphE<E>& G,
const vec<double>& lengths, const vec<int>& o, const vec<Bool>* invisible,
int& leftv )
{
// Restrict attention to visible vertices.
vec<int> oo;
for ( int i1 = 0; i1 < o.isize( ); i1++ )
{ int v = o[i1];
if ( invisible != NULL )
{ Bool owned = False;
for ( int j = 0; j < G.From(v).isize( ); j++ )
{ int e = G.EdgeObjectIndexByIndexFrom( v, j );
if ( !(*invisible)[e] ) owned = True; }
for ( int j = 0; j < G.To(v).isize( ); j++ )
{ int e = G.EdgeObjectIndexByIndexTo( v, j );
if ( !(*invisible)[e] ) owned = True; }
if ( !owned ) continue; }
oo.push_back(v); }
Sort(oo);
vec<float> pos( oo.size( ) );
vec<Bool> placed( oo.size( ), False );
pos[0] = 0.0, placed[0] = True;
// Note that the following block of code is very slow on large components.
// Performance may be OK now.
while( Sum(placed) < oo.isize( ) )
{ Bool progress = False;
for ( int i1 = 0; i1 < oo.isize( ); i1++ )
{ int v = oo[i1];
for ( int j = 0; j < G.From(v).isize( ); j++ )
{ int w = G.From(v)[j];
int i2 = BinPosition( oo, w );
if ( i2 < 0 ) continue;
// Propagate only across pairs with exactly one endpoint placed.
if ( !( placed[i1] ^ placed[i2] ) ) continue;
progress = True;
edge_t e = G.EdgeObjectIndexByIndexFrom( v, j );
if ( placed[i1] ) pos[i2] = pos[i1] + lengths[e];
else pos[i1] = pos[i2] - lengths[e];
placed[i1] = placed[i2] = True; } }
if ( !progress ) break; }
// Report the first vertex achieving the minimum position.
float left = Min(pos);
int leftj = 0;
for ( leftj = 0; leftj < pos.isize( ); leftj++ )
if ( pos[leftj] == left ) break;
leftv = oo[leftj]; }
// digraphEX counterpart of FindLeftMostVertex above (identical logic, using
// the IFrom/ITo edge-index accessors).  See that overload for the algorithm
// and the nonempty-oo assumption.
template<class E> void FindLeftMostVertex( const digraphEX<E>& G,
const vec<double>& lengths, const vec<int>& o, const vec<Bool>* invisible,
int& leftv )
{
// Restrict attention to visible vertices.
vec<int> oo;
for ( int i1 = 0; i1 < o.isize( ); i1++ )
{ int v = o[i1];
if ( invisible != NULL )
{ Bool owned = False;
for ( int j = 0; j < (int) G.From(v).size( ); j++ )
{ int e = G.IFrom( v, j );
if ( !(*invisible)[e] ) owned = True; }
for ( int j = 0; j < (int) G.To(v).size( ); j++ )
{ int e = G.ITo( v, j );
if ( !(*invisible)[e] ) owned = True; }
if ( !owned ) continue; }
oo.push_back(v); }
Sort(oo);
vec<float> pos( oo.size( ) );
vec<Bool> placed( oo.size( ), False );
pos[0] = 0.0, placed[0] = True;
// Note that the following block of code is very slow on large components.
// Performance may be OK now.
while( Sum(placed) < oo.isize( ) )
{ Bool progress = False;
for ( int i1 = 0; i1 < oo.isize( ); i1++ )
{ int v = oo[i1];
for ( int j = 0; j < (int) G.From(v).size( ); j++ )
{ int w = G.From(v)[j];
int i2 = BinPosition( oo, w );
if ( i2 < 0 ) continue;
// Propagate only across pairs with exactly one endpoint placed.
if ( !( placed[i1] ^ placed[i2] ) ) continue;
progress = True;
edge_t e = G.IFrom( v, j );
if ( placed[i1] ) pos[i2] = pos[i1] + lengths[e];
else pos[i1] = pos[i2] - lengths[e];
placed[i1] = placed[i2] = True; } }
if ( !progress ) break; }
float left = Min(pos);
int leftj = 0;
for ( leftj = 0; leftj < pos.isize( ); leftj++ )
if ( pos[leftj] == left ) break;
leftv = oo[leftj]; }
// If vertex v touches both visible and invisible edges, emit a DOT line
// coloring it red so the visibility boundary stands out in the plot.
template<class E> void LabelTransitionVertices( const digraphE<E>& G,
const int v, const vec<Bool>* invisible, ostream& out )
{
    int vis = 0, invis = 0;
    for ( int j = 0; j < G.From(v).isize( ); j++ )
    {
        if ( (*invisible)[ G.EdgeObjectIndexByIndexFrom( v, j ) ] ) invis++;
        else vis++;
    }
    for ( int j = 0; j < G.To(v).isize( ); j++ )
    {
        if ( (*invisible)[ G.EdgeObjectIndexByIndexTo( v, j ) ] ) invis++;
        else vis++;
    }
    if ( vis > 0 && invis > 0 ) out << v << " [color=red];\n";
}
// digraphEX counterpart: color v red in the DOT output when it touches both
// visible and invisible edges.
template<class E> void LabelTransitionVertices( const digraphEX<E>& G,
const int v, const vec<Bool>* invisible, ostream& out )
{
    int vis = 0, invis = 0;
    for ( int j = 0; j < (int) G.From(v).size( ); j++ )
    {
        if ( (*invisible)[ G.IFrom( v, j ) ] ) invis++;
        else vis++;
    }
    for ( int j = 0; j < (int) G.To(v).size( ); j++ )
    {
        if ( (*invisible)[ G.ITo( v, j ) ] ) invis++;
        else vis++;
    }
    if ( vis > 0 && invis > 0 ) out << v << " [color=red];\n";
}
template<class E> void CreateContigLabels( const vec<vec<int>>& components,
const vec<String>* label_contigs_extra, vec<String>& contig_labels0,
vec<String>& contig_labels )
{ vec<int> label_distance;
if ( label_contigs_extra ) contig_labels0 = *label_contigs_extra;
else
{ contig_labels0.resize( components.size( ) );
for (size_t ii=0; ii<components.size( ); ii++)
contig_labels0[ii] = "contig " + ToString( ii ); }
label_distance.resize( contig_labels0.size( ), 0 );
for (int ii=0; ii<(int)contig_labels0.size( ); ii++)
label_distance[ii] = 1 + (int)( contig_labels0[ii].size( ) / 2 );
for ( int i = 0; i < contig_labels0.isize( ); i++ )
{ contig_labels.push_back( ",taillabel=\"" + contig_labels0[i]
+ "\",labelangle=180," + "weight=10000," + "labeldistance="
+ ToString(label_distance[i]) + ",labelfontsize=18,"
+ "labelfontname=\"Times-Bold\"" ); } }
// Write the opening of a DOT file: the graph declaration plus default node
// and edge attributes.  Vertices render as plaintext labels when
// label_vertices is set, as points otherwise.
template<class E>
void DotHeader( const Bool label_contigs, const Bool label_vertices,
const String layout, const double fontsize, const double scale,
const double aspect, ostream& out )
{
    out << "digraph G {\n\n";
    if (label_vertices)
        out << "node [width=" << scale * 0.1 << ",height=" << scale * 0.1
            << ",fontsize=12,shape=plaintext];\n";
    else
        out << "node [width=" << scale * 0.1 << ",height=" << scale * 0.1
            << ",fontsize=10,shape=point];\n";
    out << "edge [fontsize=" << fontsize << ",penwidth=" << scale * 1.0
        << ",arrowsize=" << scale * 1.0 << ",fontname=Arial];\n";
    if (label_contigs) out << "margin=1.0;\n";
    out << "rankdir=LR;\n";
    out << "labeljust=l;\n";
    out << "margin=0;\n";
    if ( layout != "" ) out << "layout=" << layout << ";\n";
    if ( aspect >= 0 ) out << "ratio=" << aspect << ";\n";
}
// Render the graph as a DOT file, one "cluster" subgraph per connected
// component.  lengths supplies a drawing length per edge; the optional
// arguments select components/vertices, hide edges (invisible), and control
// styling.  PrintEdge leaves each edge's attribute list open, so the closing
// "];" (and any penwidth/contig-label extras) is written here.
template<class F> void
digraphE<F>::PrettyDOT( ostream& out, const vec<double>& lengths,
const edge_label_info eli, Bool label_contigs, Bool label_vertices,
const vec<int>* componentsToPrint, const vec<String> *label_contigs_extra,
const vec<int> *verticesToPrint, const vec<Bool>* dashed,
const vec<Bool>* invisible, const vec<String>* edge_color,
const vec<int>* pen_widths, const String layout, const double tiny_top,
const double fontsize, const double scale, const double aspect ) const
{
// Define components and those that are selected.
vec< vec<int> > components;
if ( invisible == NULL ) Components(components);
else
{ vec<int> to_left, to_right;
ToLeft(to_left), ToRight(to_right);
// A vertex counts as invisible unless some visible edge touches it.
vec<Bool> invis( N( ), True );
for ( int e = 0; e < EdgeObjectCount( ); e++ )
{ if ( !(*invisible)[e] )
invis[ to_left[e] ] = invis[ to_right[e] ] = False; }
Components( components, &invis ); }
vec<int> select;
if (componentsToPrint) select = *componentsToPrint;
else
{ select.reserve( components.size( ) );
for (int ii=0; ii<(int)components.size( ); ii++)
select.push_back( ii ); }
// Set up output and contig labels.
DotHeader<F>(
label_contigs, label_vertices, layout, fontsize, scale, aspect, out );
vec<String> contig_labels0, contig_labels;
if (label_contigs)
{ CreateContigLabels<F>( components, label_contigs_extra,
contig_labels0, contig_labels ); }
// Define vertices to skip.
vec<bool> skip_vtx;
if (verticesToPrint)
{ skip_vtx.resize( this->N( ), true );
for (size_t ii=0; ii<verticesToPrint->size( ); ii++)
skip_vtx[ (*verticesToPrint)[ii] ] = false; }
// Print the contigs. We put each contig in its own cluster (the
// subgraph's name MUST start with "cluster" for this to have any effect).
for ( int sel_id = select.isize( ) - 1; sel_id >= 0; sel_id-- )
{ int i = select[sel_id];
vec<int> &o = components[i];
// Skip components whose printable vertices have no visible edges.
if ( invisible != NULL )
{ int vis_count = 0;
for ( int vi = 0; vi < o.isize( ); vi++ )
{ int v = o[vi];
if ( verticesToPrint && skip_vtx[v] ) continue;
for ( int j = 0; j < From(v).isize( ); j++ )
{ int ei = EdgeObjectIndexByIndexFrom( v, j );
if ( !(*invisible)[ei] ) vis_count++; } }
if ( vis_count == 0 ) continue; }
out << "\nsubgraph cluster" << i << " {\n";
out << "color=white;\n";
if ( label_contigs && label_contigs_extra )
{ out << "label=\"" << contig_labels0[i] << "\"," << "fontsize=18,"
<< "fontname=\"Times-Bold\"\n"; }
// Find "leftmost" vertex in graph.
Sort(o);
int leftv;
FindLeftMostVertex( *this, lengths, o, invisible, leftv );
// Print component.
for ( int vi = 0; vi < o.isize( ); vi++ )
{ int v = o[vi];
if ( verticesToPrint && skip_vtx[v] ) continue;
if (label_vertices)
{ out << v << " [label=" << "\"" << v << "\""
<< ",fontcolor=black];\n"; }
// If some edges touching a vertex are invisible, and some are
// visible, make vertex red. Note incompatibility with
// label_vertices.
else if ( invisible != NULL )
{ LabelTransitionVertices( *this, v, invisible, out ); }
for ( int j = 0; j < From(v).isize( ); j++ )
{ int ei = EdgeObjectIndexByIndexFrom( v, j );
if ( invisible != NULL && (*invisible)[ei] ) continue;
int w = From(v)[j];
PrintEdge<F>(
v, w, ei, lengths, dashed, edge_color, tiny_top, eli, out );
// The contig tail-label attaches to the leftmost vertex's
// first edge; then the attribute list opened by PrintEdge
// is finished off.
if ( label_contigs && v == leftv && j == 0
&& !label_contigs_extra )
{ out << contig_labels[i]; }
if ( pen_widths != NULL && (*pen_widths)[ei] > 0 )
out << ",penwidth=" << (*pen_widths)[ei];
out << "];\n"; } }
out << "}\n"; }
out << "\n}" << endl;
out << "#done" << endl; }
// Emit this graph in Graphviz DOT format on 'out'.  Each connected component
// (computed over visible edges only, when 'invisible' is supplied) becomes its
// own "cluster" subgraph.  'dashed', 'edge_color' and 'pen_widths' decorate
// individual edges; 'componentsToPrint'/'verticesToPrint' restrict the output
// to subsets.  The stream ends with a literal "#done" marker line.
template<class F> void
digraphEX<F>::PrettyDOT( ostream& out, const vec<double>& lengths,
     const edge_label_info eli, Bool label_contigs, Bool label_vertices,
     const vec<int>* componentsToPrint, const vec<String> *label_contigs_extra,
     const vec<int> *verticesToPrint, const vec<Bool>* dashed,
     const vec<Bool>* invisible, const vec<String>* edge_color,
     const vec<int>* pen_widths, const String layout, const double tiny_top,
     const double fontsize, const double scale, const double aspect ) const
{
     // Define components and those that are selected.  With an 'invisible'
     // mask, components are formed over vertices touching at least one
     // visible edge.
     vec< vec<int> > components;
     if ( invisible == NULL ) Components(components);
     else
     {    vec<Bool> invis( N( ), True );
          for ( int e = 0; e < E( ); e++ )
          {    if ( !(*invisible)[e] )
                    invis[ ToLeft(e) ] = invis[ ToRight(e) ] = False;    }
          Components( components, &invis );    }
     vec<int> select;
     if (componentsToPrint) select = *componentsToPrint;
     else
     {    select.reserve( components.size( ) );
          for (int ii=0; ii<(int)components.size( ); ii++)
               select.push_back( ii );    }
     // Set up output and contig labels.
     DotHeader<F>(
          label_contigs, label_vertices, layout, fontsize, scale, aspect, out );
     vec<String> contig_labels0, contig_labels;
     if (label_contigs)
     {    CreateContigLabels<F>( components, label_contigs_extra,
               contig_labels0, contig_labels );    }
     // Define vertices to skip.
     vec<bool> skip_vtx;
     if (verticesToPrint)
     {    skip_vtx.resize( this->N( ), true );
          for (size_t ii=0; ii<verticesToPrint->size( ); ii++)
               skip_vtx[ (*verticesToPrint)[ii] ] = false;    }
     // Print the contigs.  We put each contig in its own cluster (the
     // subgraph's name MUST start with "cluster" for this to have any effect).
     for ( int sel_id = select.isize( ) - 1; sel_id >= 0; sel_id-- )
     {    int i = select[sel_id];
          vec<int> &o = components[i];
          // Skip components with no visible edge among the printed vertices.
          if ( invisible != NULL )
          {    int vis_count = 0;
               for ( int vi = 0; vi < o.isize( ); vi++ )
               {    int v = o[vi];
                    if ( verticesToPrint && skip_vtx[v] ) continue;
                    for ( int j = 0; j < (int) From(v).size( ); j++ )
                    {    int ei = IFrom( v, j );
                         if ( !(*invisible)[ei] ) vis_count++;    }    }
               if ( vis_count == 0 ) continue;    }
          out << "\nsubgraph cluster" << i << " {\n";
          out << "color=white;\n";
          if ( label_contigs && label_contigs_extra )
          {    out << "label=\"" << contig_labels0[i] << "\"," << "fontsize=18,"
                    << "fontname=\"Times-Bold\"\n";    }
          // Find "leftmost" vertex in graph.
          Sort(o);
          int leftv;
          FindLeftMostVertex( *this, lengths, o, invisible, leftv );
          // Print component.
          for ( int vi = 0; vi < o.isize( ); vi++ )
          {    int v = o[vi];
               if ( verticesToPrint && skip_vtx[v] ) continue;
               if (label_vertices)
               {    out << v << " [label=" << "\"" << v << "\""
                         << ",fontcolor=black];\n";    }
               // If some edges touching a vertex are invisible, and some are
               // visible, make vertex red.  Note incompatibility with
               // label_vertices.
               else if ( invisible != NULL )
               {    LabelTransitionVertices( *this, v, invisible, out );    }
               for ( int j = 0; j < (int) From(v).size( ); j++ )
               {    int ei = IFrom( v, j );
                    if ( invisible != NULL && (*invisible)[ei] ) continue;
                    int w = From(v)[j];
                    PrintEdge2<F>(
                         v, w, ei, lengths, dashed, edge_color, tiny_top, eli, out );
                    // Attach the contig label to the first edge out of the
                    // leftmost vertex (unless extra labels were requested).
                    if ( label_contigs && v == leftv && j == 0
                         && !label_contigs_extra )
                    {    out << contig_labels[i];    }
                    if ( pen_widths != NULL && (*pen_widths)[ei] > 0 )
                         out << ",penwidth=" << (*pen_widths)[ei];
                    out << "];\n";    }    }
          out << "}\n";    }
     out << "\n}" << endl;
     out << "#done" << endl;    }
// Method: DumpGraphML
// Output the digraph structure in a textual format that can be easily
// read without reference to our code base.  Each edge is labeled with the
// BaseAlpha encoding of its global edge index, listed per source vertex in
// From(v) order.
template<class E>
void
digraphE<E>::DumpGraphML( const String& graphMLFileName ) const
{
     vec< vec< String > > edgeLabels( N() );
     for ( int v = 0; v < N( ); v++ ) {
          for ( int j = 0; j < From(v).isize( ); j++ ) {
               // (Removed an unused local that copied From(v)[j].)
               edgeLabels[ v ].push_back(
                    BaseAlpha( EdgeObjectIndexByIndexFrom( v, j ) ) );
          }
     }
     Ofstream( grml, graphMLFileName );
     WriteGraphML( grml, edgeLabels );
}
// ComponentsE: compute the connected components of the graph, returning each
// component as the list of edge indices whose source vertex lies in it
// (vertices of a component are visited in sorted order).
template<class E> void digraphE<E>::ComponentsE( vec< vec<int> >& comp ) const
{    comp.clear( );
     // Join each vertex with all of its successors to build the components.
     equiv_rel e( N( ) );
     for ( int v = 0; v < N( ); v++ )
     {    for ( int j = 0; j < From(v).isize( ); j++ )
               e.Join( v, From(v)[j] );    }
     for ( int x = 0; x < N( ); x++ )
     {    if ( e.Representative(x) )
          {    vec<int> o;
               e.Orbit( x, o );
               Sort(o);
               vec<int> C;
               for ( int i = 0; i < o.isize( ); i++ )
               {    int v = o[i];
                    for ( int j = 0; j < From(v).isize( ); j++ )
                         C.push_back( EdgeObjectIndexByIndexFrom( v, j ) );    }
               comp.push_back(C);    }    }    }
// LongestPath: compute one longest directed path in G, where edge length is
// given by member function 'len'.  D[x] (from DistancesToEnd) is the maximum
// path length starting at x; the path is then rebuilt greedily by following,
// at each vertex, an outgoing edge that accounts exactly for the drop in D.
// The result is the sequence of global edge indices in a_longest_path.
// NOTE(review): termination presumably relies on properties guaranteed by
// DistancesToEnd (e.g. acyclicity) -- confirm at call sites.
template<class E> void LongestPath( const digraphE<E>& G, int (E::*len)( ) const,
     vec<int>& a_longest_path )
{    vec<int> D;
     const int infinity = 2000000000;
     DistancesToEnd( G, len, infinity, True, D );
     // Start from a vertex of maximal distance-to-end.
     int M = 0, v = 0;
     for ( int x = 0; x < G.N( ); x++ )
          if ( D[x] > M ) { M = D[x], v = x; }
     a_longest_path.clear( );
     while( G.From(v).nonempty( ) )
     {    for ( int j = 0; j < G.From(v).isize( ); j++ )
          {    int w = G.From(v)[j];
               // Edge (v,w) lies on a longest path iff its length equals the
               // decrease in distance-to-end.
               if ( D[w] == D[v] - ((G.EdgeObjectByIndexFrom( v, j )).*len)( ) )
               {    a_longest_path.push_back( G.EdgeObjectIndexByIndexFrom( v, j ) );
                    v = w;
                    break;    }    }    }    }
// DistancesToEndArr: worklist relaxation computing, for every vertex v, the
// maximum total edge length (per precomputed 'edgeLens') of a path from v to
// a sink (fw=True) or from a source to v (fw=False), capped at max_dist.
// Vertices whose value cannot be determined (e.g. on cycles or beyond the
// cap) end up at max_dist.
template<class E> void DistancesToEndArr( const digraphE<E>& G,
     vec<int> const& edgeLens, const int max_dist, const Bool fw, vec<int>& D )
{
     // Let D(v) be the maximum length of a path starting at v, to be computed.
     // Define initial values for D(v) to be 'undefined', except for sinks,
     // which are zero.
     D.resize_and_set( G.N( ), -1 );
     for ( int v = 0; v < G.N( ); v++ )
     {    if ( fw && G.Sink(v) ) D[v] = 0;
          if ( !fw && G.Source(v) ) D[v] = 0;    }
     // Initialize vertices to process.
     vec<Bool> to_process( G.N( ), False );
     vec<int> to_processx;
     for ( int v = 0; v < G.N( ); v++ )
     {    if ( (fw && G.Sink(v)) || (!fw && G.Source(v)) )
          {    to_process[v] = True, to_processx.push_back(v);    }    }
     // Now compute D.  Uncomputed values are set to 'infinity'.
     // (For fw, values propagate backward along To(v); a neighbor already at
     // or above max_dist is never improved further, which bounds the work.)
     while( to_processx.nonempty( ) )
     {    int v = to_processx.back( );
          to_processx.pop_back( );
          to_process[v] = False;
          for ( int j = 0; j < (fw ? G.To(v) : G.From(v) ).isize( ); j++ )
          {    int w = ( fw ? G.To(v) : G.From(v) )[j];
               if ( D[w] >= max_dist ) continue;
               int edgeId = ( fw ? G.EdgeObjectIndexByIndexTo(v, j)
                              : G.EdgeObjectIndexByIndexFrom(v, j) );
               int Dw_new = edgeLens[edgeId] + D[v];
               if ( Dw_new > D[w] )
               {    D[w] = Dw_new;
                    if ( !to_process[w] )
                    {    to_process[w] = True;
                         to_processx.push_back(w);    }    }    }    }
     // Any vertex never reached keeps -1; report it as 'infinity' (max_dist).
     for ( int v = 0; v < G.N( ); v++ )
          if ( D[v] < 0 ) D[v] = max_dist;    }
// RemoveHangingEnds: delete short dead-end branches.  For each vertex the
// alternative branches are ranked by (edge length + distance-to-end); a
// branch is marked hanging when it is at most max_del long while the best
// branch at that vertex is at least min_ratio times longer.  Two passes
// handle the forward and reverse directions.
template<class E> void RemoveHangingEnds( digraphE<E>& G,
     int (E::*len)( ) const, const int max_del, const double min_ratio )
{
     // Track hanging ends.
     vec<Bool> hanging( G.EdgeObjectCount( ), False );
     // Define the maximum length that we care about.
     const int max_dist = int( ceil( double(max_del) * min_ratio ) );
     // Go through two passes (forward and reverse).
     for ( int pass = 1; pass <= 2; pass++ )
     {
          // Compute distances to end.
          vec<int> D;
          DistancesToEnd( G, len, max_dist, pass == 1, D );
          // Identify hanging ends.
          for ( int v = 0; v < G.N( ); v++ )
          {    const vec<int>& V = ( pass == 1 ? G.From(v) : G.To(v) );
               vec<int> d( V.size( ) );
               vec<int> id( V.size( ), vec<int>::IDENTITY );
               for ( int j = 0; j < V.isize( ); j++ )
               {    d[j] = ((pass == 1
                         ? G.EdgeObjectByIndexFrom(v,j) : G.EdgeObjectByIndexTo(v,j))
                         .*len)( ) + D[ V[j] ];    }
               // Sort branches best-first, remembering original positions.
               ReverseSortSync( d, id );
               for ( int j = 1; j < d.isize( ); j++ )
               {    if ( d[j] <= max_del && d[0] >= d[j] * min_ratio )
                    {    hanging[ ( pass == 1
                              ? G.EdgeObjectIndexByIndexFrom( v, id[j] )
                              : G.EdgeObjectIndexByIndexTo( v, id[j] ) ) ]
                              = True;    }    }    }    }
     // Remove hanging ends.
     vec<int> to_delete;
     for ( int i = 0; i < G.EdgeObjectCount( ); i++ )
          if ( hanging[i] ) to_delete.push_back(i);
     G.DeleteEdges(to_delete);    }
// Remove short hanging ends. Look for
//
// x
// |
// e
// |
// u --c--> v --d--> w
//
// where x is a source or sink, e is short (and can go either way), whereas
// c and d are long. Works for T = HyperKmerPath and T = HyperFastavector.
template<class T> void RemoveHangingEnds2( T& h,const int max_del,
const double min_ratio )
{
for ( int x = 0; x < h.N( ); x++ ) {
// Check that basic assumptions are satisfied, including length(e) <= 5kb.
int v, c, d, e;
if ( h.Source(x) && h.From(x).size( ) == 1 ) {
v = h.From(x)[0];
e = h.EdgeObjectIndexByIndexFrom( x, 0 );
} else if ( h.Sink(x) && h.To(x).size( ) == 1 ) {
v = h.To(x)[0];
e = h.EdgeObjectIndexByIndexTo( x, 0 );
} else
continue;
if ( h.EdgeLengthKmers(e) > max_del ) continue;
if ( h.Source(x) ) {
if ( !( h.From(v).size( ) == 1 && h.To(v).size( ) == 2 ) ) continue;
d = h.EdgeObjectIndexByIndexFrom( v, 0 );
c = h.EdgeObjectIndexByIndexTo( v, 0 );
if ( c == e ) c = h.EdgeObjectIndexByIndexTo( v, 1 );
} else {
if ( !( h.From(v).size( ) == 2 && h.To(v).size( ) == 1 ) ) continue;
c = h.EdgeObjectIndexByIndexTo( v, 0 );
d = h.EdgeObjectIndexByIndexFrom( v, 0 );
if ( d == e ) d = h.EdgeObjectIndexByIndexFrom( v, 1 );
}
// We require that there is an edge "competing with e", that is at least
// 20 times longer.
static vec<int> v_only(1), to_v, from_v;
v_only[0] = v;
int max_competitor = 0;
if ( h.Source(x) ) {
h.digraph::GetPredecessors( v_only, to_v );
for ( int j = 0; j < to_v.isize( ); j++ ) {
int z = to_v[j];
for ( int i = 0; i < h.To(z).isize( ); i++ ) {
int e = h.EdgeObjectIndexByIndexTo( z, i );
max_competitor = Max( max_competitor, h.EdgeLengthKmers(e) );
}
}
} else {
h.digraph::GetSuccessors( v_only, from_v );
for ( int j = 0; j < from_v.isize( ); j++ ) {
int z = from_v[j];
for ( int i = 0; i < h.From(z).isize( ); i++ ) {
int e = h.EdgeObjectIndexByIndexFrom( z, i );
max_competitor = Max( max_competitor, h.EdgeLengthKmers(e) );
}
}
}
if ( min_ratio * h.EdgeLengthKmers(e) > max_competitor ) continue;
// Edit the graph.
if ( h.Source(x) ) h.DeleteEdgeFrom( x, 0 );
else h.DeleteEdgeTo( x, 0 );
}
}
// SelfLoops: collect the indices of all used edges whose two endpoints
// coincide, i.e. edges running from some vertex v back to v itself.
// Indices are returned in ascending order.
template<class E> vec<int> digraphE<E>::SelfLoops( ) const
{
     vec<int> left_vx, right_vx;
     ToLeft( left_vx );
     ToRight( right_vx );
     vec<Bool> is_used;
     Used( is_used );
     vec<int> result;
     const int n_edges = EdgeObjectCount( );
     for ( int e = 0; e < n_edges; e++ )
     {    if ( is_used[e] && left_vx[e] == right_vx[e] )
               result.push_back(e);    }
     return result;
}
// LoopSubgraph: return (sorted) the indices of all edges lying inside a
// strongly connected component, i.e. the edges that can appear on a cycle.
template<class E> void digraphE<E>::LoopSubgraph( vec<int>& loop_edges ) const
{    loop_edges.clear( );
     vec< vec<int> > SCC;
     StronglyConnectedComponents(SCC);
     for ( int i = 0; i < SCC.isize( ); i++ )
     {    const vec<int>& V = SCC[i];
          for ( int r = 0; r < V.isize( ); r++ )
          {    int v = V[r];
               // Keep each outgoing edge whose head stays in the component.
               for ( int j = 0; j < From(v).isize( ); j++ )
               {    if ( BinMember( V, From(v)[j] ) )
                    {    loop_edges.push_back( EdgeObjectIndexByIndexFrom(
                              v, j ) );    }    }    }    }
     Sort(loop_edges);    }
// SplayVertex: split vertex v so that each incident edge gets its own brand
// new endpoint vertex, leaving v edgeless.  The loops run backward because
// GiveEdgeNewToVx/GiveEdgeNewFromVx remove entries from v's lists as they go.
template<class E> void digraphE<E>::SplayVertex( const int v )
{    int n = N( );
     AddVertices( To(v).size( ) );
     for ( int j = To(v).isize( ) - 1; j >= 0; j-- )
          GiveEdgeNewToVx( EdgeObjectIndexByIndexTo( v, j ), v, n + j );
     // Recompute the base index: new vertices were just appended above.
     n = N( );
     AddVertices( From(v).size( ) );
     for ( int j = From(v).isize( ) - 1; j >= 0; j-- )
     {    GiveEdgeNewFromVx( EdgeObjectIndexByIndexFrom( v, j ),
               v, n + j );    }    }
// LiberateEdge: remove edge e (which runs from v to w), then splay both of
// its former endpoints so their remaining incident edges get fresh vertices.
template<class E> void digraphE<E>::LiberateEdge(
     const int e, const int v, const int w )
{    int j = EdgeObjectIndexToFromIndex( v, e );
     DeleteEdgeFrom( v, j );
     SplayVertex(v), SplayVertex(w);    }
// GiveEdgeNewFromVx: detach edge 'edge_id' from its current source vertex
// old_from_v and reattach it as an edge out of new_from_v.  The target
// vertex is unchanged; the affected adjacency lists are re-sorted to keep
// the parallel from_/from_edge_obj_ (and to_) vectors consistent.
template<class E> void digraphE<E>::GiveEdgeNewFromVx
( int edge_id, int old_from_v, int new_from_v ) {
     int i = Position( from_edge_obj_[old_from_v], edge_id );
     ForceAssert( i != -1 );
     int w = from_[old_from_v][i];
     int j = Position( to_edge_obj_[w],edge_id );
     ForceAssert( j != -1 );
     // Repoint w's record of the edge's source, then move the edge entry
     // from old_from_v's outgoing lists to new_from_v's.
     to_[w][j] = new_from_v;
     from_[old_from_v].erase( from_[old_from_v].begin() + i );
     from_edge_obj_[old_from_v].erase( from_edge_obj_[old_from_v].begin() + i );
     from_[new_from_v].push_back(w);
     from_edge_obj_[new_from_v].push_back(edge_id);
     SortSync( to_[w], to_edge_obj_[w] );
     SortSync( from_[new_from_v], from_edge_obj_[new_from_v] );
}
// GiveEdgeNewToVx: detach edge 'edge_id' from its current target vertex
// old_to_w and reattach it as an edge into new_to_w.  Mirror image of
// GiveEdgeNewFromVx.
template<class E> void digraphE<E>::GiveEdgeNewToVx
( int edge_id, int old_to_w, int new_to_w ) {
     int j = Position( to_edge_obj_[old_to_w], edge_id );
     ForceAssert( j != -1 );
     int v = to_[old_to_w][j];
     int i = Position( from_edge_obj_[v],edge_id );
     ForceAssert( i != -1 );
     // Repoint v's record of the edge's target, then move the edge entry
     // from old_to_w's incoming lists to new_to_w's.
     from_[v][i] = new_to_w;
     to_[old_to_w].erase( to_[old_to_w].begin() + j );
     to_edge_obj_[old_to_w].erase( to_edge_obj_[old_to_w].begin() + j );
     to_[new_to_w].push_back(v);
     to_edge_obj_[new_to_w].push_back(edge_id);
     SortSync( from_[v], from_edge_obj_[v] );
     SortSync( to_[new_to_w], to_edge_obj_[new_to_w] );
}
// AddEdge: add an edge from v to w carrying edge object e, and return the
// new edge's global index.  Insertion positions come from upper_bound so
// that from_[v] and to_[w] stay sorted by neighbor vertex.
template<class F> int digraphE<F>::AddEdge( int v, int w, const F& e )
{    int n = EdgeObjectCount( );
     edges_.push_back(e);
     int i = upper_bound( from_[v].begin(), from_[v].end(), w ) - from_[v].begin();
     from_[v].insert( from_[v].begin()+i, w );
     from_edge_obj_[v].insert( from_edge_obj_[v].begin()+i, n );
     int j = upper_bound( to_[w].begin(), to_[w].end(), v ) - to_[w].begin();
     to_[w].insert( to_[w].begin()+j, v );
     to_edge_obj_[w].insert( to_edge_obj_[w].begin()+j, n );
     return n;
}
// EdgePaths: enumerate all paths from vertex v to vertex w, each path given
// as a sequence of global edge indices; a path terminates on first arrival
// at w.  'left'/'right' map each edge to its source/target vertex.  Caps:
// max_copies bounds how often one edge may repeat within a path, max_paths
// bounds the number of results, max_iterations bounds the search effort.
// Returns False if a cap was exceeded, True otherwise.
template<class E>
Bool digraphE<E>::EdgePaths( const vec<int>& left, const vec<int>& right,
     const int v, const int w, vec< vec<int> >& paths, const int max_copies,
     const int max_paths, const int max_iterations ) const
{
     // Pretest to determine if the computation will explode.  This only works if
     // max_copies is not set.  It walks the same search space but only counts,
     // avoiding the cost of materializing paths that would be thrown away.
     if ( max_copies < 0 && ( max_paths >= 0 || max_iterations >= 0 ) )
     {    vec<int> subs;
          int path_count = 0;
          for ( int i = 0; i < From(v).isize( ); i++ )
          {    int e = EdgeObjectIndexByIndexFrom( v, i );
               subs.push_back(e);    }
          int iterations = 0;
          while( subs.nonempty( ) )
          {    if ( max_iterations > 0 && ++iterations > max_iterations )
                    return False;
               int p = subs.back( );
               subs.pop_back( );
               int x = right[p];
               if ( x == w )
               {    if ( max_paths >= 0 && ++path_count > max_paths )
                         return False;    }
               else
               {    for ( int j = 0; j < From(x).isize( ); j++ )
                    {    int e = EdgeObjectIndexByIndexFrom( x, j );
                         subs.push_back(e);    }    }    }    }
     // Now do the computation for real: depth-first expansion of partial
     // edge paths held on an explicit stack.
     vec< vec<int> > subs;
     paths.clear( );
     for ( int i = 0; i < From(v).isize( ); i++ )
     {    int e = EdgeObjectIndexByIndexFrom( v, i );
          vec<int> one;
          one.push_back(e);
          subs.push_back(one);    }
     int iterations = 0;
     while( subs.nonempty( ) )
     {    if ( max_iterations > 0 && ++iterations > max_iterations ) return False;
          vec<int> p = subs.back( );
          subs.resize( subs.isize( ) - 1 );
          int x = right[ p.back( ) ];
          if ( x == w )
          {    paths.push_back(p);
               if ( max_paths >= 0 && paths.isize( ) > max_paths ) return False;    }
          else
          {    for ( int j = 0; j < From(x).isize( ); j++ )
               {    int e = EdgeObjectIndexByIndexFrom( x, j );
                    vec<int> pp(p);
                    pp.push_back(e);
                    // Enforce the per-edge multiplicity cap: count runs of
                    // equal edge ids in a sorted copy of the extended path.
                    if ( max_copies >= 0 )
                    {    vec<int> pps(pp);
                         Sort(pps);
                         Bool fail = False;
                         for ( int r = 0; r < pps.isize( ); r++ )
                         {    int s = pps.NextDiff(r);
                              if ( s - r > max_copies )
                              {    fail = True;
                                   break;    }
                              r = s - 1;    }
                         if (fail) continue;    }
                    subs.push_back(pp);    }    }    }
     return True;    }
// Convenience overload of EdgePaths: computes the edge-to-vertex endpoint
// maps itself and forwards to the main implementation above.
template<class E>
Bool digraphE<E>::EdgePaths( const int v, const int w, vec< vec<int> >& paths,
     const int max_copies, const int max_paths, const int max_iterations ) const
{
     vec<int> src_of_edge, dst_of_edge;
     ToLeft(src_of_edge);
     ToRight(dst_of_edge);
     return EdgePaths( src_of_edge, dst_of_edge, v, w, paths, max_copies,
          max_paths, max_iterations );
}
// DeleteEdgeTo: delete the j-th incoming edge of w from both endpoints'
// adjacency lists.  The edge object itself stays in edges_.
template<class E> void digraphE<E>::DeleteEdgeTo( int w, int j )
{    int v = to_[w][j];
     // Locate the matching entry in the source vertex's outgoing lists
     // before mutating anything.
     int i = InputToOutputFrom( w, j );
     to_[w].erase( to_[w].begin( ) + j );
     to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + j );
     from_[v].erase( from_[v].begin( ) + i );
     from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + i );    }
// DeleteEdgeFrom: delete the j-th outgoing edge of v from both endpoints'
// adjacency lists.  Mirror of DeleteEdgeTo; the edge object stays in edges_.
template<class E> void digraphE<E>::DeleteEdgeFrom( int v, int j )
{    int w = from_[v][j];
     // Locate the matching entry in the target vertex's incoming lists
     // before mutating anything.
     int i = InputFromOutputTo( v, j );
     from_[v].erase( from_[v].begin( ) + j );
     from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + j );
     to_[w].erase( to_[w].begin( ) + i );
     to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + i );    }
// DeleteEdgesTo: delete the incoming edges of w at the listed to_-positions.
// Iterates from the back so earlier positions stay valid; assumes js is
// sorted ascending -- TODO confirm callers guarantee this.
template<class E> void digraphE<E>::DeleteEdgesTo( int w, const vec<int>& js )
{    for ( int l = js.isize( ) - 1; l >= 0; l-- )
          DeleteEdgeTo( w, js[l] );    }

// DeleteEdgesFrom: delete the outgoing edges of v at the listed
// from_-positions.  Same ordering caveat as DeleteEdgesTo.
template<class E> void digraphE<E>::DeleteEdgesFrom( int v, const vec<int>& js )
{    for ( int l = js.isize( ) - 1; l >= 0; l-- )
          DeleteEdgeFrom( v, js[l] );    }
// EdgesBetween: indices of every edge running directly from v to w, in the
// order they appear in v's outgoing list.
template<class E>
vec<int> digraphE<E>::EdgesBetween( const int v, const int w ) const
{
     vec<int> result;
     const vec<int>& succ = From(v);
     for ( int j = 0; j < succ.isize( ); j++ )
     {    if ( succ[j] == w )
               result.push_back( EdgeObjectIndexByIndexFrom( v, j ) );    }
     return result;
}
// EdgesBetween (set version): indices of all edges whose both endpoints lie
// in the vertex set v, sorted.  BinMember does a binary search, so v is
// presumably sorted -- confirm at call sites.
template<class E> vec<int> digraphE<E>::EdgesBetween( const vec<int>& v ) const
{    vec<int> b;
     for ( int j = 0; j < v.isize( ); j++ )
     {    for ( int i = 0; i < From(v[j]).isize( ); i++ )
          {    if ( BinMember( v, From(v[j])[i] ) )
                    b.push_back( EdgeObjectIndexByIndexFrom( v[j], i ) );    }    }
     Sort(b);
     return b;    }
// EdgeObjectsBetween: copies of the edge objects on every edge running
// directly from v to w, in outgoing-list order.
template<class F>
vec<F> digraphE<F>::EdgeObjectsBetween( const int v, const int w ) const
{
     vec<F> found;
     for ( int idx = 0; idx < From(v).isize( ); idx++ )
     {    if ( From(v)[idx] != w ) continue;
          found.push_back( EdgeObjectByIndexFrom( v, idx ) );    }
     return found;
}
// InputToOutputFrom: given the i-th incoming edge of w, return its position
// in the outgoing list of its source vertex.  Linear scan; asserts if the
// edge is missing (which would mean the adjacency lists are inconsistent).
template<class E> int digraphE<E>::InputToOutputFrom( int w, int i ) const
{    int v = to_[w][i];
     int ei = to_edge_obj_[w][i];
     for ( int j = 0; j < from_[v].isize( ); j++ )
          if ( from_edge_obj_[v][j] == ei ) return j;
     ForceAssert( 0 == 1 );
     return -1;    }

// InputFromOutputTo: given the i-th outgoing edge of w, return its position
// in the incoming list of its target vertex.  Mirror of InputToOutputFrom.
template<class E> int digraphE<E>::InputFromOutputTo( int w, int i ) const
{    int v = from_[w][i];
     int ei = from_edge_obj_[w][i];
     for ( int j = 0; j < to_[v].isize( ); j++ )
          if ( to_edge_obj_[v][j] == ei ) return j;
     ForceAssert( 0 == 1 );
     return -1;    }
// ChangeEdgeObjectFrom: replace the object on v's i-th outgoing edge by
// appending e to edges_ and repointing both index lists at the new entry.
// Note the old edge object is orphaned in edges_, not erased.
template<class F> void digraphE<F>::ChangeEdgeObjectFrom( int v, int i, const F& e )
{    int ne = edges_.size( );
     edges_.push_back(e);
     int w = From(v)[i];
     int j = InputFromOutputTo( v, i );
     from_edge_obj_[v][i] = ne;
     to_edge_obj_[w][j] = ne;    }
// MinEdge: smallest (by Min) edge object among all edges from v to w.
// Asserts that at least one such edge exists.  Note 'F m = 0' requires F to
// be constructible from 0.
template<class F> F digraphE<F>::MinEdge( int v, int w )
{    F m = 0;
     Bool first = True;
     for ( int j = 0; j < From(v).isize( ); j++ )
     {    if ( From(v)[j] != w ) continue;
          if (first) m = EdgeObjectByIndexFrom( v, j );
          else m = Min( m, EdgeObjectByIndexFrom( v, j ) );
          first = False;    }
     ForceAssert( !first );
     return m;    }

// MaxEdge: largest (by Max) edge object among all edges from v to w.
// Mirror of MinEdge, with the same precondition and constructibility note.
template<class F> F digraphE<F>::MaxEdge( int v, int w )
{    F M = 0;
     Bool first = True;
     for ( int j = 0; j < From(v).isize( ); j++ )
     {    if ( From(v)[j] != w ) continue;
          if (first) M = EdgeObjectByIndexFrom( v, j );
          else M = Max( M, EdgeObjectByIndexFrom( v, j ) );
          first = False;    }
     ForceAssert( !first );
     return M;    }
// AddVertices: append nadd isolated vertices (no incident edges), growing
// all four parallel per-vertex vectors to the same new size.
template<class F> void digraphE<F>::AddVertices( int nadd )
{
     const int new_count = N( ) + nadd;
     from_.resize(new_count);
     to_.resize(new_count);
     from_edge_obj_.resize(new_count);
     to_edge_obj_.resize(new_count);
}
// DeleteEdges: remove the listed edges from all adjacency lists.  The edge
// objects stay in edges_, so surviving edge indices are unchanged.  A
// unique-sorted working copy is made only when the input is not already
// unique-sorted (BinMember needs sorted input).
template<class F> void digraphE<F>::DeleteEdges( const vec<int>& to_delete )
{    vec<int> to_delete_local;
     if ( !to_delete.UniqueOrdered( ) )
     {    to_delete_local = to_delete;
          UniqueSort(to_delete_local);    }
     const vec<int>& tod
          = ( to_delete_local.nonempty( ) ? to_delete_local: to_delete );
     // Scan every vertex's outgoing list backward so erasures don't shift
     // the positions still to be visited.
     for ( int v = 0; v < N( ); v++ )
     {    for ( int j = From(v).isize( ) - 1; j >= 0; j-- )
          {    int e = EdgeObjectIndexByIndexFrom( v, j );
               if ( BinMember( tod, e ) ) DeleteEdgeFrom( v, j );    }    }    }
// DeleteEdgesParallel: remove the flagged edges, handling each vertex's
// adjacency lists in an OpenMP-parallel loop.  Each iteration mutates only
// vertex v's own from_*/to_* lists (unlike DeleteEdgeFrom, which touches
// both endpoints), so iterations are independent.
template<class F> void digraphE<F>::DeleteEdgesParallel( const vec<Bool>& to_delete )
{    ForceAssertEq(to_delete.isize(), EdgeObjectCount());
     #pragma omp parallel for
     for ( int v = 0; v < N( ); v++ )
     {    // Backward loops keep not-yet-visited positions valid under erase.
          for ( int j = From(v).isize( ) - 1; j >= 0; j-- )
          {    int e = from_edge_obj_[v][j];
               if (to_delete[e]) {
                    from_[v].erase( from_[v].begin( ) + j );
                    from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + j );    }    }
          for ( int j = To(v).isize( ) - 1; j >= 0; j-- )
          {    int e = to_edge_obj_[v][j];
               if (to_delete[e]) {
                    to_[v].erase( to_[v].begin( ) + j );
                    to_edge_obj_[v].erase( to_edge_obj_[v].begin( ) + j );    }    }    }    }
// DeleteEdges (with to_left): as DeleteEdges, but the caller supplies the
// edge -> source-vertex map so only the source vertices of the doomed edges
// are scanned, instead of every vertex in the graph.
template<class F> void digraphE<F>::DeleteEdges( const vec<int>& to_delete,
     const vec<int>& to_left )
{    vec<int> to_delete_local;
     if ( !to_delete.UniqueOrdered( ) )
     {    to_delete_local = to_delete;
          UniqueSort(to_delete_local);    }
     const vec<int>& tod
          = ( to_delete_local.nonempty( ) ? to_delete_local: to_delete );
     // Collect the distinct source vertices of the edges to delete.
     vec<int> vs;
     for ( int i = 0; i < to_delete.isize( ); i++ )
          vs.push_back( to_left[ to_delete[i] ] );
     UniqueSort(vs);
     for ( int i = 0; i < vs.isize( ); i++ )
     {    int v = vs[i];
          for ( int j = From(v).isize( ) - 1; j >= 0; j-- )
          {    int e = EdgeObjectIndexByIndexFrom( v, j );
               if ( BinMember( tod, e ) ) DeleteEdgeFrom( v, j );    }    }    }
// EdgeObjectIndexByIndexTo: global edge id of v's j-th incoming edge
// (bounds-checked in debug builds).
template<class F> int digraphE<F>::EdgeObjectIndexByIndexTo( int v, int j ) const
{    CheckGoodVertex(v);
     AssertGe( j, 0 );
     AssertLt( j, to_edge_obj_[v].isize( ) );
     return to_edge_obj_[v][j];    }

// EdgeObjectIndexToFromIndex: position of global edge id e in v's outgoing
// list, or -1 if e does not leave v.  Linear scan.
template<class F> int digraphE<F>::EdgeObjectIndexToFromIndex( int v, int e ) const
{    AssertGe( v, 0 );
     AssertLt( v, from_edge_obj_.isize( ) );
     for ( int i = 0; i < from_edge_obj_[v].isize( ); i++ )
          if ( from_edge_obj_[v][i] == e ) return i;
     return -1;    }

// EdgeObjectIndexToToIndex: position of global edge id e in v's incoming
// list, or -1 if e does not enter v.  Linear scan.
template<class F> int digraphE<F>::EdgeObjectIndexToToIndex( int v, int e ) const
{    AssertGe( v, 0 );
     AssertLt( v, to_edge_obj_.isize( ) );
     for ( int i = 0; i < to_edge_obj_[v].isize( ); i++ )
          if ( to_edge_obj_[v][i] == e ) return i;
     return -1;    }
// Inequality, defined in terms of operator==.
template<class F> bool operator!=( const digraphE<F>& g1, const digraphE<F>& g2 )
{    return !(g1==g2);    }
// EqualExceptEdgeObjectOrder: true iff the two graphs have identical
// topology and positionally-equal edge objects when compared via the
// per-vertex FromEdgeObj lists -- i.e. the *global numbering* of edge
// objects may differ between the two graphs.
template<class F> bool EqualExceptEdgeObjectOrder(
     const digraphE<F>& g1, const digraphE<F>& g2 )
{
     if ( static_cast<digraph const&>(g1) != static_cast<digraph const&>(g2) )
          return false;
     // digraphs are the same, now check edge objects
     typedef vec<int> V;
     typedef V::const_iterator VI;
     typedef vec<V> VV;
     typedef VV::const_iterator VVI;
     VV const& vv1 = g1.FromEdgeObj();
     VV const& vv2 = g2.FromEdgeObj();
     if ( vv1.size() != vv2.size() )
          return false;
     // Walk the per-vertex edge-index lists in lockstep, comparing the edge
     // objects they refer to (not the indices themselves).
     VVI oE(vv1.end());
     for ( VVI o1(vv1.begin()), o2(vv2.begin()); o1 != oE; ++o1, ++o2 )
     {
          if ( o1->size() != o2->size() )
               return false;
          VI iE(o1->end());
          for ( VI i1(o1->begin()), i2(o2->begin()); i1 != iE; ++i1, ++i2 )
               if ( !(g1.EdgeObject(*i1) == g2.EdgeObject(*i2)) )
                    return false;
     }
     return true;
}
// Full equality: same topology and edge objects equal under the same global
// numbering (not merely equal up to per-vertex edge-object order).
template<class E> bool operator==( const digraphE<E>& g1, const digraphE<E>& g2 )
{    if ( !EqualExceptEdgeObjectOrder( g1, g2 ) ) return false;
     return g1.Edges( ) == g2.Edges( );    }
// Compare: diagnostic comparison of two digraphE's, reporting which members
// differ.  Bug fix: messages are now written to the 'out' parameter, which
// was previously ignored in favor of cout.
template<class E>
void Compare( ostream& out, const digraphE<E>& g1, const digraphE<E>& g2 )
{    if ( g1.N( ) != g2.N( ) )
          out << "first graph has " << g1.N( ) << " vertices but "
               << "second graph has " << g2.N( ) << "\n";
     if ( g1.From( ) != g2.From( ) ) out << "from_ not the same\n";
     if ( g1.To( ) != g2.To( ) ) out << "to_ not the same\n";
     if ( g1.Edges( ) != g2.Edges( ) ) out << "edges_ not the same\n";
     if ( g1.ToEdgeObj( ) != g2.ToEdgeObj( ) )
          out << "to_edge_obj_ not the same\n";
     if ( g1.FromEdgeObj( ) != g2.FromEdgeObj( ) )
          out << "from_edge_obj_ not the same\n";
     if ( g1 != g2 ) out << "DIGRAPHS ARE NOT EQUAL\n";
     return;    }
// Clear: reset the graph to empty (no vertices, no edges).
template<class E> void digraphE<E>::Clear( )
{    from_.clear( ), to_.clear( );
     from_edge_obj_.clear( ), to_edge_obj_.clear( );
     edges_.clear( );    }

// EdgeObject: read-only access to edge object i (bounds-checked in debug).
template<class E> const E& digraphE<E>::EdgeObject( int i ) const
{    AssertGe( i, 0 );
     AssertLt( i, edges_.isize( ) );
     return edges_[i];    }

// EdgeObjectMutable: writable access to edge object i.
template<class E> E& digraphE<E>::EdgeObjectMutable( int i )
{    AssertGe( i, 0 );
     AssertLt( i, edges_.isize( ) );
     return edges_[i];    }
// Vert: read-only access to the object stored on vertex v (bounds-checked).
template<class V> const V& digraphV<V>::Vert( int v ) const
{    AssertGe( v, 0 );
     AssertLt( v, N( ) );
     return verts_[v];    }

// VertMutable: writable access to the object stored on vertex v.
template<class V> V& digraphV<V>::VertMutable( int v )
{    AssertGe( v, 0 );
     AssertLt( v, N( ) );
     return verts_[v];    }

// Vert (digraphVE): read-only access to the object stored on vertex v.
template<class V, class E> const V& digraphVE<V,E>::Vert( int v ) const
{    AssertGe( v, 0 );
     AssertLt( v, N( ) );
     return verts_[v];    }

// VertMutable (digraphVE): writable access to the object on vertex v.
template<class V, class E> V& digraphVE<V,E>::VertMutable( int v )
{    AssertGe( v, 0 );
     AssertLt( v, N( ) );
     return verts_[v];    }
// DeleteVertex: remove vertex v and its incident edges, then renumber every
// vertex index greater than v downward by one in all adjacency lists.
template<class V> void digraphV<V>::DeleteVertex( const int v )
{    int n = N( );
     AssertGe( v, 0 );
     AssertLt( v, n );
     DeleteEdgesAtVertex(v);
     verts_.erase( verts_.begin( ) + v );
     from_.erase( from_.begin( ) + v );
     to_.erase( to_.begin( ) + v );
     // n - 1 vertices remain after the erase above.
     for ( int x = 0; x < n - 1; x++ )
     {    for ( int j = 0; j < From(x).isize( ); j++ )
               if ( From(x)[j] >= v ) FromMutable(x)[j]--;
          for ( int j = 0; j < To(x).isize( ); j++ )
               if ( To(x)[j] >= v ) ToMutable(x)[j]--;    }    }

// DeleteVertices: delete the listed vertices, iterating from the back so
// DeleteVertex's renumbering doesn't invalidate the remaining entries.
// Assumes v is sorted ascending -- TODO confirm callers guarantee this.
template<class V> void digraphV<V>::DeleteVertices( const vec<int>& v )
{    for ( int m = v.isize( ) - 1; m >= 0; m-- )
          DeleteVertex( v[m] );    }
// AddVertex: append an isolated vertex carrying object v; returns its index.
template<class V> int digraphV<V>::AddVertex( const V& v )
{    verts_.push_back(v);
     from_.resize( from_.size( ) + 1 );
     to_.resize( to_.size( ) + 1 );
     return verts_.size() - 1;    }

// AddVertex (digraphVE): append an isolated vertex, also extending the
// edge-index lists.  Note: unlike digraphV::AddVertex, returns nothing.
template<class V, class E> void digraphVE<V,E>::AddVertex( const V& v )
{    verts_.push_back(v);
     this->FromMutable( ).resize( this->From( ).size( ) + 1 );
     this->ToMutable( ).resize( this->To( ).size( ) + 1 );
     this->FromEdgeObjMutable( ).resize( this->FromEdgeObj( ).size( ) + 1 );
     this->ToEdgeObjMutable( ).resize( this->ToEdgeObj( ).size( ) + 1 );    }
// RemoveVertices: delete the listed vertices -- first strip their incident
// edges, then compact the underlying digraphE and erase the matching
// entries of the parallel verts_ array.
// NOTE(review): assumes digraphE::RemoveEdgelessVertices accepts the same
// vertex list and removes exactly those vertices -- confirm; duplicates in
// to_remove appear harmless since the flag assignment is idempotent.
template<class V, class E> void digraphVE<V,E>::RemoveVertices( const vec<int>& to_remove )
{
     vec<Bool> to_delete(verts_.size(),False );
     for( auto entry: to_remove){
          to_delete[entry]=True;
          digraphE<E>::DeleteEdgesAtVertex(entry);
     }
     digraphE<E>::RemoveEdgelessVertices(to_remove);
     EraseIf(verts_,to_delete);
}
// EdgesSomewhereBetween: the sorted, deduplicated indices of all edges that
// lie on some directed path from v to w -- both endpoints of a kept edge
// must be reachable from v and able to reach w.
template<class E>
vec<int> digraphE<E>::EdgesSomewhereBetween( const int v, const int w ) const
{    vec<int> answer, after_v, before_w, both;
     GetSuccessors1( v, after_v ), GetPredecessors1( w, before_w );
     Intersection( after_v, before_w, both );
     for ( int l = 0; l < both.isize( ); l++ )
     {    int s = both[l];
          for ( int j = 0; j < From(s).isize( ); j++ )
          {    int t = From(s)[j];
               if ( BinMember( both, t ) )
                    answer.append( EdgesBetween( s, t ) );    }    }
     UniqueSort(answer);
     return answer;    }
// Serialize: base digraph data first, then the per-vertex edge-index lists
// and finally the edge objects.
template<class E>
void digraphE<E>::writeBinary( BinaryWriter& writer ) const
{
     digraph::writeBinary(writer);
     writer.write(from_edge_obj_);
     writer.write(to_edge_obj_);
     writer.write(edges_);    }

// Deserialize in the exact order produced by writeBinary.
template<class E>
void digraphE<E>::readBinary( BinaryReader& reader )
{
     digraph::readBinary(reader);
     reader.read(&from_edge_obj_);
     reader.read(&to_edge_obj_);
     reader.read(&edges_);    }
// Serialize digraphEX: base digraphX data, then edge-index lists, edge
// objects, and the cached per-edge endpoint maps.
template<class F> void digraphEX<F>::writeBinary( BinaryWriter& writer ) const
{    digraphX::writeBinary(writer);
     writer.write(from_edge_obj_);
     writer.write(to_edge_obj_);
     writer.write(edges_);
     writer.write(to_left_);
     writer.write(to_right_);    }

// Deserialize digraphEX in the exact order produced by writeBinary.
template<class F> void digraphEX<F>::readBinary( BinaryReader& reader )
{    digraphX::readBinary(reader);
     reader.read(&from_edge_obj_);
     reader.read(&to_edge_obj_);
     reader.read(&edges_);
     reader.read(&to_left_);
     reader.read(&to_right_);    }
// Serialize digraphV: base digraph data, then the vertex objects.
template<class V>
void digraphV<V>::writeBinary( BinaryWriter& writer ) const
{
     digraph::writeBinary(writer);
     writer.write(verts_);    }

// Deserialize digraphV in write order.
template<class V>
void digraphV<V>::readBinary( BinaryReader& reader )
{
     digraph::readBinary(reader);
     reader.read( &verts_ );    }

// Serialize digraphVE: the digraphE part, then the vertex objects.
template<class V, class E>
void digraphVE<V,E>::writeBinary( BinaryWriter& writer ) const
{    digraphE<E>::writeBinary(writer);
     writer.write(verts_);    }

// Deserialize digraphVE in write order.
template<class V, class E>
void digraphVE<V,E>::readBinary( BinaryReader& reader )
{    digraphE<E>::readBinary(reader);
     reader.read( &verts_ );    }
// TestValid: assert the path invariants: e_ holds one From-index per
// consecutive vertex pair of a_, each index points at the right successor,
// and the cached global edge id in esafe_ still matches the graph.
template<class E> void EmbeddedSubPath<E>::TestValid( ) const
{    ForceAssertEq( e_.isize( ), a_.isize( ) - 1 );
     for ( int u = 0; u < a_.isize( ) - 1; u++ )
     {    const vec<int>& fr = D_->From( a_[u] );
          ForceAssertGe( e_[u], 0 );
          ForceAssertLt( e_[u], fr.isize( ) );
          ForceAssertEq( fr[ e_[u] ], a_[u+1] );
          ForceAssertEq( D_->EdgeObjectIndexByIndexFrom( a_[u], e_[u] ),
               esafe_[u] );    }    }

// Repair: re-derive the From-indices e_ from the cached global edge ids in
// esafe_, for use after the underlying graph has been edited.
template<class E> void EmbeddedSubPath<E>::Repair( )
{    for ( int u = 0; u < e_.isize( ); u++ )
     {    if ( D_->EdgeObjectIndexByIndexFrom( a_[u], e_[u] ) != esafe_[u] )
               e_[u] = D_->EdgeObjectIndexToFromIndex(
                    a_[u], esafe_[u] );    }    }
// DistancesToEnd3: for each vertex v, estimate the maximum total edge length
// (via member function 'len') over simple paths leaving v (forward if fw,
// else backward) by breadth-wise path enumeration capped at max_paths
// partial paths per vertex.  D[v] receives the best length found;
// complete[v] records whether enumeration finished under the cap -- if not,
// D[v] is only a lower bound.  Note max_dist is not referenced in this body.
template<class E> void DistancesToEnd3( const digraphE<E>& G,
     int (E::*len)( ) const, const int max_dist, const Bool fw, vec<int>& D,
     vec<Bool>& complete, const int max_paths )
{
     D.resize( G.N( ), 0 );
     complete.resize( G.N( ) );
     #pragma omp parallel for
     for ( int v = 0; v < D.isize( ); v++ )
     {    // Each entry is (vertex sequence, accumulated length), seeded with
          // the trivial path {v}.
          vec< pair< vec<int>, int > >
               paths( { make_pair( vec<int>({v}), 0 ) } );
          while( paths.isize( ) <= max_paths )
          {    vec< pair< vec<int>, int > > paths2;
               for ( const auto& p : paths )
               {    int x = p.first.back( );
                    vec< pair<int,int> > ext;
                    for ( int j = 0;
                         j < ( fw ? G.From(x).isize( ) : G.To(x).isize( ) ); j++ )
                    {    int y = ( fw ? G.From(x)[j] : G.To(x)[j] );
                         // Simple paths only: never revisit a vertex.
                         if ( Member( p.first, y ) ) continue;
                         int e = ( fw ? G.EdgeObjectIndexByIndexFrom( x, j )
                              : G.EdgeObjectIndexByIndexTo( x, j ) );
                         int l = (G.EdgeObject(e).*len)( );
                         ext.push( y, l );    }
                    ReverseSort(ext);
                    // Of parallel edges to the same vertex, keep only the
                    // first (i.e. longest, since ext is sorted descending).
                    for ( int i = 0; i < ext.isize( ); i++ )
                    {    int j;
                         for ( j = i + 1; j < ext.isize( ); j++ )
                              if ( ext[j].first != ext[i].first ) break;
                         auto q(p);
                         q.first.push_back( ext[i].first );
                         q.second += ext[i].second;
                         paths2.push_back(q);
                         i = j - 1;    }
                    // Dead-end paths survive unchanged.
                    if ( ext.empty( ) ) paths2.push_back(p);    }
               // Fixed point reached: nothing could be extended.
               if ( paths2 == paths ) break;
               paths = paths2;    }
          complete[v] = ( paths.isize( ) <= max_paths );
          for ( int i = 0; i < paths.isize( ); i++ )
               D[v] = Max( D[v], paths[i].second );    }    }
// RemoveHangingEnds3: like RemoveHangingEnds, but distances come from
// DistancesToEnd3 (path enumeration with a max_paths cap), and a branch is
// only trimmed when its distance estimate is complete -- so an incomplete
// lower bound can never cause a deletion.
template<class E> void RemoveHangingEnds3( digraphE<E>& G,
     int (E::*len)( ) const, const int max_del, const double min_ratio,
     const int max_paths )
{
     // Track hanging ends.
     vec<Bool> hanging( G.EdgeObjectCount( ), False );
     // Define the maximum length that we care about.
     const int max_dist = int( ceil( double(max_del) * min_ratio ) );
     // Go through two passes (forward and reverse).
     for ( int pass = 1; pass <= 2; pass++ )
     {
          // Compute distances to end.
          vec<int> D;
          vec<Bool> complete;
          DistancesToEnd3( G, len, max_dist, pass == 1, D, complete, max_paths );
          // Identify hanging ends.
          #pragma omp parallel for
          for ( int v = 0; v < G.N( ); v++ )
          {    const vec<int>& V = ( pass == 1 ? G.From(v) : G.To(v) );
               vec<int> d( V.size( ) ), id( V.size( ), vec<int>::IDENTITY );
               vec<Bool> c( V.size( ) );
               for ( int j = 0; j < V.isize( ); j++ )
               {    d[j] = ((pass == 1
                         ? G.EdgeObjectByIndexFrom(v,j) : G.EdgeObjectByIndexTo(v,j))
                         .*len)( ) + D[ V[j] ];
                    c[j] = complete[ V[j] ];    }
               // Rank branches best-first, keeping completeness flags and
               // original positions in sync.
               ReverseSortSync( d, c, id );
               for ( int j = 1; j < d.isize( ); j++ )
               {    if ( d[j] <= max_del && d[0] >= d[j] * min_ratio && c[j] )
                    {    hanging[ ( pass == 1
                              ? G.EdgeObjectIndexByIndexFrom( v, id[j] )
                              : G.EdgeObjectIndexByIndexTo( v, id[j] ) ) ]
                              = True;    }    }    }    }
     // Remove hanging ends.
     vec<int> to_delete;
     for ( int i = 0; i < G.EdgeObjectCount( ); i++ )
          if ( hanging[i] ) to_delete.push_back(i);
     G.DeleteEdges(to_delete);    }
// CheckSum: order-sensitive integer hash of the adjacency structure
// (neighbor lists and edge-index lists).  Edge objects are not included.
template<class E> int64_t digraphE<E>::CheckSum( ) const
{    int64_t x = 0;
     for ( int v = 0; v < N( ); v++ )
     {    for ( int j = 0; j < From(v).isize( ); j++ )
               x += ( v + 1 ) * (j + 1 ) * ( From(v)[j] + 1 );
          for ( int j = 0; j < To(v).isize( ); j++ )
               x += ( v + 1 ) * (j + 1 ) * ( To(v)[j] + 1 );
          for ( int j = 0; j < FromEdgeObj(v).isize( ); j++ )
               x += ( v + 1 ) * (j + 1 ) * ( FromEdgeObj(v)[j] + 1 );
          for ( int j = 0; j < ToEdgeObj(v).isize( ); j++ )
               x += ( v + 1 ) * (j + 1 ) * ( ToEdgeObj(v)[j] + 1 );    }
     return x;    }
// EdgeObject (digraphEX): read-only access to edge object i (bounds-checked
// in debug builds).
template<class E> const E& digraphEX<E>::EdgeObject( int i ) const
{    AssertGe( i, 0 );
     AssertLt( i, (int) edges_.size( ) );
     return edges_[i];    }
// Construct a digraphEX from a digraphE: copy the adjacency lists,
// edge-index lists and edge objects, then precompute the per-edge
// to_left_/to_right_ endpoint maps.
template<class F> digraphEX<F>::digraphEX( const digraphE<F>& G )
{    from_.resize( G.N( ) );
     to_.resize( G.N( ) );
     from_edge_obj_.resize( G.N( ) );
     to_edge_obj_.resize( G.N( ) );
     for ( int i = 0; i < G.N( ); i++ )
     {    from_[i].resize( G.From(i).size( ) );
          for ( int j = 0; j < G.From(i).isize( ); j++ )
               from_[i][j] = G.From(i)[j];
          to_[i].resize( G.To(i).size( ) );
          for ( int j = 0; j < G.To(i).isize( ); j++ )
               to_[i][j] = G.To(i)[j];
          from_edge_obj_[i].resize( G.From(i).size( ) );
          for ( int j = 0; j < G.From(i).isize( ); j++ )
               from_edge_obj_[i][j] = G.IFrom( i, j );
          to_edge_obj_[i].resize( G.To(i).size( ) );
          for ( int j = 0; j < G.To(i).isize( ); j++ )
               to_edge_obj_[i][j] = G.ITo( i, j );    }
     edges_.resize( G.EdgeObjectCount( ) );
     for ( int e = 0; e < G.EdgeObjectCount( ); e++ )
          edges_[e] = G.EdgeObject(e);
     // Invert the adjacency data into per-edge endpoint lookup tables.
     to_left_.resize( E( ) ), to_right_.resize( E( ) );
     for ( int v = 0; v < N( ); v++ )
     {    for ( int j = 0; j < (int) From(v).size( ); j++ )
               to_left_[ IFrom( v, j ) ] = v;
          for ( int j = 0; j < (int) To(v).size( ); j++ )
               to_right_[ ITo( v, j ) ] = v;    }    }
// Convert this compact digraphEX back into an editable digraphE by
// materializing fresh copies of the adjacency lists, the per-vertex
// edge-index lists, and the edge objects.
template<class F> digraphE<F> digraphEX<F>::AsDigraphE() const
{
    vec< vec<int> > from( N() ), to( N() );
    vec< vec<int> > to_edge_obj( N() ), from_edge_obj( N() );
    vec<F> edges( E() );
    for ( int v = 0; v < N( ); v++ )
    {
        // Outgoing side: neighbor vertices and the edge ids reaching them.
        from[v].resize( From(v).size( ) );
        from_edge_obj[v].resize( From(v).size( ) );
        for ( size_t j = 0; j < From(v).size( ); j++ )
        {
            from[v][j] = From(v)[j];
            from_edge_obj[v][j] = IFrom( v, j );
        }
        // Incoming side, symmetrically.
        to[v].resize( To(v).size( ) );
        to_edge_obj[v].resize( To(v).size( ) );
        for ( size_t j = 0; j < To(v).size( ); j++ )
        {
            to[v][j] = To(v)[j];
            to_edge_obj[v][j] = ITo( v, j );
        }
    }
    for ( int e = 0; e < E(); e++ )
        edges[e] = EdgeObject(e);
    return digraphE<F>(from, to, edges, to_edge_obj, from_edge_obj, true);
}
#endif
|
MS_Hybrid_static.c | /*
Hybrid static Mandelbrot set renderer (MPI + OpenMP)
*/
/* C standard / POSIX headers */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>   /* gettimeofday(), struct timeval */
#include <unistd.h>     /* sleep() */
/* runtime / library headers */
#include <X11/Xlib.h>
#include <mpi.h>
#include <omp.h>
/* Timestamps for the whole-run wall-clock measurement taken on rank 0. */
struct timeval tv1, tv2;
double t = 0;

/* A complex number as a plain (real, imag) pair. */
typedef struct complextype
{
    double real, imag;
} Compl;

/*
 * Hybrid MPI + OpenMP Mandelbrot-set renderer with static decomposition.
 *
 * Usage: prog <threads> <left> <right> <lower> <upper> <width> <height> <e|->
 *
 * The image is split into contiguous blocks of num_r rows, one block per
 * MPI rank.  Every rank iterates z -> z^2 + c (at most 100000 iterations,
 * escape radius 2) for each pixel of its block with OpenMP threads.
 * Rank 0 draws its own block directly into an X11 window (when argv[8]
 * starts with 'e'), then receives and draws each worker's block; workers
 * store iteration counts in Pi and ship the block with MPI_Send.  Each
 * thread finally prints "<global thread id> <seconds>" of compute time.
 *
 * Fixes vs. the original: padding rows are now fully zeroed (the original
 * wrote a single element through a stale index), the stray debug
 * printf("this") was removed, and the meaningless `omp critical` regions
 * outside any parallel region were dropped.
 */
int main(int argc, char *argv[])
{
    /* ---------------- command-line input ---------------- */
    int size, rank;
    int number_thread = atoi(argv[1]);   /* OpenMP threads per rank; timing arrays assume <= 24 — TODO confirm */
    double leftR = atof(argv[2]);        /* real axis: [leftR, rightR] */
    double rightR = atof(argv[3]);
    double lowerR = atof(argv[4]);       /* imaginary axis: [lowerR, upperR] */
    double upperR = atof(argv[5]);
    int width = atoi(argv[6]);           /* image size in pixels */
    int height = atoi(argv[7]);
    char *en = argv[8];                  /* "e" enables the X11 display */
    int num_r;                           /* rows assigned to each rank */
    int dis;                             /* 1 = draw to the display */
    int remainder = 0;                   /* rows of the final partial block; computed but never read */
    if (*en == 'e')
        dis = 1;
    else
        dis = 0;

    /* ---------------- MPI initialisation ---------------- */
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0)
        gettimeofday(&tv1, NULL);

    /* Per-thread compute-time accounting. */
    double t_start[24], total[24] = {0};
    double t = 0;                        /* local elapsed time; shadows the file-scope t as before */
    double t_win = omp_get_wtime();

    /* ------------- rows per rank (static split) ------------- */
    if (height % size == 0)
        num_r = height / size;
    else
        num_r = height / size + 1;       /* round up; last rank's block may overhang the image */
    if (rank == size - 1 && height % size != 0)
        remainder = height % size;

    if (rank == 0) {
        /* ---------------- rank 0: render + gather ---------------- */
        Display *display;
        Window window;
        int screen;
        GC gc;
        XGCValues values;
        long valuemask = 0;
        if (dis == 1) {
            /* Open the connection with the X server and build the window. */
            display = XOpenDisplay(NULL);
            if (display == NULL) {
                fprintf(stderr, "cannot open display\n");
                return 0;
            }
            screen = DefaultScreen(display);
            int x = 0;                   /* window position */
            int y = 0;
            int border_width = 0;
            window = XCreateSimpleWindow(display, RootWindow(display, screen),
                x, y, width, height, border_width,
                BlackPixel(display, screen), WhitePixel(display, screen));
            gc = XCreateGC(display, window, valuemask, &values);
            XSetForeground(display, gc, BlackPixel(display, screen));
            XSetBackground(display, gc, 0X0000FF00);
            XSetLineAttributes(display, gc, 1, LineSolid, CapRound, JoinRound);
            XMapWindow(display, window);
            XSync(display, 0);
        }
        Compl z, c;
        int repeats;
        double temp, lengthsq;
        int i, j, k;
        int Pi[num_r][width];            /* receive buffer for one worker block */
        omp_set_num_threads(number_thread);
        /* Compute rank 0's own block, drawing pixels as they finish. */
#pragma omp parallel default(shared) private(j, z, c, repeats, lengthsq, temp)
        {
#pragma omp for schedule(auto)
            for (i = 0; i < width; i++) {
                for (j = rank * num_r; j < rank * num_r + num_r && j < height; j++) {
                    z.real = 0.0;
                    z.imag = 0.0;
                    /* Map pixel (i, j) onto the requested complex rectangle. */
                    c.real = leftR + (double)((double)i * ((rightR - leftR) / (double)width));
                    c.imag = lowerR + (double)((double)j * ((upperR - lowerR) / (double)height));
                    repeats = 0;
                    lengthsq = 0.0;
                    /* If c is in the Mandelbrot set then |Zn| <= 2, so bail once |z|^2 >= 4. */
                    while (repeats < 100000 && lengthsq < 4.0) {
                        temp = z.real * z.real - z.imag * z.imag + c.real;
                        z.imag = 2 * z.real * z.imag + c.imag;
                        z.real = temp;
                        lengthsq = z.real * z.real + z.imag * z.imag;
                        repeats++;
                    }
                    if (dis == 1) {
                        /* Xlib is not thread-safe: serialize all drawing calls. */
#pragma omp critical
                        {
                            XSetForeground(display, gc, 1024 * 1024 * (repeats % 256));
                            XDrawPoint(display, window, gc, i, j);
                        }
                    }
                }
            }
        }
        if (dis == 1)
            XFlush(display);
        /* Receive one row block from each worker (any order) and draw it;
           the sender is identified via status.MPI_SOURCE. */
        for (k = 0; k < size - 1; k++) {
            MPI_Recv(Pi, num_r * width, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG,
                MPI_COMM_WORLD, &status);
            for (i = (status.MPI_SOURCE * num_r); i < (status.MPI_SOURCE * num_r + num_r) && i < height; i++)
                for (j = 0; j < width; j++) {
                    if (dis == 1) {
                        XSetForeground(display, gc, 1024 * 1024 * (Pi[i - status.MPI_SOURCE * num_r][j] % 256));
                        XDrawPoint(display, window, gc, j, i);
                    }
                }
        }
        t_win = omp_get_wtime() - t_win;
        if (dis == 1)
            XFlush(display);
    } else {
        /* ---------------- workers: compute + send ---------------- */
        Compl z, c;
        int repeats;
        double temp, lengthsq;
        int i, j;
        int Pi[num_r][width];            /* iteration counts for this rank's block */
        t_win = omp_get_wtime() - t_win;
        omp_set_num_threads(number_thread);
#pragma omp parallel default(shared) private(i, j, z, c, repeats, lengthsq, temp)
        {
            t_start[omp_get_thread_num()] = 0;
#pragma omp for schedule(static)
            for (j = 0; j < num_r; j++) {
                t_start[omp_get_thread_num()] = omp_get_wtime();
                if (j + rank * num_r < height) {
                    for (i = 0; i < width; i++) {
                        z.real = 0.0;
                        z.imag = 0.0;
                        c.real = leftR + (double)((double)i * ((rightR - leftR) / (double)width));
                        c.imag = lowerR + (double)((double)(j + rank * num_r) * ((upperR - lowerR) / (double)height));
                        repeats = 0;
                        lengthsq = 0.0;
                        while (repeats < 100000 && lengthsq < 4.0) {
                            temp = z.real * z.real - z.imag * z.imag + c.real;
                            z.imag = 2 * z.real * z.imag + c.imag;
                            z.real = temp;
                            lengthsq = z.real * z.real + z.imag * z.imag;
                            repeats++;
                        }
                        Pi[j][i] = repeats;
                    }
                } else {
                    /* BUG FIX: row lies past the image bottom (height not
                       divisible by size).  The original wrote one element
                       through a stale/uninitialized index, shipping garbage
                       to the master; zero the whole padding row instead. */
                    for (i = 0; i < width; i++)
                        Pi[j][i] = 0;
                }
                total[omp_get_thread_num()] += (omp_get_wtime() - t_start[omp_get_thread_num()]);
            }
        }
        /* Serial region here, so no synchronization is needed around MPI. */
        MPI_Send(Pi, num_r * width, MPI_INT, 0, rank, MPI_COMM_WORLD);
    }
    /* Per-thread timing report: "<global thread id> <seconds>". */
    int i;
    for (i = 0; i < number_thread; i++) {
        printf("%d %f\n", i + rank * number_thread, total[i] + t_win);
    }
    if (rank == 0) {
        gettimeofday(&tv2, NULL);
        t += (double)(tv2.tv_sec - tv1.tv_sec) + (double)(tv2.tv_usec - tv1.tv_usec) / 1000000.0;
        /* printf("hys=%lf\n", t); */
    }
    sleep(5);                            /* keep the window visible briefly before teardown */
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    return 0;
}
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2009 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/string_.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces colors in the image from a color lookup table.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image)
% MagickBooleanType ClutImageChannel(Image *image,
% const ChannelType channel,Image *clut_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o clut_image: the color lookup.
%
*/
/* ClutImage(): replace the image's colors through the color lookup table
   clut_image, applied to the default channel set.  Thin wrapper around
   ClutImageChannel(). */
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image)
{
return(ClutImageChannel(image,DefaultChannels,clut_image));
}
/* ClutImageChannel(): for each selected channel of each pixel, use the
   channel value (scaled to [0,1]) as a coordinate into clut_image and
   replace it with the resampled LUT color.  Rows are processed in parallel
   when OpenMP is available; returns MagickTrue on success. */
MagickExport MagickBooleanType ClutImageChannel(Image *image,
const ChannelType channel,const Image *clut_image)
{
#define ClutImageTag "Clut/Image"
ExceptionInfo
*exception;
long
adjust,
progress,
y;
MagickBooleanType
status;
MagickPixelPacket
zero;
ResampleFilter
**resample_filter;
ViewInfo
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clut_image != (Image *) NULL);
assert(clut_image->signature == MagickSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/*
Clut image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(clut_image,&zero);
/* With integer interpolation, index the full LUT extent; otherwise leave a
   one-texel margin for the interpolation neighborhood. */
adjust=clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1;
exception=(&image->exception);
/* One resample filter per thread: the filters are not thread-safe. */
resample_filter=AcquireResampleFilterThreadSet(clut_image,MagickTrue,
exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (long) image->rows; y++)
{
MagickPixelPacket
pixel;
register IndexPacket
*indexes;
register long
id,
x;
register PixelPacket
*q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
/* Select this thread's private resample filter. */
id=GetPixelCacheThreadId();
for (x=0; x < (long) image->columns; x++)
{
if ((channel & RedChannel) != 0)
{
(void) ResamplePixelColor(resample_filter[id],QuantumScale*q->red*
(clut_image->columns-adjust),QuantumScale*q->red*
(clut_image->rows-adjust),&pixel);
q->red=RoundToQuantum(pixel.red);
}
if ((channel & GreenChannel) != 0)
{
(void) ResamplePixelColor(resample_filter[id],QuantumScale*q->green*
(clut_image->columns-adjust),QuantumScale*q->green*
(clut_image->rows-adjust),&pixel);
q->green=RoundToQuantum(pixel.green);
}
if ((channel & BlueChannel) != 0)
{
(void) ResamplePixelColor(resample_filter[id],QuantumScale*q->blue*
(clut_image->columns-adjust),QuantumScale*q->blue*
(clut_image->rows-adjust),&pixel);
q->blue=RoundToQuantum(pixel.blue);
}
if ((channel & OpacityChannel) != 0)
{
if (clut_image->matte == MagickFalse)
{
/*
A gray-scale LUT replacement for an image alpha channel.
*/
(void) ResamplePixelColor(resample_filter[id],QuantumScale*
(QuantumRange-q->opacity)*(clut_image->columns+adjust),
QuantumScale*(QuantumRange-q->opacity)*(clut_image->rows+
adjust),&pixel);
q->opacity=(Quantum) (QuantumRange-MagickPixelIntensityToQuantum(
&pixel));
}
else
if (image->matte == MagickFalse)
{
/*
A greyscale image being colored by a LUT with transparency.
*/
(void) ResamplePixelColor(resample_filter[id],QuantumScale*
PixelIntensity(q)*(clut_image->columns-adjust),QuantumScale*
PixelIntensity(q)*(clut_image->rows-adjust),&pixel);
q->opacity=RoundToQuantum(pixel.opacity);
}
else
{
/*
Direct alpha channel lookup.
*/
(void) ResamplePixelColor(resample_filter[id],QuantumScale*
q->opacity*(clut_image->columns-adjust),QuantumScale*
q->opacity* (clut_image->rows-adjust),&pixel);
q->opacity=RoundToQuantum(pixel.opacity);
}
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
(void) ResamplePixelColor(resample_filter[id],QuantumScale*indexes[x]*
(clut_image->columns-adjust),QuantumScale*indexes[x]*
(clut_image->rows-adjust),&pixel);
indexes[x]=RoundToQuantum(pixel.index);
}
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress++ is shared across the parallel rows, hence the critical. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
/*
Enable alpha channel if CLUT image could enable it.
*/
if ((clut_image->matte == MagickTrue) && ((channel & OpacityChannel) != 0))
(void) SetImageAlphaChannel(image,ActivateAlphaChannel);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to a MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
*/
/*
  Apply one sigmoidal contrast step to a single pixel in HSB space:
  sign > 0 pushes brightness away from the 0.5 midpoint (more contrast),
  sign < 0 pulls it toward the midpoint (less contrast).  Hue and
  saturation pass through unchanged; brightness is clamped to [0, 1].
*/
static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness = 0.0,
    hue = 0.0,
    saturation = 0.0;

  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  /* Sinusoidal remap of brightness about the midpoint, scaled by sign. */
  brightness+=0.5*sign*(0.5*(sin(MagickPI*(brightness-0.5))+1.0)-brightness);
  if (brightness > 1.0)
    brightness=1.0;
  if (brightness < 0.0)
    brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
/* ContrastImage(): apply the Contrast() sigmoidal brightness remap to every
   pixel (and to the colormap first, for PseudoClass images); sharpen selects
   the direction.  Rows run in parallel under OpenMP; returns MagickTrue on
   success. */
MagickExport MagickBooleanType ContrastImage(Image *image,
const MagickBooleanType sharpen)
{
#define ContrastImageTag "Contrast/Image"
ExceptionInfo
*exception;
int
sign;
long
progress,
y;
MagickBooleanType
status;
register long
i;
ViewInfo
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* +1 increases contrast, -1 reduces it. */
sign=sharpen != MagickFalse ? 1 : -1;
if (image->storage_class == PseudoClass)
{
/*
Contrast enhance colormap.
*/
for (i=0; i < (long) image->colors; i++)
Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
&image->colormap[i].blue);
}
/*
Contrast enhance image.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (long) image->rows; y++)
{
register long
x;
register PixelPacket
*q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (long) image->columns; x++)
{
Contrast(sign,&q->red,&q->green,&q->blue);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The ContrastStretchImage() is a simple image enhancement technique that
% attempts to improve the contrast in an image by `stretching' the range of
% intensity values it contains to span a desired range of values. It differs
% from the more sophisticated histogram equalization in that it can only
% apply % a linear scaling function to the image pixel values. As a result
% the `enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
% MagickBooleanType ContrastStretchImage(Image *image,
% const char *levels)
% MagickBooleanType ContrastStretchImageChannel(Image *image,
% const unsigned long channel,const double black_point,
% const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o levels: Specify the levels where the black and white points have the
% range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
*/
/* ContrastStretchImage(): parse a levels geometry string ("black[xwhite][%]")
   into black/white pixel-count thresholds and delegate to
   ContrastStretchImageChannel() on the default channels. */
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
const char *levels)
{
double
black_point,
white_point;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickStatusType
flags;
/*
Parse levels.
*/
if (levels == (char *) NULL)
return(MagickFalse);
flags=ParseGeometry(levels,&geometry_info);
black_point=geometry_info.rho;
/* Default white point: total pixel count (i.e. stretch from the top). */
white_point=(double) image->columns*image->rows;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
/* NOTE(review): percent scaling uses QuantumRange rather than the pixel
   count — looks suspicious but is preserved as-is; confirm upstream. */
black_point*=(double) QuantumRange/100.0;
white_point*=(double) QuantumRange/100.0;
}
/* No explicit white point: mirror the (possibly scaled) black point. */
if ((flags & SigmaValue) == 0)
white_point=(double) image->columns*image->rows-black_point;
status=ContrastStretchImageChannel(image,DefaultChannels,black_point,
white_point);
return(status);
}
/* ContrastStretchImageChannel(): linearly stretch the selected channels so
   that black_point/white_point (expressed as cumulative pixel counts from
   the dark/light ends of the histogram) map to 0/QuantumRange.  Builds a
   histogram, locates the per-channel black and white levels, precomputes a
   stretch lookup table, then remaps the colormap and/or pixels. */
MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image,
const ChannelType channel,const double black_point,const double white_point)
{
#define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"
double
intensity;
ExceptionInfo
*exception;
long
progress,
y;
MagickBooleanType
status;
MagickPixelPacket
black,
*histogram,
*stretch_map,
white;
register long
i;
ViewInfo
*image_view;
/*
Allocate histogram and stretch map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*histogram));
stretch_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*stretch_map));
if ((histogram == (MagickPixelPacket *) NULL) ||
(stretch_map == (MagickPixelPacket *) NULL))
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Form histogram.
*/
status=MagickTrue;
exception=(&image->exception);
(void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
image_view=AcquireCacheView(image);
for (y=0; y < (long) image->rows; y++)
{
IndexPacket
*indexes;
register const PixelPacket
*p;
register long
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
/* Default channels: one intensity histogram shared by R, G, B, index. */
if (channel == DefaultChannels)
for (x=0; x < (long) image->columns; x++)
{
Quantum
intensity;
intensity=PixelIntensityToQuantum(p+x);
histogram[ScaleQuantumToMap(intensity)].red++;
histogram[ScaleQuantumToMap(intensity)].green++;
histogram[ScaleQuantumToMap(intensity)].blue++;
histogram[ScaleQuantumToMap(intensity)].index++;
}
else
for (x=0; x < (long) image->columns; x++)
{
if ((channel & RedChannel) != 0)
histogram[ScaleQuantumToMap((p+x)->red)].red++;
if ((channel & GreenChannel) != 0)
histogram[ScaleQuantumToMap((p+x)->green)].green++;
if ((channel & BlueChannel) != 0)
histogram[ScaleQuantumToMap((p+x)->blue)].blue++;
if ((channel & OpacityChannel) != 0)
histogram[ScaleQuantumToMap((p+x)->opacity)].opacity++;
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
histogram[ScaleQuantumToMap(indexes[x])].index++;
}
}
/*
Find the histogram boundaries by locating the black/white levels.
*/
black.red=0.0;
white.red=MaxRange(QuantumRange);
if ((channel & RedChannel) != 0)
{
/* Walk up from the dark end until black_point pixels are covered... */
intensity=0.0;
for (i=0; i <= (long) MaxMap; i++)
{
intensity+=histogram[i].red;
if (intensity > black_point)
break;
}
black.red=(MagickRealType) i;
/* ...and down from the light end for the white level. */
intensity=0.0;
for (i=(long) MaxMap; i != 0; i--)
{
intensity+=histogram[i].red;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.red=(MagickRealType) i;
}
black.green=0.0;
white.green=MaxRange(QuantumRange);
if ((channel & GreenChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (long) MaxMap; i++)
{
intensity+=histogram[i].green;
if (intensity > black_point)
break;
}
black.green=(MagickRealType) i;
intensity=0.0;
for (i=(long) MaxMap; i != 0; i--)
{
intensity+=histogram[i].green;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.green=(MagickRealType) i;
}
black.blue=0.0;
white.blue=MaxRange(QuantumRange);
if ((channel & BlueChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (long) MaxMap; i++)
{
intensity+=histogram[i].blue;
if (intensity > black_point)
break;
}
black.blue=(MagickRealType) i;
intensity=0.0;
for (i=(long) MaxMap; i != 0; i--)
{
intensity+=histogram[i].blue;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.blue=(MagickRealType) i;
}
black.opacity=0.0;
white.opacity=MaxRange(QuantumRange);
if ((channel & OpacityChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (long) MaxMap; i++)
{
intensity+=histogram[i].opacity;
if (intensity > black_point)
break;
}
black.opacity=(MagickRealType) i;
intensity=0.0;
for (i=(long) MaxMap; i != 0; i--)
{
intensity+=histogram[i].opacity;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.opacity=(MagickRealType) i;
}
black.index=0.0;
white.index=MaxRange(QuantumRange);
if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
{
intensity=0.0;
for (i=0; i <= (long) MaxMap; i++)
{
intensity+=histogram[i].index;
if (intensity > black_point)
break;
}
black.index=(MagickRealType) i;
intensity=0.0;
for (i=(long) MaxMap; i != 0; i--)
{
intensity+=histogram[i].index;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.index=(MagickRealType) i;
}
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
/*
Stretch the histogram to create the stretched image mapping.
*/
(void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (i=0; i <= (long) MaxMap; i++)
{
/* Below black -> 0, above white -> QuantumRange, linear ramp between. */
if ((channel & RedChannel) != 0)
{
if (i < (long) black.red)
stretch_map[i].red=0.0;
else
if (i > (long) white.red)
stretch_map[i].red=(MagickRealType) QuantumRange;
else
if (black.red != white.red)
stretch_map[i].red=(MagickRealType) ScaleMapToQuantum(
(MagickRealType) (MaxMap*(i-black.red)/(white.red-black.red)));
}
if ((channel & GreenChannel) != 0)
{
if (i < (long) black.green)
stretch_map[i].green=0.0;
else
if (i > (long) white.green)
stretch_map[i].green=(MagickRealType) QuantumRange;
else
if (black.green != white.green)
stretch_map[i].green=(MagickRealType) ScaleMapToQuantum(
(MagickRealType) (MaxMap*(i-black.green)/(white.green-
black.green)));
}
if ((channel & BlueChannel) != 0)
{
if (i < (long) black.blue)
stretch_map[i].blue=0.0;
else
if (i > (long) white.blue)
stretch_map[i].blue=(MagickRealType) QuantumRange;
else
if (black.blue != white.blue)
stretch_map[i].blue=(MagickRealType) ScaleMapToQuantum(
(MagickRealType) (MaxMap*(i-black.blue)/(white.blue-
black.blue)));
}
if ((channel & OpacityChannel) != 0)
{
if (i < (long) black.opacity)
stretch_map[i].opacity=0.0;
else
if (i > (long) white.opacity)
stretch_map[i].opacity=(MagickRealType) QuantumRange;
else
if (black.opacity != white.opacity)
stretch_map[i].opacity=(MagickRealType) ScaleMapToQuantum(
(MagickRealType) (MaxMap*(i-black.opacity)/(white.opacity-
black.opacity)));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
if (i < (long) black.index)
stretch_map[i].index=0.0;
else
if (i > (long) white.index)
stretch_map[i].index=(MagickRealType) QuantumRange;
else
if (black.index != white.index)
stretch_map[i].index=(MagickRealType) ScaleMapToQuantum(
(MagickRealType) (MaxMap*(i-black.index)/(white.index-
black.index)));
}
}
/*
Stretch the image.
*/
if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace)))
image->storage_class=DirectClass;
if (image->storage_class == PseudoClass)
{
/*
Stretch colormap.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (i=0; i < (long) image->colors; i++)
{
if ((channel & RedChannel) != 0)
{
if (black.red != white.red)
image->colormap[i].red=RoundToQuantum(stretch_map[
ScaleQuantumToMap(image->colormap[i].red)].red);
}
if ((channel & GreenChannel) != 0)
{
if (black.green != white.green)
image->colormap[i].green=RoundToQuantum(stretch_map[
ScaleQuantumToMap(image->colormap[i].green)].green);
}
if ((channel & BlueChannel) != 0)
{
if (black.blue != white.blue)
image->colormap[i].blue=RoundToQuantum(stretch_map[
ScaleQuantumToMap(image->colormap[i].blue)].blue);
}
if ((channel & OpacityChannel) != 0)
{
if (black.opacity != white.opacity)
image->colormap[i].opacity=RoundToQuantum(stretch_map[
ScaleQuantumToMap(image->colormap[i].opacity)].opacity);
}
}
}
/*
Stretch image.
*/
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (long) image->rows; y++)
{
IndexPacket
*indexes;
register long
x;
register PixelPacket
*q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (long) image->columns; x++)
{
if ((channel & RedChannel) != 0)
{
if (black.red != white.red)
q->red=RoundToQuantum(stretch_map[ScaleQuantumToMap(q->red)].red);
}
if ((channel & GreenChannel) != 0)
{
if (black.green != white.green)
q->green=RoundToQuantum(stretch_map[ScaleQuantumToMap(
q->green)].green);
}
if ((channel & BlueChannel) != 0)
{
if (black.blue != white.blue)
q->blue=RoundToQuantum(stretch_map[ScaleQuantumToMap(
q->blue)].blue);
}
if ((channel & OpacityChannel) != 0)
{
if (black.opacity != white.opacity)
q->opacity=RoundToQuantum(stretch_map[ScaleQuantumToMap(
q->opacity)].opacity);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
if (black.index != white.index)
indexes[x]=(IndexPacket) RoundToQuantum(stretch_map[
ScaleQuantumToMap(indexes[x])].index);
}
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
proceed=SetImageProgress(image,ContrastStretchImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
stretch_map=(MagickPixelPacket *) RelinquishMagickMemory(stretch_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* EnhanceImage(): denoise by replacing each pixel with a 5x5 weighted
   average of its neighborhood, where a neighbor only contributes if its
   color is close (in a luminance-weighted RGBA metric) to the center
   pixel.  Returns a new image, or NULL on failure (image smaller than
   5x5, clone failure, or storage-class failure). */
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
/* Accumulate neighbor *r into the weighted average unless its color
   distance from the center pixel exceeds QuantumRange/5; always advances r. */
#define Enhance(weight) \
  mean=((MagickRealType) r->red+pixel.red)/2; \
  distance=(MagickRealType) r->red-(MagickRealType) pixel.red; \
  distance_squared=QuantumScale*(2.0*((MagickRealType) QuantumRange+1.0)+ \
    mean)*distance*distance; \
  mean=((MagickRealType) r->green+pixel.green)/2; \
  distance=(MagickRealType) r->green-(MagickRealType) pixel.green; \
  distance_squared+=4.0*distance*distance; \
  mean=((MagickRealType) r->blue+pixel.blue)/2; \
  distance=(MagickRealType) r->blue-(MagickRealType) pixel.blue; \
  distance_squared+=QuantumScale*(3.0*((MagickRealType) \
    QuantumRange+1.0)-1.0-mean)*distance*distance; \
  mean=((MagickRealType) r->opacity+pixel.opacity)/2; \
  distance=(MagickRealType) r->opacity-(MagickRealType) pixel.opacity; \
  distance_squared+=QuantumScale*(3.0*((MagickRealType) \
    QuantumRange+1.0)-1.0-mean)*distance*distance; \
  if (distance_squared < ((MagickRealType) QuantumRange*(MagickRealType) \
      QuantumRange/25.0f)) \
    { \
      aggregate.red+=(weight)*r->red; \
      aggregate.green+=(weight)*r->green; \
      aggregate.blue+=(weight)*r->blue; \
      aggregate.opacity+=(weight)*r->opacity; \
      total_weight+=(weight); \
    } \
  r++;
#define EnhanceImageTag "Enhance/Image"

  Image
    *enhance_image;

  long
    progress,
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  ViewInfo
    *enhance_view,
    *image_view;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* The 5x5 kernel needs at least a 5x5 image. */
  if ((image->columns < 5) || (image->rows < 5))
    return((Image *) NULL);
  enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&enhance_image->exception);
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireCacheView(image);
  enhance_view=AcquireCacheView(enhance_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register const PixelPacket
      *p;

    register long
      x;

    register PixelPacket
      *q;

    /*
      Read another scan line (5 rows, 2 pixels of edge padding each side).
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (long) image->columns; x++)
    {
      MagickPixelPacket
        aggregate;

      MagickRealType
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelPacket
        pixel;

      register const PixelPacket
        *r;

      /*
        Compute weighted average of target pixel color components.
      */
      aggregate=zero;
      total_weight=0.0;
      /* Center pixel of the 5x5 window. */
      r=p+2*(image->columns+4)+2;
      pixel=(*r);
      r=p;
      Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
      r=p+(image->columns+4);
      Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
      r=p+2*(image->columns+4);
      Enhance(10.0); Enhance(40.0); Enhance(80.0); Enhance(40.0); Enhance(10.0);
      r=p+3*(image->columns+4);
      Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
      r=p+4*(image->columns+4);
      Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
      /* total_weight >= 80.0 always: the center pixel matches itself. */
      q->red=(Quantum) ((aggregate.red+(total_weight/2)-1)/total_weight);
      q->green=(Quantum) ((aggregate.green+(total_weight/2)-1)/total_weight);
      q->blue=(Quantum) ((aggregate.blue+(total_weight/2)-1)/total_weight);
      q->opacity=(Quantum) ((aggregate.opacity+(total_weight/2)-1)/
        total_weight);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  /* Bug fix: the original assigned DestroyCacheView(image_view) to
     enhance_view, leaving image_view dangling/leaked. */
  image_view=DestroyCacheView(image_view);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image)
% MagickBooleanType EqualizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: histogram-equalize all default channels.
  */
  status=EqualizeImageChannel(image,DefaultChannels);
  return(status);
}
MagickExport MagickBooleanType EqualizeImageChannel(Image *image,
  const ChannelType channel)
{
#define EqualizeImageTag "Equalize/Image"

  ExceptionInfo
    *exception;

  long
    progress,
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    black,
    *equalize_map,
    *histogram,
    intensity,
    *map,
    white;

  register long
    i;

  ViewInfo
    *image_view;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*equalize_map));
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map));
  if ((equalize_map == (MagickPixelPacket *) NULL) ||
      (histogram == (MagickPixelPacket *) NULL) ||
      (map == (MagickPixelPacket *) NULL))
    {
      /*
        Partial allocation failure: release whatever succeeded, then throw.
      */
      if (map != (MagickPixelPacket *) NULL)
        map=(MagickPixelPacket *) RelinquishMagickMemory(map);
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      if (equalize_map != (MagickPixelPacket *) NULL)
        equalize_map=(MagickPixelPacket *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram: count, per selected channel, how many pixels land in
    each of the MaxMap+1 intensity bins.
  */
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  exception=(&image->exception);
  for (y=0; y < (long) image->rows; y++)
  {
    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register long
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (long) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        histogram[ScaleQuantumToMap(p->red)].red++;
      if ((channel & GreenChannel) != 0)
        histogram[ScaleQuantumToMap(p->green)].green++;
      if ((channel & BlueChannel) != 0)
        histogram[ScaleQuantumToMap(p->blue)].blue++;
      if ((channel & OpacityChannel) != 0)
        histogram[ScaleQuantumToMap(p->opacity)].opacity++;
      /* The index (black) channel only exists for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        histogram[ScaleQuantumToMap(indexes[x])].index++;
      p++;
    }
  }
  /*
    Integrate the histogram to get the equalization map: map[i] holds the
    cumulative pixel count (per channel) up to intensity bin i.
  */
  (void) ResetMagickMemory(&intensity,0,sizeof(intensity));
  for (i=0; i <= (long) MaxMap; i++)
  {
    if ((channel & RedChannel) != 0)
      intensity.red+=histogram[i].red;
    if ((channel & GreenChannel) != 0)
      intensity.green+=histogram[i].green;
    if ((channel & BlueChannel) != 0)
      intensity.blue+=histogram[i].blue;
    if ((channel & OpacityChannel) != 0)
      intensity.opacity+=histogram[i].opacity;
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      intensity.index+=histogram[i].index;
    map[i]=intensity;
  }
  /*
    black/white are the extremes of the cumulative distribution, used to
    normalize the map into the quantum range below.
  */
  black=map[0];
  white=map[(int) MaxMap];
  (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (i=0; i <= (long) MaxMap; i++)
  {
    /*
      Channels whose histogram is flat (white == black) are left as zero
      in equalize_map and are skipped when applying the map below.
    */
    if (((channel & RedChannel) != 0) && (white.red != black.red))
      equalize_map[i].red=(MagickRealType) ScaleMapToQuantum((MagickRealType)
        ((MaxMap*(map[i].red-black.red))/(white.red-black.red)));
    if (((channel & GreenChannel) != 0) && (white.green != black.green))
      equalize_map[i].green=(MagickRealType) ScaleMapToQuantum((MagickRealType)
        ((MaxMap*(map[i].green-black.green))/(white.green-black.green)));
    if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
      equalize_map[i].blue=(MagickRealType) ScaleMapToQuantum((MagickRealType)
        ((MaxMap*(map[i].blue-black.blue))/(white.blue-black.blue)));
    if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
      equalize_map[i].opacity=(MagickRealType) ScaleMapToQuantum(
        (MagickRealType) ((MaxMap*(map[i].opacity-black.opacity))/
        (white.opacity-black.opacity)));
    if ((((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace)) &&
        (white.index != black.index))
      equalize_map[i].index=(MagickRealType) ScaleMapToQuantum((MagickRealType)
        ((MaxMap*(map[i].index-black.index))/(white.index-black.index)));
  }
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  map=(MagickPixelPacket *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      /*
        Equalize colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (long) image->colors; i++)
      {
        if (((channel & RedChannel) != 0) && (white.red != black.red))
          image->colormap[i].red=RoundToQuantum(equalize_map[
            ScaleQuantumToMap(image->colormap[i].red)].red);
        if (((channel & GreenChannel) != 0) && (white.green != black.green))
          image->colormap[i].green=RoundToQuantum(equalize_map[
            ScaleQuantumToMap(image->colormap[i].green)].green);
        if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
          image->colormap[i].blue=RoundToQuantum(equalize_map[
            ScaleQuantumToMap(image->colormap[i].blue)].blue);
        if (((channel & OpacityChannel) != 0) &&
            (white.opacity != black.opacity))
          image->colormap[i].opacity=RoundToQuantum(equalize_map[
            ScaleQuantumToMap(image->colormap[i].opacity)].opacity);
      }
    }
  /*
    Equalize image: remap every pixel through equalize_map, row by row.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register IndexPacket
      *indexes;

    register long
      x;

    register PixelPacket
      *q;

    /* Another iteration may have failed already; skip remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (long) image->columns; x++)
    {
      if (((channel & RedChannel) != 0) && (white.red != black.red))
        q->red=RoundToQuantum(equalize_map[ScaleQuantumToMap(q->red)].red);
      if (((channel & GreenChannel) != 0) && (white.green != black.green))
        q->green=RoundToQuantum(equalize_map[ScaleQuantumToMap(
          q->green)].green);
      if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
        q->blue=RoundToQuantum(equalize_map[ScaleQuantumToMap(q->blue)].blue);
      if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
        q->opacity=RoundToQuantum(equalize_map[ScaleQuantumToMap(
          q->opacity)].opacity);
      if ((((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace)) &&
          (white.index != black.index))
        indexes[x]=RoundToQuantum(equalize_map[ScaleQuantumToMap(
          indexes[x])].index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(MagickPixelPacket *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const char *level)
% MagickBooleanType GammaImageChannel(Image *image,
% const ChannelType channel,const double gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o gamma: the image gamma.
%
*/
MagickExport MagickBooleanType GammaImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    gamma;

  MagickStatusType
    flags,
    status;

  /*
    Parse the level string into per-channel gamma values; missing sigma/xi
    components default to the red (rho) value.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (level == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(level,&geometry_info);
  gamma.red=geometry_info.rho;
  gamma.green=((flags & SigmaValue) != 0) ? geometry_info.sigma : gamma.red;
  gamma.blue=((flags & XiValue) != 0) ? geometry_info.xi : gamma.red;
  /*
    Gamma of 1.0 on every channel is the identity transform.
  */
  if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0))
    return(MagickTrue);
  status=GammaImageChannel(image,RedChannel,(double) gamma.red);
  status|=GammaImageChannel(image,GreenChannel,(double) gamma.green);
  status|=GammaImageChannel(image,BlueChannel,(double) gamma.blue);
  return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType GammaImageChannel(Image *image,
  const ChannelType channel,const double gamma)
{
#define GammaCorrectImageTag "GammaCorrect/Image"

  ExceptionInfo
    *exception;

  long
    progress,
    y;

  MagickBooleanType
    status;

  Quantum
    *gamma_map;

  register long
    i;

  ViewInfo
    *image_view;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A gamma of 1.0 is the identity transform: nothing to do.
  */
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    When gamma is 0.0, the map stays all zeros from this reset, forcing the
    selected channels to 0 (matching the documented behavior: gamma 0
    removes a channel's influence).
  */
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  if (gamma != 0.0)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (i=0; i <= (long) MaxMap; i++)
    gamma_map[i]=RoundToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*pow((double) i/MaxMap,1.0/gamma))));
  if (image->storage_class == PseudoClass)
    {
      /*
        Gamma-correct colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (long) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=gamma_map[
            ScaleQuantumToMap(image->colormap[i].red)];
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=gamma_map[
            ScaleQuantumToMap(image->colormap[i].green)];
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=gamma_map[
            ScaleQuantumToMap(image->colormap[i].blue)];
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=gamma_map[
                ScaleQuantumToMap(image->colormap[i].opacity)];
            else
              /*
                With a matte channel, the map is applied to the complement
                (QuantumRange-opacity) and then inverted back.
              */
              image->colormap[i].opacity=(Quantum) QuantumRange-
                gamma_map[ScaleQuantumToMap((Quantum) (QuantumRange-
                image->colormap[i].opacity))];
          }
      }
    }
  /*
    Gamma-correct image: remap every pixel through gamma_map, row by row.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register IndexPacket
      *indexes;

    register long
      x;

    register PixelPacket
      *q;

    /* Another iteration may have failed already; skip remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (long) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=gamma_map[ScaleQuantumToMap(q->red)];
      if ((channel & GreenChannel) != 0)
        q->green=gamma_map[ScaleQuantumToMap(q->green)];
      if ((channel & BlueChannel) != 0)
        q->blue=gamma_map[ScaleQuantumToMap(q->blue)];
      if ((channel & OpacityChannel) != 0)
        {
          if (image->matte == MagickFalse)
            q->opacity=gamma_map[ScaleQuantumToMap(q->opacity)];
          else
            /* Matte images: correct the complement, as for the colormap. */
            q->opacity=(Quantum) QuantumRange-gamma_map[
              ScaleQuantumToMap((Quantum) (QuantumRange-q->opacity))];
        }
      q++;
    }
    /* CMYK index channel is handled in a separate pass over the row. */
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (long) image->columns; x++)
        indexes[x]=gamma_map[ScaleQuantumToMap(indexes[x])];
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /*
    Track the cumulative gamma applied in the image metadata.
  */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelImageChannel() and LevelizeImageChannel(), below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const char *levels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o levels: Specify the levels where the black and white points have the
% range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
% A '!' flag inverts the re-mapping.
%
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse the levels geometry string: black[,white][,gamma], with optional
    '%' (values given as percentages) and '!' (inverse mapping) flags.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=((flags & SigmaValue) != 0) ? geometry_info.sigma :
    (double) QuantumRange;
  gamma=((flags & XiValue) != 0) ? geometry_info.xi : 1.0;
  if ((flags & PercentValue) != 0)
    {
      double
        scale;

      scale=(double) image->columns*image->rows/100.0;
      black_point*=scale;
      white_point*=scale;
    }
  /*
    With no explicit white point, mirror the black point about the range.
  */
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;
  /*
    A '!' (aspect) flag requests the reverse mapping (levelize).
  */
  if ((flags & AspectValue) != 0)
    status=LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  else
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageChannel() applies the normal LevelImage() operation to just the
% specific channels specified, spreading out the values between the black and
% white points over the entire range of values. Gamma correction is also
% applied after the values have been mapped.
%
% It is typically used to improve image contrast, or to provide a controlled
% linear threshold for the image. If the black and white points are set to
% the minimum and maximum values found in the image, the image can be
% normalized. or by swapping black and white values, negate the image.
%
% The format of the LevelImageChannel method is:
%
% MagickBooleanType LevelImageChannel(Image *image,
% const ChannelType channel,black_point,white_point,gamma)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: The level which is to be mapped to zero (black)
%
%    o white_point: The level which is to be mapped to QuantumRange (white)
%
% o gamma: adjust gamma by this factor before mapping values.
%
*/
MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag "Level/Image"
/*
  LevelValue(x): linearly map x so that black_point -> 0 and
  white_point -> QuantumRange, apply gamma correction (1.0/gamma exponent),
  and clamp with RoundToQuantum.
  NOTE(review): divides by (white_point-black_point) and by gamma; this
  assumes callers never pass white_point == black_point or gamma == 0 --
  confirm against call sites.
*/
#define LevelValue(x) (RoundToQuantum((MagickRealType) QuantumRange* \
  pow(((double) (x)-black_point)/(white_point-black_point),1.0/gamma)))

  ExceptionInfo
    *exception;

  long
    progress,
    y;

  MagickBooleanType
    status;

  register long
    i;

  ViewInfo
    *image_view;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (i=0; i < (long) image->colors; i++)
  {
    /*
      Level colormap.
    */
    if ((channel & RedChannel) != 0)
      image->colormap[i].red=LevelValue(image->colormap[i].red);
    if ((channel & GreenChannel) != 0)
      image->colormap[i].green=LevelValue(image->colormap[i].green);
    if ((channel & BlueChannel) != 0)
      image->colormap[i].blue=LevelValue(image->colormap[i].blue);
    if ((channel & OpacityChannel) != 0)
      image->colormap[i].opacity=LevelValue(image->colormap[i].opacity);
  }
  /*
    Level image: remap every pixel through LevelValue(), row by row.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register IndexPacket
      *indexes;

    register long
      x;

    register PixelPacket
      *q;

    /* Another iteration may have failed already; skip remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (long) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=LevelValue(q->red);
      if ((channel & GreenChannel) != 0)
        q->green=LevelValue(q->green);
      if ((channel & BlueChannel) != 0)
        q->blue=LevelValue(q->blue);
      if ((channel & OpacityChannel) != 0)
        q->opacity=LevelValue(q->opacity);
      /* The index (black) channel only exists for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        indexes[x]=LevelValue(indexes[x]);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImageChannel() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImageChannel() can be called by using a +level command line
% API option, or by using a '!' on a -level or LevelImage() geometry string.
%
% It can be used, for example, to de-contrast a greyscale image to the exact
% levels specified. Or, by using specific levels for each channel of an
% image, you can convert a gray-scale image to any linear color gradient,
% according to those levels.
%
% The format of the LevelizeImageChannel method is:
%
% MagickBooleanType LevelizeImageChannel(Image *image,
% const ChannelType channel,const char *levels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
*/
MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag "Levelize/Image"
/*
  LevelizeValue(x): the inverse of LevelValue() -- gamma-correct the
  normalized value first, then compress it into [black_point,white_point].
  NOTE(review): divides by gamma; assumes callers never pass gamma == 0 --
  confirm against call sites.
*/
#define LevelizeValue(x) (RoundToQuantum(((MagickRealType) \
  pow((double)(QuantumScale*(x)),1.0/gamma))*(white_point-black_point)+ \
  black_point))

  ExceptionInfo
    *exception;

  long
    progress,
    y;

  MagickBooleanType
    status;

  register long
    i;

  ViewInfo
    *image_view;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (i=0; i < (long) image->colors; i++)
  {
    /*
      Level colormap.
    */
    if ((channel & RedChannel) != 0)
      image->colormap[i].red=LevelizeValue(image->colormap[i].red);
    if ((channel & GreenChannel) != 0)
      image->colormap[i].green=LevelizeValue(image->colormap[i].green);
    if ((channel & BlueChannel) != 0)
      image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
    if ((channel & OpacityChannel) != 0)
      image->colormap[i].opacity=LevelizeValue(image->colormap[i].opacity);
  }
  /*
    Level image: remap every pixel through LevelizeValue(), row by row.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register IndexPacket
      *indexes;

    register long
      x;

    register PixelPacket
      *q;

    /* Another iteration may have failed already; skip remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (long) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=LevelizeValue(q->red);
      if ((channel & GreenChannel) != 0)
        q->green=LevelizeValue(q->green);
      if ((channel & BlueChannel) != 0)
        q->blue=LevelizeValue(q->blue);
      if ((channel & OpacityChannel) != 0)
        q->opacity=LevelizeValue(q->opacity);
      /* The index (black) channel only exists for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        indexes[x]=LevelizeValue(indexes[x]);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    FIX: release the cache view before returning; the original leaked it
    (compare LevelImageChannel(), which destroys its view).
  */
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() maps the given colors to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by
% channel basis, as per LevelImage(). The given colors allow you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true, the image values will be modified in
% the reverse direction. That is, any existing "black" and "white" colors in
% the image will become the color values given, with all other values
% compressed appropriately. This effectively maps a greyscale gradient into
% the given color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,const ChannelType channel,
% const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
% const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_color: The color to map black to/from
%
%    o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
*/
MagickBooleanType LevelImageColors(Image *image,const ChannelType channel,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
#define LevelColorImageTag "LevelColor/Image"

  MagickBooleanType
    (*level_method)(Image *,const ChannelType,const double,const double,
      const double);

  MagickStatusType
    status;

  /*
    Level each requested channel between the given black and white colors;
    'invert' selects the reverse (levelize) mapping instead of the forward
    (level) mapping. Gamma is fixed at 1.0 for every channel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  level_method=(invert == MagickFalse) ? LevelImageChannel :
    LevelizeImageChannel;
  status=MagickFalse;
  if ((channel & RedChannel) != 0)
    status|=level_method(image,RedChannel,black_color->red,
      white_color->red,1.0);
  if ((channel & GreenChannel) != 0)
    status|=level_method(image,GreenChannel,black_color->green,
      white_color->green,1.0);
  if ((channel & BlueChannel) != 0)
    status|=level_method(image,BlueChannel,black_color->blue,
      white_color->blue,1.0);
  if ((channel & OpacityChannel) != 0)
    status|=level_method(image,OpacityChannel,black_color->opacity,
      white_color->opacity,1.0);
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    status|=level_method(image,IndexChannel,black_color->index,
      white_color->index,1.0);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The LinearStretchImage() discards any pixels below the black point and
% above the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point)
{
#define LinearStretchImageTag "LinearStretch/Image"

  ExceptionInfo
    *exception;

  long
    black,
    white,
    y;

  MagickBooleanType
    status;

  MagickRealType
    *histogram,
    intensity;

  register const PixelPacket
    *p;

  register long
    x;

  /*
    Allocate histogram and linear map.  (The unused number_pixels local of
    the previous revision has been removed.)
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  if (histogram == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram of pixel intensities.
  */
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  exception=(&image->exception);
  for (y=0; y < (long) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=(long) image->columns-1; x >= 0; x--)
    {
      histogram[ScaleQuantumToMap(PixelIntensityToQuantum(p))]++;
      p++;
    }
  }
  /*
    Find the histogram boundaries by locating the black and white point
    levels: accumulate counts from each end of the histogram until at least
    black_point (resp. white_point) pixels have been covered.
  */
  intensity=0.0;
  for (black=0; black < (long) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(long) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(MagickRealType *) RelinquishMagickMemory(histogram);
  /*
    Stretch the surviving levels over the full quantum range (gamma 1.0).
  */
  status=LevelImageChannel(image,DefaultChannels,(double) black,(double) white,
    1.0);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. And if the colorspace is
% HWB, use blackness, whiteness, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and
% hue.
%
*/
static void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Modulate the color in HSB space: saturation and brightness are scaled
    and hue is rotated, each by the given percentage (100 == unchanged).
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  saturation*=0.01*percent_saturation;
  brightness*=0.01*percent_brightness;
  /*
    Offset hue by half a rotation per 100% requested, then wrap the result
    back into the [0.0,1.0] interval.
  */
  hue+=0.5*(0.01*percent_hue-1.0);
  for ( ; hue < 0.0; )
    hue+=1.0;
  for ( ; hue > 1.0; )
    hue-=1.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
static void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    hue,
    lightness,
    saturation;

  /*
    Modulate the color in HSL space: saturation and lightness are scaled
    and hue is rotated, each by the given percentage (100 == unchanged).
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  saturation*=0.01*percent_saturation;
  lightness*=0.01*percent_lightness;
  /*
    Offset hue by half a rotation per 100% requested, then wrap the result
    back into the [0.0,1.0] interval.
  */
  hue+=0.5*(0.01*percent_hue-1.0);
  for ( ; hue < 0.0; )
    hue+=1.0;
  for ( ; hue > 1.0; )
    hue-=1.0;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}
static void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    modulated_blackness,
    modulated_hue,
    modulated_whiteness;

  /*
    Scale the HWB components of a single pixel by the given percentages
    (100 leaves a component unchanged).
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  ConvertRGBToHWB(*red,*green,*blue,&modulated_hue,&modulated_whiteness,
    &modulated_blackness);
  /*
    A hue of 100% adds 0; 0% and 200% rotate the hue by -/+ 180 degrees.
  */
  modulated_hue+=0.5*(0.01*percent_hue-1.0);
  for ( ; modulated_hue < 0.0; modulated_hue+=1.0);
  for ( ; modulated_hue > 1.0; modulated_hue-=1.0);
  modulated_blackness*=0.01*percent_blackness;
  modulated_whiteness*=0.01*percent_whiteness;
  ConvertHWBToRGB(modulated_hue,modulated_whiteness,modulated_blackness,
    red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate)
{
#define ModulateImageTag  "Modulate/Image"

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  long
    progress,
    y;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  register long
    i;

  ViewInfo
    *image_view;

  /*
    Parse the modulation factors from the geometry string.  The string has
    the form "brightness[,saturation[,hue]]"; omitted values default to
    100 (i.e. no change).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(modulate,&geometry_info);
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  /*
    Force an RGB pixel representation; the per-pixel helpers below convert
    RGB to the requested working space and back.
  */
  (void) SetImageColorspace(image,RGBColorspace);
  if (image->storage_class == PseudoClass)
    {
      /*
        Modulate colormap.  The working space (HSB/HSL/HWB) is chosen from
        image->colorspace; anything else defaults to HSL.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (long) image->colors; i++)
        switch (image->colorspace)
        {
          case HSBColorspace:
          {
            ModulateHSB(percent_hue,percent_saturation,percent_brightness,
              &image->colormap[i].red,&image->colormap[i].green,
              &image->colormap[i].blue);
            break;
          }
          case HSLColorspace:
          default:
          {
            ModulateHSL(percent_hue,percent_saturation,percent_brightness,
              &image->colormap[i].red,&image->colormap[i].green,
              &image->colormap[i].blue);
            break;
          }
          case HWBColorspace:
          {
            ModulateHWB(percent_hue,percent_saturation,percent_brightness,
              &image->colormap[i].red,&image->colormap[i].green,
              &image->colormap[i].blue);
            break;
          }
        }
    }
  /*
    Modulate image, one row per loop iteration.  Each OpenMP worker acquires
    and syncs its own rows through the shared cache view.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register long
      x;

    register PixelPacket
      *q;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (long) image->columns; x++)
    {
      switch (image->colorspace)
      {
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &q->red,&q->green,&q->blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &q->red,&q->green,&q->blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &q->red,&q->green,&q->blue);
          break;
        }
      }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is only safe because the update is inside the critical
           section below. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImageChannel method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale)
% MagickBooleanType NegateImageChannel(Image *image,
% const ChannelType channel,const MagickBooleanType grayscale)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale)
{
  /*
    Convenience wrapper: negate the default channel set of the image.
  */
  return(NegateImageChannel(image,DefaultChannels,grayscale));
}
MagickExport MagickBooleanType NegateImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType grayscale)
{
#define NegateImageTag  "Negate/Image"

  ExceptionInfo
    *exception;

  long
    progress,
    y;

  MagickBooleanType
    status;

  register long
    i;

  ViewInfo
    *image_view;

  /*
    Negate the selected channels of the image.  When grayscale is MagickTrue
    only pixels whose red, green, and blue components are equal are negated.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Negate colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (long) image->colors; i++)
      {
        if (grayscale != MagickFalse)
          if ((image->colormap[i].red != image->colormap[i].green) ||
              (image->colormap[i].green != image->colormap[i].blue))
            continue;
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=(Quantum) QuantumRange-
            image->colormap[i].red;
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=(Quantum) QuantumRange-
            image->colormap[i].green;
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=(Quantum) QuantumRange-
            image->colormap[i].blue;
      }
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  if (grayscale != MagickFalse)
    {
      /*
        Grayscale-only path: skip any pixel that is not gray (R==G==B).
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (y=0; y < (long) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register IndexPacket
          *indexes;

        register long
          x;

        register PixelPacket
          *q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (long) image->columns; x++)
        {
          if ((q->red != q->green) || (q->green != q->blue))
            {
              q++;
              continue;
            }
          if ((channel & RedChannel) != 0)
            q->red=(Quantum) QuantumRange-q->red;
          if ((channel & GreenChannel) != 0)
            q->green=(Quantum) QuantumRange-q->green;
          if ((channel & BlueChannel) != 0)
            q->blue=(Quantum) QuantumRange-q->blue;
          if ((channel & OpacityChannel) != 0)
            q->opacity=(Quantum) QuantumRange-q->opacity;
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            indexes[x]=(IndexPacket) QuantumRange-indexes[x];
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
            proceed=SetImageProgress(image,NegateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /*
        Bug fix: previously this branch returned MagickTrue unconditionally,
        masking pixel-cache failures recorded in 'status'.
      */
      return(status);
    }
  /*
    Negate image.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register IndexPacket
      *indexes;

    register long
      x;

    register PixelPacket
      *q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (long) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=(Quantum) QuantumRange-q->red;
      if ((channel & GreenChannel) != 0)
        q->green=(Quantum) QuantumRange-q->green;
      if ((channel & BlueChannel) != 0)
        q->blue=(Quantum) QuantumRange-q->blue;
      if ((channel & OpacityChannel) != 0)
        q->opacity=(Quantum) QuantumRange-q->opacity;
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        indexes[x]=(IndexPacket) QuantumRange-indexes[x];
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 2 percent of all pixels to black and the brightest
% 1 percent to white.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image)
% MagickBooleanType NormalizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image)
{
  /*
    Convenience wrapper: normalize the default channel set of the image.
  */
  return(NormalizeImageChannel(image,DefaultChannels));
}
MagickExport MagickBooleanType NormalizeImageChannel(Image *image,
  const ChannelType channel)
{
  double
    number_pixels;

  /*
    Stretch contrast so the darkest 2% of pixels map toward black and the
    brightest 1% toward white; the points are expressed as pixel counts.
  */
  number_pixels=(double) image->columns*image->rows;
  return(ContrastStretchImageChannel(image,channel,number_pixels*0.02,
    number_pixels*0.99));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels)
% MagickBooleanType SigmoidalContrastImageChannel(Image *image,
% const ChannelType channel,const MagickBooleanType sharpen,
% const double contrast,const double midpoint)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: control the "shoulder" of the contrast curve.
%
% o midpoint: control the "toe" of the contrast curve.
%
*/
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const char *levels)
{
  double
    contrast,
    midpoint;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  /*
    Parse "contrast[,midpoint[%]]" from the levels string and delegate to
    the channel method over the default channels.
  */
  flags=ParseGeometry(levels,&geometry_info);
  contrast=geometry_info.rho;
  midpoint=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    midpoint=1.0*QuantumRange/2.0;  /* default midpoint: middle gray */
  if ((flags & PercentValue) != 0)
    midpoint=1.0*QuantumRange*midpoint/100.0;  /* percent of quantum range */
  return(SigmoidalContrastImageChannel(image,DefaultChannels,sharpen,
    contrast,midpoint));
}
MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType sharpen,
  const double contrast,const double midpoint)
{
#define SigmoidalContrastImageTag  "SigmoidalContrast/Image"

  ExceptionInfo
    *exception;

  long
    progress,
    y;

  MagickBooleanType
    status;

  MagickRealType
    *sigmoidal_map;

  register long
    i;

  ViewInfo
    *image_view;

  /*
    Allocate and initialize sigmoidal maps.  sigmoidal_map[] is a lookup
    table indexed by ScaleQuantumToMap(quantum); each entry holds the
    transformed quantum so the per-pixel loop below is a table lookup.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*sigmoidal_map));
  if (sigmoidal_map == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (i=0; i <= (long) MaxMap; i++)
  {
    if (sharpen != MagickFalse)
      {
        /*
          Increase contrast: scaled logistic (sigmoid) curve centered on
          midpoint, normalized so the endpoints map to 0 and MaxMap.
        */
        sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
          (MaxMap*((1.0/(1.0+exp(contrast*(midpoint/(double) QuantumRange-
          (double) i/MaxMap))))-(1.0/(1.0+exp(contrast*(midpoint/
          (double) QuantumRange)))))/((1.0/(1.0+exp(contrast*(midpoint/
          (double) QuantumRange-1.0))))-(1.0/(1.0+exp(contrast*(midpoint/
          (double) QuantumRange)))))+0.5));
        continue;
      }
    /*
      Decrease contrast: inverse of the sigmoid above (a scaled logit),
      normalized the same way.
    */
    sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
      (MaxMap*(QuantumScale*midpoint-log((1.0-(1.0/(1.0+exp(midpoint/
      (double) QuantumRange*contrast))+((double) i/MaxMap)*((1.0/
      (1.0+exp(contrast*(midpoint/(double) QuantumRange-1.0))))-(1.0/
      (1.0+exp(midpoint/(double) QuantumRange*contrast))))))/
      (1.0/(1.0+exp(midpoint/(double) QuantumRange*contrast))+
      ((double) i/MaxMap)*((1.0/(1.0+exp(contrast*(midpoint/
      (double) QuantumRange-1.0))))-(1.0/(1.0+exp(midpoint/
      (double) QuantumRange*contrast))))))/contrast)));
  }
  if (image->storage_class == PseudoClass)
    {
      /*
        Sigmoidal-contrast enhance colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (i=0; i < (long) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=RoundToQuantum(sigmoidal_map[
            ScaleQuantumToMap(image->colormap[i].red)]);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=RoundToQuantum(sigmoidal_map[
            ScaleQuantumToMap(image->colormap[i].green)]);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=RoundToQuantum(sigmoidal_map[
            ScaleQuantumToMap(image->colormap[i].blue)]);
        if ((channel & OpacityChannel) != 0)
          image->colormap[i].opacity=RoundToQuantum(sigmoidal_map[
            ScaleQuantumToMap(image->colormap[i].opacity)]);
      }
    }
  /*
    Sigmoidal-contrast enhance image, one row per loop iteration.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register IndexPacket
      *indexes;

    register long
      x;

    register PixelPacket
      *q;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (long) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        q->red=RoundToQuantum(sigmoidal_map[ScaleQuantumToMap(q->red)]);
      if ((channel & GreenChannel) != 0)
        q->green=RoundToQuantum(sigmoidal_map[ScaleQuantumToMap(q->green)]);
      if ((channel & BlueChannel) != 0)
        q->blue=RoundToQuantum(sigmoidal_map[ScaleQuantumToMap(q->blue)]);
      if ((channel & OpacityChannel) != 0)
        q->opacity=RoundToQuantum(sigmoidal_map[ScaleQuantumToMap(q->opacity)]);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        indexes[x]=(IndexPacket) RoundToQuantum(sigmoidal_map[
          ScaleQuantumToMap(indexes[x])]);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map);
  return(status);
}
|
elemwise_binary_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse, assume missing rvalue is 0 */
  template<typename OP, int Req>
  struct MissingRValueOp {
    /*! \brief Store OP(lhs[i], 0) into out[i], honoring write request Req.
     *         Used when the rhs sparse operand has no entry at this position. */
    template<typename DType>
    MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
      KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
    }
  };
/*! \brief For sparse, assume missing lvalue is 0 */
  template<typename OP, int Req>
  struct MissingLValueOp {
    /*! \brief Store OP(0, rhs[i]) into out[i], honoring write request Req.
     *         Used when the lhs sparse operand has no entry at this position. */
    template<typename DType>
    MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
      KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
    }
  };
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType {
kTempSpace
};
/*!
* \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
* CPU-Only version
*/
  template<typename DType, typename OP, typename xpu>
  static inline size_t FillDense(mshadow::Stream<xpu> *s,
                                 const size_t idx_l,
                                 const size_t idx_r,
                                 const OpReqType req,
                                 mshadow::Tensor<xpu, 2, DType> *out,
                                 const size_t iter_out) {
    // Rows in [iter_out, min(idx_l, idx_r)) have no stored entry on either
    // operand, so each of them receives the constant value OP(0, 0).
    const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
    if (static_cast<size_t>(index_out_min) > iter_out) {
      const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
        Fill<false>(s, (*out)[i], req, zero_input_val);
      }
    }
    // Returns the next row index to be produced by the caller's merge loop.
    return static_cast<size_t>(index_out_min);  // MSVC wants OMP loops to always use 'int'
  }
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
return a1.var() == a2.var();
}
/*! \brief Minimum of three */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
return a < b ? (a < c ? a : c) : (b < c ? b : c);
}
  /*! \brief Backward pass that needs no forward inputs: lgrad = LOP(ograd),
   *         rgrad = ROP(ograd).  When an op is the identity and the request
   *         is kWriteInplace, the buffers must already alias, so no kernel
   *         is launched for that output. */
  template<typename xpu, typename LOP, typename ROP, typename DType>
  static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    // Round the element count up to a whole number of vector lanes.
    const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
                                      / DataType<DType>::kLanes);
    const DType *ograd_dptr = inputs[0].dptr<DType>();
    if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
      // Identity in-place: output must already share the ograd buffer.
      CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
    } else if (req[0] != kNullOp) {
      DType *lgrad_dptr = outputs[0].dptr<DType>();
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
      });
    }
    if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
      CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
    } else if (req[1] != kNullOp) {
      DType *rgrad_dptr = outputs[1].dptr<DType>();
      MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
        Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
      });
    }
  }
  /*! \brief Backward pass that uses the forward inputs:
   *         lgrad = ograd * LOP(lhs, rhs), rgrad = ograd * ROP(lhs, rhs)
   *         (via the backward_grad wrapper).  inputs = {ograd, lhs, rhs};
   *         outputs = {lgrad, rgrad}. */
  template<typename xpu, typename LOP, typename ROP, typename DType>
  static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
    DCHECK_EQ(outputs.size(), 2U);
    DCHECK_EQ(inputs.size(), 3U);
    mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
    const DType *ograd_dptr = inputs[0].dptr<DType>();
    const DType *lhs_dptr = inputs[1].dptr<DType>();
    const DType *rhs_dptr = inputs[2].dptr<DType>();
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      // Element count rounded up to whole vector lanes.
      const int size = static_cast<int>(
        (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
        / mxnet_op::DataType<DType>::kLanes);
      DType * lgrad_dptr = outputs[0].dptr<DType>();
      mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<LOP>, Req>, xpu>::Launch(
        s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
    MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
      const int size = static_cast<int>(
        (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
        / mxnet_op::DataType<DType>::kLanes);
      DType * rgrad_dptr = outputs[1].dptr<DType>();
      mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad<ROP>, Req>, xpu>::Launch(
        s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
  }
  /*! \brief Sparse (row-sparse) backward pass that uses the forward inputs.
   *         inputs = {ograd, lhs, rhs}; outputs = {lgrad, rgrad}.  For each
   *         requested gradient, first compute OP(lhs, rhs) into the output,
   *         then multiply by ograd in place.
   *  NOTE(review): backup_compute is accepted but never invoked in this
   *  body — presumably reserved for a dense fallback; confirm with callers. */
  template<
    typename xpu,
    typename LOP,
    typename ROP,
    typename DType,
    bool in0_ok_dense = false,
    bool in1_ok_dense = false,
    bool in2_ok_dense = false,
    typename BackupCompute>
  static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const std::vector<NDArray> &inputs,
                                      const std::vector<OpReqType> &req,
                                      const std::vector<NDArray> &outputs,
                                      BackupCompute backup_compute) {
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    // lhs grad
    if (req[0] != kNullOp) {
      // RspRspOp can handle dense outputs so long as OP(0, 0) == 0
      MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, {
        RspRspOp<DType, IType, LOP>(
          s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
          false, false, false, false);
      });
      // lhs in-place: lgrad *= ograd
      MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
        RspRspOp<DType, IType, mshadow::op::mul>(
          s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
          false, false, true, false);
      });
    }
    // rhs grad
    if (req[1] != kNullOp) {
      MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, {
        RspRspOp<DType, IType, ROP>(
          s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
          false, false, false, false);
      });
      // rhs in-place: rgrad *= ograd
      MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
        RspRspOp<DType, IType, mshadow::op::mul>(
          s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
          false, false, true, false);
      });
    }
  }
protected:
/*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename DType, typename IType, typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename DType, typename IType, typename CType, typename OP>
static inline void CsrCsrOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
public:
/*!
* \brief Rsp-op-Rsp operation which produces a dense result
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*!
* \brief Allow one of the inputs to be dense and still produce a sparse output
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
template<bool lhs_dense_ok = true, bool rhs_dense_ok = true>
static bool AllowLRDenseInputWithSparseOutputStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
const auto& lhs_stype = in_attrs->at(0);
const auto& rhs_stype = in_attrs->at(1);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && lhs_stype == kDefaultStorage && rhs_stype == kDefaultStorage) {
// dns, dns -> dns
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched) {
if ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(rhs_dense_ok && lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_dense_ok && lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) {
// rsp, rsp -> rsp
// rsp, dns -> rsp
// dns, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
// csr, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
} else if ((lhs_stype == kCSRStorage && rhs_dense_ok) ||
(rhs_stype == kCSRStorage && lhs_dense_ok)) {
// csr, dns -> csr
// dns, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, DispatchMode::kFComputeFallback);
}
}
if (!dispatched) {
dispatch_fallback(out_attrs, dispatch_mode);
}
if (*dispatch_mode == DispatchMode::kFComputeFallback) {
LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs);
}
return true;
}
/*!
* \brief Backward pass computing input gradient using forward inputs
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
  /*! \brief Dense elementwise forward: out[i] = OP(lhs[i], rhs[i]) for all
   *         real data types.  No-op when the write request is kNullOp. */
  template<typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] != kNullOp) {
      Stream<xpu> *s = ctx.get_stream<xpu>();
      CHECK_EQ(inputs.size(), 2U);
      CHECK_EQ(outputs.size(), 1U);
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
          // Smallest of the three sizes, rounded up to whole vector lanes.
          const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
          + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        });
      });
    }
  }
  /*! \brief Same as Compute(), but the type switch additionally covers the
   *         vectorized half2 type for fp16 throughput. */
  template<typename xpu, typename OP>
  static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] != kNullOp) {
      Stream<xpu> *s = ctx.get_stream<xpu>();
      CHECK_EQ(inputs.size(), 2U);
      CHECK_EQ(outputs.size(), 1U);
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
          // Smallest of the three sizes, rounded up to whole vector lanes.
          const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
          + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        });
      });
    }
  }
  /*! \brief Sparse forward dispatcher.  Handles rsp+rsp -> rsp/dns and
   *         csr+csr -> csr; every other storage combination is a fatal
   *         "not implemented" error. */
  template<typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<NDArray> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<NDArray> &outputs) {
    CHECK_EQ(inputs.size(), 2);
    CHECK_EQ(outputs.size(), 1);
    if (req[0] == kNullOp) return;
    const auto lhs_stype = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    if ((common::ContainsOnlyStorage(inputs, kRowSparseStorage))
        && (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
      // rsp, rsp -> rsp
      // rsp, rsp -> dns
      // Index type is taken from whichever input is row-sparse.
      const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1;
      MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, {
        MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
          RspRspOp<DType, IType, OP>(
            s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
        });
      });
    } else if (common::ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
      // csr, csr -> csr
      MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, {
        MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, {
          MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
            CsrCsrOp<DType, IType, CType, OP>(
              s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
          });
        });
      });
    } else {
      LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
    }
  }
/*! \brief ComputeEx allowing dense lvalue and/or rvalue */
  template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
  static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
                                  const OpContext &ctx,
                                  const std::vector<NDArray> &inputs,
                                  const std::vector<OpReqType> &req,
                                  const std::vector<NDArray> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(inputs.size(), 2);
    CHECK_EQ(outputs.size(), 1);
    if (req[0] == kNullOp) return;
    const auto lhs_stype = inputs[0].storage_type();
    const auto rhs_stype = inputs[1].storage_type();
    const auto out_stype = outputs[0].storage_type();
    // Accept any mix of rsp/dns inputs (at most one dense) producing an
    // rsp or dns output; route everything through RspRspOp.
    if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
        ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
         (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
        lhs_may_be_dense && rhs_may_be_dense) {
      // rsp, rsp -> rsp
      // rsp, rsp -> dns
      // rsp, dns -> rsp
      // dns, rsp -> rsp
      // More than once dense not allowed (this will be checked in RspRspOp):
      // rsp, dns -> dns <-- NOT ALLOWED
      // dns, rsp -> dns <-- NOT ALLOWED
      mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
      MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
        MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, {
          RspRspOp<DType, IType, OP>(
            s, attrs, ctx, inputs[0], inputs[1],
            req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
        });
      });
    } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
      // csr, csr -> csr: delegate to the plain sparse dispatcher.
      ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
    } else {
      LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
    }
  }
/*! \brief Dense backward pass for ops whose gradients depend only on the
 *         output gradient ("UseNone": no forward inputs required).
 *         LOP/ROP map the output gradient to the lhs/rhs input gradients. */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const std::vector<TBlob> &inputs,
                                   const std::vector<OpReqType> &req,
                                   const std::vector<TBlob> &outputs) {
  // Dispatch on the output gradient's dtype.
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
  });
}
/*! \brief Same as BackwardUseNone, but the type switch additionally covers
 *         the packed half2 type (MSHADOW_TYPE_SWITCH_WITH_HALF2). */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
                                            const OpContext &ctx,
                                            const std::vector<TBlob> &inputs,
                                            const std::vector<OpReqType> &req,
                                            const std::vector<TBlob> &outputs) {
  MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
    BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
  });
}
/*! \brief Sparse backward pass for ops whose gradients depend only on the
 *         output gradient.
 *
 * inputs[0] is the output gradient; outputs[0]/outputs[1] are the lhs/rhs
 * input gradients. Each gradient is computed only when its req is not
 * kNullOp, and only when its storage type matches the incoming gradient's
 * (rsp -> rsp or csr -> csr); other combinations abort.
 */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<NDArray> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<NDArray> &outputs) {
  CHECK_EQ(inputs.size(), 1U);   // output grad
  CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
  const auto in_stype = inputs[0].storage_type();
  const auto lhs_stype = outputs[0].storage_type();
  const auto rhs_stype = outputs[1].storage_type();
  // lhs grad
  if (req[0] != kNullOp) {
    if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      CHECK_EQ(outputs[0].storage_type(), in_stype);
      // rsp -> rsp, _. op requires 0-input returns 0-output
      DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
      UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
    } else {
      LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
    }
  }
  // rhs grad
  if (req[1] != kNullOp) {
    if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      // BUG FIX: this branch validates the *rhs* gradient, so it must check
      // outputs[1] (was outputs[0], which re-checked the lhs gradient).
      CHECK_EQ(outputs[1].storage_type(), in_stype);
      // rsp -> _, rsp. op requires 0-input returns 0-output
      DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
      UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
    } else {
      LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
    }
  }
}
/*! \brief Dense backward pass for ops whose gradients require the forward
 *         inputs ("UseIn"). LOP/ROP compute the lhs/rhs input gradients. */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
                                 const OpContext &ctx,
                                 const std::vector<TBlob> &inputs,
                                 const std::vector<OpReqType> &req,
                                 const std::vector<TBlob> &outputs) {
  // Dispatch on the output gradient's dtype.
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
  });
}
/*! \brief Same as BackwardUseIn, but the type switch additionally covers
 *         the packed half2 type (MSHADOW_TYPE_SWITCH_WITH_HALF2). */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
                                          const OpContext &ctx,
                                          const std::vector<TBlob> &inputs,
                                          const std::vector<OpReqType> &req,
                                          const std::vector<TBlob> &outputs) {
  MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
    BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
  });
}
/*! \brief Sparse backward pass for ops whose gradients require the forward
 *         inputs.
 *
 * inputs = {output grad, lhs input, rhs input}; outputs = {lhs grad, rhs grad}.
 * Handles all-row-sparse inputs with each gradient either dense or
 * row-sparse; the inX_ok_dense flags are forwarded to BackwardUseInEx_,
 * which may fall back to the dense kernel BackwardUseIn.
 */
template<
    typename xpu, typename LOP, typename ROP,
    bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const std::vector<NDArray> &inputs,
                                   const std::vector<OpReqType> &req,
                                   const std::vector<NDArray> &outputs) {
  using namespace common;
  CHECK_EQ(inputs.size(), 3U);
  CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
  const auto lhs_grad_stype = outputs[0].storage_type();
  const auto rhs_grad_stype = outputs[1].storage_type();
  if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
      (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
      (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
    // rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
    MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
      BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
        attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
    });
  } else {
    // FIX: previously unsupported storage combinations fell through silently,
    // leaving the gradients untouched. Fail loudly instead, consistent with
    // ComputeEx / BackwardUseNoneEx / ComputeDnsLRValueEx.
    LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
  }
}
}; // class ElemwiseBinaryOp
/*! \brief Binary launch.
 *  Registers an elementwise binary operator with two named inputs
 *  ("lhs", "rhs"), one output, elementwise shape/type inference, and
 *  in-place options letting the output share storage with either input. */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "first input") \
.add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch, with FComputeEx for csr and rsp available.
 *  Extends MXNET_OPERATOR_REGISTER_BINARY with sparse storage inference,
 *  the CPU dense and sparse kernels, and a temp-space resource request
 *  (needed by the CSR path). */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseStorageType<2, 1, true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, dense result ("DR"): sparse inputs are allowed but
 * the output is always dense (SparseSparseWithDenseResult).
 * FInferStorageType attr is not set using this macro.
 * By default DefaultStorageType is used.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::SparseSparseWithDenseResult) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
fm_loss.h | /**
* Copyright (c) 2015 by Contributors
*/
#ifndef DIFACTO_LOSS_FM_LOSS_H_
#define DIFACTO_LOSS_FM_LOSS_H_
#include <vector>
#include <cmath>
#include "difacto/base.h"
#include "dmlc/data.h"
#include "dmlc/io.h"
#include "difacto/loss.h"
#include "common/spmv.h"
#include "common/spmm.h"
#include "./logit_loss.h"
namespace difacto {
/**
 * \brief parameters for FM loss
 */
struct FMLossParam : public dmlc::Parameter<FMLossParam> {
  /**
   * \brief the embedding dimension (length of each latent factor V_i).
   * A value of 0 disables the second-order term entirely, reducing the
   * model to plain logistic regression (see FMLoss::Predict / CalcGrad).
   */
  int V_dim;
  DMLC_DECLARE_PARAMETER(FMLossParam) {
    // V_dim must lie in [0, 10000].
    DMLC_DECLARE_FIELD(V_dim).set_range(0, 10000);
  }
};
/**
 * \brief the factorization machine loss
 * :math:`f(x) = \langle w, x \rangle + \frac{1}{2}\left(\|V x\|_2^2 - \sum_{i=1}^d x_i^2 \|V_i\|_2^2\right)`
 *
 * Stateful across calls: Predict() caches X*V in XV_ and the squared
 * feature values in XX_; CalcGrad() reuses (and mutates) both, so
 * CalcGrad must be called after Predict on the same data block — the
 * CHECK_EQs on XV_ / XX_ sizes enforce this.
 */
class FMLoss : public Loss {
 public:
  FMLoss() {}
  virtual ~FMLoss() {}
  // Initialize from keyword arguments; returns the unrecognized ones.
  KWArgs Init(const KWArgs& kwargs) override {
    return param_.InitAllowUnknown(kwargs);
  }
  /**
   * \brief perform prediction
   *
   *    pred = X * w + .5 * sum((X*V).^2 - (X.*X)*(V.*V), 2);
   *
   * where
   * - sum(A, 2) : sum the rows of A
   * - .* : elemenetal-wise times
   *
   * @param data the data
   * @param param input parameters
   * - param[0], real_t vector, the weights
   * - param[1], int vector, the w positions
   * - param[2], int vector, the V positions
   * @param pred predict output, should be pre-allocated
   */
  void Predict(const dmlc::RowBlock<unsigned>& data,
               const std::vector<SArray<char>>& param,
               SArray<real_t>* pred) override {
    CHECK_EQ(param.size(), 3);
    // Reinterpret the raw byte arrays and forward to the typed overload.
    Predict(data,
            SArray<real_t>(param[0]),
            SArray<int>(param[1]),
            SArray<int>(param[2]),
            pred);
  }
  // Typed implementation of Predict. w and V share the same storage
  // ("weights"); w_pos / V_pos select which entries belong to each part.
  void Predict(const dmlc::RowBlock<unsigned>& data,
               const SArray<real_t>& weights,
               const SArray<int>& w_pos,
               const SArray<int>& V_pos,
               SArray<real_t>* pred) {
    // pred = X * w  (first-order / linear term)
    SArray<real_t> w = weights;
    SpMV::Times(data, w, pred, nthreads_, w_pos, {});
    int V_dim = param_.V_dim;
    if (V_dim == 0) return;  // no embeddings: linear model only
    SArray<real_t> V = weights;
    // XV_ = X*V  (cached for CalcGrad)
    XV_.clear();
    XV_.resize(data.size * V_dim, 0);
    SpMM::Times(data, V, V_dim, &XV_, nthreads_, V_pos);
    // XX = X.*X : square the nonzero values in place in the XX_ copy
    // (cached for CalcGrad). If data.value is null the values are
    // implicitly 1 and squaring is a no-op.
    auto XX = data;
    if (XX.value) {
      XX_.clear();
      XX_.CopyFrom(XX.value+XX.offset[0], XX.offset[XX.size] - XX.offset[0]);
      for (auto& v : XX_) v *= v;
      XX.value = XX_.data();
    }
    // VV = V.*V, only at the valid embedding positions.
    // NOTE(review): entries whose V_pos is < 0 are skipped; this assumes
    // SArray(size) zero-initializes its storage — confirm in SArray.
    SArray<real_t> VV(V.size());
#pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < V_pos.size(); ++i) {
      int p = V_pos[i];
      if (p < 0) continue;
      for (int j = 0; j < V_dim; ++j) VV[p+j] = V[p+j] * V[p+j];
    }
    // XXVV = XX*VV
    SArray<real_t> XXVV(XV_.size());
    SpMM::Times(XX, VV, V_dim, &XXVV, nthreads_, V_pos);
    // py += .5 * sum((V.XV).^2 - xxvv)  -- the second-order interaction term
#pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < pred->size(); ++i) {
      real_t* t = XV_.data() + i * V_dim;
      real_t* tt = XXVV.data() + i * V_dim;
      real_t s = 0;
      for (int j = 0; j < V_dim; ++j) s += t[j] * t[j] - tt[j];
      (*pred)[i] += .5 * s;
    }
    // projection: clamp predictions to [-20, 20] to keep exp() in
    // CalcGrad numerically safe
    for (auto& p : *pred) p = p > 20 ? 20 : (p < -20 ? -20 : p);
  }
  /*!
   * \brief compute the gradients
   *
   *    p = - y ./ (1 + exp (y .* pred));
   *    grad_w = X' * p;
   *    grad_u = X' * diag(p) * X * V - diag((X.*X)'*p) * V
   *
   * @param data the data
   * @param param input parameters
   * - param[0], real_t vector, the weights
   * - param[1], int vector, the w positions
   * - param[2], int vector, the V positions
   * - param[3], real_t vector, the predict output
   * @param grad the results
   */
  void CalcGrad(const dmlc::RowBlock<unsigned>& data,
                const std::vector<SArray<char>>& param,
                SArray<real_t>* grad) override {
    CHECK_EQ(param.size(), 4);
    CalcGrad(data,
             SArray<real_t>(param[0]),
             SArray<int>(param[1]),
             SArray<int>(param[2]),
             SArray<real_t>(param[3]),
             grad);
  }
  // Typed implementation of CalcGrad. Requires a preceding Predict() call
  // on the same data block: reuses XV_ (scaled in place below) and XX_.
  void CalcGrad(const dmlc::RowBlock<unsigned>& data,
                const SArray<real_t>& weights,
                const SArray<int>& w_pos,
                const SArray<int>& V_pos,
                const SArray<real_t>& pred,
                SArray<real_t>* grad) {
    // p = -y ./ (1 + exp(y .* pred))  -- logistic loss derivative per row,
    // with labels mapped to {-1, +1}
    SArray<real_t> p; p.CopyFrom(pred);
    CHECK_EQ(p.size(), data.size);
#pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < p.size(); ++i) {
      real_t y = data.label[i] > 0 ? 1 : -1;
      p[i] = - y / (1 + std::exp(y * p[i]));
    }
    // grad_w = X' * p
    SpMV::TransTimes(data, p, grad, nthreads_, {}, w_pos);
    // grad_u = ...
    int V_dim = param_.V_dim;
    if (V_dim == 0) return;
    SArray<real_t> V = weights;
    // XXp = (X.*X)'*p, reusing the squared values cached by Predict.
    auto XX = data;
    if (XX.value) {
      // Fails if Predict was not run on this exact block first.
      CHECK_EQ(XX_.size(), XX.offset[XX.size] - XX.offset[0]);
      XX.value = XX_.data();
    }
    SArray<real_t> XXp(V_pos.size());
    SpMV::TransTimes(XX, p, &XXp, nthreads_);
    // grad_u -= diag(XXp) * V,
#pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < V_pos.size(); ++i) {
      int p = V_pos[i];
      if (p < 0) continue;
      for (int j = 0; j < V_dim; ++j) {
        (*grad)[p+j] -= V[p+j] * XXp[i];
      }
    }
    // XV_ = diag(p) * X * V  (scales the cached X*V in place)
    CHECK_EQ(XV_.size(), data.size * V_dim);
#pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < p.size(); ++i) {
      for (int j = 0; j < V_dim; ++j) XV_[i*V_dim+j] *= p[i];
    }
    // grad_u += X' * diag(p) * X * V
    SpMM::TransTimes(data, XV_, V_dim, grad, nthreads_, {}, V_pos);
  }
 private:
  // X*V cache, written by Predict and consumed (and mutated) by CalcGrad.
  SArray<real_t> XV_;
  // element-wise squared feature values (X.*X), cached by Predict.
  SArray<dmlc::real_t> XX_;
  FMLossParam param_;
  // NOTE(review): nthreads_ is not declared here; presumably inherited
  // from Loss — verify in difacto/loss.h.
};
} // namespace difacto
#endif // DIFACTO_LOSS_FM_LOSS_H_
|
GB_unop__identity_fc32_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_uint8)
// op(A') function: GB (_unop_tran__identity_fc32_uint8)
// C type: GxB_FC32_t
// A type: uint8_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
// (0 here: the uint8_t -> GxB_FC32_t cast means a plain memcpy cannot be used)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel: copies Ax into Cx, casting each uint8_t entry to a
// GxB_FC32_t complex value with zero imaginary part. Handles both the full
// (Ab == NULL) and bitmap (Ab non-NULL) cases, parallelized with OpenMP.
GrB_Info GB (_unop_apply__identity_fc32_uint8)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// dense case: every entry is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The actual transpose loop lives in the shared template GB_unop_transpose.c,
// which expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fc32_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
magsac.h | #pragma once
#include <limits>
#include <chrono>
#include <memory>
#include "model.h"
#include "model_score.h"
#include "samplers/sampler.h"
#include "samplers/uniform_sampler.h"
#include <math.h>
#include "gamma_values.cpp"
#ifdef _WIN32
#include <ppl.h>
#endif
#include <gflags/gflags.h>
#include <glog/logging.h>
template <class DatumType, class ModelEstimator>
class MAGSAC
{
public:
enum Version {
// The original version of MAGSAC. It works well, however, can be quite slow in many cases.
MAGSAC_ORIGINAL,
// The recently proposed MAGSAC++ algorithm which keeps the accuracy of the original MAGSAC but is often orders of magnitude faster.
MAGSAC_PLUS_PLUS };
MAGSAC(const Version magsac_version_ = Version::MAGSAC_PLUS_PLUS) :
time_limit(std::numeric_limits<double>::max()), //
desired_fps(-1),
iteration_limit(std::numeric_limits<size_t>::max()),
maximum_threshold(10.0),
apply_post_processing(true),
mininum_iteration_number(50),
partition_number(5),
core_number(1),
number_of_irwls_iters(1),
interrupting_threshold(1.0),
last_iteration_number(0),
log_confidence(0),
point_number(0),
magsac_version(magsac_version_)
{
}
~MAGSAC() {}
// A function to run MAGSAC.
bool run(
const cv::Mat &points_, // The input data points
const double confidence_, // The required confidence in the results
ModelEstimator& estimator_, // The model estimator
gcransac::sampler::Sampler<cv::Mat, size_t> &sampler_, // The sampler used
gcransac::Model &obtained_model_, // The estimated model parameters
int &iteration_number_, // The number of iterations done
ModelScore &model_score_); // The score of the estimated model
// A function to set the maximum inlier-outlier threshold
void setMaximumThreshold(const double maximum_threshold_)
{
maximum_threshold = maximum_threshold_;
}
// A function to set the inlier-outlier threshold used for speeding up the procedure
// and for determining the required number of iterations.
void setReferenceThreshold(const double threshold_)
{
interrupting_threshold = threshold_;
}
double getReferenceThreshold()
{
return interrupting_threshold;
}
// Setting the flag determining if post-processing is needed
void applyPostProcessing(bool value_)
{
apply_post_processing = value_;
}
// A function to set the maximum number of iterations
void setIterationLimit(size_t iteration_limit_)
{
iteration_limit = iteration_limit_;
}
// A function to set the minimum number of iterations
void setMinimumIterationNumber(size_t mininum_iteration_number_)
{
mininum_iteration_number = mininum_iteration_number_;
}
// A function to set the number of cores used in the original MAGSAC algorithm.
// In MAGSAC++, it is not used. Note that when multiple MAGSACs run in parallel,
// it is beneficial to keep the core number one for each independent MAGSAC.
// Otherwise, the threads will act weirdly.
void setCoreNumber(size_t core_number_)
{
if (magsac_version == MAGSAC_PLUS_PLUS)
LOG(ERROR) << "Setting the core number for MAGSAC++ is deprecated.";
core_number = core_number_;
}
// Setting the number of partitions used in the original MAGSAC algorithm
// to speed up the procedure. In MAGSAC++, this parameter is not used.
void setPartitionNumber(size_t partition_number_)
{
if (magsac_version == MAGSAC_PLUS_PLUS)
LOG(ERROR) << "Setting the partition number for MAGSAC++ is deprecated.";
partition_number = partition_number_;
}
// A function to set a desired minimum frames-per-second (FPS) value.
void setFPS(int fps_)
{
desired_fps = fps_; // The required FPS.
// The time limit which the FPS implies
time_limit = fps_ <= 0 ?
std::numeric_limits<double>::max() :
1.0 / fps_;
}
// The post-processing algorithm applying sigma-consensus to the input model once.
bool postProcessing(
const cv::Mat &points, // All data points
const gcransac::Model &so_far_the_best_model, // The input model to be improved
gcransac::Model &output_model, // The improved model parameters
ModelScore &output_score, // The score of the improved model
const ModelEstimator &estimator); // The model estimator
// The function determining the quality/score of a model using the original MAGSAC
// criterion. Note that this function is significantly slower than the quality
// function of MAGSAC++.
void getModelQuality(
const cv::Mat& points_, // All data points
const gcransac::Model& model_, // The input model
const ModelEstimator& estimator_, // The model estimator
double& marginalized_iteration_number_, // The required number of iterations marginalized over the noise scale
double& score_); // The score/quality of the model
// The function determining the quality/score of a
// model using the MAGSAC++ criterion.
void getModelQualityPlusPlus(
const cv::Mat &points_, // All data points
const gcransac::Model &model_, // The model parameter
const ModelEstimator &estimator_, // The model estimator class
double &score_, // The score to be calculated
const double &previous_best_score_); // The score of the previous so-far-the-best model
size_t number_of_irwls_iters;
protected:
Version magsac_version; // The version of MAGSAC used
size_t iteration_limit; // Maximum number of iterations allowed
size_t mininum_iteration_number; // Minimum number of iteration before terminating
double maximum_threshold; // The maximum sigma value
size_t core_number; // Number of core used in sigma-consensus
double time_limit; // A time limit after the algorithm is interrupted
int desired_fps; // The desired FPS (TODO: not tested with MAGSAC)
bool apply_post_processing; // Decides if the post-processing step should be applied
int point_number; // The current point number
int last_iteration_number; // The iteration number implied by the last run of sigma-consensus
double log_confidence; // The logarithm of the required confidence
size_t partition_number; // Number of partitions used to speed up sigma-consensus
double interrupting_threshold; // A threshold to speed up MAGSAC by interrupting the sigma-consensus procedure whenever there is no chance of being better than the previous so-far-the-best model
bool sigmaConsensus(
const cv::Mat& points_,
const gcransac::Model& model_,
gcransac::Model& refined_model_,
ModelScore& score_,
const ModelEstimator& estimator_,
const ModelScore& best_score_);
bool sigmaConsensusPlusPlus(
const cv::Mat &points_,
const gcransac::Model& model_,
gcransac::Model& refined_model_,
ModelScore &score_,
const ModelEstimator &estimator_,
const ModelScore &best_score_);
};
// Main entry point: runs the MAGSAC / MAGSAC++ loop (sample a minimal set,
// estimate candidate models, refine each by sigma-consensus, keep the best)
// until the iteration budget or the optional FPS time limit is exhausted.
// Returns true iff a model with positive score was found.
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::run(
	const cv::Mat& points_,
	const double confidence_,
	ModelEstimator& estimator_,
	gcransac::sampler::Sampler<cv::Mat, size_t> &sampler_,
	gcransac::Model& obtained_model_,
	int& iteration_number_,
	ModelScore &model_score_)
{
	// Initialize variables
	std::chrono::time_point<std::chrono::system_clock> start, end; // Variables for time measuring: start and end times
	std::chrono::duration<double> elapsed_seconds; // Variables for time measuring: elapsed time
	log_confidence = log(1.0 - confidence_); // The logarithm of 1 - confidence
	point_number = points_.rows; // Number of points
	constexpr size_t sample_size = estimator_.sampleSize(); // The sample size required for the estimation
	size_t max_iteration = iteration_limit; // The maximum number of iterations initialized to the iteration limit
	int iteration = 0; // Current number of iterations
	gcransac::Model so_far_the_best_model; // Current best model
	ModelScore so_far_the_best_score; // The score of the current best model
	std::unique_ptr<size_t[]> minimal_sample(new size_t[sample_size]); // The sample used for the estimation

	// Index pool 0..point_number-1 the sampler draws from.
	// NOTE(review): size_t vs int comparison below (point_number is int) —
	// harmless here since rows >= 0, but worth a cast.
	std::vector<size_t> pool(points_.rows);
	for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
		pool[point_idx] = point_idx;

	if (points_.rows < sample_size)
	{
		LOG(WARNING) << "There are not enough points for applying robust estimation. Minimum is "
			<< static_cast<int>(sample_size)
			<< "; while "
			<< static_cast<int>(points_.rows)
			<< " are given.";
		return false;
	}

	// Set the start time variable if there is some time limit set
	if (desired_fps > -1)
		start = std::chrono::system_clock::now();

	constexpr size_t max_unsuccessful_model_generations = 50;

	// Main MAGSAC iteration
	while (mininum_iteration_number > iteration ||
		iteration < max_iteration)
	{
		// Increase the current iteration number
		++iteration;

		// Sample a minimal subset
		std::vector<gcransac::Model> models; // The set of estimated models
		size_t unsuccessful_model_generations = 0; // The number of unsuccessful model generations
		// Try to select a minimal sample and estimate the implied model parameters
		while (++unsuccessful_model_generations < max_unsuccessful_model_generations)
		{
			// Get a minimal sample randomly
			if (!sampler_.sample(pool, // The index pool from which the minimal sample can be selected
				minimal_sample.get(), // The minimal sample
				sample_size)) // The size of a minimal sample
				continue;

			// Check if the selected sample is valid before estimating the model
			// parameters which usually takes more time.
			if (!estimator_.isValidSample(points_, // All points
				minimal_sample.get())) // The current sample
				continue;

			// Estimate the model from the minimal sample
			if (estimator_.estimateModel(points_, // All data points
				minimal_sample.get(), // The selected minimal sample
				&models)) // The estimated models
				break;
		}

		// If the method was not able to generate any usable models, break the cycle.
		// (Failed generation attempts are charged against the iteration budget;
		// if all attempts failed, `models` is empty and the loop simply continues.)
		iteration += unsuccessful_model_generations - 1;

		// Select the so-far-the-best from the estimated models
		for (const auto &model : models)
		{
			ModelScore score; // The score of the current model
			gcransac::Model refined_model; // The refined model parameters

			// Apply sigma-consensus to refine the model parameters by marginalizing over the noise level sigma
			bool success;
			if (magsac_version == Version::MAGSAC_ORIGINAL)
				success = sigmaConsensus(points_,
					model,
					refined_model,
					score,
					estimator_,
					so_far_the_best_score);
			else
				success = sigmaConsensusPlusPlus(points_,
					model,
					refined_model,
					score,
					estimator_,
					so_far_the_best_score);

			// Continue if the model was rejected
			if (!success || score.score == -1)
				continue;

			// Save the iteration number when the current model is found
			score.iteration = iteration;

			// Update the best model parameters if needed
			if (so_far_the_best_score < score)
			{
				so_far_the_best_model = refined_model; // Update the best model parameters
				so_far_the_best_score = score; // Update the best model's score
				max_iteration = MIN(max_iteration, last_iteration_number); // Update the max iteration number, but do not allow to increase
			}
		}

		// Update the time parameters if a time limit is set
		if (desired_fps > -1)
		{
			end = std::chrono::system_clock::now();
			elapsed_seconds = end - start;

			// Interrupt if the time limit is exceeded
			if (elapsed_seconds.count() > time_limit)
				break;
		}
	}

	// Apply sigma-consensus as a post processing step if needed and the estimated model is valid
	if (apply_post_processing)
	{
		// TODO
	}

	obtained_model_ = so_far_the_best_model;
	iteration_number_ = iteration;
	model_score_ = so_far_the_best_score;

	return so_far_the_best_score.score > 0;
}
// Post-processing stub: intended to apply sigma-consensus once to the best
// model found by run(). Not implemented yet — always warns and returns false
// (the output parameters are left untouched).
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::postProcessing(
	const cv::Mat &points_,
	const gcransac::Model &model_,
	gcransac::Model &refined_model_,
	ModelScore &refined_score_,
	const ModelEstimator &estimator_)
{
	LOG(WARNING) << "Sigma-consensus++ is not implemented yet as post-processing.";
	return false;
}
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::sigmaConsensus(
const cv::Mat &points_,
const gcransac::Model& model_,
gcransac::Model& refined_model_,
ModelScore &score_,
const ModelEstimator &estimator_,
const ModelScore &best_score_)
{
// Set up the parameters
constexpr double L = 1.05;
constexpr double k = ModelEstimator::getSigmaQuantile();
constexpr double threshold_to_sigma_multiplier = 1.0 / k;
constexpr size_t sample_size = estimator_.sampleSize();
static auto comparator = [](std::pair<double, int> left, std::pair<double, int> right) { return left.first < right.first; };
const int point_number = points_.rows;
double current_maximum_sigma = this->maximum_threshold;
// Calculating the residuals
std::vector< std::pair<double, size_t> > all_residuals;
all_residuals.reserve(point_number);
// If it is not the first run, consider the previous best and interrupt the validation when there is no chance of being better
if (best_score_.inlier_number > 0)
{
// Number of inliers which should be exceeded
int points_remaining = best_score_.inlier_number;
// Collect the points which are closer than the threshold which the maximum sigma implies
for (int point_idx = 0; point_idx < point_number; ++point_idx)
{
// Calculate the residual of the current point
const double residual = estimator_.residual(points_.row(point_idx), model_);
if (current_maximum_sigma > residual)
{
// Store the residual of the current point and its index
all_residuals.emplace_back(std::make_pair(residual, point_idx));
// Count points which are closer than a reference threshold to speed up the procedure
if (residual < interrupting_threshold)
--points_remaining;
}
// Interrupt if there is no chance of being better
// TODO: replace this part by SPRT test
if (point_number - point_idx < points_remaining)
return false;
}
// Store the number of really close inliers just to speed up the procedure
// by interrupting the next verifications.
score_.inlier_number = best_score_.inlier_number - points_remaining;
}
else
{
// The number of really close points
size_t points_close = 0;
// Collect the points which are closer than the threshold which the maximum sigma implies
for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
{
// Calculate the residual of the current point
const double residual = estimator_.residual(points_.row(point_idx), model_);
if (current_maximum_sigma > residual)
{
// Store the residual of the current point and its index
all_residuals.emplace_back(std::make_pair(residual, point_idx));
// Count points which are closer than a reference threshold to speed up the procedure
if (residual < interrupting_threshold)
++points_close;
}
}
// Store the number of really close inliers just to speed up the procedure
// by interrupting the next verifications.
score_.inlier_number = points_close;
}
std::vector<gcransac::Model> sigma_models;
std::vector<size_t> sigma_inliers;
std::vector<double> final_weights;
// The number of possible inliers
const size_t possible_inlier_number = all_residuals.size();
// Sort the residuals in ascending order
std::sort(all_residuals.begin(), all_residuals.end(), comparator);
// The maximum threshold is set to be slightly bigger than the distance of the
// farthest possible inlier.
current_maximum_sigma =
all_residuals.back().first + std::numeric_limits<double>::epsilon();
const double sigma_step = current_maximum_sigma / partition_number;
last_iteration_number = 10000;
score_.score = 0;
// The weights calculated by each parallel process
std::vector<std::vector<double>> point_weights_par(partition_number, std::vector<double>(possible_inlier_number, 0));
// If OpenMP is used, calculate things in parallel
#ifdef USE_OPENMP
#pragma omp parallel for num_threads(core_number)
for (int partition_idx = 0; partition_idx < partition_number; ++partition_idx)
{
// The maximum sigma value in the current partition
const double max_sigma = (partition_idx + 1) * sigma_step;
// Find the last element which has smaller distance than 'max_threshold'
// Since the vector is ordered binary search can be used to find that particular element.
const auto &last_element = std::upper_bound(all_residuals.begin(), all_residuals.end(), std::make_pair(max_sigma, 0), comparator);
const size_t sigma_inlier_number = last_element - all_residuals.begin();
// Put the indices into a vector
std::vector<size_t> sigma_inliers;
sigma_inliers.reserve(sigma_inlier_number);
// Store the points which are closer than the current sigma limit
for (size_t relative_point_idx = 0; relative_point_idx < sigma_inlier_number; ++relative_point_idx)
sigma_inliers.emplace_back(all_residuals[relative_point_idx].second);
// Check if there are enough inliers to fit a model
if (sigma_inliers.size() > sample_size)
{
// Estimating the model which the current set of inliers imply
std::vector<gcransac::Model> sigma_models;
estimator_.estimateModelNonminimal(points_,
&(sigma_inliers)[0],
sigma_inlier_number,
&sigma_models);
// If the estimation was successful calculate the implied probabilities
if (sigma_models.size() == 1)
{
const double max_sigma_squared_2 = 2 * max_sigma * max_sigma;
double residual_i_2, // The residual of the i-th point
probability_i; // The probability of the i-th point
// Iterate through all points to estimate the related probabilities
for (size_t relative_point_idx = 0; relative_point_idx < sigma_inliers.size(); ++relative_point_idx)
{
// TODO: Replace with Chi-square instead of normal distribution
const size_t &point_idx = sigma_inliers[relative_point_idx];
// Calculate the residual of the current point
residual_i_2 = estimator_.squaredResidual(points_.row(point_idx),
sigma_models[0]);
// Calculate the probability of the i-th point assuming Gaussian distribution
// TODO: replace by Chi-square distribution
probability_i = exp(-residual_i_2 / max_sigma_squared_2);
// Store the probability of the i-th point coming from the current partition
point_weights_par[partition_idx][relative_point_idx] += probability_i;
}
}
}
}
#else
LOG(ERROR) << "Not implemented yet.";
#endif
// The weights used for the final weighted least-squares fitting
// If point normalization is applied the indexing of the weights differs.
// In that case
// final_weights[i] is the weight of inlier[i]-th point
// Otherwise,
// final_weights[i] is the weight of i-th point
if constexpr (ModelEstimator::doesNormalizationForNonMinimalFitting())
final_weights.reserve(possible_inlier_number);
else
final_weights.resize(point_number, 0);
// Collect all points which has higher probability of being inlier than zero
sigma_inliers.reserve(possible_inlier_number);
for (size_t point_idx = 0; point_idx < possible_inlier_number; ++point_idx)
{
// Calculate the weight of the current point
double weight = 0.0;
for (size_t partition_idx = 0; partition_idx < partition_number; ++partition_idx)
weight += point_weights_par[partition_idx][point_idx];
// If the weight is approx. zero, continue.
if (weight < std::numeric_limits<double>::epsilon())
continue;
// Store the index and weight of the current point
sigma_inliers.emplace_back(all_residuals[point_idx].second);
if constexpr (ModelEstimator::doesNormalizationForNonMinimalFitting())
final_weights.emplace_back(weight);
else
final_weights[point_idx] = weight;
}
// If there are fewer inliers than the size of the minimal sample interupt the procedure
if (sigma_inliers.size() < sample_size)
return false;
// Estimate the model parameters using weighted least-squares fitting
if (!estimator_.estimateModelNonminimal(
points_, // All input points
&(sigma_inliers)[0], // Points which have higher than 0 probability of being inlier
static_cast<int>(sigma_inliers.size()), // Number of possible inliers
&sigma_models, // Estimated models
&(final_weights)[0])) // Weights of points
return false;
bool is_model_updated = false;
if (sigma_models.size() == 1 && // If only a single model is estimated
estimator_.isValidModel(sigma_models.back(),
points_,
sigma_inliers,
&(sigma_inliers)[0],
interrupting_threshold,
is_model_updated)) // and it is valid
{
// Return the refined model
refined_model_ = sigma_models.back();
// Calculate the score of the model and the implied iteration number
double marginalized_iteration_number;
getModelQuality(points_, // All the input points
refined_model_, // The estimated model
estimator_, // The estimator
marginalized_iteration_number, // The marginalized inlier ratio
score_.score); // The marginalized score
if (marginalized_iteration_number < 0 || std::isnan(marginalized_iteration_number))
last_iteration_number = std::numeric_limits<int>::max();
else
last_iteration_number = static_cast<int>(round(marginalized_iteration_number));
return true;
}
return false;
}
// MAGSAC++ sigma-consensus: refines an initial model via iteratively
// re-weighted least-squares (IRWLS), where each point's weight marginalizes
// its inlier likelihood over all noise scales up to the maximum threshold.
// Returns true (and fills refined_model_ / score_ / last_iteration_number)
// if a valid refined model was found, false otherwise.
// NOTE(review): maximum_threshold, interrupting_threshold,
// number_of_irwls_iters, precision_of_stored_gammas, stored_gamma_values,
// stored_gamma_number, log_confidence and last_iteration_number are class
// members declared outside this chunk — verify their types against the header.
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::sigmaConsensusPlusPlus(
    const cv::Mat &points_,           // All input points, one row per point
    const gcransac::Model& model_,    // The initial model to be refined
    gcransac::Model& refined_model_,  // Output: the refined model
    ModelScore &score_,               // Output: score of the refined model
    const ModelEstimator &estimator_, // The estimator used for fitting
    const ModelScore &best_score_)    // Score of the so-far-the-best model
{
    // The degrees of freedom of the data from which the model is estimated.
    // E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4.
    constexpr size_t degrees_of_freedom = ModelEstimator::getDegreesOfFreedom();
    // A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals
    constexpr double k = ModelEstimator::getSigmaQuantile();
    // A multiplier to convert residual values to sigmas
    constexpr double threshold_to_sigma_multiplier = 1.0 / k;
    // Calculating k^2 / 2 which will be used for the estimation and,
    // due to being constant, it is better to calculate it a priori.
    constexpr double squared_k_per_2 = k * k / 2.0;
    // Calculating (DoF - 1) / 2 which will be used for the estimation and,
    // due to being constant, it is better to calculate it a priori.
    constexpr double dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0;
    // TODO: check
    constexpr double C = ModelEstimator::getC();
    // The size of a minimal sample used for the estimation
    constexpr size_t sample_size = estimator_.sampleSize();
    // Calculating 2^(DoF - 1) which will be used for the estimation and,
    // due to being constant, it is better to calculate it a priori.
    static const double two_ad_dof = std::pow(2.0, dof_minus_one_per_two);
    // Calculating C * 2^(DoF - 1) which will be used for the estimation and,
    // due to being constant, it is better to calculate it a priori.
    static const double C_times_two_ad_dof = C * two_ad_dof;
    // Calculating the gamma value of (DoF - 1) / 2 which will be used for the estimation and,
    // due to being constant, it is better to calculate it a priori.
    static const double gamma_value = tgamma(dof_minus_one_per_two);
    // Calculating the upper incomplete gamma value of (DoF - 1) / 2 with k^2 / 2.
    constexpr double gamma_k = ModelEstimator::getUpperIncompleteGammaOfK();
    // Calculating the lower incomplete gamma value of (DoF - 1) / 2 which will be used for the estimation and,
    // due to being constant, it is better to calculate it a priori.
    static const double gamma_difference = gamma_value - gamma_k;
    // The number of points provided
    const int point_number = points_.rows;
    // The manually set maximum inlier-outlier threshold
    double current_maximum_sigma = this->maximum_threshold;
    // Calculating the pairs of (residual, point index).
    std::vector< std::pair<double, size_t> > residuals;
    // Occupy the maximum required memory to avoid doing it later.
    residuals.reserve(point_number);
    // If it is not the first run, consider the previous best and interrupt the validation when there is no chance of being better
    if (best_score_.inlier_number > 0)
    {
        // Number of points close to the previous so-far-the-best model.
        // This model should have more inliers.
        int points_remaining = best_score_.inlier_number;
        // Collect the points which are closer than the threshold which the maximum sigma implies
        for (int point_idx = 0; point_idx < point_number; ++point_idx)
        {
            // Calculate the residual of the current point
            const double residual = estimator_.residual(points_.row(point_idx), model_);
            if (current_maximum_sigma > residual)
            {
                // Store the residual of the current point and its index
                residuals.emplace_back(std::make_pair(residual, point_idx));
                // Count points which are closer than a reference threshold to speed up the procedure
                if (residual < interrupting_threshold)
                    --points_remaining;
            }
            // Interrupt if there is no chance of being better
            // TODO: replace this part by SPRT test
            if (point_number - point_idx < points_remaining)
                return false;
        }
        // Store the number of really close inliers just to speed up the procedure
        // by interrupting the next verifications.
        score_.inlier_number = best_score_.inlier_number - points_remaining;
    }
    else
    {
        // The number of really close points
        size_t points_close = 0;
        // Collect the points which are closer than the threshold which the maximum sigma implies
        // NOTE(review): signed/unsigned comparison (size_t vs int point_number)
        // — harmless since point_number >= 0, but worth cleaning up.
        for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
        {
            // Calculate the residual of the current point
            const double residual = estimator_.residual(points_.row(point_idx), model_);
            if (current_maximum_sigma > residual)
            {
                // Store the residual of the current point and its index
                residuals.emplace_back(std::make_pair(residual, point_idx));
                // Count points which are closer than a reference threshold to speed up the procedure
                if (residual < interrupting_threshold)
                    ++points_close;
            }
        }
        // Store the number of really close inliers just to speed up the procedure
        // by interrupting the next verifications.
        score_.inlier_number = points_close;
    }
    // Models fit by weighted least-squares fitting
    std::vector<gcransac::Model> sigma_models;
    // Points used in the weighted least-squares fitting
    std::vector<size_t> sigma_inliers;
    // Weights used in the the weighted least-squares fitting
    std::vector<double> sigma_weights;
    // Number of points considered in the fitting
    const size_t possible_inlier_number = residuals.size();
    // Occupy the memory to avoid doing it inside the calculation possibly multiple times
    sigma_inliers.reserve(possible_inlier_number);
    // Occupy the memory to avoid doing it inside the calculation possibly multiple times
    sigma_weights.reserve(possible_inlier_number);
    // Calculate 2 * \sigma_{max}^2 a priori
    const double squared_sigma_max_2 = current_maximum_sigma * current_maximum_sigma * 2.0;
    // Divide C * 2^(DoF - 1) by \sigma_{max} a priori
    const double one_over_sigma = C_times_two_ad_dof / current_maximum_sigma;
    // Calculate the weight of a point with 0 residual (i.e., fitting perfectly) a priori
    const double weight_zero = one_over_sigma * gamma_difference;
    // Initialize the polished model with the initial one
    gcransac::Model polished_model = model_;
    // A flag to determine if the initial model has been updated
    bool updated = false;
    // Do the iteratively re-weighted least squares fitting
    for (size_t iterations = 0; iterations < number_of_irwls_iters; ++iterations)
    {
        // If the current iteration is not the first, the set of possibly inliers
        // (i.e., points closer than the maximum threshold) have to be recalculated.
        if (iterations > 0)
        {
            // The number of points close to the model
            size_t points_close = 0;
            // Remove everything from the residual vector
            residuals.clear();
            // Collect the points which are closer than the maximum threshold
            for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
            {
                // Calculate the residual of the current point w.r.t. the model
                // polished in the previous IRWLS iteration
                const double residual = estimator_.residual(points_.row(point_idx), polished_model);
                if (current_maximum_sigma > residual)
                {
                    // Store the residual of the current point and its index
                    residuals.emplace_back(std::make_pair(residual, point_idx));
                    // Count points which are closer than a reference threshold to speed up the procedure
                    if (residual < interrupting_threshold)
                        ++points_close;
                }
            }
            // Store the number of really close inliers just to speed up the procedure
            // by interrupting the next verifications.
            score_.inlier_number = points_close;
            // Number of points closer than the threshold
            // (shadows the outer possible_inlier_number on purpose)
            const size_t possible_inlier_number = residuals.size();
            // Clear the inliers and weights
            sigma_inliers.clear();
            sigma_weights.clear();
            // Occupy the memory for the inliers and weights
            sigma_inliers.reserve(possible_inlier_number);
            sigma_weights.reserve(possible_inlier_number);
        }
        // If the estimator does not normalize internally, the weight vector is
        // indexed by absolute point index, so size it to the full point count.
        if constexpr (!ModelEstimator::doesNormalizationForNonMinimalFitting())
            sigma_weights.resize(point_number, 0);
        // Calculate the weight of each point
        for (const auto &[residual, idx] : residuals)
        {
            // The weight
            double weight = 0.0;
            // If the residual is ~0, the point fits perfectly and it is handled differently
            if (residual < std::numeric_limits<double>::epsilon())
                weight = weight_zero;
            else
            {
                // Calculate the squared residual
                const double squared_residual = residual * residual;
                // Get the position of the gamma value in the lookup table
                size_t x = round(precision_of_stored_gammas * squared_residual / squared_sigma_max_2);
                // Put the index of the point into the vector of points used for the least squares fitting
                // NOTE(review): perfectly-fitting points (the branch above) are
                // never appended to sigma_inliers — confirm this is intended.
                sigma_inliers.emplace_back(idx);
                // If the sought gamma value is not stored in the lookup, return the closest element
                if (stored_gamma_number < x)
                    x = stored_gamma_number;
                // Calculate the weight of the point
                weight = one_over_sigma * (stored_gamma_values[x] - gamma_k);
            }
            // Store the weight of the point
            if constexpr (ModelEstimator::doesNormalizationForNonMinimalFitting())
                sigma_weights.emplace_back(weight);
            else
                sigma_weights[idx] = weight;
        }
        // If there are fewer than the minimum point close to the model,
        // terminate.
        if (sigma_inliers.size() < sample_size)
            return false;
        // Estimate the model parameters using weighted least-squares fitting
        if (!estimator_.estimateModelNonminimal(
            points_, // All input points
            &(sigma_inliers)[0], // Points which have higher than 0 probability of being inlier
            static_cast<int>(sigma_inliers.size()), // Number of possible inliers
            &sigma_models, // Estimated models
            &(sigma_weights)[0])) // Weights of points
        {
            // If the estimation failed and the iteration was never successfull,
            // terminate with failure.
            if (iterations == 0)
                return false;
            // Otherwise, if the iteration was successfull at least once,
            // simply break it.
            break;
        }
        // Update the model parameters
        polished_model = sigma_models[0];
        // Clear the vector of models and keep only the best
        sigma_models.clear();
        // The model has been updated
        updated = true;
    }
    bool is_model_updated = false;
    if (updated && // If the model has been updated
        estimator_.isValidModel(polished_model,
            points_,
            sigma_inliers,
            &(sigma_inliers[0]),
            interrupting_threshold,
            is_model_updated)) // and it is valid
    {
        // Return the refined model
        refined_model_ = polished_model;
        // Calculate the score of the model and the implied iteration number
        // NOTE(review): marginalized_iteration_number is declared but never
        // used here (contrast with sigmaConsensus, which does use it).
        double marginalized_iteration_number;
        getModelQualityPlusPlus(points_, // All the input points
            refined_model_, // The estimated model
            estimator_, // The estimator
            score_.score, // The marginalized score
            best_score_.score); // The score of the previous so-far-the-best model
        // Update the iteration number from the standard RANSAC termination
        // criterion based on the inlier ratio.
        // NOTE(review): if every point is an inlier this is log(0) = -inf —
        // presumably clamped by the caller; confirm.
        last_iteration_number =
            log_confidence / log(1.0 - std::pow(static_cast<double>(score_.inlier_number) / point_number, sample_size));
        return true;
    }
    return false;
}
// MAGSAC++ model quality: computes score_ = 1 / total_loss, where each
// point contributes a loss marginalized over the noise scale (inliers via
// precomputed incomplete-gamma lookup tables, outliers a constant loss).
// Evaluation is interrupted early once the loss exceeds that of the
// previous best model (previous_best_score_).
// NOTE(review): maximum_threshold, precision_of_stored_incomplete_gammas,
// stored_incomplete_gamma_number, stored_lower_incomplete_gamma_values and
// stored_complete_gamma_values are class members declared outside this chunk.
template <class DatumType, class ModelEstimator>
void MAGSAC<DatumType, ModelEstimator>::getModelQualityPlusPlus(
    const cv::Mat &points_, // All data points
    const gcransac::Model &model_, // The model parameter
    const ModelEstimator &estimator_, // The model estimator class
    double &score_, // The score to be calculated
    const double &previous_best_score_) // The score of the previous so-far-the-best model
{
    // The degrees of freedom of the data from which the model is estimated.
    // E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4.
    constexpr size_t degrees_of_freedom = ModelEstimator::getDegreesOfFreedom();
    // A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals
    constexpr double k = ModelEstimator::getSigmaQuantile();
    // A multiplier to convert residual values to sigmas
    constexpr double threshold_to_sigma_multiplier = 1.0 / k;
    // Calculating k^2 / 2 which will be used for the estimation and,
    // due to being constant, it is better to calculate it a priori.
    // NOTE(review): unused in this function.
    constexpr double squared_k_per_2 = k * k / 2.0;
    // Calculating (DoF - 1) / 2 which will be used for the estimation and,
    // due to being constant, it is better to calculate it a priori.
    constexpr double dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0;
    // Calculating (DoF + 1) / 2 which will be used for the estimation and,
    // due to being constant, it is better to calculate it a priori.
    constexpr double dof_plus_one_per_two = (degrees_of_freedom + 1.0) / 2.0;
    // TODO: check
    // NOTE(review): unused in this function; the sibling function uses
    // ModelEstimator::getC() instead of a hard-coded 0.25.
    constexpr double C = 0.25;
    // Calculating 2^(DoF - 1) which will be used for the estimation and,
    // due to being constant, it is better to calculate it a priori.
    static const double two_ad_dof_minus_one = std::pow(2.0, dof_minus_one_per_two);
    // Calculating 2^(DoF + 1) which will be used for the estimation and,
    // due to being constant, it is better to calculate it a priori.
    static const double two_ad_dof_plus_one = std::pow(2.0, dof_plus_one_per_two);
    // Calculate the gamma value of k
    constexpr double gamma_value_of_k = ModelEstimator::getUpperIncompleteGammaOfK();
    // Calculate the lower incomplete gamma value of k
    constexpr double lower_gamma_value_of_k = ModelEstimator::getLowerIncompleteGammaOfK();
    // The number of points provided
    const int point_number = points_.rows;
    // The previous best loss (score is the reciprocal of the loss)
    const double previous_best_loss = 1.0 / previous_best_score_;
    // Convert the maximum threshold to a sigma value
    const double maximum_sigma = threshold_to_sigma_multiplier * maximum_threshold;
    // Calculate the squared maximum sigma
    const double maximum_sigma_2 = maximum_sigma * maximum_sigma;
    // Calculate \sigma_{max}^2 / 2
    const double maximum_sigma_2_per_2 = maximum_sigma_2 / 2.0;
    // Calculate 2 * \sigma_{max}^2
    const double maximum_sigma_2_times_2 = maximum_sigma_2 * 2.0;
    // Calculate the loss implied by an outlier
    const double outlier_loss = maximum_sigma * two_ad_dof_minus_one * lower_gamma_value_of_k;
    // Calculating 2^(DoF + 1) / \sigma_{max} which will be used for the estimation and,
    // due to being constant, it is better to calculate it a priori.
    const double two_ad_dof_plus_one_per_maximum_sigma = two_ad_dof_plus_one / maximum_sigma;
    // The loss which a point implies
    double loss = 0.0,
        // The total loss regarding the current model
        total_loss = 0.0;
    // Iterate through all points to calculate the implied loss
    for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
    {
        // Calculate the residual of the current point
        const double residual =
            estimator_.residualForScoring(points_.row(point_idx), model_.descriptor);
        // If the residual is smaller than the maximum threshold, consider it outlier
        // and add the loss implied to the total loss.
        // NOTE(review): the comment above inverts the condition — a residual
        // LARGER than the threshold is treated as an outlier here.
        if (maximum_threshold < residual)
            loss = outlier_loss;
        else // Otherwise, consider the point inlier, and calculate the implied loss
        {
            // Calculate the squared residual
            const double squared_residual = residual * residual;
            // Divide the residual by the 2 * \sigma^2
            const double squared_residual_per_sigma = squared_residual / maximum_sigma_2_times_2;
            // Get the position of the gamma value in the lookup table
            size_t x = round(precision_of_stored_incomplete_gammas * squared_residual_per_sigma);
            // If the sought gamma value is not stored in the lookup, return the closest element
            if (stored_incomplete_gamma_number < x)
                x = stored_incomplete_gamma_number;
            // Calculate the loss implied by the current point
            loss = maximum_sigma_2_per_2 * stored_lower_incomplete_gamma_values[x] +
                squared_residual / 4.0 * (stored_complete_gamma_values[x] -
                    gamma_value_of_k);
            loss = loss * two_ad_dof_plus_one_per_maximum_sigma;
        }
        // Update the total loss
        total_loss += loss;
        // Break the validation if there is no chance of being better than the previous
        // so-far-the-best model.
        if (previous_best_loss < total_loss)
            break;
    }
    // Calculate the score of the model from the total loss
    score_ = 1.0 / total_loss;
}
// Original MAGSAC model quality: partitions the residual range of the
// possible inliers into partition_number bands, accumulates a soft inlier
// score per band, and outputs (a) the summed score and (b) the RANSAC
// iteration number marginalized over the partitions.
// NOTE(review): maximum_threshold, partition_number and log_confidence are
// class members declared outside this chunk; MAX is presumably the OpenCV
// macro — confirm.
template <class DatumType, class ModelEstimator>
void MAGSAC<DatumType, ModelEstimator>::getModelQuality(
    const cv::Mat &points_, // All data points
    const gcransac::Model &model_, // The model parameter
    const ModelEstimator &estimator_, // The model estimator class
    double &marginalized_iteration_number_, // The marginalized iteration number to be calculated
    double &score_) // The score to be calculated
{
    // Set up the parameters
    constexpr size_t sample_size = estimator_.sampleSize();
    const size_t point_number = points_.rows;
    // Getting the inliers: collect (residual, index) pairs for every point
    // closer to the model than the maximum threshold.
    std::vector<std::pair<double, size_t>> all_residuals;
    all_residuals.reserve(point_number);
    double max_distance = 0;
    for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
    {
        // Calculate the residual of the current point
        const double residual =
            estimator_.residualForScoring(points_.row(point_idx), model_.descriptor);
        // If the residual is smaller than the maximum threshold, add it to the set of possible inliers
        if (maximum_threshold > residual)
        {
            max_distance = MAX(max_distance, residual);
            all_residuals.emplace_back(std::make_pair(residual, point_idx));
        }
    }
    // Set the maximum distance to be slightly bigger than that of the farthest possible inlier
    max_distance = max_distance +
        std::numeric_limits<double>::epsilon();
    // Number of possible inliers
    const size_t possible_inlier_number = all_residuals.size();
    // The extent of a partition
    const double threshold_step = max_distance / partition_number;
    // The maximum threshold considered in each partition
    std::vector<double> thresholds(partition_number);
    std::vector<double> thresholds_squared(partition_number);
    std::vector<double> thresholds_2_squared(partition_number);
    // Calculating the thresholds for each partition
    // NOTE(review): thresholds_2_squared is filled but never read here.
    for (size_t i = 0; i < partition_number; ++i)
    {
        thresholds[i] = (i + 1) * threshold_step;
        thresholds_squared[i] = thresholds[i] * thresholds[i];
        thresholds_2_squared[i] = 2 * thresholds_squared[i];
    }
    double residual_i, // Residual of the i-th point
        residual_i_squared, // Squared residual of the i-th poin
        probability_i; // Probability of the i-th point given the model
    std::vector<double> inliers(partition_number, 0), // RANSAC score for each partition
        probabilities(partition_number, 1); // Probabilities for each partition
    // Accumulate, for every partition whose threshold the point passes,
    // an inlier count and a soft (residual-weighted) probability.
    for (size_t point_idx = 0; point_idx < possible_inlier_number; ++point_idx)
    {
        residual_i = all_residuals[point_idx].first;
        residual_i_squared = residual_i * residual_i;
        for (size_t i = 0; i < partition_number; ++i)
        {
            if (residual_i < thresholds[i])
            {
                probability_i = 1.0 - residual_i_squared / thresholds_squared[i];
                ++inliers[i];
                probabilities[i] += probability_i;
            }
        }
    }
    score_ = 0;
    marginalized_iteration_number_ = 0.0;
    // Sum the per-partition scores and average the implied iteration numbers.
    // NOTE(review): if inliers[i] == 0 the log argument is 1 and this divides
    // by zero (inf/NaN) — the caller guards with isnan(); confirm intended.
    for (auto i = 0; i < partition_number; ++i)
    {
        score_ += probabilities[i];
        marginalized_iteration_number_ += log_confidence / log(1.0 - std::pow(inliers[i] / point_number, sample_size));
    }
    marginalized_iteration_number_ = marginalized_iteration_number_ / partition_number;
}
|
simpson_integral.h | /*! \file simpson_integral.h
\brief simpsonの公式で数値積分を行うクラスの宣言と実装
Copyright © 2016-2017 @dc1394 All Rights Reserved.
*/
#ifndef _SIMPSON_INTEGRAL_H_
#define _SIMPSON_INTEGRAL_H_
#pragma once
#include "functional.h"
#include "paralleltype.h"
#include <algorithm> // for std::max, std::transform
#include <cstdint> // for std::int32_t
#include <functional> // for std::plus
#include <future> // for std::async, std::future
#include <thread> // for std::thread::hardware_concurrency
#include <valarray> // for std::valarray
#include <vector> // for std::vector
#include <omp.h> // for pragma omp parallel for
#include <ppl.h> // for concurrency::parallel_for
#include <pstl/execution> // for std::execution::par_unseq
#include <pstl/numeric> // for std::transform_reduce
#include <boost/mpl/int.hpp> // for boost::mpl::int_
#include <boost/range/irange.hpp> // for boost::irange
#include <cilk/cilk.h> // for cilk_for
#include <cilk/reducer_opadd.h> // for cilk::reducer_opadd
#include <tbb/blocked_range.h> // for tbb:blocked_range
#include <tbb/combinable.h> // for tbb::combinable
#include <tbb/parallel_for.h> // for tbb::parallel_for
#include <tbb/parallel_reduce.h> // for tbb:parallel_reduce
namespace simpson {
//! A template class.
/*!
simpsonの公式で数値積分を行うクラス
*/
//! A template class.
/*!
    Performs numerical integration with Simpson's rule, with a selection of
    parallelization back-ends chosen at compile time via ParallelType.
*/
template <typename FUNCTYPE>
class Simpson final {
    // #region constructors / destructor
public:
    //! A constructor.
    /*!
        The only constructor.
        \param func_ the integrand
        \param n the number of subdivisions for Simpson's rule
                 (must be even — every back-end steps two intervals at a time)
        \param x1 the lower bound of integration
        \param x2 the upper bound of integration
    */
    Simpson(myfunctional::Functional<FUNCTYPE> const & func_, std::int32_t n, double x1, double x2)
        : func_(func_), n_(n), x1_(x1), dh_((x2 - x1) / static_cast<double>(n_)) {}
    //! A destructor.
    /*!
        Defaulted destructor.
    */
    ~Simpson() = default;
    // #endregion constructors / destructor
    // #region public member functions
    //! A public member function (template function).
    /*!
        Performs the numerical integration with Simpson's rule, dispatching
        to the back-end selected by the template parameter.
        \return the value of the integral
    */
    template <ParallelType N>
    double simpson() const
    {
        return (*this)(boost::mpl::int_<static_cast<std::int32_t>(N)>());
    }
    // #endregion public member functions
    // #region private member functions
private:
    //! A private member function (template function).
    /*!
        Performs the integration with Simpson's rule (parallelized with Cilk)
        \param tag argument used only for overload selection
        \return the value of the integral
    */
    double operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::Cilk)>) const;
    //! A private member function (template function).
    /*!
        Performs the integration with Simpson's rule
        (parallelized with C++17 parallel algorithms)
        \param tag argument used only for overload selection
        \return the value of the integral
    */
    double operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::CPP17)>) const;
    //! A private member function (template function).
    /*!
        Performs the integration with Simpson's rule (serial, no parallelization)
        \param tag argument used only for overload selection
        \return the value of the integral
    */
    double operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::NoParallel)>) const;
    //! A private member function (template function).
    /*!
        Performs the integration with Simpson's rule (parallelized with OpenMP)
        \param tag argument used only for overload selection
        \return the value of the integral
    */
    double operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::OpenMp)>) const;
    //! A private member function (template function).
    /*!
        Performs the integration with Simpson's rule (parallelized with PPL)
        \param tag argument used only for overload selection
        \return the value of the integral
    */
    double operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::Ppl)>) const;
    //! A private member function (template function).
    /*!
        Performs the integration with Simpson's rule (parallelized with std::async)
        \param tag argument used only for overload selection
        \return the value of the integral
    */
    double operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::StdAsync)>) const;
    //! A private member function (template function).
    /*!
        Performs the integration with Simpson's rule (parallelized with TBB)
        \param tag argument used only for overload selection
        \return the value of the integral
    */
    double operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::Tbb)>) const;
    //! A private member function (template function).
    /*!
        Performs the integration with Simpson's rule (parallelized with TBB, variant 2)
        \param tag argument used only for overload selection
        \return the value of the integral
    */
    double operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::Tbb2)>) const;
    // #endregion private member functions
    // #region member variables
    //! A private member variable (constant).
    /*!
        The integrand
    */
    myfunctional::Functional<FUNCTYPE> const func_;
    //! A private member variable (constant).
    /*!
        The number of integration points of Simpson's rule
    */
    std::int32_t const n_;
    //! A private member variable (constant).
    /*!
        The lower bound of integration
    */
    double const x1_;
    //! A private member variable (constant).
    /*!
        The width of one integration sub-interval: (x2 - x1) / n
    */
    double const dh_;
    // #endregion member variables
    // #region forbidden constructors / member functions
    //! A private constructor (deleted).
    /*!
        Default constructor (forbidden)
    */
    Simpson() = delete;
    //! A private copy constructor (deleted).
    /*!
        Copy constructor (forbidden)
    */
    Simpson(Simpson const &) = delete;
    //! A private member function (deleted).
    /*!
        operator=() (forbidden)
        \param the object to copy from
        \return the object copied from
    */
    Simpson & operator=(Simpson const &) = delete;
    // #endregion forbidden constructors / member functions
};
// #region templateメンバ関数の実装
// Simpson's rule parallelized with Intel Cilk Plus: each pair of
// sub-intervals [i, i+2] contributes f(x_i) + 4 f(x_{i+1}) + f(x_{i+2});
// the race-free sum is accumulated through a cilk::reducer_opadd.
template <typename FUNCTYPE>
double Simpson<FUNCTYPE>::operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::Cilk)>) const
{
    cilk::reducer_opadd<double> sum;
    sum.set_value(0.0);
    // Step by 2: one Simpson segment spans two adjacent sub-intervals.
    cilk_for (auto i = 0; i < n_; i += 2) {
        auto const f0 = func_(x1_ + static_cast<double>(i) * dh_);
        auto const f1 = func_(x1_ + static_cast<double>(i + 1) * dh_);
        auto const f2 = func_(x1_ + static_cast<double>(i + 2) * dh_);
        sum += (f0 + 4.0 * f1 + f2);
    }
    // Simpson weight: (h / 3) * sum of the weighted samples.
    return sum.get_value() * dh_ / 3.0;
}
// Simpson's rule parallelized with C++17 parallel algorithms: maps segment
// index i to the weighted sample f0 + 4*f1 + f2 of the i-th segment pair and
// reduces the values with std::transform_reduce under par_unseq.
template <typename FUNCTYPE>
double Simpson<FUNCTYPE>::operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::CPP17)>) const
{
    // Weighted sample of the i-th Simpson segment (two sub-intervals).
    auto const segment_value = [this](auto const i) {
        auto const f0 = func_(x1_ + static_cast<double>(i * 2) * dh_);
        auto const f1 = func_(x1_ + static_cast<double>(i * 2 + 1) * dh_);
        auto const f2 = func_(x1_ + static_cast<double>(i * 2 + 2) * dh_);
        return f0 + 4.0 * f1 + f2;
    };
    auto const indices = boost::irange<std::int32_t>(0, n_ / 2);
    auto const total = std::transform_reduce(
        std::execution::par_unseq,
        indices.begin(),
        indices.end(),
        0.0,
        std::plus<double>(),
        segment_value);
    // Simpson weight: (h / 3) * sum of the weighted samples.
    return total * dh_ / 3.0;
}
// Serial Simpson's rule: walks the grid two sub-intervals at a time and
// accumulates f(x_i) + 4 f(x_{i+1}) + f(x_{i+2}) for every segment pair,
// then scales by h / 3.
template <typename FUNCTYPE>
double Simpson<FUNCTYPE>::operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::NoParallel)>) const
{
    auto total = 0.0;
    auto left = 0;
    while (left < n_) {
        auto const lo  = func_(x1_ + static_cast<double>(left) * dh_);
        auto const mid = func_(x1_ + static_cast<double>(left + 1) * dh_);
        auto const hi  = func_(x1_ + static_cast<double>(left + 2) * dh_);
        total += lo + 4.0 * mid + hi;
        left += 2;
    }
    return total * dh_ / 3.0;
}
// Simpson's rule parallelized with OpenMP: the canonical for-loop is split
// across threads and the partial sums are merged by the reduction clause.
template <typename FUNCTYPE>
double Simpson<FUNCTYPE>::operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::OpenMp)>) const
{
    auto sum = 0.0;
    // reduction(+:sum) gives each thread a private accumulator and adds
    // them together at the end of the parallel region.
    #pragma omp parallel for reduction(+:sum)
    for (auto i = 0; i < n_; i += 2) {
        auto const f0 = func_(x1_ + static_cast<double>(i) * dh_);
        auto const f1 = func_(x1_ + static_cast<double>(i + 1) * dh_);
        auto const f2 = func_(x1_ + static_cast<double>(i + 2) * dh_);
        sum += (f0 + 4.0 * f1 + f2);
    }
    // Simpson weight: (h / 3) * sum of the weighted samples.
    return sum * dh_ / 3.0;
}
// Simpson's rule parallelized with Microsoft PPL: parallel_for iterates over
// the n/2 segment pairs; per-thread partial sums are kept in a
// concurrency::combinable and merged with std::plus at the end.
template <typename FUNCTYPE>
double Simpson<FUNCTYPE>::operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::Ppl)>) const
{
    concurrency::combinable<double> sum;
    concurrency::parallel_for<std::int32_t>(
        0,
        n_ / 2,
        [&](auto const i) {
            // Segment i covers sub-intervals [2i, 2i+2].
            auto const f0 = func_(x1_ + static_cast<double>(i * 2) * dh_);
            auto const f1 = func_(x1_ + static_cast<double>(i * 2 + 1) * dh_);
            auto const f2 = func_(x1_ + static_cast<double>(i * 2 + 2) * dh_);
            sum.local() += (f0 + 4.0 * f1 + f2);
        });
    // Simpson weight: (h / 3) * sum of the weighted samples.
    return sum.combine(std::plus<double>()) * dh_ / 3.0;
}
// Simpson's rule parallelized with std::async: the n/2 segment pairs are
// split into one contiguous chunk per hardware thread, each chunk is summed
// in its own task, and the partial results are added in task order.
template <typename FUNCTYPE>
double Simpson<FUNCTYPE>::operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::StdAsync)>) const
{
    auto const workers = static_cast<std::int32_t>((std::max)(std::thread::hardware_concurrency(), 1u));
    auto const pairs = n_ / 2;          // number of Simpson segment pairs
    auto const chunk = pairs / workers; // pairs per worker (remainder goes to the last one)
    // Sums the weighted samples of segment pairs in [first, last).
    auto const partial_sum = [this](std::int32_t first, std::int32_t last) {
        auto acc = 0.0;
        for (auto k = first; k < last; k++) {
            auto const f0 = func_(x1_ + static_cast<double>(k * 2) * dh_);
            auto const f1 = func_(x1_ + static_cast<double>(k * 2 + 1) * dh_);
            auto const f2 = func_(x1_ + static_cast<double>(k * 2 + 2) * dh_);
            acc += (f0 + 4.0 * f1 + f2);
        }
        return acc;
    };
    std::vector<std::future<double>> futures(workers);
    for (auto t = 0; t < workers; t++) {
        auto const first = chunk * t;
        // The last worker also takes the remainder of the division.
        auto const last = (t == workers - 1) ? pairs : chunk * (t + 1);
        futures[t] = std::async(std::launch::async, partial_sum, first, last);
    }
    auto total = 0.0;
    for (auto && f : futures)
        total += f.get();
    // Simpson weight: (h / 3) * sum of the weighted samples.
    return total * dh_ / 3.0;
}
// Simpson's rule parallelized with TBB: parallel_reduce splits the n/2
// segment pairs into blocked ranges, each range accumulates onto its running
// partial sum, and partial sums are joined with std::plus.
template <typename FUNCTYPE>
double Simpson<FUNCTYPE>::operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::Tbb)>) const
{
    auto const sum = tbb::parallel_reduce(
        tbb::blocked_range<std::int32_t>(0, n_ / 2),
        0.0, // identity element of the reduction
        [&](auto const & range, auto sumlocal) {
            for (auto && i = range.begin(); i != range.end(); ++i) {
                // Segment i covers sub-intervals [2i, 2i+2].
                auto const f0 = func_(x1_ + static_cast<double>(i * 2) * dh_);
                auto const f1 = func_(x1_ + static_cast<double>(i * 2 + 1) * dh_);
                auto const f2 = func_(x1_ + static_cast<double>(i * 2 + 2) * dh_);
                sumlocal += (f0 + 4.0 * f1 + f2);
            }
            return sumlocal;
        },
        std::plus<double>());
    // Simpson weight: (h / 3) * sum of the weighted samples.
    return sum * dh_ / 3.0;
}
// Simpson's rule parallelized with TBB (variant 2): parallel_for over the
// n/2 segment pairs; each task sums its range into a local accumulator and
// folds it into a tbb::combinable, which is merged with std::plus at the end.
template <typename FUNCTYPE>
double Simpson<FUNCTYPE>::operator()(boost::mpl::int_<static_cast<std::int32_t>(ParallelType::Tbb2)>) const
{
    tbb::combinable<double> partial;
    tbb::parallel_for(
        tbb::blocked_range<std::int32_t>(0, n_ / 2),
        [&](auto const & r) {
            auto acc = 0.0;
            for (auto k = r.begin(); k != r.end(); ++k) {
                // Segment k covers sub-intervals [2k, 2k+2].
                auto const f0 = func_(x1_ + static_cast<double>(k * 2) * dh_);
                auto const f1 = func_(x1_ + static_cast<double>(k * 2 + 1) * dh_);
                auto const f2 = func_(x1_ + static_cast<double>(k * 2 + 2) * dh_);
                acc += f0 + 4.0 * f1 + f2;
            }
            partial.local() += acc;
        });
    // Simpson weight: (h / 3) * sum of the weighted samples.
    return partial.combine(std::plus<double>()) * dh_ / 3.0;
}
// #endregion templateメンバ関数の実装
}
#endif // _SIMPSON_INTEGRAL_H_
|
configurator.c | /* Simple tool to create config.h.
* Would be much easier with ccan modules, but deliberately standalone.
*
* Copyright 2011 Rusty Russell <rusty@rustcorp.com.au>. MIT license.
*
* c12r_err, c12r_errx functions copied from ccan/err/err.c
* Copyright Rusty Russell <rusty@rustcorp.com.au>. CC0 (Public domain) License.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#define _POSIX_C_SOURCE 200809L /* For pclose, popen, strdup */
#define EXIT_BAD_USAGE 1
#define EXIT_TROUBLE_RUNNING 2
#define EXIT_BAD_TEST 3
#define EXIT_BAD_INPUT 4
#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef _MSC_VER
#define popen _popen
#define pclose _pclose
#endif
#ifdef _MSC_VER
#define DEFAULT_COMPILER "cl"
/* Note: Dash options avoid POSIX path conversion when used under msys bash
* and are therefore preferred to slash (e.g. -nologo over /nologo)
* Note: Disable Warning 4200 "nonstandard extension used : zero-sized array
* in struct/union" for flexible array members.
*/
#define DEFAULT_FLAGS "-nologo -Zi -W4 -wd4200 " \
"-D_CRT_NONSTDC_NO_WARNINGS -D_CRT_SECURE_NO_WARNINGS"
#define DEFAULT_OUTPUT_EXE_FLAG "-Fe:"
#else
#define DEFAULT_COMPILER "cc"
#define DEFAULT_FLAGS "-g3 -ggdb -Wall -Wundef -Wmissing-prototypes -Wmissing-declarations -Wstrict-prototypes -Wold-style-definition"
#define DEFAULT_OUTPUT_EXE_FLAG "-o"
#endif
#define OUTPUT_FILE "configurator.out"
#define INPUT_FILE "configuratortest.c"
#ifdef _WIN32
#define DIR_SEP "\\"
#else
#define DIR_SEP "/"
#endif
/* Program name for error messages; set from argv[0] in main(). */
static const char *progname = "";
/* Verbosity: 0 quiet, 1 report failures, >1 also dump generated programs. */
static int verbose;
/* If true, print autoconf-style "checking for ..." progress lines. */
static bool like_a_libtool = false;
/* One feature test: how to wrap its code fragment, what other tests it
 * depends on, and (once evaluated) its answer. */
struct test {
	const char *name;	/* The #define emitted, e.g. "HAVE_ALIGNOF". */
	const char *desc;	/* Human-readable, for --autotools-style output. */
	/*
	 * Template style flags (pick one):
	 * OUTSIDE_MAIN:
	 * - put a simple boilerplate main below it.
	 * DEFINES_FUNC:
	 * - defines a static function called func; adds ref to avoid warnings
	 * INSIDE_MAIN:
	 * - put this inside main().
	 * DEFINES_EVERYTHING:
	 * - don't add any boilerplate at all.
	 *
	 * Execution flags:
	 * EXECUTE:
	 * - a runtime test; must compile, exit 0 means flag is set.
	 * MAY_NOT_COMPILE:
	 * - Only useful with EXECUTE: don't get upset if it doesn't compile.
	 * <nothing>:
	 * - a compile test, if it compiles must run and exit 0.
	 */
	const char *style;	/* "|"-joined combination of the flags above. */
	const char *depends;	/* Space-separated test names; "!" inverts one. */
	const char *link;	/* Extra args for the link line, or NULL. */
	const char *fragment;	/* The test program body (see style). */
	const char *flags;	/* Extra args for the compile line, or NULL. */
	const char *overrides; /* On success, force this to '1' */
	bool done;		/* Has this test been evaluated yet? */
	bool answer;		/* Result; only meaningful once done is true. */
};
/* All tests: base_tests plus any --extra-tests read from stdin.
 * Terminated by a NULL name */
static struct test *tests;
/* The built-in feature tests.  Dependencies are resolved recursively by
 * run_test(), so order here is not significant.
 * Fixes vs. the previous revision:
 *  - HAVE_UTIME fragment contained the mojibake "×" (an HTML-escaped
 *    "&times"), so its test program could never compile; restored "&times".
 *  - HAVE_UCONTEXT description was missing the closing '>'. */
static const struct test base_tests[] = {
	{ "HAVE_32BIT_OFF_T", "off_t is 32 bits",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
	  "#include <sys/types.h>\n"
	  "int main(void) {\n"
	  " return sizeof(off_t) == 4 ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_ALIGNOF", "__alignof__ support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __alignof__(double) > 0 ? 0 : 1;" },
	{ "HAVE_ASPRINTF", "asprintf() declaration",
	  "DEFINES_FUNC", NULL, NULL,
	  "#ifndef _GNU_SOURCE\n"
	  "#define _GNU_SOURCE\n"
	  "#endif\n"
	  "#include <stdio.h>\n"
	  "static char *func(int x) {"
	  " char *p;\n"
	  " if (asprintf(&p, \"%u\", x) == -1) \n"
	  " p = NULL;\n"
	  " return p;\n"
	  "}" },
	{ "HAVE_ATTRIBUTE_COLD", "__attribute__((cold)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static int __attribute__((cold)) func(int x) { return x; }" },
	{ "HAVE_ATTRIBUTE_CONST", "__attribute__((const)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static int __attribute__((const)) func(int x) { return x; }" },
	{ "HAVE_ATTRIBUTE_DEPRECATED", "__attribute__((deprecated)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static int __attribute__((deprecated)) func(int x) { return x; }" },
	{ "HAVE_ATTRIBUTE_NONNULL", "__attribute__((nonnull)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static char *__attribute__((nonnull)) func(char *p) { return p; }" },
	{ "HAVE_ATTRIBUTE_SENTINEL", "__attribute__((sentinel)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static int __attribute__((sentinel)) func(int i, ...) { return i; }" },
	{ "HAVE_ATTRIBUTE_PURE", "__attribute__((pure)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static int __attribute__((pure)) func(int x) { return x; }" },
	{ "HAVE_ATTRIBUTE_MAY_ALIAS", "__attribute__((may_alias)) support",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "typedef short __attribute__((__may_alias__)) short_a;" },
	{ "HAVE_ATTRIBUTE_NORETURN", "__attribute__((noreturn)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <stdlib.h>\n"
	  "static void __attribute__((noreturn)) func(int x) { exit(x); }" },
	{ "HAVE_ATTRIBUTE_PRINTF", "__attribute__ format printf support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static void __attribute__((format(__printf__, 1, 2))) func(const char *fmt, ...) { (void)fmt; }" },
	{ "HAVE_ATTRIBUTE_UNUSED", "__attribute__((unused)) support",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "static int __attribute__((unused)) func(int x) { return x; }" },
	{ "HAVE_ATTRIBUTE_USED", "__attribute__((used)) support",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "static int __attribute__((used)) func(int x) { return x; }" },
	{ "HAVE_BACKTRACE", "backtrace() in <execinfo.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <execinfo.h>\n"
	  "static int func(int x) {"
	  " void *bt[10];\n"
	  " return backtrace(bt, 10) < x;\n"
	  "}" },
	{ "HAVE_BIG_ENDIAN", "big endian",
	  "INSIDE_MAIN|EXECUTE", NULL, NULL,
	  "union { int i; char c[sizeof(int)]; } u;\n"
	  "u.i = 0x01020304;\n"
	  "return u.c[0] == 0x01 && u.c[1] == 0x02 && u.c[2] == 0x03 && u.c[3] == 0x04 ? 0 : 1;" },
	{ "HAVE_BSWAP_64", "bswap64 in byteswap.h",
	  "DEFINES_FUNC", "HAVE_BYTESWAP_H", NULL,
	  "#include <byteswap.h>\n"
	  "static int func(int x) { return bswap_64(x); }" },
	{ "HAVE_BUILTIN_CHOOSE_EXPR", "__builtin_choose_expr support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_choose_expr(1, 0, \"garbage\");" },
	{ "HAVE_BUILTIN_CLZ", "__builtin_clz support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_clz(1) == (sizeof(int)*8 - 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_CLZL", "__builtin_clzl support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_clzl(1) == (sizeof(long)*8 - 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_CLZLL", "__builtin_clzll support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_clzll(1) == (sizeof(long long)*8 - 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_CTZ", "__builtin_ctz support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_ctz(1 << (sizeof(int)*8 - 1)) == (sizeof(int)*8 - 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_CTZL", "__builtin_ctzl support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_ctzl(1UL << (sizeof(long)*8 - 1)) == (sizeof(long)*8 - 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_CTZLL", "__builtin_ctzll support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_ctzll(1ULL << (sizeof(long long)*8 - 1)) == (sizeof(long long)*8 - 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_CONSTANT_P", "__builtin_constant_p support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_constant_p(1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_EXPECT", "__builtin_expect support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_expect(argc == 1, 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_FFS", "__builtin_ffs support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_ffs(0) == 0 ? 0 : 1;" },
	{ "HAVE_BUILTIN_FFSL", "__builtin_ffsl support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_ffsl(0L) == 0 ? 0 : 1;" },
	{ "HAVE_BUILTIN_FFSLL", "__builtin_ffsll support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_ffsll(0LL) == 0 ? 0 : 1;" },
	{ "HAVE_BUILTIN_POPCOUNT", "__builtin_popcount support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_popcount(255) == 8 ? 0 : 1;" },
	{ "HAVE_BUILTIN_POPCOUNTL", "__builtin_popcountl support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_popcountl(255L) == 8 ? 0 : 1;" },
	{ "HAVE_BUILTIN_POPCOUNTLL", "__builtin_popcountll support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_popcountll(255LL) == 8 ? 0 : 1;" },
	{ "HAVE_BUILTIN_TYPES_COMPATIBLE_P", "__builtin_types_compatible_p support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_types_compatible_p(char *, int) ? 1 : 0;" },
	{ "HAVE_ICCARM_INTRINSICS", "<intrinsics.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <intrinsics.h>\n"
	  "int func(int v) {\n"
	  " return __CLZ(__RBIT(v));\n"
	  "}" },
	{ "HAVE_BYTESWAP_H", "<byteswap.h>",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "#include <byteswap.h>\n" },
	{ "HAVE_CLOCK_GETTIME", "clock_gettime() declaration",
	  "DEFINES_FUNC", "HAVE_STRUCT_TIMESPEC", NULL,
	  "#include <time.h>\n"
	  "static struct timespec func(void) {\n"
	  " struct timespec ts;\n"
	  " clock_gettime(CLOCK_REALTIME, &ts);\n"
	  " return ts;\n"
	  "}\n" },
	{ "HAVE_CLOCK_GETTIME_IN_LIBRT", "clock_gettime() in librt",
	  "DEFINES_FUNC",
	  "HAVE_STRUCT_TIMESPEC !HAVE_CLOCK_GETTIME",
	  "-lrt",
	  "#include <time.h>\n"
	  "static struct timespec func(void) {\n"
	  " struct timespec ts;\n"
	  " clock_gettime(CLOCK_REALTIME, &ts);\n"
	  " return ts;\n"
	  "}\n",
	  /* This means HAVE_CLOCK_GETTIME, too */
	  "HAVE_CLOCK_GETTIME" },
	{ "HAVE_COMPOUND_LITERALS", "compound literal support",
	  "INSIDE_MAIN", NULL, NULL,
	  "int *foo = (int[]) { 1, 2, 3, 4 };\n"
	  "return foo[0] ? 0 : 1;" },
	{ "HAVE_FCHDIR", "fchdir support",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
	  "#include <sys/types.h>\n"
	  "#include <sys/stat.h>\n"
	  "#include <fcntl.h>\n"
	  "#include <unistd.h>\n"
	  "int main(void) {\n"
	  " int fd = open(\"..\", O_RDONLY);\n"
	  " return fchdir(fd) == 0 ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_ERR_H", "<err.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <err.h>\n"
	  "static void func(int arg) {\n"
	  " if (arg == 0)\n"
	  " err(1, \"err %u\", arg);\n"
	  " if (arg == 1)\n"
	  " errx(1, \"err %u\", arg);\n"
	  " if (arg == 3)\n"
	  " warn(\"warn %u\", arg);\n"
	  " if (arg == 4)\n"
	  " warnx(\"warn %u\", arg);\n"
	  "}\n" },
	{ "HAVE_FILE_OFFSET_BITS", "_FILE_OFFSET_BITS to get 64-bit offsets",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE",
	  "HAVE_32BIT_OFF_T", NULL,
	  "#define _FILE_OFFSET_BITS 64\n"
	  "#include <sys/types.h>\n"
	  "int main(void) {\n"
	  " return sizeof(off_t) == 8 ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_FOR_LOOP_DECLARATION", "for loop declaration support",
	  "INSIDE_MAIN", NULL, NULL,
	  "int ret = 1;\n"
	  "for (int i = 0; i < argc; i++) { ret = 0; };\n"
	  "return ret;" },
	{ "HAVE_FLEXIBLE_ARRAY_MEMBER", "flexible array member support",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "struct foo { unsigned int x; int arr[]; };" },
	{ "HAVE_GETPAGESIZE", "getpagesize() in <unistd.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <unistd.h>\n"
	  "static int func(void) { return getpagesize(); }" },
	{ "HAVE_ISBLANK", "isblank() in <ctype.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#ifndef _GNU_SOURCE\n"
	  "#define _GNU_SOURCE\n"
	  "#endif\n"
	  "#include <ctype.h>\n"
	  "static int func(void) { return isblank(' '); }" },
	{ "HAVE_LITTLE_ENDIAN", "little endian",
	  "INSIDE_MAIN|EXECUTE", NULL, NULL,
	  "union { int i; char c[sizeof(int)]; } u;\n"
	  "u.i = 0x01020304;\n"
	  "return u.c[0] == 0x04 && u.c[1] == 0x03 && u.c[2] == 0x02 && u.c[3] == 0x01 ? 0 : 1;" },
	{ "HAVE_MEMMEM", "memmem in <string.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#ifndef _GNU_SOURCE\n"
	  "#define _GNU_SOURCE\n"
	  "#endif\n"
	  "#include <string.h>\n"
	  "static void *func(void *h, size_t hl, void *n, size_t nl) {\n"
	  "return memmem(h, hl, n, nl);"
	  "}\n", },
	{ "HAVE_MEMRCHR", "memrchr in <string.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#ifndef _GNU_SOURCE\n"
	  "#define _GNU_SOURCE\n"
	  "#endif\n"
	  "#include <string.h>\n"
	  "static void *func(void *s, int c, size_t n) {\n"
	  "return memrchr(s, c, n);"
	  "}\n", },
	{ "HAVE_MMAP", "mmap() declaration",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <sys/mman.h>\n"
	  "static void *func(int fd) {\n"
	  " return mmap(0, 65536, PROT_READ, MAP_SHARED, fd, 0);\n"
	  "}" },
	{ "HAVE_PROC_SELF_MAPS", "/proc/self/maps exists",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
	  "#include <sys/types.h>\n"
	  "#include <sys/stat.h>\n"
	  "#include <fcntl.h>\n"
	  "int main(void) {\n"
	  " return open(\"/proc/self/maps\", O_RDONLY) != -1 ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_QSORT_R_PRIVATE_LAST", "qsort_r cmp takes trailing arg",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
	  "#ifndef _GNU_SOURCE\n"
	  "#define _GNU_SOURCE\n"
	  "#endif\n"
	  "#include <stdlib.h>\n"
	  "static int cmp(const void *lp, const void *rp, void *priv) {\n"
	  " *(unsigned int *)priv = 1;\n"
	  " return *(const int *)lp - *(const int *)rp; }\n"
	  "int main(void) {\n"
	  " int array[] = { 9, 2, 5 };\n"
	  " unsigned int called = 0;\n"
	  " qsort_r(array, 3, sizeof(int), cmp, &called);\n"
	  " return called && array[0] == 2 && array[1] == 5 && array[2] == 9 ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_STRUCT_TIMESPEC", "struct timespec declaration",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <time.h>\n"
	  "static void func(void) {\n"
	  " struct timespec ts;\n"
	  " ts.tv_sec = ts.tv_nsec = 1;\n"
	  "}\n" },
	{ "HAVE_SECTION_START_STOP", "__attribute__((section)) and __start/__stop",
	  "DEFINES_FUNC", NULL, NULL,
	  "static void *__attribute__((__section__(\"mysec\"))) p = &p;\n"
	  "static int func(void) {\n"
	  " extern void *__start_mysec[], *__stop_mysec[];\n"
	  " return __stop_mysec - __start_mysec;\n"
	  "}\n" },
	{ "HAVE_STACK_GROWS_UPWARDS", "stack grows upwards",
	  "DEFINES_EVERYTHING|EXECUTE", NULL, NULL,
	  "#include <stddef.h>\n"
	  "static ptrdiff_t nest(const void *base, unsigned int i)\n"
	  "{\n"
	  " if (i == 0)\n"
	  " return (const char *)&i - (const char *)base;\n"
	  " return nest(base, i-1);\n"
	  "}\n"
	  "int main(int argc, char *argv[]) {\n"
	  " (void)argv;\n"
	  " return (nest(&argc, argc) > 0) ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_STATEMENT_EXPR", "statement expression support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return ({ int x = argc; x == argc ? 0 : 1; });" },
	{ "HAVE_SYS_FILIO_H", "<sys/filio.h>",
	  "OUTSIDE_MAIN", NULL, NULL, /* Solaris needs this for FIONREAD */
	  "#include <sys/filio.h>\n" },
	{ "HAVE_SYS_TERMIOS_H", "<sys/termios.h>",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "#include <sys/termios.h>\n" },
	{ "HAVE_SYS_UNISTD_H", "<sys/unistd.h>",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "#include <sys/unistd.h>\n" },
	{ "HAVE_TYPEOF", "__typeof__ support",
	  "INSIDE_MAIN", NULL, NULL,
	  "__typeof__(argc) i; i = argc; return i == argc ? 0 : 1;" },
	{ "HAVE_UNALIGNED_ACCESS", "unaligned access to int",
	  "DEFINES_EVERYTHING|EXECUTE", NULL, NULL,
	  "#include <string.h>\n"
	  "int main(int argc, char *argv[]) {\n"
	  " (void)argc;\n"
	  " char pad[sizeof(int *) * 1];\n"
	  " strncpy(pad, argv[0], sizeof(pad));\n"
	  " int *x = (int *)pad, *y = (int *)(pad + 1);\n"
	  " return *x == *y;\n"
	  "}\n" },
	{ "HAVE_UTIME", "utime() declaration",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <sys/types.h>\n"
	  "#include <utime.h>\n"
	  "static int func(const char *filename) {\n"
	  " struct utimbuf times = { 0 };\n"
	  /* Was the mojibake "×" (HTML-escaped "&times"). */
	  " return utime(filename, &times);\n"
	  "}" },
	{ "HAVE_WARN_UNUSED_RESULT", "__attribute__((warn_unused_result))",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <sys/types.h>\n"
	  "#include <utime.h>\n"
	  "static __attribute__((warn_unused_result)) int func(int i) {\n"
	  " return i + 1;\n"
	  "}" },
	{ "HAVE_OPENMP", "#pragma omp and -fopenmp support",
	  "INSIDE_MAIN", NULL, NULL,
	  "int i;\n"
	  "#pragma omp parallel for\n"
	  "for(i = 0; i < 0; i++) {};\n"
	  "return 0;\n",
	  "-Werror -fopenmp" },
	{ "HAVE_VALGRIND_MEMCHECK_H", "<valgrind/memcheck.h>",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "#include <valgrind/memcheck.h>\n" },
	{ "HAVE_UCONTEXT", "working <ucontext.h>",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE",
	  NULL, NULL,
	  "#include <ucontext.h>\n"
	  "static int x = 0;\n"
	  "static char stack[2048];\n"
	  "static ucontext_t a, b;\n"
	  "static void fn(void) {\n"
	  " x |= 2;\n"
	  " setcontext(&b);\n"
	  " x |= 4;\n"
	  "}\n"
	  "int main(void) {\n"
	  " x |= 1;\n"
	  " getcontext(&a);\n"
	  " a.uc_stack.ss_sp = stack;\n"
	  " a.uc_stack.ss_size = sizeof(stack);\n"
	  " makecontext(&a, fn, 0);\n"
	  " swapcontext(&b, &a);\n"
	  " return (x == 3) ? 0 : 1;\n"
	  "}\n"
	},
	{ "HAVE_POINTER_SAFE_MAKECONTEXT", "passing pointers via makecontext()",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE",
	  "HAVE_UCONTEXT", NULL,
	  "#include <stddef.h>\n"
	  "#include <ucontext.h>\n"
	  "static int worked = 0;\n"
	  "static char stack[1024];\n"
	  "static ucontext_t a, b;\n"
	  "static void fn(void *p, void *q) {\n"
	  " void *cp = &worked;\n"
	  " void *cq = (void *)(~((ptrdiff_t)cp));\n"
	  " if ((p == cp) && (q == cq))\n"
	  " worked = 1;\n"
	  " setcontext(&b);\n"
	  "}\n"
	  "int main(void) {\n"
	  " void *ap = &worked;\n"
	  " void *aq = (void *)(~((ptrdiff_t)ap));\n"
	  " getcontext(&a);\n"
	  " a.uc_stack.ss_sp = stack;\n"
	  " a.uc_stack.ss_size = sizeof(stack);\n"
	  " makecontext(&a, (void (*)(void))fn, 2, ap, aq);\n"
	  " swapcontext(&b, &a);\n"
	  " return worked ? 0 : 1;\n"
	  "}\n"
	},
};
/* Print "progname: <formatted message>: <strerror(errno)>" to stderr and
 * exit with status eval.  errno is captured first, before any stdio call
 * can clobber it.  Copied behavior of ccan/err's err(). */
static void c12r_err(int eval, const char *fmt, ...)
{
	const int saved_errno = errno;
	va_list args;

	fprintf(stderr, "%s: ", progname);
	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
	fprintf(stderr, ": %s\n", strerror(saved_errno));
	exit(eval);
}
/* Like c12r_err() but without the errno/strerror suffix: print the
 * formatted message to stderr and exit with status eval. */
static void c12r_errx(int eval, const char *fmt, ...)
{
	va_list args;

	fprintf(stderr, "%s: ", progname);
	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
	fputc('\n', stderr);
	exit(eval);
}
/* In --autotools-style mode, print "what why... " (no newline) and flush
 * so the progress line appears before the test runs.  No-op otherwise. */
static void start_test(const char *what, const char *why)
{
	if (!like_a_libtool)
		return;
	printf("%s%s... ", what, why);
	fflush(stdout);
}
/* Finish a start_test() line with "yes" or "no" (autotools-style only). */
static void end_test(bool result)
{
	if (!like_a_libtool)
		return;
	puts(result ? "yes" : "no");
}
/* Copy fsrc to fdst (from their current positions) until EOF or a short
 * write; returns the number of bytes actually written. */
static size_t fcopy(FILE *fsrc, FILE *fdst)
{
	char chunk[BUFSIZ];
	size_t total = 0;
	size_t nread;

	while ((nread = fread(chunk, 1, sizeof(chunk), fsrc)) > 0) {
		size_t nwritten = fwrite(chunk, 1, nread, fdst);

		total += nwritten;
		if (nwritten != nread)
			break;	/* write error: report what made it out */
	}
	return total;
}
/* Read the whole of file into a freshly malloc'd, NUL-terminated buffer,
 * doubling the buffer as needed.  Dies via c12r_err() on read failure.
 * Fix: malloc()/realloc() results were previously unchecked, so an
 * allocation failure dereferenced NULL; now it dies cleanly instead. */
static char *grab_stream(FILE *file)
{
	size_t max, ret, size = 0;
	char *buffer;

	max = BUFSIZ;
	buffer = malloc(max);
	if (!buffer)
		c12r_err(EXIT_TROUBLE_RUNNING, "allocating output buffer");
	/* A full read (ret == space left) means there may be more to come. */
	while ((ret = fread(buffer+size, 1, max - size, file)) == max - size) {
		size += ret;
		buffer = realloc(buffer, max *= 2);
		if (!buffer)
			c12r_err(EXIT_TROUBLE_RUNNING,
				 "allocating output buffer");
	}
	size += ret;
	if (ferror(file))
		c12r_err(EXIT_TROUBLE_RUNNING, "reading from command");
	buffer[size] = '\0';	/* size < max here, so this is in bounds */
	return buffer;
}
/* Run cmd via popen() with stderr merged into stdout; return its combined
 * output (malloc'd, NUL-terminated) and store the pclose() status in
 * *exitstatus.  Dies via c12r_err() if the pipe cannot be opened. */
static char *run(const char *cmd, int *exitstatus)
{
	static const char redir[] = " 2>&1";
	size_t cmdlen = strlen(cmd);
	char *full;
	FILE *pipe;
	char *output;

	/* Append the redirection so compiler warnings are captured too. */
	full = malloc(cmdlen + sizeof(redir));
	memcpy(full, cmd, cmdlen);
	memcpy(full + cmdlen, redir, sizeof(redir));

	pipe = popen(full, "r");
	if (!pipe)
		c12r_err(EXIT_TROUBLE_RUNNING, "popen \"%s\"", full);
	free(full);

	output = grab_stream(pipe);
	*exitstatus = pclose(pipe);
	return output;
}
/* Join argv[1..] with spaces, then append outflag immediately followed by
 * files (no space between them, so "-o"/"-Fe:" style flags both work).
 * Returns a malloc'd string; argv[0] is deliberately skipped. */
static char *connect_args(const char *argv[], const char *outflag,
			  const char *files)
{
	unsigned int i;
	size_t total = strlen(outflag) + strlen(files) + 1;
	size_t pos;
	char *cmd;

	for (i = 1; argv[i]; i++)
		total += 1 + strlen(argv[i]);

	cmd = malloc(total);
	pos = 0;
	for (i = 1; argv[i]; i++) {
		size_t arglen = strlen(argv[i]);

		memcpy(cmd + pos, argv[i], arglen);
		pos += arglen;
		/* Separator needed unless this is the last arg AND there is
		 * no outflag to follow. */
		if (argv[i + 1] || *outflag)
			cmd[pos++] = ' ';
	}
	strcpy(cmd + pos, outflag);
	pos += strlen(outflag);
	strcpy(cmd + pos, files);
	return cmd;
}
static struct test *find_test(const char *name)
{
unsigned int i;
for (i = 0; tests[i].name; i++) {
if (strcmp(tests[i].name, name) == 0)
return &tests[i];
}
c12r_errx(EXIT_BAD_TEST, "Unknown test %s", name);
abort();
}
#define PRE_BOILERPLATE "/* Test program generated by configurator. */\n"
#define MAIN_START_BOILERPLATE \
"int main(int argc, char *argv[]) {\n" \
" (void)argc;\n" \
" (void)argv;\n"
#define USE_FUNC_BOILERPLATE "(void)func;\n"
#define MAIN_BODY_BOILERPLATE "return 0;\n"
#define MAIN_END_BOILERPLATE "}\n"
static bool run_test(const char *cmd, struct test *test)
{
char *output, *newcmd;
FILE *outf;
int status;
if (test->done)
return test->answer;
if (test->depends) {
size_t len;
const char *deps = test->depends;
char *dep;
/* Space-separated dependencies, could be ! for inverse. */
while ((len = strcspn(deps, " ")) != 0) {
bool positive = true;
if (deps[len]) {
dep = strdup(deps);
dep[len] = '\0';
} else {
dep = (char *)deps;
}
if (dep[0] == '!') {
dep++;
positive = false;
}
if (run_test(cmd, find_test(dep)) != positive) {
test->answer = false;
test->done = true;
return test->answer;
}
if (deps[len])
free(dep);
deps += len;
deps += strspn(deps, " ");
}
}
outf = fopen(INPUT_FILE, verbose > 1 ? "w+" : "w");
if (!outf)
c12r_err(EXIT_TROUBLE_RUNNING, "creating %s", INPUT_FILE);
fprintf(outf, "%s", PRE_BOILERPLATE);
if (strstr(test->style, "INSIDE_MAIN")) {
fprintf(outf, "%s", MAIN_START_BOILERPLATE);
fprintf(outf, "%s", test->fragment);
fprintf(outf, "%s", MAIN_END_BOILERPLATE);
} else if (strstr(test->style, "OUTSIDE_MAIN")) {
fprintf(outf, "%s", test->fragment);
fprintf(outf, "%s", MAIN_START_BOILERPLATE);
fprintf(outf, "%s", MAIN_BODY_BOILERPLATE);
fprintf(outf, "%s", MAIN_END_BOILERPLATE);
} else if (strstr(test->style, "DEFINES_FUNC")) {
fprintf(outf, "%s", test->fragment);
fprintf(outf, "%s", MAIN_START_BOILERPLATE);
fprintf(outf, "%s", USE_FUNC_BOILERPLATE);
fprintf(outf, "%s", MAIN_BODY_BOILERPLATE);
fprintf(outf, "%s", MAIN_END_BOILERPLATE);
} else if (strstr(test->style, "DEFINES_EVERYTHING")) {
fprintf(outf, "%s", test->fragment);
} else
c12r_errx(EXIT_BAD_TEST, "Unknown style for test %s: %s",
test->name, test->style);
if (verbose > 1) {
fseek(outf, 0, SEEK_SET);
fcopy(outf, stdout);
}
fclose(outf);
newcmd = strdup(cmd);
if (test->flags) {
newcmd = realloc(newcmd, strlen(newcmd) + strlen(" ")
+ strlen(test->flags) + 1);
strcat(newcmd, " ");
strcat(newcmd, test->flags);
if (verbose > 1)
printf("Extra flags line: %s", newcmd);
}
if (test->link) {
newcmd = realloc(newcmd, strlen(newcmd) + strlen(" ")
+ strlen(test->link) + 1);
strcat(newcmd, " ");
strcat(newcmd, test->link);
if (verbose > 1)
printf("Extra link line: %s", newcmd);
}
start_test("checking for ", test->desc);
output = run(newcmd, &status);
free(newcmd);
if (status != 0 || strstr(output, "warning")) {
if (verbose)
printf("Compile %s for %s, status %i: %s\n",
status ? "fail" : "warning",
test->name, status, output);
if (strstr(test->style, "EXECUTE")
&& !strstr(test->style, "MAY_NOT_COMPILE"))
c12r_errx(EXIT_BAD_TEST,
"Test for %s did not compile:\n%s",
test->name, output);
test->answer = false;
free(output);
} else {
/* Compile succeeded. */
free(output);
/* We run INSIDE_MAIN tests for sanity checking. */
if (strstr(test->style, "EXECUTE")
|| strstr(test->style, "INSIDE_MAIN")) {
output = run("." DIR_SEP OUTPUT_FILE, &status);
if (!strstr(test->style, "EXECUTE") && status != 0)
c12r_errx(EXIT_BAD_TEST,
"Test for %s failed with %i:\n%s",
test->name, status, output);
if (verbose && status)
printf("%s exited %i\n", test->name, status);
free(output);
}
test->answer = (status == 0);
}
test->done = true;
end_test(test->answer);
if (test->answer && test->overrides) {
struct test *override = find_test(test->overrides);
override->done = true;
override->answer = true;
}
return test->answer;
}
/* Read the next "name=value" line from stdin, skipping blank lines,
 * leading spaces/tabs, and '#' comments.  Stores a strdup'd name in
 * *fieldname and returns a strdup'd value (trailing newline stripped),
 * or NULL at EOF.  Dies if a non-comment line has no '='. */
static char *any_field(char **fieldname)
{
	char line[1000];

	for (;;) {
		char *start, *eq;
		size_t vlen;

		if (!fgets(line, sizeof(line), stdin))
			return NULL;

		start = line;
		/* Ignore whitespace, lines starting with # */
		while (*start == ' ' || *start == '\t')
			start++;
		if (*start == '#' || *start == '\n')
			continue;

		eq = strchr(start, '=');
		if (!eq)
			c12r_errx(EXIT_BAD_INPUT, "no = in line: %s", start);
		*eq = '\0';
		*fieldname = strdup(start);

		start = eq + 1;
		vlen = strlen(start);
		if (vlen && start[vlen - 1] == '\n')
			start[vlen - 1] = '\0';
		return strdup(start);
	}
}
/* Read the next field from stdin and insist it is called `name`.
 * Returns its value; at EOF returns NULL if the field was optional,
 * otherwise dies with EXIT_BAD_INPUT. */
static char *read_field(const char *name, bool compulsory)
{
	char *field;
	char *val = any_field(&field);

	if (!val) {
		if (compulsory)
			c12r_errx(EXIT_BAD_INPUT,
				  "Could not read field %s", name);
		return NULL;
	}
	if (strcmp(field, name) != 0)
		c12r_errx(EXIT_BAD_INPUT,
			  "Expected field %s not %s", name, field);
	return val;
}
/* Test descriptions from stdin:
* Lines starting with # or whitespace-only are ignored.
*
* First three non-ignored lines must be:
* var=<varname>
* desc=<description-for-autotools-style>
* style=OUTSIDE_MAIN DEFINES_FUNC INSIDE_MAIN DEFINES_EVERYTHING EXECUTE MAY_NOT_COMPILE
*
* Followed by optional lines:
* depends=<space-separated-testnames, ! to invert>
* link=<extra args for link line>
* flags=<extra args for compile line>
* overrides=<testname-to-force>
*
* Finally a code line, either:
* code=<oneline> OR
* code=
* <lines of code>
* <end-comment>
*
* And <end-comment> looks like this next comment: */
/*END*/
/* Parse one test description from stdin into *test (see the format
 * comment above).  Returns false at EOF (no "var=" line).
 * Fix: the multiline-code loop recomputed strlen(value) over the whole
 * accumulated buffer every iteration (accidental O(n^2)) and carried a
 * dead `n += strlen(buf)` store; the running length is now tracked. */
static bool read_test(struct test *test)
{
	char *field, *value;
	char buf[1000];

	memset(test, 0, sizeof(*test));
	test->name = read_field("var", false);
	if (!test->name)
		return false;
	test->desc = read_field("desc", true);
	test->style = read_field("style", true);
	/* Read any optional fields. */
	while ((value = any_field(&field)) != NULL) {
		if (strcmp(field, "depends") == 0)
			test->depends = value;
		else if (strcmp(field, "link") == 0)
			test->link = value;
		else if (strcmp(field, "flags") == 0)
			test->flags = value;
		else if (strcmp(field, "overrides") == 0)
			test->overrides = value;
		else if (strcmp(field, "code") == 0)
			break;
		else
			c12r_errx(EXIT_BAD_INPUT, "Unknown field %s in %s",
				  field, test->name);
	}
	if (!value)
		c12r_errx(EXIT_BAD_INPUT, "Missing code in %s", test->name);

	if (strlen(value) == 0) {
		/* Multiline program, read to END comment */
		size_t n = 0;

		while (fgets(buf, sizeof(buf), stdin) != 0) {
			size_t add;

			if (strncmp(buf, "/*END*/", 7) == 0)
				break;
			add = strlen(buf);
			value = realloc(value, n + add + 1);
			memcpy(value + n, buf, add + 1);	/* incl. NUL */
			n += add;
		}
	}
	test->fragment = value;
	return true;
}
/* Append --extra-tests read from stdin to tests[], starting at slot
 * num_tests (the zeroed terminator slot of the initial calloc).
 * Fix: the array must always keep one spare slot past the last entry,
 * because read_test() writes into it and a NULL name terminates the
 * array.  Reallocating to num_tests entries (as before) made the next
 * read_test() write one element past the end of the allocation; grow
 * to num_tests + 1 instead. */
static void read_tests(size_t num_tests)
{
	while (read_test(tests + num_tests)) {
		num_tests++;
		tests = realloc(tests, (num_tests + 1) * sizeof(tests[0]));
	}
}
/* Parse options, run every test, then emit the variable file and/or the
 * config header (stdout by default).
 * Fix: the -O handler consumed the argument (argc--/argv++) *before*
 * reading its attached value, so "outflag" was taken from the bytes of
 * the FOLLOWING argument; the value is now read first, matching the
 * other --opt=value handlers. */
int main(int argc, const char *argv[])
{
	char *cmd;
	unsigned int i;
	const char *default_args[]
		= { "", DEFAULT_COMPILER, DEFAULT_FLAGS, NULL };
	const char *outflag = DEFAULT_OUTPUT_EXE_FLAG;
	const char *configurator_cc = NULL;
	const char *orig_cc;
	const char *varfile = NULL;
	const char *headerfile = NULL;
	bool extra_tests = false;
	FILE *outf;

	if (argc > 0)
		progname = argv[0];

	while (argc > 1) {
		if (strcmp(argv[1], "--help") == 0) {
			printf("Usage: configurator [-v] [--var-file=<filename>] [-O<outflag>] [--configurator-cc=<compiler-for-tests>] [--autotools-style] [--extra-tests] [<compiler> <flags>...]\n"
			       " <compiler> <flags> will have \"<outflag> <outfile> <infile.c>\" appended\n"
			       "Default: %s %s %s\n",
			       DEFAULT_COMPILER, DEFAULT_FLAGS,
			       DEFAULT_OUTPUT_EXE_FLAG);
			exit(0);
		}
		if (strncmp(argv[1], "-O", 2) == 0) {
			/* Read the attached value before shifting argv. */
			outflag = argv[1] + 2;
			if (!*outflag) {
				fprintf(stderr,
					"%s: option requires an argument -- O\n",
					argv[0]);
				exit(EXIT_BAD_USAGE);
			}
			argc--;
			argv++;
		} else if (strcmp(argv[1], "-v") == 0) {
			argc--;
			argv++;
			verbose++;
		} else if (strcmp(argv[1], "-vv") == 0) {
			argc--;
			argv++;
			verbose += 2;
		} else if (strncmp(argv[1], "--configurator-cc=", 18) == 0) {
			configurator_cc = argv[1] + 18;
			argc--;
			argv++;
		} else if (strncmp(argv[1], "--var-file=", 11) == 0) {
			varfile = argv[1] + 11;
			argc--;
			argv++;
		} else if (strcmp(argv[1], "--autotools-style") == 0) {
			like_a_libtool = true;
			argc--;
			argv++;
		} else if (strncmp(argv[1], "--header-file=", 14) == 0) {
			headerfile = argv[1] + 14;
			argc--;
			argv++;
		} else if (strcmp(argv[1], "--extra-tests") == 0) {
			extra_tests = true;
			argc--;
			argv++;
		} else if (strcmp(argv[1], "--") == 0) {
			break;
		} else if (argv[1][0] == '-') {
			c12r_errx(EXIT_BAD_USAGE, "Unknown option %s", argv[1]);
		} else {
			break;
		}
	}

	if (argc == 1)
		argv = default_args;

	/* Copy with NULL entry at end */
	tests = calloc(sizeof(base_tests)/sizeof(base_tests[0]) + 1,
		       sizeof(base_tests[0]));
	memcpy(tests, base_tests, sizeof(base_tests));
	if (extra_tests)
		read_tests(sizeof(base_tests)/sizeof(base_tests[0]));

	orig_cc = argv[1];
	if (configurator_cc)
		argv[1] = configurator_cc;

	cmd = connect_args(argv, outflag, OUTPUT_FILE " " INPUT_FILE);
	if (like_a_libtool) {
		start_test("Making autoconf users comfortable", "");
		sleep(1);
		end_test(1);
	}
	for (i = 0; tests[i].name; i++)
		run_test(cmd, &tests[i]);
	free(cmd);

	remove(OUTPUT_FILE);
	remove(INPUT_FILE);

	if (varfile) {
		FILE *vars;

		if (strcmp(varfile, "-") == 0)
			vars = stdout;
		else {
			start_test("Writing variables to ", varfile);
			vars = fopen(varfile, "a");
			if (!vars)
				c12r_err(EXIT_TROUBLE_RUNNING,
					 "Could not open %s", varfile);
		}
		for (i = 0; tests[i].name; i++)
			fprintf(vars, "%s=%u\n", tests[i].name, tests[i].answer);
		if (vars != stdout) {
			if (fclose(vars) != 0)
				c12r_err(EXIT_TROUBLE_RUNNING,
					 "Closing %s", varfile);
			end_test(1);
		}
	}

	if (headerfile) {
		start_test("Writing header to ", headerfile);
		outf = fopen(headerfile, "w");
		if (!outf)
			c12r_err(EXIT_TROUBLE_RUNNING,
				 "Could not open %s", headerfile);
	} else
		outf = stdout;

	fprintf(outf, "/* Generated by CCAN configurator */\n"
		"#ifndef CCAN_CONFIG_H\n"
		"#define CCAN_CONFIG_H\n");
	fprintf(outf, "#ifndef _GNU_SOURCE\n");
	fprintf(outf, "#define _GNU_SOURCE /* Always use GNU extensions. */\n");
	fprintf(outf, "#endif\n");
	fprintf(outf, "#define CCAN_COMPILER \"%s\"\n", orig_cc);
	cmd = connect_args(argv + 1, "", "");
	fprintf(outf, "#define CCAN_CFLAGS \"%s\"\n", cmd);
	free(cmd);
	fprintf(outf, "#define CCAN_OUTPUT_EXE_CFLAG \"%s\"\n\n", outflag);
	/* This one implies "#include <ccan/..." works, eg. for tdb2.h */
	fprintf(outf, "#define HAVE_CCAN 1\n");
	for (i = 0; tests[i].name; i++)
		fprintf(outf, "#define %s %u\n", tests[i].name, tests[i].answer);
	fprintf(outf, "#endif /* CCAN_CONFIG_H */\n");
	if (headerfile) {
		if (fclose(outf) != 0)
			c12r_err(EXIT_TROUBLE_RUNNING, "Closing %s", headerfile);
		end_test(1);
	}
	return 0;
}
|
omp_section_lastprivate.c | <ompts:test>
<ompts:testdescription>Test which checks the omp section lastprivate directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp section lastprivate</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"
/* Checks `lastprivate(i0)` on an omp sections construct: each section
 * records its final loop index in i0; with lastprivate, the value from
 * the lexically LAST section (999) must be copied back out.  The three
 * sections together sum 1..999, so sum acts as a sanity check that all
 * sections actually ran. */
int <ompts:testcode:functionname>omp_section_lastprivate</ompts:testcode:functionname>(FILE * logFile){
	<ompts:orphan:vars>
	/* i0: last loop index written by a section (checked via lastprivate). */
	int i0 = -1;
	/* sum: grand total over all sections. */
	int sum = 0;
	int i;
	/* sum0: per-section partial sum (private). */
	int sum0 = 0;
	</ompts:orphan:vars>
	int known_sum;
	i0 = -1;
	sum = 0;
#pragma omp parallel
	{
		<ompts:orphan>
		/* crosscheck uses private(i0), which must make the final
		 * i0 == 999 comparison fail. */
#pragma omp sections <ompts:check>lastprivate(i0)</ompts:check><ompts:crosscheck>private(i0)</ompts:crosscheck> private(i,sum0)
		{
#pragma omp section
			{
				sum0 = 0;
				for (i = 1; i < 400; i++)
				{
					sum0 = sum0 + i;
					i0 = i;
				}
#pragma omp critical
				{
					sum = sum + sum0;
				} /*end of critical*/
			} /* end of section */
#pragma omp section
			{
				sum0 = 0;
				for(i = 400; i < 700; i++)
				{
					sum0 = sum0 + i;
					i0 = i;
				}
#pragma omp critical
				{
					sum = sum + sum0;
				} /*end of critical*/
			}
			/* Lexically last section: its final i0 (999) is the
			 * value lastprivate must propagate. */
#pragma omp section
			{
				sum0 = 0;
				for(i = 700; i < 1000; i++)
				{
					sum0 = sum0 + i;
					i0 = i;
				}
#pragma omp critical
				{
					sum = sum + sum0;
				} /*end of critical*/
			}
		} /* end of sections*/
		</ompts:orphan>
	} /* end of parallel*/
	/* Gauss sum of 1..999. */
	known_sum = (999 * 1000) / 2;
	return ((known_sum == sum) && (i0 == 999) );
}
</ompts:testcode>
</ompts:test>
|
GB_unop__minv_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_int32_int32)
// op(A') function: GB (_unop_tran__minv_int32_int32)
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 32)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 32) ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 32) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = minv (Ax): apply the int32 multiplicative-inverse unary op to all
 * anz entries, in parallel.  Auto-generated; the full/sparse case and the
 * bitmap case differ only in skipping entries where Ab[p] is zero. */
GrB_Info GB (_unop_apply__minv_int32_int32)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;
            int32_t z = aij ;
            Cx [p] = GB_IMINV_SIGNED (z, 32) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not in the bitmap
            int32_t aij = Ax [p] ;
            int32_t z = aij ;
            Cx [p] = GB_IMINV_SIGNED (z, 32) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__minv_int32_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The parameters are consumed by the included template, not referenced
    // directly here: the transpose/apply loop body is generated by
    // GB_unop_transpose.c, driven by the GB_* macros defined above
    // (GB_CAST_OP in particular).
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' value Y from X, storing X - Y in RESULT.
 * Returns 1 when the difference is negative, 0 otherwise.
 * Side effect: Y is normalized in place so its tv_usec is comparable to X's. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y->tv_usec until x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Opposite direction: fold surplus microseconds back into y->tv_sec. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
// Driver: parses grid size and time steps from argv, allocates two time
// planes plus 13 coefficient fields, runs the tiled stencil TESTS times and
// reports the minimum wall-clock time.
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  // NOTE(review): Nx/Ny/Nz (and Nt below) remain uninitialized when fewer
  // command-line arguments are supplied -- confirm callers always pass all 4.
  if (argc > 3) {
    // +8 adds a 4-cell halo on each side for the order-4 stencil
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays
  // A holds two time planes; the update ping-pongs between A[0] and A[1]
  // (selected by t5 % 2 in the kernel below)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // 13 coefficient fields: coef[0] is the center tap, coef[1..12] cover
  // offsets 1..4 along each of the three axes (see the kernel expression)
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  // (values correspond to the 24x24x24x32 tile shape baked into the
  // generated loop bounds below)
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 24;
  tile_size[3] = 32;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables
  //
  // NOTE(review): initialization loops start at index 1, so plane/row/col 0
  // is never written even though the kernel can read index 0 (t6 >= 4*t5+4
  // minus offset 4) -- confirm this is intended.
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>.  */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation.  It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header.  GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex.  If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>.  */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    // Time-skewed, tiled loop nest generated by PLUTO/CLooG for the
    // 24x24x24x32 tile shape; the bounds are machine-generated -- do not
    // edit by hand.  t5 is the time step; t6/t7/t8 are z/y/x shifted by 4*t5.
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,3);t1++) {
        lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
        ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
        // parallelize over the z-tile dimension
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-11,24));t3<=min(min(min(floord(4*Nt+Ny-9,24),floord(12*t1+Ny+15,24)),floord(24*t2+Ny+11,24)),floord(24*t1-24*t2+Nz+Ny+13,24));t3++) {
            for (t4=max(max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32)),ceild(24*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(12*t1+Nx+15,32)),floord(24*t2+Nx+11,32)),floord(24*t3+Nx+11,32)),floord(24*t1-24*t2+Nz+Nx+13,32));t4++) {
              for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),6*t3+4),8*t4+6);t5++) {
                for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
                    lbv=max(32*t4,4*t5+4);
                    ubv=min(32*t4+31,4*t5+Nx-5);
                    // innermost x loop: hinted for vectorization
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      // 25-point axis-symmetric update: center tap plus
                      // offsets +-1..+-4 along z (coef 1,4,7,10),
                      // y (coef 2,5,8,11) and x (coef 3,6,9,12)
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)])))
                      + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    // keep the best run across TESTS repetitions
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  // NOTE(review): the top-level pointers A, coef and tile_size are never
  // freed (minor one-shot leak at program exit).
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
decomp.h | /*!
* Software SPAMS v2.2 - Copyright 2009-2011 Julien Mairal
*
* This file is part of SPAMS.
*
* SPAMS is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SPAMS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SPAMS. If not, see <http://www.gnu.org/licenses/>.
*
*
* \file
* toolbox decomp
*
* by Julien Mairal
* julien.mairal@inria.fr
*
* File decomp.h
* \brief Contains sparse decomposition algorithms
* It requires the toolbox linalg */
#ifndef DECOMP_H
#define DECOMP_H
#include <vector>
#include <utils.h>
// Character flags passed to BLAS/LAPACK-style triangular routines:
// presumably 'l' = lower triangular, 'n' = non-unit diagonal (names suggest
// so -- confirm against the call sites in the linalg toolbox).
// NOTE(review): `static` in a header gives every translation unit its own
// copy; harmless for these constants but worth confirming it is intentional.
static char low='l';
static char nonUnit='n';
/* **************************
* Greedy Forward Selection
* **************************/
/// Forward Selection (or Orthogonal Matching Pursuit)
/// Addresses the problem of:
/// \forall i, \min_{\alpha_i} ||X_i-D\alpha_i||_2^2
/// s.t. ||\alpha_i||_0 <= L or
/// \forall i, \min_{\alpha_i} ||\alpha_i||_0
/// s.t. ||X_i-D\alpha_i||_2^2 <= epsilon
/// This function is
/// * based on Cholesky decompositions
/// * parallel
/// * optimized for a large number of signals (precomputes the Gram matrix)
template <typename T>
void omp(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
const int *L, const T* eps, const T* lambda, const bool vecL = false,
const bool vecEps = false, const bool Lambda=false, const int numThreads=-1,
Matrix<T>* path = NULL);
template <typename T>
void omp_mask(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha, const Matrix<bool>& mask,
const int *L, const T* eps, const T* lambda, const bool vecL = false,
const bool vecEps = false, const bool Lambda=false, const int numThreads=-1,
Matrix<T>* path = NULL);
/// Auxiliary function of omp
template <typename T>
void coreORMP(Vector<T>& scores, Vector<T>& norm, Vector<T>& tmp,
Matrix<T>& Un, Matrix<T>& Undn, Matrix<T>& Unds, Matrix<T>& Gs,
Vector<T>& Rdn, const AbstractMatrix<T>& G, Vector<INTM>& ind,
Vector<T>& RUn, T& normX, const T* eps, const int* L, const T* lambda,
T* path = NULL);
/// Auxiliary function of omp
template <typename T>
void coreORMPB(Vector<T>& RtD, const AbstractMatrix<T>& G, Vector<INTM>& ind,
Vector<T>& coeffs, T& normX, const int L, const T eps, const T lambda = 0);
/// Auxiliary function of omp
/*template <typename T>
void coreORMPWeighted(Vector<T>& scores, Vector<T>& weights, Vector<T>& norm,
Vector<T>& tmp, Matrix<T>& Un, Matrix<T>& Undn, Matrix<T>& Unds,
Matrix<T>& Gs, Vector<T>& Rdn, const AbstractMatrix<T>& G, Vector<INTM>&
ind, Vector<T>& RUn, T& normX, const T eps, const int L, const T lambda);*/
/* **************
* LARS - Lasso
* **************/
/// Defines different types of problem,
/// - constraint on the l1 norm of the coefficients
/// - constraint on the reconstruction error
/// - l1-sparsity penalty
enum constraint_type { L1COEFFS, L2ERROR, PENALTY, SPARSITY, L2ERROR2, PENALTY2,FISTAMODE};
/// Implementation of LARS-Lasso for solving
/// \forall i, \min_{\alpha_i} ||X_i-D\alpha_i||_2^2
/// s.t. ||\alphai||_1 <= constraint or
/// \forall i, \min_{\alpha_i} ||\alpha_i||_1
/// s.t. ||\X_i-D\alpha_i||_2^2 <= constraint or
/// \forall i, \min_{\alpha_i} constraint*||\alpha_i||_1 + ...
/// ... ||\X_i-D\alpha_i||_2^2 <= T
/// Optionally, the solution might be positive (boolean pos), and a
/// Least-Square can be solved as a post-processing step.
/// L is a maximum number of coefficients.
/// This function is
/// * efficient (Cholesky-based)
/// * parallel
/// * optimized for a big number of signals (precompute the Gramm matrix
template <typename T>
void lasso(const Matrix<T>& X, const Matrix<T>& D,
SpMatrix<T>& spalpha,
int L, const T constraint, const T lambda2 = 0, constraint_type mode = PENALTY,
const bool pos = false, const bool ols = false, const int numThreads=-1,
Matrix<T>* path = NULL, const int length_path=-1);
template <typename T>
void lasso(const Data<T>& X, const AbstractMatrix<T>& G, const AbstractMatrix<T>& DtX,
SpMatrix<T>& spalpha,
int L, const T constraint, constraint_type mode = PENALTY,
const bool pos = false, const bool ols = false, const int numThreads=-1,
Matrix<T>* path = NULL, const int length_path=-1);
/// second implementation using matrix inversion lemma
template <typename T>
void lasso2(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
int L, const T constraint,const T lambda2=0, constraint_type mode = PENALTY, const bool pos = false,
const int numThreads = -1, Matrix<T>* path = NULL, const int length_path=-1);
template <typename T>
void lasso2(const Data<T>& X, const AbstractMatrix<T>& G, const AbstractMatrix<T>& DtX,
SpMatrix<T>& spalpha,
int L, const T constraint, constraint_type mode = PENALTY, const bool pos = false,
const int numThreads = -1, Matrix<T>* path = NULL, const int length_path=-1);
/// second implementation using matrix inversion lemma
template <typename T>
void lasso_mask(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha, const Matrix<bool>& mask,
int L, const T constraint,const T lambda2=0, constraint_type mode = PENALTY, const bool pos = false,
const int numThreads = -1);
/// second implementation using matrix inversion lemma
template <typename T>
void lassoReweighted(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
int L, const T constraint, constraint_type mode, const bool pos,
const T sigma,
const int numThreads = -1);
/// Auxiliary function for lasso
template <typename T>
void coreLARS(Vector<T>& Rdn, Vector<T>& Xdn, Vector<T>& A,
Vector<T>& u, Vector<T>& sig,
Vector<T>& av, Vector<T>& RUn, Matrix<T>& Un,
Matrix<T>& Unds, Matrix<T>& Gs,
Matrix<T>& Gsa, Matrix<T>& workT, Matrix<T>& R,
const AbstractMatrix<T>& G,T& normX,
Vector<int>& ind,Vector<T>& coeffs,const T constraint,
const bool ols = false,
const bool pos =false,
constraint_type mode = L1COEFFS,
T* path = NULL, int length_path=-1);
template <typename T>
void coreLARS2(Vector<T>& DtR, const AbstractMatrix<T>& G,
Matrix<T>& Gs,
Matrix<T>& Ga,
Matrix<T>& invGs,
Vector<T>& u,
Vector<T>& coeffs,
Vector<INTM>& ind,
Matrix<T>& work,
T& normX,
const constraint_type mode,
const T constraint, const bool pos = false,
T* pr_path = NULL, int length_path = -1);
template <typename T>
void coreLARS2(Vector<T>& DtR, const AbstractMatrix<T>& G,
Vector<T>& coeffs, T normX,
const constraint_type mode,
const T constraint, const bool pos = false);
template <typename T>
void coreLARS2W(Vector<T>& DtR, const AbstractMatrix<T>& G,
Matrix<T>& Gs,
Matrix<T>& Ga,
Matrix<T>& invGs,
Vector<T>& u,
Vector<T>& coeffs,
const Vector<T>& weights,
Vector<INTM>& ind,
Matrix<T>& work,
T& normX,
const constraint_type mode,
const T constraint, const bool pos = false);
template <typename T>
void coreLARS2W(Vector<T>& DtR, const AbstractMatrix<T>& G,
Vector<T>& coeffs, const Vector<T>& weights, T normX,
const constraint_type mode,
const T constraint, const bool pos = false);
/// Auxiliary function for coreLARS (Cholesky downdate)
template <typename T>
void downDateLasso(int& j,int& minBasis,T& normX,const bool ols,
const bool pos, Vector<T>& Rdn, INTM* ind,
T* coeffs, Vector<T>& sig, Vector<T>& av,
Vector<T>& Xdn, Vector<T>& RUn,Matrix<T>& Unm, Matrix<T>& Gsm,
Matrix<T>& Gsam, Matrix<T>& Undsm, Matrix<T>& Rm);
/* ************************
* Iterative thresholding
* ************************/
/// Implementation of IST for solving
/// \forall i, \min_{\alpha_i} ||\alpha_i||_1
/// s.t. ||\X_i-D\alpha_i||_2^2 <= constraint or
/// \forall i, \min_{\alpha_i} constraint*||\alpha_i||_1 + ...
/// ... ||\X_i-D\alpha_i||_2^2 <= T
template <typename T>
void ist(const Matrix<T>& X, const Matrix<T>& D,
SpMatrix<T>& spalpha, T lambda, constraint_type mode,
const int itermax=500,
const T tol = 0.5, const int numThreads = -1);
template <typename T>
void ist(const Matrix<T>& X, const Matrix<T>& D,
Matrix<T>& spalpha, T lambda, constraint_type mode,
const int itermax=500,
const T tol = 0.5, const int numThreads=-1);
/// coreIST
template <typename T>
void coreIST(const AbstractMatrix<T>& G, Vector<T>& DtR, Vector<T>& coeffs,
const T thrs, const int itermax = 500,
const T tol = 0.5);
template <typename T>
void coreISTW(const AbstractMatrix<T>& G, Vector<T>& DtR, Vector<T>& coeffs, const Vector<T>& weights,
const T thrs, const int itermax = 500,
const T tol = 0.5);
/// coreIST constrained
template <typename T>
void coreISTconstrained(const AbstractMatrix<T>& G, Vector<T>& DtR, Vector<T>& coeffs,
const T normX2,
const T thrs, const int itermax = 500,
const T tol = 0.5);
/// ist for group Lasso
template <typename T>
void ist_groupLasso(const Matrix<T>* XT, const Matrix<T>& D,
Matrix<T>* alphaT, const int Ngroups,
const T lambda, const constraint_type mode,
const int itermax = 500,
const T tol = 0.5, const int numThreads = -1);
/// Auxiliary function for ist_groupLasso
template <typename T>
void coreGroupIST(const Matrix<T>& G, Matrix<T>& RtD,
Matrix<T>& alphat,
const T thrs,
const int itermax=500,
const T tol = 0.5);
/// Auxiliary function for ist_groupLasso
template <typename T>
void coreGroupISTConstrained(const Matrix<T>& G, Matrix<T>& RtD,
Matrix<T>& alphat, const T normR,
const T eps,
const int itermax=500,
const T tol = 0.5);
/// auxiliary function for ist_groupLasso
template <typename T>
T computeError(const T normX2,const Vector<T>& norms,
const Matrix<T>& G,const Matrix<T>& RtD,const Matrix<T>& alphat);
/// auxiliary function for ist_groupLasso
template <typename T>
T computeError(const T normX2,
const Matrix<T>& G,const Vector<T>& DtR,const Vector<T>& coeffs,
SpVector<T>& coeffs_tmp);
/* ******************
* Simultaneous OMP
* *****************/
template <typename T>
void somp(const Matrix<T>* X, const Matrix<T>& D, SpMatrix<T>* spalpha,
const int Ngroups, const int L, const T* pr_eps, const bool adapt=false,
const int numThreads=-1);
template <typename T>
void somp(const Matrix<T>* X, const Matrix<T>& D, SpMatrix<T>* spalpha,
const int Ngroups, const int L, const T eps, const int numThreads=-1);
template <typename T>
void coreSOMP(const Matrix<T>& X, const Matrix<T>& D, const Matrix<T>& G,
Matrix<T>& vM,
Vector<INTM>& rv, const int L, const T eps);
/* *********************
* Implementation of OMP
* *********************/
/// Forward Selection (or Orthogonal matching pursuit)
/// Address the problem of:
/// \forall i, \min_{\alpha_i} ||X_i-D\alpha_i||_2^2
/// s.t. ||\alphai||_0 <= L or
/// \forall i, \min_{\alpha_i} ||\alpha_i||_0
/// s.t. ||\X_i-D\alpha_i||_2^2 <= epsilon
/// This function is
/// * efficient (Cholesky-based)
/// * parallel
/// * optimized for a big number of signals (precompute the Gramm matrix
template <typename T>
void omp(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
const int* pL, const T* peps, const T* pLambda,
const bool vecL, const bool vecEps,
const bool vecLambda, const int numThreads, Matrix<T>* path) {
int L;
if (!vecL) {
L=*pL;
} else {
Vector<int> vL(const_cast<int*>(pL),X.n());
L=vL.maxval();
}
spalpha.clear();
if (L <= 0) return;
const INTM M = X.n();
const INTM K = D.n();
L = MIN(X.m(),MIN(L,K));
Matrix<T> vM(L,M);
Matrix<INTM> rM(L,M);
ProdMatrix<T> G(D, K < 25000 && M > 10);
int NUM_THREADS=init_omp(numThreads);
Vector<T>* scoresT=new Vector<T>[NUM_THREADS];
Vector<T>* normT=new Vector<T>[NUM_THREADS];
Vector<T>* tmpT=new Vector<T>[NUM_THREADS];
Vector<T>* RdnT=new Vector<T>[NUM_THREADS];
Matrix<T>* UnT=new Matrix<T>[NUM_THREADS];
Matrix<T>* UndnT=new Matrix<T>[NUM_THREADS];
Matrix<T>* UndsT=new Matrix<T>[NUM_THREADS];
Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
for (int i = 0; i<NUM_THREADS; ++i) {
scoresT[i].resize(K);
normT[i].resize(K);
tmpT[i].resize(K);
RdnT[i].resize(K);
UnT[i].resize(L,L);
UnT[i].setZeros();
UndnT[i].resize(K,L);
UndsT[i].resize(L,L);
GsT[i].resize(K,L);
}
int i;
#pragma omp parallel for private(i)
for (i = 0; i< M; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
Vector<T> Xi;
X.refCol(i,Xi);
T normX = Xi.nrm2sq();
Vector<INTM> ind;
rM.refCol(i,ind);
ind.set(-1);
Vector<T> RUn;
vM.refCol(i,RUn);
Vector<T>& Rdn=RdnT[numT];
D.multTrans(Xi,Rdn);
coreORMP(scoresT[numT],normT[numT],tmpT[numT],UnT[numT],UndnT[numT],UndsT[numT],
GsT[numT],Rdn,G,ind,RUn, normX, vecEps ? peps+i : peps,
vecL ? pL+i : pL, vecLambda ? pLambda+i : pLambda,
path && i==0 ? path->rawX() : NULL);
}
delete[](scoresT);
delete[](normT);
delete[](tmpT);
delete[](RdnT);
delete[](UnT);
delete[](UndnT);
delete[](UndsT);
delete[](GsT);
/// convert the sparse matrix into a proper format
spalpha.convert(vM,rM,K);
};
/// Masked variant of omp(): for each signal, entries whose mask flag is
/// false are excluded from the fit by running OMP on masked copies of the
/// signal and the dictionary. Signals with an all-true mask take the same
/// fast path as omp(); all-false signals are left empty.
template <typename T>
void omp_mask(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha, const Matrix<bool>& mask,
      const int *pL, const T* peps, const T* pLambda, const bool vecL,
      const bool vecEps, const bool vecLambda, const int numThreads,
      Matrix<T>* path) {
   int L;
   if (!vecL) {
      L=*pL;
   } else {
      // per-signal sparsity levels: size the buffers for the largest
      Vector<int> vL(const_cast<int*>(pL),X.n());
      L=vL.maxval();
   }
   spalpha.clear();
   if (L <= 0) return;
   const int M = X.n();                // number of signals
   const int K = D.n();                // number of dictionary atoms
   L = MIN(X.m(),MIN(L,K));            // L cannot exceed signal dim or K
   Matrix<T> vM(L,M);                  // coefficient values per signal
   Matrix<INTM> rM(L,M);               // selected atom indices per signal
   ProdMatrix<T> G(D, K < 25000 && M > 10);   // full (unmasked) Gram matrix
   int NUM_THREADS=init_omp(numThreads);
   // per-thread scratch buffers (same set as omp())
   Vector<T>* scoresT=new Vector<T>[NUM_THREADS];
   Vector<T>* normT=new Vector<T>[NUM_THREADS];
   Vector<T>* tmpT=new Vector<T>[NUM_THREADS];
   Vector<T>* RdnT=new Vector<T>[NUM_THREADS];
   Matrix<T>* UnT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* UndnT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* UndsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   // extra per-thread buffers for the masked path: masked copies of D and X
   // plus a per-thread Gram matrix built from the masked dictionary
   ProdMatrix<T>* GT=new ProdMatrix<T>[NUM_THREADS];
   Matrix<T>* DmaskT=new Matrix<T>[NUM_THREADS];
   Vector<T>* XmaskT=new Vector<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      DmaskT[i].resize(D.m(),D.n());
      XmaskT[i].resize(X.m());
      scoresT[i].resize(K);
      normT[i].resize(K);
      tmpT[i].resize(K);
      RdnT[i].resize(K);
      UnT[i].resize(L,L);
      UnT[i].setZeros();
      UndnT[i].resize(K,L);
      UndsT[i].resize(L,L);
      GsT[i].resize(K,L);
   }
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      Vector<T> Xi;
      X.refCol(i,Xi);
      Vector<INTM> ind;
      rM.refCol(i,ind);
      ind.set(-1);                     // -1 marks unused selection slots
      Vector<T> RUn;
      vM.refCol(i,RUn);
      Vector<bool> maski;
      mask.refCol(i,maski);
      Vector<T>& Rdn=RdnT[numT];
      // fully masked-out signal: leave its column empty
      if (maski.allfalse()) continue;
      if (maski.alltrue()) {
         // no masking for this signal: identical to the omp() path
         D.multTrans(Xi,Rdn);
         T normX = Xi.nrm2sq();
         coreORMP(scoresT[numT],normT[numT],tmpT[numT],UnT[numT],UndnT[numT],UndsT[numT],
               GsT[numT],Rdn,G,ind,RUn, normX, vecEps ? peps+i : peps,
               vecL ? pL+i : pL, vecLambda ? pLambda+i : pLambda,
               path && i==0 ? path->rawX() : NULL);
      } else {
         // build masked copies of the dictionary and the signal
         D.copyMask(DmaskT[numT],maski);
         Xi.copyMask(XmaskT[numT],maski);
         T normX = XmaskT[numT].nrm2sq();
         DmaskT[numT].multTrans(XmaskT[numT],Rdn);
         GT[numT].setMatrices(DmaskT[numT],false);
         GT[numT].addDiag(T(1e-10));   // tiny ridge for numerical stability
         // scale the tolerance by the fraction of observed entries
         T eps_mask= (vecEps ? *(peps+i) : *peps)*XmaskT[numT].n()/Xi.n();
         coreORMP(scoresT[numT],normT[numT],tmpT[numT],
               UnT[numT],UndnT[numT],UndsT[numT],
               GsT[numT],Rdn,GT[numT],ind,RUn,
               normX, &eps_mask, vecL ? pL+i : pL,
               vecLambda ? pLambda+i : pLambda,
               path && i==0 ? path->rawX() : NULL);
         // restore the logical dimensions of the scratch buffers
         // (copyMask presumably shrinks them -- confirm against copyMask)
         DmaskT[numT].setm(D.m());
         DmaskT[numT].setn(D.n());
         XmaskT[numT].setn(X.m());
      }
   }
   delete[](GT);
   delete[](XmaskT);
   delete[](DmaskT);
   delete[](scoresT);
   delete[](normT);
   delete[](tmpT);
   delete[](RdnT);
   delete[](UnT);
   delete[](UndnT);
   delete[](UndsT);
   delete[](GsT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
/// Auxiliary function of omp
/// Convenience wrapper around coreORMP for a single signal: allocates the
/// per-call work buffers, resets the index vector, and forwards the scalar
/// parameters by address as coreORMP expects.
template <typename T>
void coreORMPB(Vector<T>& RtD, const AbstractMatrix<T>& G, Vector<INTM>& ind,
      Vector<T>& coeffs, T& normX, const int L, const T eps, const T lambda) {
   const int nAtoms = G.n();
   // scratch vectors sized by the dictionary
   Vector<T> scoreBuf(nAtoms);
   Vector<T> normBuf(nAtoms);
   Vector<T> tmpBuf(nAtoms);
   // Cholesky-related work matrices sized by the sparsity budget L
   Matrix<T> UnBuf(L,L);
   Matrix<T> UndnBuf(nAtoms,L);
   Matrix<T> UndsBuf(L,L);
   Matrix<T> GsBuf(nAtoms,L);
   ind.set(-1);   // -1 marks unused selection slots
   coreORMP(scoreBuf,normBuf,tmpBuf,UnBuf,UndnBuf,UndsBuf,GsBuf,RtD,G,ind,
         coeffs,normX,&eps,&L,&lambda);
};
/// Auxiliary function of omp
/// Core Order-Recursive Matching Pursuit for a single signal.
/// Greedily selects up to L atoms, maintaining a Cholesky-style triangular
/// factor Un via raw BLAS calls. Stops when the residual energy normX drops
/// below eps, the budget L is reached, the squared step gain falls below
/// 2*lambda, or the candidate's residual norm degenerates.
/// On return: ind holds the selected atom indices (-1 padding), RUn the
/// coefficients, normX the remaining residual energy. If `path` is non-NULL
/// the K-by-L coefficient path is written into it.
template <typename T>
void coreORMP(Vector<T>& scores, Vector<T>& norm, Vector<T>& tmp, Matrix<T>& Un,
      Matrix<T>& Undn, Matrix<T>& Unds, Matrix<T>& Gs, Vector<T>& Rdn,
      const AbstractMatrix<T>& G,
      Vector<INTM>& ind, Vector<T>& RUn,
      T& normX, const T* peps, const int* pL, const T* plambda,
      T* path) {
   const T eps = abs<T>(*peps);           // tolerance on residual energy
   const int L = MIN(*pL,Gs.n());         // budget, capped by buffer width
   const T lambda=*plambda;
   // nothing to do if the signal is already within tolerance
   if ((normX <= eps) || L == 0) return;
   const int K = scores.n();
   scores.copy(Rdn);                      // initial scores = correlations
   norm.set(T(1.0));                      // residual norms of all atoms
   Un.setZeros();
   // permit unsafe low level access
   T* const prUn = Un.rawX();
   //T* const prUnds = Unds.rawX();
   T* const prUndn = Undn.rawX();
   T* const prGs = Gs.rawX();
   T* const prRUn= RUn.rawX();
   if (path)
      memset(path,0,K*L*sizeof(T));
   int j;
   for (j = 0; j<L; ++j) {
      // greedy step: atom with the largest score (Rdn^2 / norm, see below)
      const int currentInd=scores.fmax();
      // degenerate candidate: its residual norm has vanished, stop here
      if (norm[currentInd] < 1e-8) {
         ind[j]=-1;
         break;
      }
      const T invNorm=T(1.0)/sqrt(norm[currentInd]);
      const T RU=Rdn[currentInd]*invNorm;
      const T delta = RU*RU;              // energy captured by this atom
      // lambda acts as an l0-penalty threshold on the marginal gain
      if (delta < 2*lambda) {
         break;
      }
      RUn[j]=RU;
      normX -= delta;
      ind[j]=currentInd;
      //for (int k = 0; k<j; ++k) prUn[j*L+k]=0.0;
      //prUn[j*L+j]=T(1.0);
      // for (int k = 0; k<j; ++k) prUnds[k*L+j]=prUndn[k*K+currentInd];
      // MGS algorithm, Update Un
      //  int iter = norm[currentInd] < 0.5 ? 2 : 1;
      //int iter=1;
      // for (int k = 0; k<iter; ++k) {
      ///  for (int l = 0; l<j; ++l) {
      //      T scal=-cblas_dot<T>(j+1-l,prUn+j*L+l,1,prUnds+l*L+l,1);
      //      T scal = -prUnds[l*L+j];
      //      cblas_axpy<T>(l+1,scal,prUn+l*L,1,prUn+j*L,1);
      //   }
      // }
      // update column j of the triangular factor Un in place
      prUn[j*L+j]=-T(1.0);
      cblas_copy<T>(j,prUndn+currentInd,K,prUn+j*L,1);
      cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,j,prUn,L,prUn+j*L,1);
      cblas_scal<T>(j+1,-invNorm,prUn+j*L,1);
      // stop early when the budget is spent or the residual is small enough
      if (j == L-1 || (normX <= eps)) {
         ++j;
         break;
      }
      if (path) {
         // scratch column: back-substitute current coefficients, then
         // scatter them to the selected atom positions of step j
         T* last_path=path+(L-1)*K;
         cblas_copy<T>(j+1,prRUn,1,last_path,1);
         cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
               j+1,prUn,L,last_path,1);
         for (int k = 0; k<=j; ++k) {
            path[j*K+ind[k]]=last_path[k];
         }
      }
      // update the variables Gs, Undn, Unds, Rdn, norm, scores
      Vector<T> Gsj;
      Gs.refCol(j,Gsj);
      G.copyCol(currentInd,Gsj);
      cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),prGs,K,prUn+j*L,1,
            T(0.0),prUndn+j*K,1);
      //  prUnds[j*L+j] = prUndn[j*K+currentInd];
      Vector<T> Undnj;
      Undn.refCol(j,Undnj);
      Rdn.add(Undnj,-RUn[j]);            // deflate correlations
      tmp.sqr(Undnj);
      norm.sub(tmp);                     // shrink residual norms
      scores.sqr(Rdn);
      scores.div(norm);                  // score = Rdn^2 / norm
      // already-selected atoms must never be picked again
      for (int k = 0; k<=j; ++k) scores[ind[k]]=T();
   }
   // compute the final coefficients by triangular back-substitution
   cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
         j,prUn,L,prRUn,1);
   if (path) {
      // NOTE(review): this clears only L entries of the K-sized scratch
      // column at path+(L-1)*K (the earlier memset used K*L) -- confirm
      // whether K*sizeof(T) was intended.
      memset(path+(L-1)*K,0,L*sizeof(T));
      for (int k = 0; k<j; ++k) {
         path[(j-1)*K+ind[k]]=prRUn[k];
      }
   }
};
/// Auxiliary function of omp
/*template <typename T>
void coreORMPWeighted(Vector<T>& scores, Vector<T>& weights, Vector<T>& norm, Vector<T>& tmp, Matrix<T>& Un,
Matrix<T>& Undn, Matrix<T>& Unds, Matrix<T>& Gs, Vector<T>& Rdn,
const AbstractMatrix<T>& G,
Vector<INTM>& ind, Vector<T>& RUn,
T& normX, const T peps, const int pL, const T plambda) {
const T eps = abs<T>(*peps);
const int L = MIN(*pL,Gs.n());
const T lambda=*plambda;
if ((normX <= eps) || L == 0) return;
const int K = scores.n();
scores.copy(Rdn);
scores.div(weights);
norm.set(T(1.0));
Un.setZeros();
// permit unsafe low level access
T* const prUn = Un.rawX();
T* const prUnds = Unds.rawX();
T* const prUndn = Undn.rawX();
T* const prGs = Gs.rawX();
T* const prRUn= RUn.rawX();
int j;
for (j = 0; j<L; ++j) {
const int currentInd=scores.fmax();
if (norm[currentInd] < 1e-8) {
ind[j]=-1;
break;
}
const T invNorm=T(1.0)/sqrt(norm[currentInd]);
const T RU=Rdn[currentInd]*invNorm;
const T delta = RU*RU;
if (delta < 2*lambda) {
break;
}
RUn[j]=RU;
normX -= delta;
ind[j]=currentInd;
prUn[j*L+j]=-T(1.0);
cblas_copy<T>(j,prUndn+currentInd,K,prUn+j*L,1);
cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,j,prUn,L,prUn+j*L,1);
cblas_scal<T>(j+1,-invNorm,prUn+j*L,1);
if (j == L-1 || (normX <= eps)) {
++j;
break;
}
// update the variables Gs, Undn, Unds, Rdn, norm, scores
Vector<T> Gsj;
Gs.refCol(j,Gsj);
G.copyCol(currentInd,Gsj);
cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),prGs,K,prUn+j*L,1,
T(0.0),prUndn+j*K,1);
Vector<T> Undnj;
Undn.refCol(j,Undnj);
Rdn.add(Undnj,-RUn[j]);
tmp.sqr(Undnj);
norm.sub(tmp);
scores.sqr(Rdn);
scores.div(norm);
scores.div(weights);
for (int k = 0; k<=j; ++k) scores[ind[k]]=T();
}
// compute the final coefficients
cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
j,prUn,L,prRUn,1);
};*/
/* **************
* LARS - Lasso
* **************/
/// Implementation of LARS-Lasso for solving
/// \forall i, \min_{\alpha_i} ||X_i-D\alpha_i||_2^2
/// s.t. ||\alphai||_1 <= constraint or
/// \forall i, \min_{\alpha_i} ||\alpha_i||_1
/// s.t. ||\X_i-D\alpha_i||_2^2 <= constraint or
/// \forall i, \min_{\alpha_i} constraint*||\alpha_i||_1 + ...
/// ... ||\X_i-D\alpha_i||_2^2 <= T
/// Optionally, the solution might be positive (boolean pos), and a
/// Least-Square can be solved as a post-processing step.
/// L is a maximum number of coefficients.
/// This function is
/// * efficient (Cholesky-based)
/// * parallel
/// * optimized for a big number of signals (precompute the Gramm matrix
template <typename T>
void lasso(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
int L, const T lambda, const T lambda2, constraint_type mode,
const bool pos, const bool ols, const int numThreads,
Matrix<T>* path, const int length_path) {
ProdMatrix<T> G(D, X.n() > 10 && D.n() < 50000);
G.addDiag(MAX(lambda2,1e-10));
ProdMatrix<T> DtX(D,X,false);
lasso(X,G,DtX,spalpha,L,lambda,mode,pos,ols,numThreads,path,length_path);
}
/// LARS-Lasso driver over all columns of X given a precomputed Gram matrix
/// G = D'D and correlations DtX = D'X. Allocates per-thread work buffers and
/// dispatches one coreLARS call per signal; results are gathered into the
/// sparse output spalpha. `path` (optional) records the coefficient path of
/// the first signal only.
template <typename T>
void lasso(const Data<T>& X, const AbstractMatrix<T>& G,
      const AbstractMatrix<T>& DtX, SpMatrix<T>& spalpha,
      int L, const T lambda, constraint_type mode,
      const bool pos, const bool ols, const int numThreads,
      Matrix<T>* path, const int length_path) {
   spalpha.clear();
   const INTM M = X.n();               // number of signals
   const INTM K = G.n();               // number of dictionary atoms
   Matrix<T> vM;                       // coefficient values per signal
   Matrix<INTM> rM;                    // selected atom indices per signal
   vM.resize(L,M);
   rM.resize(L,M);
   // NOTE(review): vM/rM are resized with L before the L <= 0 guard below;
   // confirm Matrix::resize tolerates non-positive sizes.
   if (L <= 0) return;
   if (path) path->setZeros();
   int NUM_THREADS=init_omp(numThreads);
   //ProdMatrix<T> G(D, K < 25000 && M > 10);
   // per-thread scratch buffers; the OLS-only ones (Xdn, RUn) are sized
   // lazily below
   Vector<T>* RdnT=new Vector<T>[NUM_THREADS];
   Vector<T>* XdnT =new Vector<T>[NUM_THREADS];
   Vector<T>* AT=new Vector<T>[NUM_THREADS];
   Vector<T>* uT=new Vector<T>[NUM_THREADS];
   Vector<T>* sigT=new Vector<T>[NUM_THREADS];
   Vector<T>* avT=new Vector<T>[NUM_THREADS];
   Vector<T>* RUnT = new Vector<T>[NUM_THREADS];
   Matrix<T>* UnT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* RT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* UndsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* GsaT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      RdnT[i].resize(K);
      if (ols) XdnT[i].resize(K);      // only needed for the OLS refit
      AT[i].resize(K);
      uT[i].resize(L);
      sigT[i].resize(L);
      avT[i].resize(L);
      if (ols) RUnT[i].resize(L);      // only needed for the OLS refit
      UnT[i].resize(L,L);
      UnT[i].setZeros();
      UndsT[i].resize(L,L);
      UndsT[i].setZeros();
      GsT[i].resize(K,L);
      GsaT[i].resize(L,L);
      workT[i].resize(K,2);
      RT[i].resize(L,L);
   }
   // squared l2 norms of the signals (residual energies fed to coreLARS)
   Vector<T> norms;
   X.norm_2sq_cols(norms);
   int i;
#pragma omp parallel for private(i)
   for (i = 0; i< M; ++i) {
#ifdef _OPENMP
      int numT=omp_get_thread_num();
#else
      int numT=0;
#endif
      T normX = norms[i];
      Vector<INTM> ind;
      rM.refCol(i,ind);
      Vector<T> coeffs;
      vM.refCol(i,coeffs);
      coeffs.setZeros();
      Vector<T>& Rdn=RdnT[numT];
      DtX.copyCol(i,Rdn);              // correlations D'x_i for this signal
      coreLARS(Rdn,XdnT[numT], AT[numT], uT[numT], sigT[numT], avT[numT],
            RUnT[numT], UnT[numT], UndsT[numT], GsT[numT], GsaT[numT],
            workT[numT],RT[numT],G,normX, ind,coeffs,lambda,ols,pos,
            mode,path && i==0 ? path->rawX() : NULL, length_path);
   }
   delete[](RdnT);
   delete[](XdnT);
   delete[](AT);
   delete[](uT);
   delete[](sigT);
   delete[](avT);
   delete[](RUnT);
   delete[](UnT);
   delete[](RT);
   delete[](UndsT);
   delete[](GsT);
   delete[](GsaT);
   delete[](workT);
   /// convert the sparse matrix into a proper format
   spalpha.convert(vM,rM,K);
};
/// Auxiliary function for lasso.
/// Core LARS/homotopy iteration for a single signal. On entry Rdnv holds
/// the correlations D'X_i and normX the squared norm of the signal; on
/// exit indv/coeffsv hold the selected atom indices and their
/// coefficients (restored to the original sign convention at the end).
/// Unm is an incrementally maintained triangular factor related to the
/// Gram matrix of the active atoms; downDateLasso repairs it when an
/// atom leaves the active set. All other arguments are caller-provided
/// scratch buffers: this routine performs no allocation.
template <typename T>
void coreLARS(Vector<T>& Rdnv, Vector<T>& Xdnv, Vector<T>& Av,
    Vector<T>& uv, Vector<T>& sigv, Vector<T>& avv, Vector<T>& RUnv,
    Matrix<T>& Unm, Matrix<T>& Undsm, Matrix<T>& Gsm,
    Matrix<T>& Gsam, Matrix<T>& workm, Matrix<T>& Rm,
    const AbstractMatrix<T>& Gm,T& normX,
    Vector<INTM>& indv,Vector<T>& coeffsv,const T constraint,
    const bool ols,const bool pos, constraint_type mode,
    T* path, int length_path) {
  // Nothing to do if the signal already satisfies the error constraint.
  if (mode == L2ERROR && normX < constraint) return;
  const int LL = Gsm.n();
  const int K = Gsm.m();
  const int L = MIN(LL,K);     // max active-set size
  if (length_path <= 1) length_path=4*L;
  // permit unsafe fast low level access
  T* const Rdn = Rdnv.rawX();   // current correlations D'residual
  T* const Xdn = Xdnv.rawX();   // copy of initial correlations (ols only)
  T* const A = Av.rawX();
  T* const u = uv.rawX();       // step direction on the active set
  T* const sig = sigv.rawX();   // sign of each active atom's correlation
  //T* const av = avv.rawX();
  T* const RUn = RUnv.rawX();
  T* const Un = Unm.rawX();
  T* const Unds = Undsm.rawX();
  T* const Gs = Gsm.rawX();     // columns of G for the active atoms
  T* const Gsa = Gsam.rawX();   // Gram matrix restricted to active atoms
  T* const work = workm.rawX();
  //T* const G = Gm.rawX();
  //T* const R = Rm.rawX();
  INTM* ind = indv.rawX();
  T* coeffs = coeffsv.rawX();
  coeffsv.setZeros();
  indv.set(-1);                 // -1 marks unused active-set slots
  if (ols) Xdnv.copy(Rdnv);
  // Most correlated atom: max (pos) or max in absolute value (fmax).
  int currentInd= pos ? Rdnv.max() : Rdnv.fmax();
  bool newAtom=true;
  T Cmax = 0;                   // current maximal correlation
  int iter=1;
  T thrs = 0.0;                 // running l1 norm / penalty threshold
  // INTM* const ind_orig = ind;
  // T* const coeffs_orig = coeffs;
  int j;
  for (j = 0; j<L; ++j) {
    if (newAtom) {
      // Add atom currentInd to the active set and extend the
      // factorization by one row/column.
      ind[j]=currentInd;
      if (pos) {
        Cmax = Rdn[currentInd];
        sig[j]=1.0;
      } else {
        Cmax = abs<T>(Rdn[currentInd]);
        sig[j] = SIGN(Rdn[currentInd]);
      }
      for (int k = 0; k<=j; ++k) Un[j*L+k]=0.0;
      Un[j*L+j]=1.0;
      Gm.extract_rawCol(currentInd,Gs+K*j);
      // Apply the stored signs so the active-set problem is sign-free.
      for (int k = 0; k<j; ++k) Gs[K*j+ind[k]] *= sig[k];
      if (sig[j] < 0) {
        Rdn[currentInd]=-Rdn[currentInd];
        if (ols) Xdn[currentInd]=-Xdn[currentInd];
        cblas_scal<T>(K,sig[j],Gs+K*j,1);
        cblas_scal<T>(j+1,sig[j],Gs+currentInd,K);
      }
      cblas_copy<T>(j+1,Gs+currentInd,K,Gsa+j*L,1);
      // Keep Gsa symmetric.
      for (int k = 0; k<j; ++k) Gsa[k*L+j]=Gsa[j*L+k];
      // <d_j,d_i>
      cblas_copy<T>(j,Gsa+j*L,1,Unds+j,L);
      // <U_j final,d_i>
      cblas_trmv<T>(CblasColMajor,CblasUpper,CblasTrans,CblasNonUnit,
          j+1,Un,L,Unds+j,L);
      // norm2
      T norm2=Gsa[j*L+j];
      for (int k = 0; k<j; ++k) norm2 -= Unds[k*L+j]*Unds[k*L+j];
      // New atom is (numerically) linearly dependent on the active
      // set: stop here rather than dividing by ~0 below.
      if (norm2 < 1e-15) {
        ind[j]=-1;
        //      cerr << "bad exit" << endl;
        break;
      }
      //int iter2 = norm2 < 0.5 ? 2 : 1;
      //for(int k = 0; k<iter2; ++k) {
      //   for (int l = 0; l<j; ++l) {
      //      T scal=-cblas_dot<T>(j+1-l,Un+j*L+l,1,Unds+l*L+l,1);
      //      cblas_axpy<T>(l+1,scal,Un+l*L,1,Un+j*L,1);
      //   }
      //}
      Un[j*L+j]=-T(1.0);
      cblas_copy<T>(j,Unds+j,L,Un+j*L,1);
      cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,j,Un,L,Un+j*L,1);
      /// Un is the orthogonalized vectors in the D basis
      T invNorm=1.0/sqrt(norm2);
      cblas_scal<T>(j+1,-invNorm,Un+j*L,1);
      Unds[j*L+j]=cblas_dot<T>(j+1,Un+j*L,1,Gsa+j*L,1);
    }
    // Equiangular direction u over the active atoms, normalized so all
    // active correlations decrease at the same (unit) rate.
    for (int k = 0; k<=j; ++k) u[k]=T(1.0);
    cblas_trmv<T>(CblasColMajor,CblasUpper,CblasTrans,CblasNonUnit,
        j+1,Un,L,u,1);
    T a = T(1.0)/cblas_nrm2<T>(j+1,u,1);
    cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
        j+1,Un,L,u,1);
    cblas_scal<T>(j+1,a,u,1);
    // A = G * u : how each atom's correlation changes along the path.
    cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),Gs,K,u,1,T(0.0),A,1);
    T potentNorm=0.0;
    if (!ols) {
      for (int k = 0; k<=j; ++k)  potentNorm += Rdn[ind[k]]*u[k];
    }
    // Smallest step at which some inactive atom reaches the same
    // correlation as the active set (work holds candidate step sizes).
    if (pos) {
      for (int k = 0; k<K; ++k) {
        T diff = a-A[k];
        work[k]= diff <= 0 ? INFINITY : (Cmax-Rdn[k])/diff;
      }
      for (int k = 0; k<=j; ++k) {
        work[ind[k]]=INFINITY;
      }
      for (int k = 0; k<K; ++k)
        if (work[k] <=0) work[k]=INFINITY;
      currentInd =cblas_iamin<T>(K,work,1);
    } else {
      // Two candidates per atom: entering with positive or negative sign.
      memset(work,0,2*K*sizeof(T));
      for (int k = 0; k<=j; ++k) {
        const int index=2*ind[k];
        work[index]=INFINITY;
        work[index+1]=INFINITY;
      }
      for (int k = 0; k<K; ++k) {
        const int index=2*k;
        if (!work[index]) {
          const T diff1=a-A[k];
          work[index]= diff1 <= 0 ? INFINITY : (Cmax-Rdn[k])/diff1;
          const T diff2=a+A[k];
          work[index+1]=diff2 <= 0 ? INFINITY : (Cmax+Rdn[k])/diff2;
        }
      }
      currentInd =cblas_iamin<T>(2*K,work,1);
    }
    T gamma=work[currentInd];
    T gammaMin=0;
    int minBasis=0;
    //if (j == L-1) gamma=potentNorm;
    if (mode == PENALTY) {
      gamma=MIN(gamma,(Cmax-constraint)/a);
    }
    // Step at which an active coefficient would cross zero (lasso
    // modification of LARS): that atom must then be removed.
    // if (j > 0) {
    vDiv<T>(j+1,coeffs,u,work);
    cblas_scal<T>(j+1,-T(1.0),work,1);
    /// voir pour petites valeurs
    for (int k=0; k<=j; ++k)
      if (coeffs[k]==0 || work[k] <=0) work[k]=INFINITY;
    minBasis=cblas_iamin<T>(j+1,work,1);
    gammaMin=work[minBasis];
    if (gammaMin < gamma) gamma=gammaMin;
    // }
    if (mode == L1COEFFS) {
      T Tu = 0.0;
      for (int k = 0; k<=j; ++k) Tu += u[k];
      if (Tu > EPSILON)
        gamma= MIN(gamma,(constraint-thrs)/Tu);
      thrs+=gamma*Tu;
    }
    // compute the norm of the residdual
    if (ols == 0) {
      const T t = gamma*gamma - 2*gamma*potentNorm;
      // Numerical breakdown guard: the residual norm should never grow.
      if (t > 0 || isnan(t) || isinf(t)) {
        //     cerr << "bad bad exit" << endl;
        //   cerr << t << endl;
        ind[j]=-1;
        break;
      }
      normX += t;
    } else {
      // plan the last orthogonal projection
      if (newAtom) {
        RUn[j]=0.0;
        for (int k = 0; k<=j; ++k) RUn[j] += Xdn[ind[k]]*
          Un[j*L+k];
        normX -= RUn[j]*RUn[j];
      }
    }
    // Update the coefficients
    cblas_axpy<T>(j+1,gamma,u,1,coeffs,1);
    if (pos) {
      for (int k = 0; k<j+1; ++k)
        if (coeffs[k] < 0) coeffs[k]=0;
    }
    // Update correlations: Rdn -= gamma * A.
    cblas_axpy<T>(K,-gamma,A,1,Rdn,1);
    // In the signed case each atom had two candidate slots in work.
    if (!pos) currentInd/= 2;
    if (path) {
      for (int k = 0; k<=j; ++k)
        path[iter*K+ind[k]]=coeffs[k]*sig[k];
    }
    if (gamma == gammaMin) {
      // A coefficient hit zero first: downdate and retry this slot.
      downDateLasso<T>(j,minBasis,normX,ols,pos,Rdnv,ind,coeffs,sigv,
          avv,Xdnv, RUnv, Unm, Gsm, Gsam,Undsm,Rm);
      newAtom=false;
      Cmax=abs<T>(Rdn[ind[0]]);
      --j;
    } else {
      newAtom=true;
    }
    ++iter;
    if (mode == PENALTY) {
      thrs=abs<T>(Rdn[ind[0]]);
    }
    // Stopping tests: active set full, constraint reached, residual
    // exhausted, or path length budget spent.
    if ((j == L-1) ||
        (mode == PENALTY && (thrs - constraint < 1e-15)) ||
        (mode == L1COEFFS && (thrs - constraint > -1e-15)) ||
        (newAtom && mode == L2ERROR && (normX - constraint < 1e-15)) ||
        (normX < 1e-15) ||
        (iter >= length_path)) {
      //       cerr << "exit" << endl;
      //       PRINT_F(thrs)
      //       PRINT_F(constraint)
      //       PRINT_F(normX)
      break;
    }
  }
  // Optional least-squares refit on the selected atoms.
  if (ols) {
    cblas_copy<T>(j+1,RUn,1,coeffs,1);
    cblas_trmv<T>(CblasColMajor,CblasUpper,CblasNoTrans,CblasNonUnit,
        j+1,Un,L,coeffs,1);
  }
  // Restore the original signs of the coefficients.
  vMul<T>(j+1,coeffs,sig,coeffs);
};
/// Auxiliary function for coreLARS (Cholesky downdate).
/// Removes atom `minBasis` from the active set (its coefficient reached
/// zero on the lasso path) and repairs all incremental state so that the
/// coreLARS invariants hold with one fewer atom: Un, Unds, Gs, Gsa, sig,
/// ind and coeffs are compacted, and when ols is set RUn and normX are
/// recomputed for the shortened projection. Decrements j on exit.
template <typename T>
inline void downDateLasso(int& j,int& minBasis,T& normX,const bool ols,
    const bool pos,
    Vector<T>& Rdnv, INTM* ind,
    T* coeffs, Vector<T>& sigv, Vector<T>& avv,
    Vector<T>& Xdnv, Vector<T>& RUnv,Matrix<T>& Unm, Matrix<T>& Gsm,
    Matrix<T>& Gsam, Matrix<T>& Undsm, Matrix<T>& Rm) {
  const int L = Gsm.n();
  const int K = Gsm.m();
  T* const Rdn = Rdnv.rawX();
  T* const Xdn = Xdnv.rawX();
  T* const sig = sigv.rawX();
  T* const av = avv.rawX();
  T* const RUn = RUnv.rawX();
  T* const Un = Unm.rawX();
  T* const Unds = Undsm.rawX();
  T* const Gs = Gsm.rawX();
  T* const Gsa = Gsam.rawX();
  T* const R = Rm.rawX();
  int indB=ind[minBasis];
  if (!pos && sig[minBasis] < 0) {
    // Undo the sign flip coreLARS applied when this atom entered.
    // Update Rdn
    Rdn[indB]=-Rdn[indB];
    if (ols) Xdn[indB]=-Xdn[indB];
  }
  // num = number of active atoms after the removed one.
  int num=j-minBasis;
  // R starts as the identity and accumulates the downdating rotations.
  for (int k = 0; k<num*num;++k) R[k]=0.0;
  for (int k = 0; k<num; ++k) R[k*num+k]=1.0;
  // Update Un
  for (int k = minBasis+1; k<=j; ++k) {
    T a = -Un[k*L+minBasis]/Un[minBasis*L+minBasis];
    av[k-minBasis-1] = a;
    cblas_axpy<T>(minBasis,a,Un+minBasis*L,1,Un+k*L,1);
  }
  // Shift columns past minBasis one slot to the left, dropping the
  // removed row/column.
  for (int k = minBasis+1; k<=j; ++k) {
    cblas_copy<T>(minBasis,Un+k*L,1,Un+(k-1)*L,1);
    cblas_copy<T>(num,Un+k*L+minBasis+1,1,Un+(k-1)*L+minBasis,1);
  }
  // Build the triangular correction factor R from the av coefficients.
  T alpha=1.0;
  T alphab,gamma;
  for (int k = 0; k<num; ++k) {
    alphab=alpha+av[k]*av[k];
    R[k*num+k]=sqrt(alphab/alpha);
    gamma=av[k]*R[k*num+k]/alphab;
    alpha=alphab;
    cblas_copy<T>(num-k-1,av+k+1,1,R+k*num+k+1,1);
    cblas_scal<T>(num-k-1,gamma,R+k*num+k+1,1);
  }
  if (num > 0) {
    // Apply R^{-T} on the right to re-triangularize Un.
    trtri<T>(low,nonUnit,num,R,num);
    cblas_trmm<T>(CblasColMajor,CblasRight,CblasLower,CblasTrans,CblasNonUnit,
        j,num,T(1.0),R,num,Un+minBasis*L,L);
  }
  // Update Unds
  for (int k = minBasis+1; k<=j; ++k)
    cblas_axpy<T>(j-minBasis,av[k-minBasis-1],Unds+minBasis*L+minBasis+1,1,
        Unds+k*L+minBasis+1,1);
  for (int k = 0; k<minBasis; ++k)
    for (int l = minBasis+1; l<=j; ++l)
      Unds[k*L+l-1]=Unds[k*L+l];
  for (int k = minBasis+1; k<=j; ++k)
    cblas_copy<T>(j-minBasis,Unds+k*L+minBasis+1,1,Unds+(k-1)*L+minBasis,1);
  if (num > 0)
    cblas_trmm<T>(CblasColMajor,CblasRight,CblasLower,CblasTrans,CblasNonUnit,
        j-minBasis,num,T(1.0),R,num,Unds+minBasis*L+minBasis,L);
  // Re-zero the strictly lower part that the shifts may have touched.
  for (int k = minBasis+1; k<=j; ++k)
    for (int l = 0; l<k; ++l) Unds[k*L+l]=0.0;
  // Update Gs
  for (int k = minBasis+1; k<=j; ++k) {
    cblas_copy<T>(K,Gs+k*K,1,Gs+(k-1)*K,1);
  }
  if (!pos && sig[minBasis] < T(0.0)) cblas_scal<T>(j,T(-1.0),Gs+indB,K);
  // Update Gsa
  for (int k = minBasis+1; k<=j; ++k) {
    cblas_copy<T>(minBasis,Gsa+k*L,1,Gsa+(k-1)*L,1);
    cblas_copy<T>(j-minBasis,Gsa+k*L+minBasis+1,1,Gsa+(k-1)*L+minBasis,1);
  }
  for (int k = 0; k<minBasis; ++k) {
    for (int l = minBasis+1; l<=j; ++l) Gsa[k*L+l-1]=Gsa[k*L+l];
  }
  // Update sig
  for (int k = minBasis+1; k<=j && !pos; ++k) sig[k-1]=sig[k];
  // Update ind
  for (int k = minBasis+1; k<=j; ++k) ind[k-1]=ind[k];
  ind[j]=-1;
  for (int k = minBasis+1; k<=j; ++k) coeffs[k-1]=coeffs[k];
  coeffs[j]=0.0;
  if (ols) {
    // Update RUn and normX
    for (int k = minBasis; k<=j; ++k)
      normX += RUn[k]*RUn[k];
    for (int k = minBasis; k<j; ++k) {
      RUn[k]=0.0;
      for (int l = 0; l<=k; ++l) RUn[k] += Xdn[ind[l]]*
        Un[k*L+l];
      normX -= RUn[k]*RUn[k];
    }
  }
  // Update j
  --j;
}
/// second implementation using matrix inversion lemma
/// Reweighted-l1 Lasso: first solves a plain LARS problem for each
/// signal, then performs iterR reweighting passes. In each pass the
/// weight of every selected atom is derived from the magnitude of its
/// current coefficient (small coefficients get large weights) with an
/// annealed scale sig, and a weighted LARS problem (coreLARS2W) is
/// re-solved from the original correlations.
///
/// Fixes vs. the previous revision:
///  * removed the GT (NUM_THREADS unused K x K matrices) and inddT
///    buffers, whose only uses were commented out — they wasted
///    O(numThreads * K^2) memory;
///  * the reweighting loop now iterates over the L slots of `ind`
///    (terminated by -1) instead of K, which could read past the end of
///    the L-element index/coefficient columns when all slots were used.
template <typename T>
void lassoReweighted(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
    int L, const T constraint, constraint_type mode, const bool pos,
    const T sigma,
    const int numThreads) {
  spalpha.clear();
  const int M = X.n();     // number of signals
  const int K = D.n();     // number of dictionary atoms
  Matrix<T> vM;            // coefficient values (L x M)
  Matrix<int> rM;          // atom indices, -1 marks unused slots
  vM.resize(L,M);
  rM.resize(L,M);
  const int iterR = 30;    // number of reweighting passes
  if (L <= 0) return;
  int NUM_THREADS=init_omp(numThreads);
  //ProdMatrix<T> G(D, K < 25000 && M > 10);
  ProdMatrix<T> G(D, K < 50000);
  G.addDiag(1e-10);        // small ridge for numerical stability
  // Per-thread scratch buffers, allocated once outside the parallel loop.
  Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
  Vector<T>* DtRRT=new Vector<T>[NUM_THREADS];
  Vector<T>* uT=new Vector<T>[NUM_THREADS];
  Vector<T>* weightsT=new Vector<T>[NUM_THREADS];
  Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
  Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
  Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
  Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
  for (int i = 0; i<NUM_THREADS; ++i) {
    DtRT[i].resize(K);
    DtRRT[i].resize(K);
    uT[i].resize(K);
    weightsT[i].resize(K);
    GsT[i].resize(L,L);
    invGsT[i].resize(L,L);
    GaT[i].resize(K,L);
    workT[i].resize(K,3);
    workT[i].setZeros();
  }
  int i;
#pragma omp parallel for private(i)
  for (i = 0; i< M; ++i) {
#ifdef _OPENMP
    int numT=omp_get_thread_num();
#else
    int numT=0;
#endif
    Vector<T> Xi;
    X.refCol(i,Xi);
    T normXo = Xi.nrm2sq();   // kept to restart each reweighting pass
    T normX = normXo;
    Vector<int> ind;
    rM.refCol(i,ind);
    Vector<T> coeffs;
    vM.refCol(i,coeffs);
    Vector<T>& DtR=DtRT[numT];
    Vector<T>& DtRR = DtRRT[numT];
    D.multTrans(Xi,DtR);      // initial correlations D'X_i
    DtRR.copy(DtR);
    // Pass 0: plain (unweighted) LARS to get a first active set.
    coreLARS2(DtRR,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,
        ind,workT[numT],normX,mode,constraint,pos);
    Vector<T>& weights = weightsT[numT];
    for (int j = 0; j<iterR; ++j) {
      // Anneal the scale from sigma*0.7^(iterR-1) up to sigma.
      const T sig = sigma*pow(0.7,iterR-1-j);
      weights.set(sig);
      // ind has L slots; the active set is terminated by -1. Iterate
      // over L entries only — ind/coeffs have no K-th element.
      for (int k = 0; k<L; ++k) {
        if (ind[k] != -1) {
          weights[ind[k]] = MAX(1e-4,sig*exp(-sig*abs<T>(coeffs[k])));
        } else {
          break;
        }
      }
      // Restart from the original correlations with the new weights.
      DtRR.copy(DtR);
      normX=normXo;
      coreLARS2W(DtRR,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,weights,
          ind,workT[numT],normX,mode,constraint,pos);
    }
  }
  delete[](DtRT);
  delete[](DtRRT);
  delete[](uT);
  delete[](weightsT);
  delete[](GsT);
  delete[](GaT);
  delete[](invGsT);
  delete[](workT);
  /// convert the sparse matrix into a proper format
  spalpha.convert(vM,rM,K);
}
template <typename T>
void lassoWeight(const Matrix<T>& X, const Matrix<T>& D, const Matrix<T>& weights,
SpMatrix<T>& spalpha,
int L, const T constraint, constraint_type mode, const bool pos,
const int numThreads) {
spalpha.clear();
const int M = X.n();
const int K = D.n();
Matrix<T> vM;
Matrix<INTM> rM;
vM.resize(L,M);
rM.resize(L,M);
if (L <= 0) return;
int NUM_THREADS=init_omp(numThreads);
//ProdMatrix<T> G(D, K < 25000 && M > 10);
ProdMatrix<T> G(D, K < 50000);
//Matrix<T> G;
//D.XtX(G);
G.addDiag(1e-10);
Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
Vector<T>* uT=new Vector<T>[NUM_THREADS];
Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
for (int i = 0; i<NUM_THREADS; ++i) {
DtRT[i].resize(K);
uT[i].resize(K);
uT[i].setZeros();
GsT[i].resize(L,L);
invGsT[i].resize(L,L);
GaT[i].resize(K,L);
workT[i].resize(K,3);
workT[i].setZeros();
}
int i;
#pragma omp parallel for private(i)
for (i = 0; i< M; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
Vector<T> Xi;
X.refCol(i,Xi);
T normX = Xi.nrm2sq();
Vector<INTM> ind;
rM.refCol(i,ind);
Vector<T> coeffs;
vM.refCol(i,coeffs);
Vector<T>& DtR=DtRT[numT];
D.multTrans(Xi,DtR);
Vector<T> we;
weights.refCol(i,we);
coreLARS2W(DtR,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,we,
ind,workT[numT],normX,mode,constraint,pos);
}
delete[](DtRT);
delete[](uT);
delete[](GsT);
delete[](GaT);
delete[](invGsT);
delete[](workT);
/// convert the sparse matrix into a proper format
spalpha.convert(vM,rM,K);
};
template <typename T>
void lassoWeightPreComputed(const Matrix<T>& X, const Matrix<T>& G, const Matrix<T>& DtR, const Matrix<T>& weights,
SpMatrix<T>& spalpha,
int L, const T constraint, constraint_type mode, const bool pos,
const int numThreads) {
spalpha.clear();
const int M = X.n();
const int K = G.n();
Matrix<T> vM;
Matrix<int> rM;
vM.resize(L,M);
rM.resize(L,M);
if (L <= 0) return;
int NUM_THREADS=init_omp(numThreads);
Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
Vector<T>* uT=new Vector<T>[NUM_THREADS];
Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
for (int i = 0; i<NUM_THREADS; ++i) {
DtRT[i].resize(K);
uT[i].resize(K);
uT[i].setZeros();
GsT[i].resize(L,L);
invGsT[i].resize(L,L);
GaT[i].resize(K,L);
workT[i].resize(K,3);
workT[i].setZeros();
}
int i;
#pragma omp parallel for private(i)
for (i = 0; i< M; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
Vector<T> Xi;
X.refCol(i,Xi);
T normX = Xi.nrm2sq();
Vector<int> ind;
rM.refCol(i,ind);
Vector<T> coeffs;
vM.refCol(i,coeffs);
Vector<T>& DtRi=DtRT[numT];
DtR.copyCol(i,DtRi);
Vector<T> we;
weights.refCol(i,we);
coreLARS2W(DtRi,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,we,
ind,workT[numT],normX,mode,constraint,pos);
}
delete[](DtRT);
delete[](uT);
delete[](GsT);
delete[](GaT);
delete[](invGsT);
delete[](workT);
/// convert the sparse matrix into a proper format
spalpha.convert(vM,rM,K);
};
/// second implementation using matrix inversion lemma
/// Lasso with missing data: for each signal, only the entries flagged
/// true in the corresponding column of `mask` participate. Columns whose
/// mask is all-true take the fast path with the shared Gram matrix;
/// otherwise masked copies of D and X_i are built per thread, a local
/// Gram matrix is formed, and (for PENALTY/L2ERROR modes) the constraint
/// is rescaled by the fraction of observed entries.
template <typename T>
void lasso_mask(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha, const Matrix<bool>& mask,
    int L, const T constraint,const T lambda2, constraint_type mode, const bool pos,
    const int numThreads) {
  spalpha.clear();
  const int M = X.n();    // number of signals
  const int K = D.n();    // number of dictionary atoms
  Matrix<T> vM;           // coefficient values (L x M)
  Matrix<INTM> rM;        // atom indices, -1 marks unused slots
  vM.resize(L,M);
  rM.resize(L,M);
  if (L <= 0) return;
  int NUM_THREADS=init_omp(numThreads);
  // Shared Gram matrix for the unmasked fast path, with ridge.
  ProdMatrix<T> G(D,K < 25000 && M > 10);
  G.addDiag(MAX(lambda2,1e-10));
  // Per-thread scratch, allocated once outside the parallel loop.
  Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
  Vector<T>* uT=new Vector<T>[NUM_THREADS];
  Vector<T>* XmaskT=new Vector<T>[NUM_THREADS];  // masked copy of X_i
  Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
  ProdMatrix<T>* GT=new ProdMatrix<T>[NUM_THREADS];  // per-thread masked Gram
  Matrix<T>* DmaskT=new Matrix<T>[NUM_THREADS];  // masked copy of D
  Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
  Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
  Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
  for (int i = 0; i<NUM_THREADS; ++i) {
    DmaskT[i].resize(D.m(),D.n());
    DtRT[i].resize(K);
    uT[i].resize(K);
    XmaskT[i].resize(X.m());
    uT[i].setZeros();
    GsT[i].resize(L,L);
    invGsT[i].resize(L,L);
    GaT[i].resize(K,L);
    workT[i].resize(K,3);
    workT[i].setZeros();
  }
  int i;
#pragma omp parallel for private(i)
  for (i = 0; i< M; ++i) {
#ifdef _OPENMP
    int numT=omp_get_thread_num();
#else
    int numT=0;
#endif
    Vector<T> Xi;
    X.refCol(i,Xi);
    Vector<bool> maski;
    mask.refCol(i,maski);
    Vector<INTM> ind;
    rM.refCol(i,ind);
    Vector<T> coeffs;
    vM.refCol(i,coeffs);
    Vector<T>& DtR=DtRT[numT];
    // Fully masked signal: nothing observed, leave coefficients empty.
    if (maski.allfalse()) continue;
    if (maski.alltrue()) {
      // No missing entries: standard LARS with the shared Gram matrix.
      T normX = Xi.nrm2sq();
      D.multTrans(Xi,DtR);
      coreLARS2(DtR,G,GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,
          ind,workT[numT],normX,mode,constraint,pos);
    } else {
      // Restrict D and X_i to the observed rows (copyMask shrinks the
      // buffers to the number of observed entries).
      D.copyMask(DmaskT[numT],maski);
      Xi.copyMask(XmaskT[numT],maski);
      // Rescale error-based constraints by the observed fraction.
      T constraint_mask = mode == PENALTY || mode == L2ERROR ? constraint*XmaskT[numT].n()/Xi.n() : constraint;
      T normX = XmaskT[numT].nrm2sq();
      DmaskT[numT].multTrans(XmaskT[numT],DtR);
      GT[numT].setMatrices(DmaskT[numT],false);
      GT[numT].addDiag(MAX(lambda2,T(1e-10)));
      coreLARS2(DtR,GT[numT],
          GsT[numT],GaT[numT],invGsT[numT],uT[numT],coeffs,
          ind,workT[numT],normX,mode,constraint_mask,pos);
      // Restore the scratch buffers to their full sizes for reuse by
      // the next signal handled by this thread.
      DmaskT[numT].setm(D.m());
      DmaskT[numT].setn(D.n());
      XmaskT[numT].setn(X.m());
    }
  }
  delete[](GT);
  delete[](XmaskT);
  delete[](DmaskT);
  delete[](DtRT);
  delete[](uT);
  delete[](GsT);
  delete[](GaT);
  delete[](invGsT);
  delete[](workT);
  /// convert the sparse matrix into a proper format
  spalpha.convert(vM,rM,K);
};
template <typename T>
void lasso2(const Matrix<T>& X, const Matrix<T>& D, SpMatrix<T>& spalpha,
int L, const T constraint, const T lambda2, constraint_type mode, const bool pos,
const int numThreads, Matrix<T>* path, int length_path) {
ProdMatrix<T> G(D,X.n() > 10 && D.n() < 50000);
ProdMatrix<T> DtX(D,X,false);
G.addDiag(MAX(lambda2,1e-10));
lasso2(X,G,DtX,spalpha,L,constraint,mode,pos,numThreads,path, length_path);
}
template <typename T>
void lasso2(const Data<T>& X, const AbstractMatrix<T>& G, const AbstractMatrix<T>& DtX,
SpMatrix<T>& spalpha,
int L, const T constraint, constraint_type mode, const bool pos,
const int numThreads, Matrix<T>* path, int length_path) {
spalpha.clear();
const INTM M = X.n();
const INTM K = G.n();
Matrix<T> vM;
Matrix<INTM> rM;
vM.resize(L,M);
rM.resize(L,M);
if (L <= 0) return;
if (path) path->setZeros();
int NUM_THREADS=init_omp(numThreads);
Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
Vector<T>* uT=new Vector<T>[NUM_THREADS];
Matrix<T>* GsT=new Matrix<T>[NUM_THREADS];
Matrix<T>* GaT=new Matrix<T>[NUM_THREADS];
Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS];
Matrix<T>* workT=new Matrix<T>[NUM_THREADS];
for (int i = 0; i<NUM_THREADS; ++i) {
DtRT[i].resize(K);
uT[i].resize(K);
uT[i].setZeros();
GsT[i].resize(L,L);
invGsT[i].resize(L,L);
GaT[i].resize(K,L);
workT[i].resize(K,3);
workT[i].setZeros();
}
INTM i;
Vector<T> norms;
X.norm_2sq_cols(norms);
#pragma omp parallel for private(i)
for (i = 0; i< M; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
// Vector<T> Xi;
// X.refCol(i,Xi);
// T normX = Xi.nrm2sq();
T normX = norms[i];
Vector<INTM> ind;
rM.refCol(i,ind);
Vector<T> coeffs;
vM.refCol(i,coeffs);
Vector<T>& DtR=DtRT[numT];
DtX.copyCol(i,DtR);
//D.multTrans(Xi,DtR);
coreLARS2(DtR,G,GsT[numT],GaT[numT],invGsT[numT],
uT[numT],coeffs,
ind,workT[numT],normX,mode,constraint,pos,
path && i==0 ? path->rawX() : NULL,length_path);
}
delete[](DtRT);
delete[](uT);
delete[](GsT);
delete[](GaT);
delete[](invGsT);
delete[](workT);
/// convert the sparse matrix into a proper format
spalpha.convert(vM,rM,K);
};
/// Convenience wrapper around the full coreLARS2W: allocates all scratch
/// buffers locally (allowing up to p = G.m() active atoms), runs the
/// weighted LARS, then scatters the compact (index,value) result into
/// the dense coeffs vector.
template <typename T>
void coreLARS2W(Vector<T>& DtR, const AbstractMatrix<T>& G,
    Vector<T>& coeffs, const Vector<T>& weights, T normX,
    const constraint_type mode,
    const T constraint, const bool pos) {
  const INTM p = G.m();
  const INTM L = p;          // no cap on the active-set size
  Vector<T> vals;
  vals.resize(L);
  Vector<INTM> inds;
  inds.resize(L);
  Vector<T> dir;
  dir.resize(p);
  Matrix<T> gramSub;
  gramSub.resize(L,L);
  Matrix<T> gramSubInv;
  gramSubInv.resize(L,L);
  Matrix<T> gramAtoms;
  gramAtoms.resize(p,L);
  Matrix<T> scratch;
  scratch.resize(p,3);
  coreLARS2W(DtR,G,gramSub,gramAtoms,gramSubInv,dir,vals,weights,inds,scratch,
      normX,mode,constraint,pos);
  // inds is terminated by -1; copy the active coefficients back.
  coeffs.setZeros();
  for (int i = 0; i < L && inds[i] >= 0; ++i)
    coeffs[inds[i]] = vals[i];
};
/// Convenience wrapper around the full coreLARS2: allocates all scratch
/// buffers locally (allowing up to p = G.m() active atoms), runs LARS,
/// then scatters the compact (index,value) result into the dense coeffs
/// vector.
template <typename T>
void coreLARS2(Vector<T>& DtR, const AbstractMatrix<T>& G,
    Vector<T>& coeffs, T normX,
    const constraint_type mode,
    const T constraint, const bool pos) {
  const INTM p = G.m();
  const INTM L = p;          // no cap on the active-set size
  Vector<T> vals;
  vals.resize(L);
  Vector<INTM> inds;
  inds.resize(L);
  Vector<T> dir;
  dir.resize(p);
  Matrix<T> gramSub;
  gramSub.resize(L,L);
  Matrix<T> gramSubInv;
  gramSubInv.resize(L,L);
  Matrix<T> gramAtoms;
  gramAtoms.resize(p,L);
  Matrix<T> scratch;
  scratch.resize(p,3);
  coreLARS2(DtR,G,gramSub,gramAtoms,gramSubInv,dir,vals,inds,scratch,
      normX,mode,constraint,pos);
  // inds is terminated by -1; copy the active coefficients back.
  coeffs.setZeros();
  for (int i = 0; i < L && inds[i] >= 0; ++i)
    coeffs[inds[i]] = vals[i];
};
/// Auxiliary function for lasso.
/// Core LARS iteration based on the matrix-inversion lemma: instead of a
/// Cholesky factor it maintains invGs, the inverse of the Gram matrix of
/// the active atoms, updated (and downdated on atom removal) with
/// rank-one Schur-complement formulas. On entry DtR holds the current
/// correlations D'X_i and normX the residual energy; on exit ind/coeffs
/// hold the selected atoms and their (signed) coefficients, terminated
/// by ind entries of -1. All matrix/vector arguments are caller-provided
/// scratch buffers; no allocation happens here.
template <typename T>
void coreLARS2(Vector<T>& DtR, const AbstractMatrix<T>& G,
    Matrix<T>& Gs,
    Matrix<T>& Ga,
    Matrix<T>& invGs,
    Vector<T>& u,
    Vector<T>& coeffs,
    Vector<INTM>& ind,
    Matrix<T>& work,
    T& normX,
    const constraint_type mode,
    const T constraint,
    const bool pos,
    T* path, int length_path) {
  const int LL = Gs.n();
  const int K = G.n();
  const int L = MIN(LL,K);    // max active-set size
  if (length_path <= 1) length_path=4*L;
  coeffs.setZeros();
  ind.set(-1);                // -1 marks unused active-set slots
  // Raw-pointer views for fast low-level access.
  T* const pr_Gs = Gs.rawX();        // Gram matrix of active atoms
  T* const pr_invGs = invGs.rawX();  // its inverse (upper part maintained)
  T* const pr_Ga = Ga.rawX();        // full G columns of active atoms
  T* const pr_work = work.rawX();    // 3*K scratch (candidate steps, G*u)
  T* const pr_u = u.rawX();          // path direction on the active set
  T* const pr_DtR = DtR.rawX();
  T* const pr_coeffs = coeffs.rawX();
  INTM* const pr_ind = ind.rawX();
  // Find the most correlated element
  int currentInd = pos ? DtR.max() : DtR.fmax();
  // Early exits: already optimal for the penalty, or error constraint
  // already satisfied.
  if (mode == PENALTY && abs(DtR[currentInd]) < constraint) return;
  if (mode == L2ERROR && normX < constraint) return;
  bool newAtom=true;
  int i;
  int iter=0;
  T thrs = 0;                 // running l1 norm of the coefficients
  for (i = 0; i<L; ++i) {
    ++iter;
    if (newAtom) {
      pr_ind[i]=currentInd;
      //  cerr << "Add " << currentInd << endl;
      G.extract_rawCol(pr_ind[i],pr_Ga+i*K);
      for (int j = 0; j<=i; ++j)
        pr_Gs[i*LL+j]=pr_Ga[i*K+pr_ind[j]];
      // Update inverse of Gs
      if (i == 0) {
        pr_invGs[0]=T(1.0)/pr_Gs[0];
      } else {
        // Block-inverse (Schur complement) update of invGs with the
        // new row/column of Gs.
        cblas_symv<T>(CblasColMajor,CblasUpper,i,T(1.0),
            pr_invGs,LL,pr_Gs+i*LL,1,T(0.0),pr_u,1);
        const T schur =
          T(1.0)/(pr_Gs[i*LL+i]-cblas_dot<T>(i,pr_u,1,pr_Gs+i*LL,1));
        pr_invGs[i*LL+i]=schur;
        //            cblas_copy<T>(i,pr_u,1,pr_invGs+i*LL,1);
        memcpy(pr_invGs+i*LL,pr_u,i*sizeof(T));
        cblas_scal<T>(i,-schur,pr_invGs+i*LL,1);
        cblas_syr<T>(CblasColMajor,CblasUpper,i,schur,pr_u,1,
            pr_invGs,LL);
      }
    }
    // Compute the path direction
    for (int j = 0; j<=i; ++j)
      pr_work[j]= pr_DtR[pr_ind[j]] > 0 ? T(1.0) : T(-1.0);
    cblas_symv<T>(CblasColMajor,CblasUpper,i+1,T(1.0),pr_invGs,LL,
        pr_work,1,T(0.0),pr_u,1);
    // Compute the step on the path
    // step_max: smallest step at which an active coefficient would
    // cross zero (lasso condition).
    T step_max = INFINITY;
    int first_zero = -1;
    for (int j = 0; j<=i; ++j) {
      T ratio = -pr_coeffs[j]/pr_u[j];
      if (ratio > 0 && ratio <= step_max) {
        step_max=ratio;
        first_zero=j;
      }
    }
    //  PRINT_F(step_max)
    T current_correlation = abs<T>(pr_DtR[pr_ind[0]]);
    // work+2K = Ga * u : change rate of every correlation along the path.
    cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,i+1,T(1.0),pr_Ga,
        K,pr_u,1,T(0.0),pr_work+2*K,1);
    memcpy(pr_work+K,pr_work+2*K,K*sizeof(T));
    memcpy(pr_work,pr_work+K,K*sizeof(T));
    //  cblas_copy<T>(K,pr_work+2*K,1,pr_work+K,1);
    //  cblas_copy<T>(K,pr_work+2*K,1,pr_work,1);
    // Candidate entry steps for every inactive atom, one slot per sign.
    for (int j = 0; j<=i; ++j) {
      pr_work[pr_ind[j]]=INFINITY;
      pr_work[pr_ind[j]+K]=INFINITY;
    }
    for (int j = 0; j<K; ++j) {
      pr_work[j] = ((pr_work[j] < INFINITY) && (pr_work[j] > T(-1.0))) ? (pr_DtR[j]+current_correlation)/(T(1.0)+pr_work[j]) : INFINITY;
    }
    //  work.print("work");
    for (int j = 0; j<K; ++j) {
      pr_work[j+K] = ((pr_work[j+K] < INFINITY) && (pr_work[j+K] < T(1.0))) ? (current_correlation-pr_DtR[j])/(T(1.0)-pr_work[j+K]) : INFINITY;
    }
    //  work.print("work");
    if (pos) {
      // Positivity: atoms may only enter with positive correlation.
      for (int j = 0; j<K; ++j) {
        pr_work[j]=INFINITY;
      }
    }
    //  work.print("work");
    //  coeffs.print("coeffs");
    int index = cblas_iamin<T>(2*K,pr_work,1);
    T step = pr_work[index];
    // Choose next element
    currentInd = index % K;
    // compute the coefficients of the polynome representing normX^2
    T coeff1 = 0;
    for (int j = 0; j<=i; ++j)
      coeff1 += pr_DtR[pr_ind[j]] > 0 ? pr_u[j] : -pr_u[j];
    T coeff2 = 0;
    for (int j = 0; j<=i; ++j)
      coeff2 += pr_DtR[pr_ind[j]]*pr_u[j];
    T coeff3 = normX-constraint;
    // step_max2: largest step allowed by the stopping criterion of the
    // selected mode.
    T step_max2;
    if (mode == PENALTY) {
      step_max2 = current_correlation-constraint;
    } else if (mode == L2ERROR) {
      /// L2ERROR
      const T delta = coeff2*coeff2-coeff1*coeff3;
      step_max2 = delta < 0 ? INFINITY : (coeff2-sqrt(delta))/coeff1;
      step_max2 = MIN(current_correlation,step_max2);
    } else {
      /// L1COEFFS
      step_max2 = coeff1 < 0 ? INFINITY : (constraint-thrs)/coeff1;
      step_max2 = MIN(current_correlation,step_max2);
    }
    step = MIN(MIN(step,step_max2),step_max);
    if (step == INFINITY) break; // stop the path
    // Update coefficients
    cblas_axpy<T>(i+1,step,pr_u,1,pr_coeffs,1);
    if (pos) {
      for (int j = 0; j<i+1; ++j)
        if (pr_coeffs[j] < 0) pr_coeffs[j]=0;
    }
    // Update correlations
    cblas_axpy<T>(K,-step,pr_work+2*K,1,pr_DtR,1);
    // Update normX
    normX += coeff1*step*step-2*coeff2*step;
    // Update norm1
    thrs += step*coeff1;
    if (path) {
      for (int k = 0; k<=i; ++k)
        path[iter*K+ind[k]]=pr_coeffs[k];
    }
    // Choose next action
    if (step == step_max) {
      //   cerr << "Remove " << pr_ind[first_zero] << endl;
      /// Downdate, remove first_zero
      /// Downdate Ga, Gs, invGs, ind, coeffs
      for (int j = first_zero; j<i; ++j) {
        cblas_copy<T>(K,pr_Ga+(j+1)*K,1,pr_Ga+j*K,1);
        pr_ind[j]=pr_ind[j+1];
        pr_coeffs[j]=pr_coeffs[j+1];
      }
      pr_ind[i]=-1;
      pr_coeffs[i]=0;
      for (int j = first_zero; j<i; ++j) {
        cblas_copy<T>(first_zero,pr_Gs+(j+1)*LL,1,pr_Gs+j*LL,1);
        cblas_copy<T>(i-first_zero,pr_Gs+(j+1)*LL+first_zero+1,1,
            pr_Gs+j*LL+first_zero,1);
      }
      // Rank-one downdate of invGs for the removed row/column.
      const T schur = pr_invGs[first_zero*LL+first_zero];
      cblas_copy<T>(first_zero,pr_invGs+first_zero*LL,1,pr_u,1);
      cblas_copy<T>(i-first_zero,pr_invGs+(first_zero+1)*LL+first_zero,LL,
          pr_u+first_zero,1);
      for (int j = first_zero; j<i; ++j) {
        cblas_copy<T>(first_zero,pr_invGs+(j+1)*LL,1,pr_invGs+j*LL,1);
        cblas_copy<T>(i-first_zero,pr_invGs+(j+1)*LL+first_zero+1,1,
            pr_invGs+j*LL+first_zero,1);
      }
      cblas_syr<T>(CblasColMajor,CblasUpper,i,T(-1.0)/schur,
          pr_u,1,pr_invGs,LL);
      newAtom=false;
      // Retry this slot with one fewer atom (loop ++i compensates one).
      i=i-2;
    } else {
      newAtom=true;
    }
    // Stopping tests: path budget, vanishing step, constraint reached,
    // residual exhausted, or active set full.
    if ((iter >= length_path-1) || abs(step) < 1e-15 ||
        step == step_max2 || (normX < 1e-15) ||
        (i == (L-1)) ||
        (mode == L2ERROR && normX - constraint < 1e-15) ||
        (mode == L1COEFFS && (constraint-thrs < 1e-15))) {
      break;
    }
  }
}
/// Auxiliary function for lasso.
/// Weighted variant of coreLARS2: solves the LARS path where the l1 term
/// carries a per-atom weight. Correlations are first divided by the
/// weights (so selection is on weighted correlations), and the path
/// direction and the normX/thrs bookkeeping are scaled by the weights of
/// the active atoms. Same inverse-Gram update/downdate scheme as
/// coreLARS2; all matrix/vector arguments are caller-provided scratch.
template <typename T>
void coreLARS2W(Vector<T>& DtR, const AbstractMatrix<T>& G,
    Matrix<T>& Gs,
    Matrix<T>& Ga,
    Matrix<T>& invGs,
    Vector<T>& u,
    Vector<T>& coeffs,
    const Vector<T>& weights,
    Vector<INTM>& ind,
    Matrix<T>& work,
    T& normX,
    const constraint_type mode,
    const T constraint,
    const bool pos) {
  const int LL = Gs.n();
  const int K = G.n();
  const int L = MIN(LL,K);    // max active-set size
  coeffs.setZeros();
  ind.set(-1);                // -1 marks unused active-set slots
  // Raw-pointer views for fast low-level access.
  T* const pr_Gs = Gs.rawX();
  T* const pr_invGs = invGs.rawX();
  T* const pr_Ga = Ga.rawX();
  //  T* const pr_G = G.rawX();
  T* const pr_work = work.rawX();
  T* const pr_u = u.rawX();
  T* const pr_DtR = DtR.rawX();
  T* const pr_coeffs = coeffs.rawX();
  T* const pr_weights = weights.rawX();
  INTM* const pr_ind = ind.rawX();
  // Work on weighted correlations DtR / weights from here on.
  DtR.div(weights);
  // Find the most correlated element
  int currentInd = pos ? DtR.max() : DtR.fmax();
  if (mode == PENALTY && abs(DtR[currentInd]) < constraint) return;
  if (mode == L2ERROR && normX < constraint) return;
  bool newAtom=true;
  int i;
  int iter=0;
  T thrs = 0;                 // running weighted l1 norm
  for (i = 0; i<L; ++i) {
    ++iter;
    if (newAtom) {
      pr_ind[i]=currentInd;
      // Update upper part of Gs and Ga
      G.extract_rawCol(pr_ind[i],pr_Ga+i*K);
      for (int j = 0; j<=i; ++j)
        pr_Gs[i*LL+j]=pr_Ga[i*K+pr_ind[j]];
      // Update inverse of Gs
      if (i == 0) {
        pr_invGs[0]=T(1.0)/pr_Gs[0];
      } else {
        // Block-inverse (Schur complement) update of invGs.
        cblas_symv<T>(CblasColMajor,CblasUpper,i,T(1.0),
            pr_invGs,LL,pr_Gs+i*LL,1,T(0.0),pr_u,1);
        const T schur =
          T(1.0)/(pr_Gs[i*LL+i]-cblas_dot<T>(i,pr_u,1,pr_Gs+i*LL,1));
        pr_invGs[i*LL+i]=schur;
        cblas_copy<T>(i,pr_u,1,pr_invGs+i*LL,1);
        cblas_scal<T>(i,-schur,pr_invGs+i*LL,1);
        cblas_syr<T>(CblasColMajor,CblasUpper,i,schur,pr_u,1,
            pr_invGs,LL);
      }
    }
    // Compute the path direction (signs scaled by the atom weights).
    for (int j = 0; j<=i; ++j)
      pr_work[j]= pr_DtR[pr_ind[j]] > 0 ? weights[pr_ind[j]] : -weights[pr_ind[j]];
    cblas_symv<T>(CblasColMajor,CblasUpper,i+1,T(1.0),pr_invGs,LL,
        pr_work,1,T(0.0),pr_u,1);
    // Compute the step on the path
    // step_max: smallest step at which an active coefficient crosses 0.
    T step_max = INFINITY;
    int first_zero = -1;
    for (int j = 0; j<=i; ++j) {
      T ratio = -pr_coeffs[j]/pr_u[j];
      if (ratio > 0 && ratio <= step_max) {
        step_max=ratio;
        first_zero=j;
      }
    }
    T current_correlation = abs<T>(pr_DtR[pr_ind[0]]);
    // Ga*u, reweighted, gives the change rate of weighted correlations.
    cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,i+1,T(1.0),pr_Ga,
        K,pr_u,1,T(0.0),pr_work+2*K,1);
    vDiv<T>(K,pr_work+2*K,pr_weights,pr_work+2*K);
    cblas_copy<T>(K,pr_work+2*K,1,pr_work+K,1);
    cblas_copy<T>(K,pr_work+2*K,1,pr_work,1);
    // Candidate entry steps for every inactive atom, one slot per sign.
    for (int j = 0; j<=i; ++j) {
      pr_work[pr_ind[j]]=INFINITY;
      pr_work[pr_ind[j]+K]=INFINITY;
    }
    for (int j = 0; j<K; ++j) {
      pr_work[j] = ((pr_work[j] < INFINITY) && (pr_work[j] > T(-1.0))) ? (pr_DtR[j]+current_correlation)/(T(1.0)+pr_work[j]) : INFINITY;
    }
    for (int j = 0; j<K; ++j) {
      pr_work[j+K] = ((pr_work[j+K] < INFINITY) && (pr_work[j+K] < T(1.0))) ? (current_correlation-pr_DtR[j])/(T(1.0)-pr_work[j+K]) : INFINITY;
    }
    if (pos) {
      // Positivity: atoms may only enter with positive correlation.
      for (int j = 0; j<K; ++j) {
        pr_work[j]=INFINITY;
      }
    }
    int index = cblas_iamin<T>(2*K,pr_work,1);
    T step = pr_work[index];
    // Choose next element
    currentInd = index % K;
    // compute the coefficients of the polynome representing normX^2
    T coeff1 = 0;
    for (int j = 0; j<=i; ++j)
      coeff1 += pr_DtR[pr_ind[j]] > 0 ? pr_weights[pr_ind[j]]*pr_u[j] :
        -pr_weights[pr_ind[j]]*pr_u[j];
    T coeff2 = 0;
    for (int j = 0; j<=i; ++j)
      coeff2 += pr_DtR[pr_ind[j]]*pr_u[j]*pr_weights[pr_ind[j]];
    T coeff3 = normX-constraint;
    // step_max2: largest step allowed by the mode's stopping criterion.
    T step_max2;
    if (mode == PENALTY) {
      step_max2 = current_correlation-constraint;
    } else if (mode == L2ERROR) {
      /// L2ERROR
      const T delta = coeff2*coeff2-coeff1*coeff3;
      step_max2 = delta < 0 ? INFINITY : (coeff2-sqrt(delta))/coeff1;
    } else {
      /// L1COEFFS
      step_max2 = coeff1 < 0 ? INFINITY : (constraint-thrs)/coeff1;
    }
    step = MIN(MIN(step,step_max2),step_max);
    if (step == INFINITY) break; // stop the path
    // Update coefficients
    cblas_axpy<T>(i+1,step,pr_u,1,pr_coeffs,1);
    // Update correlations
    cblas_axpy<T>(K,-step,pr_work+2*K,1,pr_DtR,1);
    // Update normX
    normX += coeff1*step*step-2*coeff2*step;
    // Update norm1
    thrs += step*coeff1;
    if (step == step_max) {
      /// Downdate, remove first_zero
      /// Downdate Ga, Gs, invGs, ind, coeffs
      for (int j = first_zero; j<i; ++j) {
        cblas_copy<T>(K,pr_Ga+(j+1)*K,1,pr_Ga+j*K,1);
        pr_ind[j]=pr_ind[j+1];
        pr_coeffs[j]=pr_coeffs[j+1];
      }
      pr_ind[i]=-1;
      pr_coeffs[i]=0;
      for (int j = first_zero; j<i; ++j) {
        cblas_copy<T>(first_zero,pr_Gs+(j+1)*LL,1,pr_Gs+j*LL,1);
        cblas_copy<T>(i-first_zero,pr_Gs+(j+1)*LL+first_zero+1,1,
            pr_Gs+j*LL+first_zero,1);
      }
      // Rank-one downdate of invGs for the removed row/column.
      const T schur = pr_invGs[first_zero*LL+first_zero];
      cblas_copy<T>(first_zero,pr_invGs+first_zero*LL,1,pr_u,1);
      cblas_copy<T>(i-first_zero,pr_invGs+(first_zero+1)*LL+first_zero,LL,
          pr_u+first_zero,1);
      for (int j = first_zero; j<i; ++j) {
        cblas_copy<T>(first_zero,pr_invGs+(j+1)*LL,1,pr_invGs+j*LL,1);
        cblas_copy<T>(i-first_zero,pr_invGs+(j+1)*LL+first_zero+1,1,
            pr_invGs+j*LL+first_zero,1);
      }
      cblas_syr<T>(CblasColMajor,CblasUpper,i,T(-1.0)/schur,
          pr_u,1,pr_invGs,LL);
      newAtom=false;
      // Retry this slot with one fewer atom (loop ++i compensates one).
      i=i-2;
    } else {
      newAtom=true;
    }
    // Choose next action
    // Stopping tests: iteration budget, vanishing step, constraint
    // reached, residual exhausted, or active set full.
    if (iter > 4*L || abs(step) < 1e-10 ||
        step == step_max2 || (normX < 1e-10) ||
        (i == (L-1)) ||
        (mode == L2ERROR && normX - constraint < 1e-10) ||
        (mode == L1COEFFS && (constraint-thrs < 1e-10))) {
      break;
    }
  }
}
/* ************************
* Iterative thresholding
* ************************/
/// Implementation of IST for solving
/// \forall i, \min_{\alpha_i} ||\alpha_i||_1
/// s.t. ||\X_i-D\alpha_i||_2^2 <= constraint or
/// \forall i, \min_{\alpha_i} constraint*||\alpha_i||_1 + ...
/// ... ||\X_i-D\alpha_i||_2^2 <= lambda
template <typename T>
void ist(const Matrix<T>& X, const Matrix<T>& D,
SpMatrix<T>& spalpha, T lambda, constraint_type mode,
const int itermax,
const T tol,
const int numThreads) {
Matrix<T> alpha;
spalpha.toFull(alpha);
spalpha.clear();
ist(X,D,alpha,lambda,mode,itermax,tol,numThreads);
alpha.toSparse(spalpha);
}
/// Dense IST (iterative soft-thresholding) driver.
/// For every column X_i of X it solves, over the normalized dictionary D:
///   mode == PENALTY : min_a 0.5||X_i - D a||_2^2 + lambda ||a||_1
///   otherwise       : min_a ||a||_1  s.t.  ||X_i - D a||_2^2 <= lambda
/// alpha serves both as warm start and as output (resized to K x M).
template <typename T>
void ist(const Matrix<T>& X, const Matrix<T>& D,
    Matrix<T>& alpha, T lambda, constraint_type mode,
    const int itermax,
    const T tol, const int numThreads) {
  // The L1COEFFS constraint type is not handled by this solver.
  if (mode == L1COEFFS) {
    std::cerr << "Mode not implemented" << std::endl;
    return;
  }
  int K=D.n();   // number of dictionary atoms
  int M=X.n();   // number of signals (columns of X)
  alpha.resize(K,M);
  // The inner solvers rely on unit-norm atoms (unit Gram diagonal).
  if (!D.isNormalized()) {
    cerr << "Current implementation of IST does not support non-normalized dictionaries" << endl;
    return;
  }
  /// compute the Gram Matrix G=D'D
  //CachedProdMatrix<T> G(D, K < 20000 && M*K/10 > K);
  //ProdMatrix<T> G(D, K < 20000 && M*K/10 > K);
  Matrix<T> G;
  D.XtX(G);
  //  for (int i = 0; i<K; ++i) G[i*K+i] += 1e-6;
  // Tiny diagonal regularization for numerical stability.
  G.addDiag(1e-12);
  // Lazily evaluated product D'X, read column by column in the loop.
  ProdMatrix<T> DtX(D,X,false);
  int NUM_THREADS=init_omp(numThreads);
  // Per-thread scratch buffers, allocated once to avoid per-column allocation.
  Vector<T>* DtRT= new Vector<T>[NUM_THREADS];
  SpVector<T>* spAlphaT= new SpVector<T>[NUM_THREADS];
  for (int i = 0; i<NUM_THREADS; ++i) {
    DtRT[i].resize(K);
    spAlphaT[i].resize(K);
  };
  int i;
#pragma omp parallel for private(i)
  for (i = 0; i< M; ++i) {
#ifdef _OPENMP
    int numT=omp_get_thread_num();
#else
    int numT=0;
#endif
    Vector<T> coeffs;
    // Solve in place in alpha's i-th column.
    alpha.refCol(i,coeffs);
    Vector<T>& DtR=DtRT[numT];
    SpVector<T>& spAlpha=spAlphaT[numT];
    T norm1 = coeffs.asum();
    // Compute DtR
    DtX.copyCol(i,DtR);
    Vector<T> Xi;
    X.refCol(i,Xi);
    T normX2 = Xi.nrm2sq();
    // Warm start: fold the initial coefficients into the residual
    // correlations, DtR = D'X_i - G * alpha_i.
    if (norm1 > EPSILON) {
      coeffs.toSparse(spAlpha);
      G.mult(spAlpha,DtR,-1.0,1.0);
    }
    if (mode == PENALTY) {
      coreIST(G,DtR,coeffs,lambda,itermax,tol);
    } else {
      // Constrained mode needs the signal energy to track the residual.
      coreISTconstrained(G,DtR,coeffs,normX2,lambda,itermax,tol);
    }
  }
  delete[](DtRT);
  delete[](spAlphaT);
}
/*template <typename T>
inline void generalCD(const AbstractMatrix<T>& G, Vector<T>& DtRv, Vector<T>& coeffsv,
const T lambda, const int itermax, const T tol) {
Vector<T> diag;
G.diag(diag);
const int K = G.n();
T* const coeffs = coeffsv.rawX();
T* const DtR = DtRv.rawX();
for (int iter=0; iter < itermax; ++iter) {
if (iter % 5 == 0) {
T eps1=DtRv.fmaxval()/lambda-1;
if (eps1 <= tol) {
T eps2=1e10;
for (int jj=0; jj<K; ++jj) {
if (coeffs[jj] > 0) {
eps2=MIN(DtR[jj],eps2);
} else if (coeffs[jj] < 0) {
eps2=MIN(-DtR[jj],eps2);
}
}
eps2=-(eps2/lambda-1);
if (eps2 <= tol)
break;
}
}
for (int j = 0; j <K; ++j) {
T crit=DtR[j]+coeffs[j]*diag[j];
if (crit > lambda) {
T diff=coeffs[j];
coeffs[j]=(crit-lambda)/diag[j];
diff-=coeffs[j];
G.add_rawCol(j,DtR,diff);
} else if (crit < -lambda) {
T diff=coeffs[j];
coeffs[j]=(crit+lambda)/diag[j];
diff-=coeffs[j];
G.add_rawCol(j,DtR,diff);
} else if (coeffs[j]) {
G.add_rawCol(j,DtR,coeffs[j]);
coeffs[j]=T();
}
}
}
}*/
/// Single-signal IST for the penalty formulation:
///   min_a 0.5||x - D a||_2^2 + thrs ||a||_1
/// On entry DtRv holds the residual correlations D'(x - D a) matching the
/// warm start in coeffsv; G is the Gram matrix D'D with unit diagonal.
/// IMPLEMENTATION NOTE: during the sweeps DtR is stored SHIFTED as
/// DtR + coeffs (see the vAdd below); this turns each soft-thresholding
/// step into a direct comparison against lambda. The shift is undone
/// (vSub) only when testing convergence.
template <typename T>
inline void coreIST(const AbstractMatrix<T>& G, Vector<T>& DtRv, Vector<T>& coeffsv,
    const T thrs, const int itermax,
    const T tol) {
  const int K = G.n();
  // Raw pointers for fast unchecked access.
  T* const coeffs = coeffsv.rawX();
  T* const DtR = DtRv.rawX();
  //  T* const prG = G.rawX();
  const T lambda_init=thrs;
  T maxDtR = DtRv.fmaxval();
  T norm1=coeffsv.asum();
  T lambda=lambda_init;
  // Shift: DtR <- DtR + coeffs (valid because G has unit diagonal).
  vAdd(K,DtR,coeffs,DtR);
  for (int iter=0; iter < itermax; ++iter) {
    // One full coordinate-descent sweep with soft-thresholding.
    for (int j = 0; j <K; ++j) {
      if (DtR[j] > lambda) {
        // Positive shrinkage: new coeff = DtR[j] - lambda.
        T diff=coeffs[j];
        coeffs[j]=DtR[j]-lambda;
        diff-=coeffs[j];            // diff = old - new
        DtR[j]-=diff;               // keep the +coeffs shift on entry j
        // Propagate the correlation update diff*G(:,j) to all entries.
        G.add_rawCol(j,DtR,diff);
        //cblas_axpy(K,diff,prG+j*K,1,DtR,1);
      } else if (DtR[j] < -lambda) {
        // Negative shrinkage: new coeff = DtR[j] + lambda.
        T diff=coeffs[j];
        coeffs[j]=DtR[j]+lambda;
        diff-=coeffs[j];
        DtR[j]-=diff;
        G.add_rawCol(j,DtR,diff);
        //cblas_axpy(K,diff,prG+j*K,1,DtR,1);
      } else if (coeffs[j]) {
        // Inside the threshold band: coefficient is set to zero.
        T diff=coeffs[j];
        coeffs[j]=T();
        DtR[j]-=diff;
        G.add_rawCol(j,DtR,diff);
        //cblas_axpy(K,diff,prG+j*K,1,DtR,1);
      }
    }
    // Convergence test every 5 sweeps (on iterations 1, 6, 11, ...).
    if (iter % 5 == 1) {
      // Temporarily undo the shift to look at the true correlations.
      vSub(K,DtR,coeffs,DtR);
      maxDtR = DtRv.fmaxval();
      norm1 =T();
      T DtRa = T();
      for (int j = 0; j<K; ++j) {
        if (coeffs[j]) {
          norm1 += abs(coeffs[j]);
          DtRa += DtR[j]*coeffs[j];
        }
      }
      // Restore the shifted representation.
      vAdd(K,DtR,coeffs,DtR);
      // kappa is an optimality-gap surrogate: zero at a KKT point.
      const T kappa = -DtRa+norm1*maxDtR;
      if (abs(lambda - maxDtR) < tol && kappa <= tol)
        break;
    }
  }
}
/// Weighted coordinate-descent soft-thresholding:
///   min_a 0.5||x - D a||_2^2 + lambda * sum_j weights[j] |a_j|
/// Unlike coreIST, the Gram diagonal G(j,j) is not assumed to be 1;
/// each coordinate uses its own curvature nrm = G(j,j) and its own
/// threshold lambda*weights[j]/nrm. DtRv holds the current residual
/// correlations D'(x - D a) and is updated after every coordinate move.
template <typename T>
inline void coreISTW(const Matrix<T>& G, Vector<T>& DtRv, Vector<T>& coeffsv,const Vector<T>& weightsv,
    const T lambda, const int itermax,
    const T tol) {
  T opt=0;    // worst relative KKT violation seen at the last check
  const int K = G.n();
  T* const coeffs = coeffsv.rawX();
  T* const weights = weightsv.rawX();
  T* const DtR = DtRv.rawX();
  //  T* const prG = G.rawX();
  for (int iter=0; iter < itermax; ++iter) {
    for (int j = 0; j <K; ++j) {
      const T nrm = G(j,j);
      // Unconstrained coordinate minimizer before shrinkage.
      const T u = DtR[j]/nrm+coeffs[j];
      // Per-coordinate threshold, scaled by this atom's weight.
      const T thrs = lambda*weights[j]/nrm;
      if (u > thrs) {
        T diff=coeffs[j];
        coeffs[j]=u-thrs;
        diff-=coeffs[j];    // diff = old - new
        // Update all correlations: DtR += diff * G(:,j).
        G.add_rawCol(j,DtR,diff);
        //cblas_axpy(K,diff,prG+j*K,1,DtR,1);
      } else if (u < -thrs) {
        T diff=coeffs[j];
        coeffs[j]=u+thrs;
        diff-=coeffs[j];
        G.add_rawCol(j,DtR,diff);
        //cblas_axpy(K,diff,prG+j*K,1,DtR,1);
      } else if (coeffs[j]) {
        // Coefficient shrinks to zero; diff equals the old value.
        G.add_rawCol(j,DtR,coeffs[j]);
        coeffs[j]=0;
        //cblas_axpy(K,diff,prG+j*K,1,DtR,1);
      }
    }
    // KKT-based stopping test every 10 sweeps.
    if (iter % 10 == 0) {
      opt=0;
      for (int j = 0; j <K; ++j) {
        if (coeffs[j] > 0) {
          // Active positive coordinate: DtR[j] should equal w_j*lambda.
          opt=MAX(opt,abs<T>(T(1.0)-DtR[j]/(weights[j]*lambda)));
        } else if (coeffs[j] < 0) {
          // Active negative coordinate: DtR[j] should equal -w_j*lambda.
          opt=MAX(opt,abs<T>(T(1.0)+DtR[j]/(lambda*weights[j])));
        } else {
          // Inactive coordinate: |DtR[j]| should stay below w_j*lambda.
          opt=MAX(opt,abs<T>(DtR[j]/(lambda*weights[j]))-T(1.0));
        }
      }
      if (opt < tol) break;
    }
  }
}
/*template <typename T>
inline void coreIST_unnormalized(const AbstractMatrix<T>& G, Vector<T>& DtRv, Vector<T>& coeffsv,
const T thrs, const int itermax,
const T tol) {
const int K = G.n();
T* const coeffs = coeffsv.rawX();
T* const DtR = DtRv.rawX();
// T* const prG = G.rawX();
const T lambda_init=thrs;
T maxDtR = DtRv.fmaxval();
T norm1=coeffsv.asum();
T lambda=lambda_init;
DtRv.add(coeffsv);
// vAdd(K,DtR,coeffs,DtR);
for (int iter=0; iter < itermax; ++iter) {
for (int j = 0; j <K; ++j) {
if (DtR[j] > lambda) {
T diff=coeffs[j];
coeffs[j]=DtR[j]-lambda;
diff-=coeffs[j];
DtR[j]-=diff;
G.add_rawCol(j,DtR,diff);
} else if (DtR[j] < -lambda) {
T diff=coeffs[j];
coeffs[j]=DtR[j]+lambda;
diff-=coeffs[j];
DtR[j]-=diff;
G.add_rawCol(j,DtR,diff);
} else if (coeffs[j]) {
T diff=coeffs[j];
coeffs[j]=T();
DtR[j]-=diff;
G.add_rawCol(j,DtR,diff);
}
}
if (iter % 5 == 1) {
vSub(K,DtR,coeffs,DtR);
maxDtR = DtRv.fmaxval();
norm1 =T();
T DtRa = T();
for (int j = 0; j<K; ++j) {
if (coeffs[j]) {
norm1 += abs(coeffs[j]);
DtRa += DtR[j]*coeffs[j];
}
}
DtRv.add(coeffs);
const T kappa = -DtRa+norm1*maxDtR;
if (abs(lambda - maxDtR) < tol && kappa <= tol)
break;
}
}
}*/
/// coreIST constrained
/// IST for the constrained formulation:
///   min_a ||a||_1  s.t.  ||x - D a||_2^2 <= eps
/// Works like coreIST but additionally adapts the threshold lambda so
/// the residual energy err converges to the constraint eps: once a sweep
/// is nearly stationary, lambda^2 is rescaled by eps/err. The tolerance
/// is annealed from 10*tol down to tol as err approaches eps.
template <typename T>
void coreISTconstrained(const AbstractMatrix<T>& G, Vector<T>& DtRv, Vector<T>&
    coeffsv, const T normX2, const T eps, const int itermax, const T tol) {
  const int K = G.n();
  T* const coeffs = coeffsv.rawX();
  T* const DtR = DtRv.rawX();
  //  T* const prG = G.rawX();
  // Current residual energy, tracked incrementally during the sweeps.
  T err = normX2;
  T norm1 = coeffsv.asum();
  // Zero warm start that is already feasible: nothing to do.
  if (!norm1 && err <= eps) return;
  T current_tol = 10.0*tol;
  T maxDtR = DtRv.fmaxval();
  T lambda = maxDtR;
  T lambdasq= lambda*lambda;
  // Cold start: scale lambda so the first sweep lands near the constraint.
  if (!norm1) {
    lambdasq *= eps/err;
    lambda=sqrt(lambdasq);
  }
  Vector<int> indices(K);
  indices.set(-1);
  int* const pr_indices=indices.rawX();
  // Number of non-zero coordinates produced by the current sweep.
  int count;
  for (int iter=0; iter < itermax; ++iter) {
    count=0;
    T old_err = err;
    for (int j = 0; j <K; ++j) {
      // Soft-thresholding
      T old_coeff = coeffs[j];
      T diff = DtR[j]+old_coeff;
      if (diff > lambda) {
        coeffs[j] = diff - lambda;
        // Incremental update of the residual energy.
        err+=lambdasq-DtR[j]*DtR[j];
        pr_indices[count++]=j;
      } else if (diff < - lambda) {
        coeffs[j] = diff + lambda;
        err+=lambdasq-DtR[j]*DtR[j];
        pr_indices[count++]=j;
      } else {
        coeffs[j]=T();
        if (old_coeff) {
          err+=diff*diff-DtR[j]*DtR[j];
        }
      }
      // Update DtR
      diff = old_coeff-coeffs[j];
      if (diff) {
        G.add_rawCol(j,DtR,diff);
        //cblas_axpy<T>(K,old_coeff-coeffs[j],prG+j*K,1,DtR,1);
      }
    }
    maxDtR = DtRv.fmaxval();
    norm1 =T();
    T DtRa = T();
    // Optimality quantities restricted to the active set of this sweep.
    for (int j = 0; j<count; ++j) {
      const int ind = pr_indices[j];
      norm1 += abs(coeffs[ind]);
      DtRa += DtR[ind]*coeffs[ind];
    }
    // Near-stationary sweep: adjust lambda toward the constraint.
    if (norm1-DtRa/maxDtR <= current_tol) {
      // Did err cross (within tolerance) the constraint boundary?
      const bool change = ((old_err > eps) && err < eps+current_tol) ||
        (old_err < eps && err > eps-current_tol);
      if (change) {
        if (current_tol == tol) {
          break;
        } else {
          // Tighten the tolerance and keep refining.
          current_tol = MAX(current_tol*0.5,tol);
        }
      }
      lambdasq *= eps/err;
      lambda=sqrt(lambdasq);
    }
  }
};
/// ist for group Lasso
/// Block-coordinate descent (IST) for group Lasso over Ngroups groups
/// of signals. For group i, XT[i] holds the signals and alphaT[i] the
/// (warm-start and output) coefficients; D is a shared normalized
/// dictionary. mode selects the penalized (PENALTY) or residual-
/// constrained formulation. L1COEFFS is not supported.
/// Fix: removed a duplicated coeffs_mean.setZeros() call (the zeroing
/// was performed twice in a row in the cold-start branch).
template <typename T>
void ist_groupLasso(const Matrix<T>* XT, const Matrix<T>& D,
    Matrix<T>* alphaT, const int Ngroups,
    const T lambda, const constraint_type mode,
    const int itermax,
    const T tol, const int numThreads) {
  int K=D.n();   // number of atoms
  int n = D.m(); // signal dimension
  // The core solvers assume unit-norm atoms.
  if (!D.isNormalized()) {
    cerr << "Current implementation of block coordinate descent does not support non-normalized dictionaries" << endl;
    return;
  }
  if (mode == L1COEFFS) {
    std::cerr << "Mode not implemented" << std::endl;
    return;
  }
  /// compute the Gram Matrix G=D'D
  Matrix<T> G;
  D.XtX(G);
  int NUM_THREADS=init_omp(numThreads);
  // Per-thread scratch matrices, allocated once.
  Matrix<T>* RtDT = new Matrix<T>[NUM_THREADS];
  Matrix<T>* alphatT = new Matrix<T>[NUM_THREADS];
  int i;
#pragma omp parallel for private(i)
  for (i = 0; i< Ngroups; ++i) {
#ifdef _OPENMP
    int numT=omp_get_thread_num();
#else
    int numT=0;
#endif
    const Matrix<T>& X = XT[i];
    int M = X.n();    // signals in this group
    // Work on the transposed coefficients (one row per atom becomes
    // one column), which the group solvers expect.
    Matrix<T>& alphat = alphatT[numT];
    alphaT[i].transpose(alphat);
    // Correlations R'D between the signals and the dictionary.
    Matrix<T>& RtD = RtDT[numT];
    X.mult(D,RtD,true,false);
    Vector<T> col, col2;
    T norm1 = alphat.asum();
    T normX2 = 0;
    // Cold start: initialize from the mean signal of the group.
    if (!norm1) {
      Vector<T> DtR_mean(K);
      Vector<T> coeffs_mean(K);
      coeffs_mean.setZeros();
      RtD.meanRow(DtR_mean);
      if (mode == PENALTY) {
        coreIST(G,DtR_mean,coeffs_mean,lambda/T(2.0),itermax,tol);
      } else {
        Vector<T> meanVec(n);
        X.meanCol(meanVec);
        normX2=meanVec.nrm2sq();
        coreISTconstrained(G,DtR_mean,coeffs_mean,normX2,
            lambda,itermax,tol);
        // Recompute the group residual energy from the mean solution.
        SpVector<T> spalpha(K);
        normX2-=computeError(normX2,G,DtR_mean,coeffs_mean,spalpha);
        normX2=X.normFsq()-M*normX2;
      }
      // Replicate the mean solution across all signals of the group.
      alphat.fillRow(coeffs_mean);
    }
    if (M > 1) {
      // Fold the warm start into the correlations: RtD -= alpha_j * G(:,j)'.
      for (int j = 0; j<K; ++j) {
        alphat.refCol(j,col);
        const T nrm=col.nrm2sq();
        if (nrm) {
          G.refCol(j,col2);
          RtD.rank1Update(col,col2,T(-1.0));
        }
      }
      if (mode == PENALTY) {
        coreGroupIST(G,RtD,alphat,sqr<T>(M)*lambda/T(2.0),itermax,sqr<T>(M)*tol);
      } else {
        coreGroupISTConstrained(G,RtD,alphat,normX2,M*lambda,itermax,sqr<T>(M)*tol);
      }
    }
    // Write the result back in the caller's layout.
    alphat.transpose(alphaT[i]);
  }
  delete[](RtDT);
  delete[](alphatT);
};
/// Block coordinate descent for group Lasso (penalty form) on one group.
/// Each "coordinate" is a length-M row block coeffs(:,j) (column j of the
/// transposed coefficient matrix); the update is group soft-thresholding
/// of its l2 norm against lambda. RtDm holds the residual correlations
/// and is kept consistent via rank-1 BLAS updates. activate[j] is a
/// skip heuristic: atoms that keep shrinking to zero go negative and are
/// skipped until the counter climbs back to 0.
template <typename T>
void coreGroupIST(const Matrix<T>& G, Matrix<T>& RtDm,
    Matrix<T>& coeffsm,
    const T thrs,
    const int itermax,
    const T tol) {
  const int K = G.n();      // number of atoms
  const int M = RtDm.m();   // number of signals in the group
  T* const prG = G.rawX();
  T* const RtD = RtDm.rawX();
  T* const coeffs = coeffsm.rawX();
  const T lambda_init=thrs;
  T lambda=lambda_init;
  Vector<T> old_coeffv(M);
  T* const old_coeff = old_coeffv.rawX();
  // norms[j] caches ||coeffs(:,j)||_2 (post-shrinkage).
  Vector<T> normsv(K);
  T* const norms = normsv.rawX();
  coeffsm.norm_2_cols(normsv);
  Vector<T> normRtDv(K);
  Vector<int> activatev(K);
  activatev.set(3);
  int* const activate=activatev.rawX();
  for (int iter=0; iter < itermax; ++iter) {
    for (int j = 0; j <K; ++j) {
      if (activate[j] >= 0) {
        if (norms[j]) {
          // Active block: form the unshrunk update coeffs + RtD.
          cblas_copy(M,coeffs+j*M,1,old_coeff,1);
          vAdd(M,coeffs+j*M,RtD+j*M,coeffs+j*M);
          const T nrm = cblas_nrm2(M,coeffs+j*M,1);
          if (nrm > lambda) {
            // Group soft-thresholding: scale down by (nrm-lambda)/nrm.
            norms[j]=nrm-lambda;
            cblas_scal(M,norms[j]/nrm,coeffs+j*M,1);
            vSub(M,old_coeff,coeffs+j*M,old_coeff);
            // Rank-1 correlation update: RtD += (old-new) * G(:,j)'.
            cblas_ger(CblasColMajor,M,K,T(1.0),old_coeff,1,prG+j*K,1,RtD,M);
            activate[j]=5;
          } else {
            // Block shrinks to zero.
            memset(coeffs+j*M,0,M*sizeof(T));
            norms[j]=T();
            cblas_ger(CblasColMajor,M,K,T(1.0),old_coeff,1,prG+j*K,1,RtD,M);
            --activate[j];
          }
        } else {
          // Inactive block: test whether it should (re)enter.
          cblas_copy(M,RtD+j*M,1,old_coeff,1);
          const T nrm = cblas_nrm2(M,old_coeff,1);
          if (nrm > lambda) {
            norms[j]=nrm-lambda;
            cblas_copy(M,old_coeff,1,coeffs+j*M,1);
            cblas_scal(M,norms[j]/nrm,coeffs+j*M,1);
            cblas_ger(CblasColMajor,M,K,T(-1.0),coeffs+j*M,1,prG+j*K,1,RtD,M);
            activate[j]=5;
          } else {
            // Repeatedly inactive atoms are parked for 10 sweeps.
            activate[j] = (activate[j] == 0) ? -10 : activate[j]-1;
          }
        }
      } else {
        // Parked atom: count back up toward re-examination.
        ++activate[j];
      }
    }
    // Stopping test every 5 sweeps (iterations 4, 9, 14, ...).
    if (iter % 5 == 4) {
      T norm1=normsv.asum();
      RtDm.norm_2sq_cols(normRtDv);
      // sqr() here takes the square root of the squared column norms.
      T maxDtR = sqr(normRtDv.maxval());
      T DtRa=T();
      for (int j = 0; j<K; ++j) {
        if (norms[j]) {
          DtRa += cblas_dot(M,coeffs+j*M,1,RtD+j*M,1);
        }
      }
      if ((maxDtR - lambda) < (tol*maxDtR/norm1) && norm1-DtRa/maxDtR < tol) break;
    }
  }
};
/// Auxiliary function for ist_groupLasso
/// Constrained variant of coreGroupIST: group block coordinate descent
/// that additionally adapts lambda so the residual energy err converges
/// to the constraint eps (lambda^2 is rescaled by eps/err once a sweep
/// is nearly stationary), with the tolerance annealed from 10*tol to tol.
/// err is tracked incrementally via dot products on each block update.
template <typename T>
void coreGroupISTConstrained(const Matrix<T>& G, Matrix<T>& RtDm,
    Matrix<T>& coeffsm, const T normR,
    const T eps,
    const int itermax,
    const T tol) {
  const int K = G.n();      // number of atoms
  const int M = RtDm.m();   // number of signals in the group
  T* const prG = G.rawX();
  T* const RtD = RtDm.rawX();
  T* const coeffs = coeffsm.rawX();
  // Current residual energy, updated incrementally.
  T err = normR;
  Vector<T> old_coeffv(M);
  T* const old_coeff = old_coeffv.rawX();
  // norms[j] caches ||coeffs(:,j)||_2 (post-shrinkage).
  Vector<T> normsv(K);
  T* const norms = normsv.rawX();
  coeffsm.norm_2_cols(normsv);
  Vector<T> normRtDv(K);
  RtDm.norm_2sq_cols(normRtDv);
  // activate[j] < 0 parks atom j; it is skipped until it counts back to 0.
  Vector<int> activatev(K);
  activatev.set(3);
  int* const activate=activatev.rawX();
  T norm1 = normsv.sum();
  // Zero warm start already feasible: nothing to do.
  if (!norm1 && err <= eps) return;
  T current_tol = 10.0*tol;
  // sqr() here takes the square root of the squared column norms.
  T maxDtR = sqr(normRtDv.maxval());
  T lambda = maxDtR;
  T lambdasq= lambda*lambda;
  // Cold start: scale lambda toward the constraint.
  if (!norm1) {
    lambdasq *= eps/err;
    lambda=sqrt(lambdasq);
  }
  for (int iter=0; iter < itermax; ++iter) {
    T old_err = err;
    for (int j = 0; j <K; ++j) {
      if (activate[j] >= 0) {
        if (norms[j]) {
          // Active block: form the unshrunk update coeffs + RtD.
          cblas_copy(M,coeffs+j*M,1,old_coeff,1);
          vAdd(M,coeffs+j*M,RtD+j*M,coeffs+j*M);
          const T nrm = cblas_nrm2(M,coeffs+j*M,1);
          if (nrm > lambda) {
            // Group soft-thresholding of the block norm.
            norms[j]=nrm-lambda;
            cblas_scal(M,norms[j]/nrm,coeffs+j*M,1);
            vSub(M,old_coeff,coeffs+j*M,old_coeff);
            // Incremental residual-energy update for this block move.
            err += cblas_dot(M,old_coeff,1,old_coeff,1)
              +2*cblas_dot(M,old_coeff,1,RtD+j*M,1);
            cblas_ger(CblasColMajor,M,K,T(1.0),old_coeff,1,prG+j*K,1,RtD,M);
            activate[j]=3;
          } else {
            // Block shrinks to zero.
            memset(coeffs+j*M,0,M*sizeof(T));
            norms[j]=T();
            err += cblas_dot(M,old_coeff,1,old_coeff,1)
              +2*cblas_dot(M,old_coeff,1,RtD+j*M,1);
            cblas_ger(CblasColMajor,M,K,T(1.0),old_coeff,1,prG+j*K,1,RtD,M);
            --activate[j];
          }
        } else {
          // Inactive block: test whether it should (re)enter.
          cblas_copy(M,RtD+j*M,1,old_coeff,1);
          const T nrm = cblas_nrm2(M,old_coeff,1);
          if (nrm > lambda) {
            norms[j]=nrm-lambda;
            cblas_copy(M,old_coeff,1,coeffs+j*M,1);
            cblas_scal(M,norms[j]/nrm,coeffs+j*M,1);
            err += cblas_dot(M,coeffs+j*M,1,coeffs+j*M,1)
              -2*cblas_dot(M,coeffs+j*M,1,RtD+j*M,1);
            cblas_ger(CblasColMajor,M,K,T(-1.0),coeffs+j*M,1,prG+j*K,1,RtD,M);
            activate[j]=3;
          } else {
            // Park repeatedly inactive atoms for 3 sweeps.
            activate[j] = (activate[j] == 0) ? -3 : activate[j]-1;
          }
        }
      } else {
        // Parked atom: count back up toward re-examination.
        ++activate[j];
      }
    }
    norm1 = normsv.sum();
    RtDm.norm_2sq_cols(normRtDv);
    maxDtR = sqr(normRtDv.maxval());
    T DtRa=T();
    for (int j = 0; j<K; ++j) {
      if (norms[j]) {
        DtRa += cblas_dot(M,coeffs+j*M,1,RtD+j*M,1);
      }
    }
    // Near-stationary sweep: adjust lambda toward the constraint.
    if (norm1-DtRa/maxDtR <= current_tol) {
      const T tol_bis=current_tol*maxDtR;
      // Did err cross (within tolerance) the constraint boundary?
      const bool change = ((old_err > eps) && err < eps+tol_bis) ||
        (old_err < eps && err > eps-tol_bis);
      if (change) {
        if (current_tol == tol) {
          break;
        } else {
          // Tighten the tolerance and keep refining.
          current_tol = MAX(current_tol*0.5,tol);
        }
      }
      lambdasq *= eps/err;
      lambda=sqrt(lambdasq);
    }
  }
};
/// auxiliary function for ist_groupLasso
template <typename T>
T computeError(const T normX2,const Vector<T>& norms,
const Matrix<T>& G,const Matrix<T>& RtD,const Matrix<T>& alphat) {
T err2 = normX2;
Vector<T> col,col2;
for (int j = 0; j<G.n(); ++j) {
if (norms[j] > EPSILON) {
alphat.refCol(j,col);
RtD.refCol(j,col2);
err2 -= 2*col.dot(col2);
T add = 0.0;
for (int k = 0; k<j; ++k) {
if (norms[k] > EPSILON) {
alphat.refCol(k,col2);
add -= G(j,k)*col.dot(col2);
}
}
add += add - G(j,j)*col.nrm2sq();
err2 += add;
}
}
return err2;
}
/// auxiliary function for ist and ist_groupLasso
/// Residual energy for a single signal: given normX2 = ||x||^2, the Gram
/// matrix G and residual correlations DtR, evaluates
///   normX2 - alpha'G alpha - 2 alpha'DtR
/// using spAlpha as a sparse scratch view of the coefficients.
template <typename T>
T computeError(const T normX2,
    const Matrix<T>& G,const Vector<T>& DtR,const Vector<T>& coeffs,
    SpVector<T>& spAlpha) {
  coeffs.toSparse(spAlpha);
  const T quadratic = G.quad(spAlpha);
  const T correlation = DtR.dot(spAlpha);
  return normX2 - quadratic - 2 * correlation;
};
/* ******************
* Simultaneous OMP
* *****************/
/// Convenience overload of Simultaneous OMP: one shared residual
/// tolerance eps for every group, with per-group adaptation disabled.
template <typename T>
void somp(const Matrix<T>* X, const Matrix<T>& D, SpMatrix<T>* spalpha,
    const int Ngroups, const int L, const T eps,const int numThreads) {
  // Forward to the general version through a pointer to the tolerance.
  const T* sharedEps = &eps;
  somp(X,D,spalpha,Ngroups,L,sharedEps,false,numThreads);
}
template <typename T>
void somp(const Matrix<T>* XT, const Matrix<T>& D, SpMatrix<T>* spalphaT,
const int Ngroups, const int LL, const T* eps, const bool adapt,
const int numThreads) {
if (LL <= 0) return;
const INTM K = D.n();
const INTM L = MIN(D.m(),MIN(LL,K));
if (!D.isNormalized()) {
cerr << "Current implementation of OMP does not support non-normalized dictionaries" << endl;
return;
}
/// compute the Gram Matrix G=D'D
Matrix<T> G;
D.XtX(G);
init_omp(numThreads);
int i;
#pragma omp parallel for private(i)
for (i = 0; i< Ngroups; ++i) {
const Matrix<T>& X = XT[i];
const INTM M = X.n();
SpMatrix<T>& spalpha = spalphaT[i];
spalpha.clear();
Vector<INTM> rv;
Matrix<T> vM;
T thrs = adapt ? eps[i] : M*(*eps);
coreSOMP(X,D,G,vM,rv,L,thrs);
spalpha.convert2(vM,rv,K);
}
}
/// Core Simultaneous OMP for one group of M signals X over dictionary D
/// with Gram matrix G. Selects up to L atoms (indices returned in r,
/// padded with -1) and returns the coefficient representation in v.
/// big_mode chooses between working with XX'D (cheaper when M is large)
/// and X'D, based on an operation-count estimate. The selection loop
/// maintains a Cholesky-like factor S of the selected Gram block with
/// partial reorthogonalization, and updates the per-atom scores f/e
/// incrementally.
template <typename T>
void coreSOMP(const Matrix<T>& X, const Matrix<T>& D, const Matrix<T>& G,
    Matrix<T>& v,
    Vector<INTM>& r, const int L, const T eps) {
  const int K = G.n();   // number of atoms
  const int n = D.m();   // signal dimension
  const int M = X.n();   // number of signals in the group
  // Heuristic flop-count comparison between the two work layouts.
  const bool big_mode = M*K*(n+L) > 2*(M*n*n+K*n*(n+L));
  r.resize(L);
  r.set(-1);
  v.resize(0,X.n());
  // Single-signal group: defer to the order-recursive OMP kernel.
  if (M == 1) {
    Vector<T> scores(K);
    Vector<T> norm(K);
    Vector<T> tmp(K);
    Matrix<T> Un(L,L);
    Un.setZeros();
    Matrix<T> Undn(K,L);
    Matrix<T> Unds(L,L);
    Matrix<T> Gs(K,L);
    Vector<T> Rdn(K);
    Vector<T> Xt(X.rawX(),n);
    D.multTrans(Xt,Rdn);
    Vector<T> RUn(L);
    T normX = Xt.nrm2sq();
    T lambda=0;
    coreORMP(scores,norm,tmp,Un,Undn,Unds,Gs,Rdn,G,r,RUn,normX,&eps,&L,&lambda);
    // Count the atoms actually selected (r is -1 padded).
    int count=0;
    for (int i = 0; i<L; ++i) {
      if (r[i] == -1) break;
      ++count;
    }
    v.resize(count,X.n());
    Vector<T> v1(v.rawX(),count);
    Vector<T> v2(RUn.rawX(),count);
    v1.copy(v2);
    return;
  }
  Matrix<T> XXtD;
  Matrix<T> XtD;
  // E tracks the remaining residual energy of the whole group.
  T E;
  if (big_mode) {
    Matrix<T> XXt;
    X.XXt(XXt);
    E = XXt.trace();
    if (E < eps) return;   // already below tolerance
    XXt.mult(D,XXtD);
  } else {
    E=X.normFsq();
    if (E < eps) return;
    X.mult(D,XtD,true);
  }
  // Work matrices for the incremental selection (see header comment).
  Matrix<T> A(K,L);
  A.setZeros();
  Matrix<T> B(L,K);
  B.setZeros();
  Matrix<T> S(L,L);
  S.setZeros();
  Matrix<T> Fs(K,L);
  Fs.setZeros();
  Matrix<T> Gs(K,L);
  Gs.setZeros();
  Matrix<T> As(L,L);
  As.setZeros();
  Vector<T> tmp(K);
  // e[j]: residual (deflated) Gram diagonal for atom j.
  Vector<T> e(K);
  G.diag(e);
  // f[j]: selection numerator for atom j (energy captured if selected).
  Vector<T> f(K);
  if (big_mode) {
    for (int i = 0; i<K; ++i) {
      Vector<T> di;
      D.refCol(i,di);
      Vector<T> di2;
      XXtD.refCol(i,di2);
      f[i]=di.dot(di2);
    }
  } else {
    XtD.norm_2sq_cols(f);
  }
  Vector<T> c(L);
  c.setZeros();
  Vector<T> scores(K);
  /// permit unsafe fast low level accesses
  T* const prAs = As.rawX();
  T* const prA = A.rawX();
  T* const prS = S.rawX();
  T* const prGs = Gs.rawX();
  T* const prFs = Fs.rawX();
  T* const prB = B.rawX();
  T* const pr_c = c.rawX();
  T* const pr_tmp = tmp.rawX();
  int j;
  // Main greedy loop: pick at most L atoms.
  for (j = 0; j<L; ++j) {
    // Score each atom and exclude those already selected.
    scores.copy(f);
    scores.div(e);
    for (int k = 0; k<j; ++k) scores[r[k]]=-1.0;
    const int currentInd = scores.max();
    const T invNorm=T(1.0)/sqrt(e[currentInd]);
    // Numerically degenerate atom: undo this step and stop.
    if (invNorm > 1e3) {
      j=j-1;
      break;
    }
    r[j]=currentInd;
    E -= scores[currentInd];
    // Start a new column of the factor S.
    for (int k = 0; k<j; ++k) prS[j*L+k]=T();
    prS[j*L+j]=T(1.0);
    for (int k = 0; k<j; ++k) prAs[k*L+j]=prA[k*K+currentInd];
    /// Cholesky update with partial reorthogonalization
    // Reorthogonalize twice when the atom is poorly conditioned.
    int iter = invNorm > 1.41 ? 2 : 1;
    for (int k = 0; k<iter; ++k) {
      for (int l = 0; l<j; ++l) {
        T scal = -cblas_dot<T>(j-l+1,prAs+l*L+l,1,prS+j*L+l,1);
        cblas_axpy<T>(l+1,scal,prS+l*L,1,prS+j*L,1);
      }
    }
    cblas_scal<T>(j+1,invNorm,prS+j*L,1);
    // Stop when the budget is exhausted or the residual is small enough.
    if (j == L-1 || E <= eps) {
      ++j;
      break;
    }
    /// Update e,f,scores,A,B,As,Bs,Fs,Gs,S,c
    /// Gs,S,A,As, e, Fs, B,c
    Vector<T> Gsj;
    Gs.refCol(j,Gsj);
    G.copyCol(currentInd,Gsj);
    cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),prGs,K,prS+j*L,1,
        T(0.0),prA+j*K,1);
    prAs[j*L+j]=prA[j*K+currentInd];
    Vector<T> Aj;
    A.refCol(j,Aj);
    // Deflate the residual Gram diagonal: e -= A(:,j).^2.
    tmp.sqr(Aj);
    e.sub(tmp);
    Vector<T> Fsj;
    Fs.refCol(j,Fsj);
    if (big_mode) {
      Vector<T> di;
      D.refCol(currentInd,di);
      XXtD.multTrans(di,Fsj);
    } else {
      Vector<T> di;
      XtD.refCol(currentInd,di);
      XtD.multTrans(di,Fsj);
    }
    cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j+1,T(1.0),prFs,K,prS+j*L,1,
        T(0.0),prB+j,L);
    for (int k = 0; k<j;++k) pr_c[k]=T();
    for (int k = 0; k<=j;++k)
      cblas_axpy<T>(j,prS[j*L+k],prB+r[k]*L,1,pr_c,1);
    // Incremental update of the selection numerators f.
    f.add(tmp,f[currentInd]*invNorm*invNorm);
    if (j > 0) {
      cblas_gemv<T>(CblasColMajor,CblasNoTrans,K,j,T(1.0),prA,K,pr_c,1,
          T(0.0),pr_tmp,1);
    } else {
      tmp.setZeros();
    }
    cblas_axpy<T>(K,T(-1.0),prB+j,L,pr_tmp,1);
    tmp.mult(tmp,Aj);
    f.add(tmp,T(2.0));
  }
  // Release scratch storage before the final solve.
  A.clear();
  B.clear();
  Fs.clear();
  Gs.clear();
  As.clear();
  if (j == 0) return;
  // Recover the coefficients: v = (S S') Dg' X over the j selected atoms.
  Matrix<T> SSt;
  S.upperTriXXt(SSt,j);
  Matrix<T> Dg(n,j);
  for (int i = 0; i<j;++i) {
    Vector<T> Dgi;
    Dg.refCol(i,Dgi);
    D.copyCol(r[i],Dgi);
  }
  Matrix<T> SStDt;
  SSt.mult(Dg,SStDt,false,true);
  SStDt.mult(X,v);
};
#endif // DECOMP_H
|
8.norace5.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define M 200
#define N 200
// Row-wise matrix-vector product A = B*C, parallelized over rows.
// 'sum' is firstprivate (each thread starts from its own copy of 0.0)
// and 'id' is lastprivate (the value from the last logical iteration is
// copied out), so no shared location is written concurrently and the
// region is data-race free — which is what the CHECK line verifies.
int main() {
  double A[M], B[M][N], C[N], sum = 0.0;
  int id;
#pragma omp parallel for firstprivate(sum) lastprivate(id)
  for (int i = 0; i < M; i++) {
    // Dot product of row B[i] with C, accumulated in the private sum.
    for (int j = 0; j < N; j++) {
      sum += B[i][j] * C[j];
    }
    A[i] = sum;   // each iteration writes a distinct A[i]
    sum = 0.0;    // reset the private accumulator for the next row
    id = omp_get_thread_num();
  }
  return id;
}
// CHECK: Region is Data Race Free.
// END
|
GB_unaryop__lnot_uint16_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_int64
// op(A') function: GB_tran__lnot_uint16_int64
// C type: uint16_t
// A type: int64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = !(aij != 0), with typecast int64_t -> uint16_t, to all anz
// entries of Ax, writing into Cx (Cx and Ax may be aliased). The loop is
// statically scheduled across nthreads OpenMP threads. Returns
// GrB_NO_VALUE when the operator/type combination is compile-time
// disabled (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop__lnot_uint16_int64
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = !((uint16_t) Ax [p] != 0)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose A, typecast int64_t -> uint16_t, and
// apply the logical-not operator, via the shared phase-2 transpose
// template (GB_unaryop_transpose.c). Returns GrB_NO_VALUE when this
// operator/type combination is compile-time disabled.
GrB_Info GB_tran__lnot_uint16_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_emult_template.c | //------------------------------------------------------------------------------
// GB_emult_template: phase1 and phase2 for C=A.*B, C<M>=A.*B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Computes C=A.*B (no mask) or C<M>=A.*B (mask present and not complemented).
// Does not handle the case C<!M>=A.*B. The complemented mask is handled in
// GB_mask instead. If present, the mask M is assumed to be very sparse
// compared with A and B.
// phase1: does not compute C itself, but just counts the # of entries in each
// vector of C. Fine tasks compute the # of entries in their slice of a
// single vector of C, and the results are cumsum'd in GB_task_cumsum.
// phase2: computes C, using the counts computed by phase1.
{
// iB_first is unused if the operator is FIRST
#include "GB_unused.h"
//--------------------------------------------------------------------------
// get A, B, M, and C
//--------------------------------------------------------------------------
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
const int64_t vlen = A->vlen ;
const int64_t *restrict Bp = B->p ;
const int64_t *restrict Bh = B->h ;
const int64_t *restrict Bi = B->i ;
const int64_t *restrict Mp = NULL ;
const int64_t *restrict Mh = NULL ;
const int64_t *restrict Mi = NULL ;
const GB_void *restrict Mx = NULL ;
GB_cast_function cast_M = NULL ;
size_t msize = 0 ;
if (M != NULL)
{
Mp = M->p ;
Mh = M->h ;
Mi = M->i ;
Mx = M->x ;
cast_M = GB_cast_factory (GB_BOOL_code, M->type->code) ;
msize = M->type->size ;
}
#if defined ( GB_PHASE_2_OF_2 )
const GB_ATYPE *restrict Ax = A->x ;
const GB_ATYPE *restrict Bx = B->x ;
const int64_t *restrict Cp = C->p ;
const int64_t *restrict Ch = C->h ;
int64_t *restrict Ci = C->i ;
GB_CTYPE *restrict Cx = C->x ;
#endif
//--------------------------------------------------------------------------
// phase1: count entries in each C(:,j); phase2: compute C
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
bool fine_task = (klast == -1) ;
int64_t len ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
len = TaskList [taskid].len ;
}
else
{
// a coarse task operates on one or more whole vectors
len = vlen ;
}
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of C
//------------------------------------------------------------------
int64_t j = (Ch == NULL) ? k : Ch [k] ;
#if defined ( GB_PHASE_1_OF_2 )
int64_t cjnz = 0 ;
#else
int64_t pC, pC_end ;
if (fine_task)
{
// A fine task computes a slice of C(:,j)
pC = TaskList [taskid ].pC ;
pC_end = TaskList [taskid+1].pC ;
ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ;
}
else
{
// The vectors of C are never sliced for a coarse task.
pC = Cp [k] ;
pC_end = Cp [k+1] ;
}
int64_t cjnz = pC_end - pC ;
if (cjnz == 0) continue ;
#endif
//------------------------------------------------------------------
// get A(:,j)
//------------------------------------------------------------------
int64_t pA = -1, pA_end = -1 ;
if (fine_task)
{
// A fine task operates on Ai,Ax [pA...pA_end-1], which is
// A fine task operates on Ai,Ax [pA...pA_end-1], which is
// a subset of the vector A(:,j)
pA = TaskList [taskid].pA ;
pA_end = TaskList [taskid].pA_end ;
}
else
{
// A coarse task operates on the entire vector A (:,j)
int64_t kA = (Ch == Ah) ? k :
((C_to_A == NULL) ? j : C_to_A [k]) ;
if (kA >= 0)
{
pA = Ap [kA] ;
pA_end = Ap [kA+1] ;
}
}
int64_t ajnz = pA_end - pA ; // nnz in A(:,j) for this slice
bool adense = (ajnz == len) ;
// get the first and last indices in A(:,j) for this vector
int64_t iA_first = -1 ;
if (ajnz > 0)
{
iA_first = Ai [pA] ;
}
#if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG )
int64_t iA_last = -1 ;
if (ajnz > 0)
{
iA_last = Ai [pA_end-1] ;
}
#endif
//------------------------------------------------------------------
// get B(:,j)
//------------------------------------------------------------------
int64_t pB = -1, pB_end = -1 ;
if (fine_task)
{
// A fine task operates on Bi,Bx [pB...pB_end-1], which is
// a subset of the vector B(:,j)
pB = TaskList [taskid].pB ;
pB_end = TaskList [taskid].pB_end ;
}
else
{
// A coarse task operates on the entire vector B (:,j)
int64_t kB = (Ch == Bh) ? k :
((C_to_B == NULL) ? j : C_to_B [k]) ;
if (kB >= 0)
{
pB = Bp [kB] ;
pB_end = Bp [kB+1] ;
}
}
int64_t bjnz = pB_end - pB ; // nnz in B(:,j) for this slice
bool bdense = (bjnz == len) ;
// get the first and last indices in B(:,j) for this vector
int64_t iB_first = -1 ;
if (bjnz > 0)
{
iB_first = Bi [pB] ;
}
#if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG )
int64_t iB_last = -1 ;
if (bjnz > 0)
{
iB_last = Bi [pB_end-1] ;
}
#endif
//------------------------------------------------------------------
// phase1: count nnz (C (:,j)); phase2: compute C(:,j)
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
if (ajnz == 0 || bjnz == 0)
{
//--------------------------------------------------------------
// A(:,j) and/or B(:,j) are empty
//--------------------------------------------------------------
;
}
else if (iA_last < iB_first || iB_last < iA_first)
{
//--------------------------------------------------------------
// intersection of A(:,j) and B(:,j) is empty
//--------------------------------------------------------------
// the last entry of A(:,j) comes before the first entry
// of B(:,j), or visa versa
;
}
else
#endif
if (M == NULL)
{
if (adense && bdense)
{
//----------------------------------------------------------
// A(:,j) and B(:,j) dense: thus C(:,j) dense
//----------------------------------------------------------
ASSERT (ajnz == bjnz) ;
ASSERT (iA_first == iB_first) ;
ASSERT (iA_last == iB_last ) ;
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
for (int64_t p = 0 ; p < ajnz ; p++)
{
Ci [pC + p] = p + iA_first ;
GB_GETA (aij, Ax, pA + p) ;
GB_GETB (bij, Bx, pB + p) ;
GB_BINOP (GB_CX (pC + p), aij, bij) ;
}
#endif
}
else if (adense)
{
//----------------------------------------------------------
// A(:,j) is dense, B(:,j) is sparse: thus C(:,j) sparse
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = bjnz ;
#else
ASSERT (cjnz == bjnz) ;
for (int64_t p = 0 ; p < bjnz ; p++)
{
int64_t i = Bi [pB + p] ;
Ci [pC + p] = i ;
GB_GETA (aij, Ax, pA + i - iA_first) ;
GB_GETB (bij, Bx, pB + p) ;
GB_BINOP (GB_CX (pC + p), aij, bij) ;
}
#endif
}
else if (bdense)
{
//----------------------------------------------------------
// A(:,j) is sparse, B(:,j) is dense: thus C(:,j) sparse
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
for (int64_t p = 0 ; p < ajnz ; p++)
{
int64_t i = Ai [pA + p] ;
Ci [pC + p] = i ;
GB_GETA (aij, Ax, pA + p) ;
GB_GETB (bij, Bx, pB + i - iB_first) ;
GB_BINOP (GB_CX (pC + p), aij, bij) ;
}
#endif
}
else if (ajnz > 32 * bjnz)
{
//----------------------------------------------------------
// A(:,j) is much denser than B(:,j)
//----------------------------------------------------------
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
// find i in A(:,j)
int64_t pright = pA_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Ai, pA, pright, found) ;
if (found)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
ASSERT (pC < pC_end) ;
Ci [pC] = i ;
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (pC), aij, bij) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
else if (bjnz > 32 * ajnz)
{
//----------------------------------------------------------
// B(:,j) is much denser than A(:,j)
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
// find i in B(:,j)
int64_t pright = pB_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Bi, pB, pright, found) ;
if (found)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
ASSERT (pC < pC_end) ;
Ci [pC] = i ;
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (pC), aij, bij) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
else
{
//----------------------------------------------------------
// A(:,j) and B(:,j) have about the same # of entries
//----------------------------------------------------------
// linear-time scan of A(:,j) and B(:,j)
while (pA < pA_end && pB < pB_end)
{
int64_t iA = Ai [pA] ;
int64_t iB = Bi [pB] ;
if (iA < iB)
{
// A(i,j) exists but not B(i,j)
pA++ ;
}
else if (iB < iA)
{
// B(i,j) exists but not A(i,j)
pB++ ;
}
else
{
// both A(i,j) and B(i,j) exist
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
ASSERT (pC < pC_end) ;
Ci [pC] = iB ;
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (pC), aij, bij) ;
pC++ ;
#endif
pA++ ;
pB++ ;
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
}
else
{
//--------------------------------------------------------------
// Mask is present
//--------------------------------------------------------------
int64_t pM = -1 ;
int64_t pM_end = -1 ;
if (fine_task)
{
// A fine task operates on Mi,Mx [pM...pM_end-1], which is
// a subset of the vector M(:,j)
pM = TaskList [taskid].pM ;
pM_end = TaskList [taskid].pM_end ;
}
else
{
int64_t kM = -1 ;
if (Ch == Mh)
{
// Ch is the same as Mh (a shallow copy), or both NULL
kM = k ;
}
else
{
kM = (C_to_M == NULL) ? j : C_to_M [k] ;
}
if (kM >= 0)
{
pM = Mp [kM] ;
pM_end = Mp [kM+1] ;
}
}
//--------------------------------------------------------------
// C(:,j)<M(:,j) = A(:,j) .* B (:,j)
//--------------------------------------------------------------
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// get M(i,j) for A(i,j) .* B (i,j)
//----------------------------------------------------------
int64_t i = Mi [pM] ;
bool mij ;
cast_M (&mij, Mx +(pM*msize), 0) ;
if (!mij) continue ;
//----------------------------------------------------------
// get A(i,j)
//----------------------------------------------------------
int64_t apright = pA_end - 1 ;
bool afound ;
GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ;
if (!afound) continue ;
//----------------------------------------------------------
// get B(i,j)
//----------------------------------------------------------
int64_t bpright = pB_end - 1 ;
bool bfound ;
GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ;
if (!bfound) continue ;
//----------------------------------------------------------
// C(i,j) = A(i,j) .* B(i,j)
//----------------------------------------------------------
// C (i,j) = A (i,j) .* B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (pC), aij, bij) ;
pC++ ;
#endif
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
//------------------------------------------------------------------
// final count of nnz (C (:,j))
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
if (fine_task)
{
TaskList [taskid].pC = cjnz ;
}
else
{
Cp [k] = cjnz ;
}
#endif
}
}
}
|
ethereum_fmt_plug.c | /*
* JtR format to crack password protected Ethereum Wallets.
*
* This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it
* is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_ethereum;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ethereum);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 16 // tuned on i7-6600U
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#define PBKDF2_HMAC_SHA256_ALSO_INCLUDE_CTX 1 // hack, we can't use our simd pbkdf2 code for presale wallets because of varying salt
#include "pbkdf2_hmac_sha256.h"
#include "ethereum_common.h"
#include "escrypt/crypto_scrypt.h"
#include "KeccakHash.h"
#include "aes.h"
#include "jumbo.h"
#include "memdbg.h"
#define FORMAT_NAME "Ethereum Wallet"
#define FORMAT_LABEL "ethereum"
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "PBKDF2-SHA256/scrypt Keccak " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA256/scrypt Keccak 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 16
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(uint64_t)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* Candidate passwords, one slot per key index (written by ethereum_set_key). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Computed 16-byte verifiers (as uint32 words), compared in cmp_all/cmp_one. */
static uint32_t (*crypt_out)[BINARY_SIZE * 2 / sizeof(uint32_t)];
/* Salt currently being attacked; installed by set_salt(). */
static custom_salt *cur_salt;
/* 8-byte-aligned pad block; only data[0] (0x02) is hashed — the presale
 * verifier is Keccak-256(seed || 0x02), see crypt_all(). */
static union {
	uint64_t dummy;
	unsigned char data[8];
} dpad;
/* Allocate per-candidate buffers. With OpenMP, min_keys is scaled by the
 * thread count and max_keys additionally by OMP_SCALE so each thread gets
 * several batches of work per crypt_all() call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
	/* Initialize the presale pad; only the leading 0x02 byte is ever used. */
	memcpy(dpad.data, "\x02\x00\x00\x00\x00\x00\x00\x00", 8);
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Install the salt selected by the cracker loop; consumed by crypt_all(). */
static void set_salt(void *salt)
{
	cur_salt = (custom_salt *)salt;
}
/* Store a candidate password, truncated to PLAINTEXT_LENGTH and NUL-terminated. */
static void ethereum_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
/* Return the stored candidate password for this key index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Derive a 32-byte master key for each candidate password and compute the
 * verifier stored in crypt_out[]:
 *   type 0: PBKDF2-HMAC-SHA256(password, salt)  -> Keccak-256 MAC of ct
 *   type 1: scrypt(password, salt, N, r, p)     -> Keccak-256 MAC of ct
 *   type 2: presale; PBKDF2-HMAC-SHA256(password, password, 2000 iters,
 *           16 bytes) -> AES-128-CBC decrypt seed -> Keccak-256(seed||0x02)
 * Returns the number of candidates processed.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	/* FIX: only the pragma may be conditional on _OPENMP. With the loop
	 * itself inside the #ifdef, non-OpenMP builds executed the body once
	 * and silently hashed only the first MAX_KEYS_PER_CRYPT candidates. */
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		unsigned char master[MAX_KEYS_PER_CRYPT][32];
		int i;

		/* Stage 1: key derivation for this batch. */
		if (cur_salt->type == 0) {
#ifdef SIMD_COEF_64
			int lens[MAX_KEYS_PER_CRYPT];
			unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				lens[i] = strlen(saved_key[index+i]);
				pin[i] = (unsigned char*)saved_key[index+i];
				pout[i] = master[i];
			}
			pbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, pout, 32, 0);
#else
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
				pbkdf2_sha256((unsigned char *)saved_key[index+i],
				              strlen(saved_key[index+i]),
				              cur_salt->salt, cur_salt->saltlen,
				              cur_salt->iterations, master[i], 32,
				              0);
#endif
		} else if (cur_salt->type == 1) {
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
				crypto_scrypt((unsigned char *)saved_key[index+i],
				              strlen(saved_key[index+i]),
				              cur_salt->salt,
				              cur_salt->saltlen, cur_salt->N,
				              cur_salt->r, cur_salt->p,
				              master[i], 32);
		} else if (cur_salt->type == 2) {
			/* Presale wallet: the password is its own salt and only the
			 * first 16 derived bytes are used (AES-128 key). */
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
				pbkdf2_sha256((unsigned char *)saved_key[index+i],
				              strlen(saved_key[index+i]),
				              (unsigned char *)saved_key[index+i],
				              strlen(saved_key[index+i]),
				              2000, master[i], 16, 0);
		}

		/* Stage 2: compute the verifier. */
		if (cur_salt->type == 0 || cur_salt->type == 1) {
			/* MAC = Keccak-256(master[16..31] || ciphertext) */
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				Keccak_HashInstance hash;
				Keccak_HashInitialize(&hash, 1088, 512, 256, 0x01); // delimitedSuffix is 0x06 for SHA-3, and 0x01 for Keccak
				Keccak_HashUpdate(&hash, master[i] + 16, 16 * 8);
				Keccak_HashUpdate(&hash, cur_salt->ct, cur_salt->ctlen * 8);
				Keccak_HashFinal(&hash, (unsigned char*)crypt_out[index+i]);
			}
		} else {
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				AES_KEY akey;
				Keccak_HashInstance hash;
				unsigned char iv[16];
				unsigned char seed[4096];
				int padbyte;
				int datalen;

				/* encseed layout: 16-byte IV || AES-128-CBC ciphertext */
				AES_set_decrypt_key(master[i], 128, &akey);
				memcpy(iv, cur_salt->encseed, 16);
				AES_cbc_encrypt(cur_salt->encseed + 16, seed, cur_salt->eslen - 16, &akey, iv, AES_DECRYPT);
				/* Reject malformed PKCS#7 padding; zero the verifier so a
				 * stale value can never yield a false positive. */
				if (check_pkcs_pad(seed, cur_salt->eslen - 16, 16) < 0) {
					memset(crypt_out[index+i], 0, BINARY_SIZE);
					continue;
				}
				padbyte = seed[cur_salt->eslen - 16 - 1];
				datalen = cur_salt->eslen - 16 - padbyte;
				if (datalen < 0) {
					memset(crypt_out[index+i], 0, BINARY_SIZE);
					continue;
				}
				Keccak_HashInitialize(&hash, 1088, 512, 256, 0x01);
				Keccak_HashUpdate(&hash, seed, datalen * 8);
				Keccak_HashUpdate(&hash, dpad.data, 1 * 8); /* trailing 0x02 */
				Keccak_HashFinal(&hash, (unsigned char*)crypt_out[index+i]);
			}
		}
	}
	return count;
}
/* Fast screen: does any computed verifier match the binary's first word? */
static int cmp_all(void *binary, int count)
{
	const uint32_t needle = ((uint32_t*)binary)[0];
	int index;

	for (index = 0; index < count; index++) {
		if (crypt_out[index][0] == needle)
			return 1;
	}
	return 0;
}
/* Full BINARY_SIZE comparison for one candidate flagged by cmp_all(). */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* crypt_all() already computes the full verifier, so a cmp_one() match is
 * definitive; nothing further to check here. */
static int cmp_exact(char *source, int index)
{
	(void)source;
	(void)index;
	return 1;
}
/* Format descriptor wiring this plugin's parameters and callbacks into the
 * john core (slot meanings defined in formats.h). */
struct fmt_main fmt_ethereum = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0, /* presumably the minimum plaintext length — verify against formats.h */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{
			"iteration count", /* tunable-cost name reported to the user */
		},
		{ FORMAT_TAG },
		ethereum_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		ethereum_common_valid,
		fmt_default_split,
		ethereum_get_binary,
		ethereum_common_get_salt,
		{
			ethereum_common_iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL, /* NOTE(review): looks like the salt_compare slot — confirm */
		set_salt,
		ethereum_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
anneal.c | #include "anneal.h"
#include "cairo/visuals.h"
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
/*
 * Rebuild the rows x cols occupancy grid from the placement: each cell
 * holds the id of the vertex placed there, or -1 when empty.  Coordinates
 * are read from p->coords (x in the first half, y in the second).
 */
void build_stencil(graph_t *g, placement_t *p, int rows, int cols, int *stencil) {
	const int cells = rows * cols;

	for (int c = 0; c < cells; ++c)
		stencil[c] = -1;

	for (int v = 0; v < g->vertices; ++v) {
		const int px = (int) p->coords[v];
		const int py = (int) p->coords[v + g->vertices];
		stencil[px * cols + py] = v;
	}
}
/*
 * Full O(n^2) placement cost: for every unordered vertex pair (i, j), the
 * manhattan distance between their cells weighted by |laplacian[i][j]|.
 * Only the strict upper triangle is visited so each pair counts once.
 */
double sa_cost(graph_t *g, int *x, int *y) {
	double total = 0;
	#pragma omp parallel for schedule(static, 16) reduction(+: total)
	for (int i = 0; i < g->vertices; ++i) {
		for (int j = i + 1; j < g->vertices; ++j) {
			int dist = abs(x[i] - x[j]) + abs(y[i] - y[j]);
			total += dist * abs(g->laplacian[i * g->vertices + j]);
		}
	}
	return total;
}
/*
 * Incremental cost delta for moving vertex `elem` from (old_x, old_y) to
 * (x_rand, y_rand), swapping with `old_elem` — the vertex currently at the
 * target cell, or -1 if that cell is empty.  O(n) instead of re-running the
 * full O(n^2) sa_cost().  Positive return means the move makes things worse.
 */
double sa_estimate_diff(graph_t *g, int *x, int *y, int elem, int x_rand, int y_rand, int old_elem, int old_x, int old_y) {
	double cost = 0;
	// Recompute only the terms involving the two moved vertices.
	// This is O(n) instead of O(n^2)
	for (int i = 0; i < g->vertices; ++i) {
		// Subtract the old cost and add the new cost
		if (i == elem)
			continue;
		// If i == the old element there is no change in cost: elem and
		// old_elem simply trade places, so their mutual distance is the same.
		if (i != old_elem) {
			// elem: drop its old-position contribution, add the new one
			cost -= (abs(x[i] - old_x) + abs(y[i] - old_y)) * abs(g->laplacian[elem * g->vertices + i]);
			cost += (abs(x[i] - x_rand) + abs(y[i] - y_rand)) * abs(g->laplacian[elem * g->vertices + i]);
			if (old_elem != -1) {
				// old_elem moves the opposite way: from the target cell
				// back to elem's former cell
				cost -= (abs(x[i] - x_rand) + abs(y[i] - y_rand)) * abs(g->laplacian[old_elem * g->vertices + i]);
				cost += (abs(x[i] - old_x) + abs(y[i] - old_y)) * abs(g->laplacian[old_elem * g->vertices + i]);
			}
		}
	}
	return cost;
}
/* Scratch path buffer for the (currently commented-out) per-step PNG dumps. */
char buffer[1024];
/*
 * Simulated annealing over vertex placements on a rows x cols grid.
 * Starting from the placement in `p`, repeatedly proposes moving a random
 * vertex to a random cell (swapping with any current occupant), accepts by
 * the Metropolis criterion, and writes the resulting coordinates back into
 * `p` at the end.  The temperature decays geometrically by params.alpha
 * after every round of params.kmax proposals until it drops below
 * params.minimum_temperature.
 */
void simulated_annealing(graph_t *g, placement_t *p, int rows, int cols, sa_params_t params) {
	srand(time(NULL));
	double temp = params.initial_temperature;

	// Auxiliary structures
	int *stencil = malloc(rows * cols * sizeof(int)); // cell -> vertex id, or -1
	build_stencil(g, p, rows, cols, stencil);
	int *x = malloc(g->vertices * sizeof(int));
	int *y = malloc(g->vertices * sizeof(int));
	for (int i = 0; i < g->vertices; ++i) {
		x[i] = (int) p->coords[i];
		y[i] = (int) p->coords[i + g->vertices];
	}
	double cost = sa_cost(g, x, y);
	printf("Initial cost: %.2f\n", cost);
	int steps = 0;
	while(temp > params.minimum_temperature) {
		for (int k = 0; k < params.kmax; ++k) {
			// Select 1 element at random.
			int elem = rand() % g->vertices;
			// Then select one place on the grid to move it to
			int x_rand = rand() % rows;
			int y_rand = rand() % cols;
			// Save old elements (-1 if the target cell is empty)
			int old_elem = stencil[x_rand * cols + y_rand];
			int old_x = x[elem];
			int old_y = y[elem];
			double diff = sa_estimate_diff(g, x, y, elem, x_rand, y_rand, old_elem, old_x, old_y);
			if (diff < 0) {
				// Keep: improving moves are always accepted
				cost += diff;
			} else {
				// Metropolis: accept a worsening move with prob exp(-diff/temp)
				double r = (double)rand() / (double)(RAND_MAX);
				if (r < exp(-diff / temp)) {
					cost += diff;
				} else {
					// Do not keep
					continue;
				}
			}
			// Swap elem with the (possibly empty) occupant of the target cell;
			// writing old_elem back also clears the source cell when it is -1.
			stencil[x_rand * cols + y_rand] = elem;
			x[elem] = x_rand;
			y[elem] = y_rand;
			stencil[old_x * cols + old_y] = old_elem;
			if (old_elem != -1) {
				x[old_elem] = old_x;
				y[old_elem] = old_y;
			}
		}
		temp *= params.alpha;
		steps++;
		// if (steps % 10 == 0) {
		//     // Copy to the actual placement
		//     for (int i = 0; i < g->vertices; ++i) {
		//         p->coords[i] = x[i];
		//         p->coords[i + g->vertices] = y[i];
		//     }
		//     sprintf(buffer, "out/step_%05d.png", steps);
		//     visuals_png(g, p, rows, cols, buffer);
		// }
	}
	printf("Final cost: %.2f\n", cost);
	printf("Steps: %d\n", steps);
	free(stencil);
	// Copy to the actual placement
	for (int i = 0; i < g->vertices; ++i) {
		p->coords[i] = x[i];
		p->coords[i + g->vertices] = y[i];
	}
	// visuals_png(g, p, rows, cols, "step_final.png");
	free(x);
	free(y);
}
nodal_residualbased_block_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_NODAL_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER)
#define KRATOS_NODAL_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* #include <unordered_set> */
/* #ifdef USE_GOOGLE_HASH */
/* #include "sparsehash/dense_hash_set" //included in external libraries */
/* #endif */
#ifdef USE_GOOGLE_HASH
#include "sparsehash/dense_hash_set" //included in external libraries
#else
#include <unordered_set>
#endif
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "utilities/openmp_utils.h"
#include "includes/kratos_flags.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class NodalResidualBasedBlockBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Riccardo Rossi
*/
template <class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class NodalResidualBasedBlockBuilderAndSolver
: public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedBlockBuilderAndSolver);
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef Node<3> NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
typedef Vector VectorType;
///@}
///@name Life Cycle
///@{
    /** Constructor.
     *  Delegates entirely to the BuilderAndSolver base, which stores the
     *  linear solver; this class adds no construction-time state of its own.
     */
    NodalResidualBasedBlockBuilderAndSolver(
        typename TLinearSolver::Pointer pNewLinearSystemSolver)
        : BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver)
    {
    }
    /** Destructor.
     *  Intentionally empty: no resources are owned directly by this class;
     *  any members are released by their own destructors.
     */
    ~NodalResidualBasedBlockBuilderAndSolver() override
    {
    }
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
    /**
     * @brief Function to perform the build of the RHS. The vector could be sized as the total number
     * of dofs or as the number of unrestrained ones
     * @details Loops over all elements and conditions in parallel (OpenMP);
     * entities explicitly flagged not ACTIVE are skipped. Each thread works on
     * firstprivate copies of the local contribution objects.
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param b The RHS vector
     */
    void Build(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart,
        TSystemMatrixType &A,
        TSystemVectorType &b) override
    {
        KRATOS_TRY

        KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

        // Getting the elements from the model
        const int nelements = static_cast<int>(rModelPart.Elements().size());

        // Getting the array of the conditions
        const int nconditions = static_cast<int>(rModelPart.Conditions().size());

        ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
        ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
        ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();

        //contributions to the system
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

        //vector containing the localization in the system of the different
        //terms
        Element::EquationIdVectorType EquationId;

        // assemble all elements
        double start_build = OpenMPUtils::GetCurrentTime();

        // firstprivate gives every thread its own scratch contribution objects
#pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId)
        {
#pragma omp for schedule(guided, 512) nowait
            for (int k = 0; k < nelements; k++)
            {
                ModelPart::ElementsContainerType::iterator it = el_begin + k;

                //detect if the element is active or not. If the user did not make any choice the element
                //is active by default
                bool element_is_active = true;
                if ((it)->IsDefined(ACTIVE))
                    element_is_active = (it)->Is(ACTIVE);

                if (element_is_active)
                {
                    //calculate elemental contribution
                    pScheme->CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);

                    //assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
                    Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
                    Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
                }
            }

            //#pragma omp parallel for firstprivate(nconditions, LHS_Contribution, RHS_Contribution, EquationId ) schedule(dynamic, 1024)
#pragma omp for schedule(guided, 512)
            for (int k = 0; k < nconditions; k++)
            {
                ModelPart::ConditionsContainerType::iterator it = cond_begin + k;

                //detect if the condition is active or not. If the user did not make any choice the condition
                //is active by default
                bool condition_is_active = true;
                if ((it)->IsDefined(ACTIVE))
                    condition_is_active = (it)->Is(ACTIVE);

                if (condition_is_active)
                {
                    //calculate elemental contribution
                    pScheme->Condition_CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);

                    //assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
                    Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
                    Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
                }
            }
        }

        const double stop_build = OpenMPUtils::GetCurrentTime();
        KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl;

        //for (int i = 0; i < A_size; i++)
        //    omp_destroy_lock(&lock_array[i]);
        KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl;

        KRATOS_CATCH("")
    }
    /**
     * @brief Node-by-node assembly of the stabilized continuity (pressure) equation.
     * @details For each node with shape-function-derivative neighbours, assembles
     * a volumetric-compressibility term plus — for fluid or nearly-incompressible
     * solid nodes — a pressure-Laplacian stabilization with free-surface boundary
     * contributions. Each nodal local system is assembled into A and b.
     * @param pScheme The integration scheme considered (only checked non-null here)
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param b The RHS vector
     */
    void BuildNodally(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart,
        TSystemMatrixType &A,
        TSystemVectorType &b)
    {
        KRATOS_TRY

        KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

        std::cout << "Build Nodally Continuity Equation" << std::endl;

        //contributions to the system
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

        //vector containing the localization in the system of the different terms
        Element::EquationIdVectorType EquationId;
        ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();

        /* const double start_build = OpenMPUtils::GetCurrentTime(); */
        /* #pragma omp parallel */
        {
            ModelPart::NodeIterator NodesBegin;
            ModelPart::NodeIterator NodesEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);

            for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
            {
                VectorType nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
                const unsigned int neighSize = nodalSFDneighboursId.size();

                // Only assemble for nodes that actually have neighbours
                if (neighSize > 1)
                {
                    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
                    const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
                    const double timeInterval = CurrentProcessInfo[DELTA_TIME];

                    LHS_Contribution = ZeroMatrix(neighSize, neighSize);
                    RHS_Contribution = ZeroVector(neighSize);

                    if (EquationId.size() != neighSize)
                        EquationId.resize(neighSize, false);

                    double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
                    double volumetricCoeff = itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) + 2.0 * deviatoricCoeff / 3.0;

                    const unsigned int xpos = itNode->GetDofPosition(VELOCITY_X);

                    double deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1);
                    double volumetricDefRate = itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE);

                    // Diagonal compressibility term: dP/K balanced against the
                    // volumetric deformation rate
                    LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff;
                    RHS_Contribution[0] += (-deltaPressure / volumetricCoeff + volumetricDefRate) * nodalVolume;

                    // Stabilization for fluids and nearly-incompressible solids
                    bool stabilizationNeeded = false;
                    if ((itNode->Is(FLUID) || (itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(POISSON_RATIO) > 0.49999)))
                    {
                        stabilizationNeeded = true;
                    }
                    else
                    {
                        // No stabilization: only the equation ids are needed
                        for (unsigned int i = 0; i < neighSize; i++)
                        {
                            unsigned int idNode = nodalSFDneighboursId[i];
                            EquationId[i] = rModelPart.Nodes()[idNode].GetDof(PRESSURE, xpos).EquationId();
                        }
                    }

                    if (stabilizationNeeded == true)
                    {
                        /* Vector& rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); */
                        unsigned int firstRow = 0;
                        unsigned int firstCol = 0;
                        double meanMeshSize = itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE);
                        double characteristicLength = 2.0 * meanMeshSize;
                        /* double nodalFreesurfaceArea=itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA); */
                        double density = itNode->FastGetSolutionStepValue(DENSITY);
                        /* double tauStab=1.0/(8.0*deviatoricCoeff/(meanMeshSize*meanMeshSize)+2.0*density/timeInterval); */

                        // Velocity magnitude enters the stabilization parameter
                        double nodalVelocity = 0;
                        if (dimension == 2)
                        {
                            nodalVelocity = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) +
                                                 itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y));
                        }
                        else if (dimension == 3)
                        {
                            nodalVelocity = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) +
                                                 itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y) +
                                                 itNode->FastGetSolutionStepValue(VELOCITY_Z) * itNode->FastGetSolutionStepValue(VELOCITY_Z));
                        }

                        double tauStab = 1.0 * (characteristicLength * characteristicLength * timeInterval) / (density * nodalVelocity * timeInterval * characteristicLength + density * characteristicLength * characteristicLength + 8.0 * deviatoricCoeff * timeInterval);
                        /* tauStab*=10.0; */
                        /* tauStab=0.0000001; */
                        /* tauStab=100.0; */

                        // Dynamic pressure stabilization terms
                        LHS_Contribution(0, 0) += +nodalVolume * tauStab * density / (volumetricCoeff * timeInterval);
                        RHS_Contribution[0] += -nodalVolume * tauStab * density / (volumetricCoeff * timeInterval) * (deltaPressure - itNode->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) * timeInterval);

                        if (itNode->Is(FREE_SURFACE))
                        {
                            /* LHS_Contribution(0,0) += + 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize; */
                            /* RHS_Contribution[0] += - 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize * itNode->FastGetSolutionStepValue(PRESSURE,0); */
                            /* double boundLHScontribution=4.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize); */
                            /* std::cout<<"boundLHScontribution "<<boundLHScontribution<<std::endl; */

                            /* if(itNode->IsNot(RIGID)){ */
                            LHS_Contribution(0, 0) += +4.0 * 2.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize);
                            RHS_Contribution[0] += -4.0 * 2.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE, 0);
                            /* } */
                            /* else { */
                            /*     LHS_Contribution(0,0) += + 4.0/3.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize); */
                            /*     RHS_Contribution[0] += - 4.0/3.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE,0); */
                            /* } */

                            const array_1d<double, 3> &Normal = itNode->FastGetSolutionStepValue(NORMAL);
                            Vector &SpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
                            array_1d<double, 3> nodalAcceleration = 0.5 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 1);
                            /* nodalAcceleration= (itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval; */

                            double nodalNormalAcceleration = 0;
                            double nodalNormalProjDefRate = 0;
                            if (dimension == 2)
                            {
                                // n^T . D . n with Voigt ordering [Dxx, Dyy, Dxy]
                                nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + 2 * Normal[0] * SpatialDefRate[2] * Normal[1];
                                /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X,1) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1); */
                                /* nodalNormalAcceleration=(itNode->FastGetSolutionStepValue(VELOCITY_X,0)-itNode->FastGetSolutionStepValue(VELOCITY_X,1))*Normal[0]/timeInterval + */
                                /*     (itNode->FastGetSolutionStepValue(VELOCITY_Y,0)-itNode->FastGetSolutionStepValue(VELOCITY_Y,1))*Normal[1]/timeInterval; */
                                nodalNormalAcceleration = Normal[0] * nodalAcceleration[0] + Normal[1] * nodalAcceleration[1];
                            }
                            else if (dimension == 3)
                            {
                                nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + Normal[2] * SpatialDefRate[2] * Normal[2] +
                                                         2 * Normal[0] * SpatialDefRate[3] * Normal[1] + 2 * Normal[0] * SpatialDefRate[4] * Normal[2] + 2 * Normal[1] * SpatialDefRate[5] * Normal[2];
                                /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y) + Normal[2]*itNode->FastGetSolutionStepValue(ACCELERATION_Z); */
                                /* nodalNormalAcceleration=Normal[0]*nodalAcceleration[0] + Normal[1]*nodalAcceleration[1] + Normal[2]*nodalAcceleration[2]; */
                                // NOTE(review): 3D leaves nodalNormalAcceleration at 0
                                // (the assignment above is commented out) — confirm intended
                            }
                            /* RHS_Contribution[0] +=  tauStab * (density*nodalNormalAcceleration - 4.0*deviatoricCoeff*nodalNormalProjDefRate/meanMeshSize) * nodalFreesurfaceArea; */
                            double accelerationContribution = 2.0 * density * nodalNormalAcceleration / meanMeshSize;
                            double deviatoricContribution = 8.0 * deviatoricCoeff * nodalNormalProjDefRate / (meanMeshSize * meanMeshSize);

                            /* std::cout<<"nodalNormalAcceleration= "<<nodalNormalAcceleration<<std::endl; */
                            /* std::cout<<"nodalNormalProjDefRate= "<<nodalNormalProjDefRate<<std::endl; */
                            /* std::cout<<"meanMeshSize "<<meanMeshSize<<std::endl; */
                            /* accelerationContribution=0; */
                            /* deviatoricContribution=0; */
                            /* if(itNode->IsNot(RIGID)){ */
                            RHS_Contribution[0] += 2.0 * tauStab * (accelerationContribution + deviatoricContribution) * nodalVolume;
                            /* }else{ */
                            /*     RHS_Contribution[0] += 1.0/3.0* tauStab * (accelerationContribution - deviatoricContribution) * nodalVolume; */
                            /* } */
                        }

                        // Laplacian stabilization: loop neighbour pairs, reading
                        // the shape-function derivatives packed per-node
                        // (stride = dimension) in NODAL_SFD_NEIGHBOURS
                        for (unsigned int i = 0; i < neighSize; i++)
                        {
                            unsigned int idNode = nodalSFDneighboursId[i];
                            EquationId[i] = rModelPart.Nodes()[idNode].GetDof(PRESSURE, xpos).EquationId();

                            double Density = rModelPart.Nodes()[idNode].FastGetSolutionStepValue(DENSITY);
                            array_1d<double, 3> &VolumeAcceleration = rModelPart.Nodes()[idNode].FastGetSolutionStepValue(VOLUME_ACCELERATION);

                            double dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol];
                            double dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1];
                            double dNdZi = 0;

                            // Body-force (volume acceleration) stabilization term
                            if (dimension == 2)
                            {
                                RHS_Contribution[i] += -tauStab * Density * (dNdXi * VolumeAcceleration[0] + dNdYi * VolumeAcceleration[1]) * nodalVolume;
                            }
                            else if (dimension == 3)
                            {
                                dNdZi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 2];
                                RHS_Contribution[i] += -tauStab * Density * (dNdXi * VolumeAcceleration[0] + dNdYi * VolumeAcceleration[1] + dNdZi * VolumeAcceleration[2]) * nodalVolume;
                            }

                            firstRow = 0;

                            for (unsigned int j = 0; j < neighSize; j++)
                            {
                                unsigned int idNodeJ = nodalSFDneighboursId[j];

                                double dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow];
                                double dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1];
                                if (dimension == 2)
                                {
                                    ////////////////// Laplacian term for LHS
                                    LHS_Contribution(i, j) += +tauStab * (dNdXi * dNdXj + dNdYi * dNdYj) * nodalVolume;

                                    ////////////////// Laplacian term L_ij*P_j for RHS
                                    RHS_Contribution[i] += -tauStab * (dNdXi * dNdXj + dNdYi * dNdYj) * nodalVolume * rModelPart.Nodes()[idNodeJ].FastGetSolutionStepValue(PRESSURE, 0);
                                }
                                else if (dimension == 3)
                                {
                                    double dNdZj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 2];

                                    ////////////////// Laplacian term for LHS
                                    LHS_Contribution(i, j) += +tauStab * (dNdXi * dNdXj + dNdYi * dNdYj + dNdZi * dNdZj) * nodalVolume;

                                    ////////////////// Laplacian term L_ij*P_j for RHS
                                    RHS_Contribution[i] += -tauStab * (dNdXi * dNdXj + dNdYi * dNdYj + dNdZi * dNdZj) * nodalVolume * rModelPart.Nodes()[idNodeJ].FastGetSolutionStepValue(PRESSURE, 0);
                                }
                                /* std::cout << "dNdXi= " <<dNdXi<< "dNdYi= " <<dNdYi<< "dNdYj= " <<dNdYj<< "dNdXj= " <<dNdXj<< std::endl; */
                                firstRow += dimension;
                            }
                            firstCol += dimension;
                        }
                    }

                    //assemble the nodal contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
                    Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
                    Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
                    /* AssembleLHS(A, LHS_Contribution, EquationId); */
                    /* AssembleRHS(b, RHS_Contribution, EquationId); */
                }
            }
        }

        /* /\* std::cout<<".... Build Nodally Continuity Equation DONE!"<<std::endl; *\/ */
        /* const double stop_build = OpenMPUtils::GetCurrentTime(); */
        /* KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl; */
        /* //for (int i = 0; i < A_size; i++) */
        /* //    omp_destroy_lock(&lock_array[i]); */
        /* KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl; */

        KRATOS_CATCH("")
    }
/**
* @brief Function to perform the building of the LHS
 * @details Depending on the implementation chosen the size of the matrix could
* be equal to the total number of Dofs or to the number of unrestrained dofs
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A) override
{
KRATOS_TRY
// Reuse the full Build: assemble LHS and RHS together, discarding the
// RHS into a throw-away vector sized like the system.
TSystemVectorType dummy_rhs(A.size1(), 0.0);
this->Build(pScheme, rModelPart, A, dummy_rhs);
KRATOS_CATCH("")
}
/**
* @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
* and "N" is the total number of degrees of freedom involved.
* @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
* degrees of freedom (but keeping the columns!!)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS_CompleteOnFreeRows(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A) override
{
KRATOS_TRY
// NOTE(review): identical to BuildLHS — the rectangular n*N variant is not
// implemented separately here; the full Build is reused with a dummy RHS.
TSystemVectorType dummy_rhs(A.size1(), 0.0);
this->Build(pScheme, rModelPart, A, dummy_rhs);
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void SystemSolve(
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
KRATOS_TRY
// Guard against an empty/zero RHS: calling the solver would be wasted
// work (and some solvers dislike a zero right-hand side).
const double rhs_norm = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;
if (rhs_norm != 0.00)
{
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
{
// Nothing to solve: the update is identically zero
TSparseSpace::SetToZero(Dx);
}
// Echo solver information at high verbosity
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void SystemSolveWithPhysics(
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b,
ModelPart &rModelPart)
{
KRATOS_TRY
// Measure the RHS first: a zero RHS means a zero update, no solve needed.
const double rhs_norm = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;
if (rhs_norm == 0.00)
{
TSparseSpace::SetToZero(Dx);
KRATOS_WARNING("NodalResidualBasedBlockBuilderAndSolver") << "ATTENTION! setting the RHS to zero!" << std::endl;
}
else
{
// Some solvers (e.g. multigrid/deflation variants) request extra
// physical data from the model part before solving
if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
// Echo solver information at high verbosity
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safer function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
KRATOS_TRY
std::cout << "CONTINUITY EQ: buildAndSolve " << std::endl;
// 1) Assemble LHS and RHS with the nodal assembly procedure
Timer::Start("Build");
BuildNodally(pScheme, rModelPart, A, b);
Timer::Stop("Build");
// 2) Impose fixed dofs by row/column elimination on the assembled system
ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system"
<< "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
// 3) Solve the linear system, timing the call
const double start_solve = OpenMPUtils::GetCurrentTime();
Timer::Start("Solve");
SystemSolveWithPhysics(A, Dx, b, rModelPart);
Timer::Stop("Solve");
const double stop_solve = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system"
<< "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
KRATOS_CATCH("")
}
/**
 * @brief Corresponds to the previous function, but the System's matrix is considered already built and only the RHS is built again
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildRHSAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
KRATOS_TRY
// Rebuild only the right-hand side (the LHS is assumed up to date),
// then solve with the existing matrix.
BuildRHS(pScheme, rModelPart, b);
SystemSolve(A, Dx, b);
KRATOS_CATCH("")
}
/**
* @brief Function to perform the build of the RHS.
* @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void BuildRHS(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemVectorType &b) override
{
KRATOS_TRY
// Assemble the raw residual first...
BuildRHSNoDirichlet(pScheme, rModelPart, b);
// ...then blank out the entries belonging to fixed (Dirichlet) dofs.
const int ndofs = static_cast<int>(BaseType::mDofSet.size());
//NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
#pragma omp parallel for firstprivate(ndofs)
for (int k = 0; k < ndofs; k++)
{
const auto it_dof = BaseType::mDofSet.begin() + k;
if (it_dof->IsFixed())
b[it_dof->EquationId()] = 0.0f;
}
KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart) override
{
KRATOS_TRY;
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType &pElements = rModelPart.Elements();
const int nelements = static_cast<int>(pElements.size());
Element::DofsVectorType ElementalDofList;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
unsigned int nthreads = OpenMPUtils::GetNumThreads();
// typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
// typedef std::unordered_set < NodeType::DofType::Pointer,
// DofPointerHasher,
// DofPointerComparor,
// allocator_type > set_type;
// Hash-set of dof pointers: one per thread so insertion needs no locking
#ifdef USE_GOOGLE_HASH
typedef google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif
//
std::vector<set_type> dofs_aux_list(nthreads);
// std::vector<allocator_type> allocators(nthreads);
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Number of threads" << nthreads << "\n"
<< std::endl;
for (int i = 0; i < static_cast<int>(nthreads); i++)
{
#ifdef USE_GOOGLE_HASH
dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
// dofs_aux_list[i] = set_type( allocators[i]);
dofs_aux_list[i].reserve(nelements);
#endif
}
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl;
// Each thread collects the dofs of its share of elements/conditions into
// its own set; the sets are merged afterwards.
#pragma omp parallel firstprivate(nelements, ElementalDofList)
{
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < nelements; i++)
{
typename ElementsArrayType::iterator it = pElements.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// gets list of Dof involved on every element
pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing condition loop" << std::endl;
ConditionsArrayType &pConditions = rModelPart.Conditions();
const int nconditions = static_cast<int>(pConditions.size());
#pragma omp for schedule(guided, 512)
for (int i = 0; i < nconditions; i++)
{
typename ConditionsArrayType::iterator it = pConditions.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// gets list of Dof involved on every element
pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
}
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing tree reduction\n"
<< std::endl;
// Here we do a reduction in a tree so to have everything on thread 0
// (set i absorbs set i+new_max at each level; log2(nthreads) levels)
unsigned int old_max = nthreads;
unsigned int new_max = ceil(0.5 * static_cast<double>(old_max));
while (new_max >= 1 && new_max != old_max)
{
if (this->GetEchoLevel() > 2)
{
//just for debugging
std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
for (int i = 0; i < static_cast<int>(new_max); i++)
{
if (i + new_max < old_max)
{
std::cout << i << " - " << i + new_max << std::endl;
}
}
std::cout << "********************" << std::endl;
}
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(new_max); i++)
{
if (i + new_max < old_max)
{
dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
dofs_aux_list[i + new_max].clear();
}
}
old_max = new_max;
new_max = ceil(0.5 * static_cast<double>(old_max));
}
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n"
<< std::endl;
// Copy the merged (unordered) set into a sortable array and store it
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dofs_aux_list[0].size());
for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
{
Doftemp.push_back((*it));
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
//Throws an exception if there are no Degrees Of Freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing lock array" << std::endl;
// (Re)create one OpenMP lock per equation; used by ConstructMatrixStructure
#ifdef _OPENMP
if (mlock_array.size() != 0)
{
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
{
omp_destroy_lock(&mlock_array[i]);
}
}
mlock_array.resize(BaseType::mDofSet.size());
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
{
omp_init_lock(&mlock_array[i]);
}
#endif
KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2)) << "End of setup dof set\n"
<< std::endl;
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is to be done only in debug mode
#ifdef KRATOS_DEBUG
if (BaseType::GetCalculateReactionsFlag())
{
for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
<< "Node : " << dof_iterator->Id() << std::endl
<< "Dof : " << (*dof_iterator) << std::endl
<< "Not possible to calculate reactions." << std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(
ModelPart &rModelPart) override
{
// Block builder: every stored dof receives an equation, so the system
// size equals the dof-set size and equation ids simply follow the
// position of each dof inside the (already sorted) set.
BaseType::mEquationSystemSize = BaseType::mDofSet.size();
int ndofs = static_cast<int>(BaseType::mDofSet.size());
#pragma omp parallel for firstprivate(ndofs)
for (int i = 0; i < ndofs; i++)
{
(BaseType::mDofSet.begin() + i)->SetEquationId(i);
}
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Allocates and sizes the system matrix and vectors.
 * @details On first use the pointers are populated with empty containers;
 * the matrix graph is (re)built when empty or when reshaping is requested.
 * A size change during the simulation is treated as an error.
 * @param pScheme The integration scheme considered
 * @param pA Pointer to the LHS matrix
 * @param pDx Pointer to the unknowns vector
 * @param pb Pointer to the RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType &pA,
TSystemVectorPointerType &pDx,
TSystemVectorPointerType &pb,
ModelPart &rModelPart) override
{
KRATOS_TRY
// Lazily create empty containers on first call (nullptr instead of NULL)
if (pA == nullptr)
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
if (pDx == nullptr)
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
if (pb == nullptr)
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
TSystemMatrixType &A = *pA;
TSystemVectorType &Dx = *pDx;
TSystemVectorType &b = *pb;
// (Re)build the sparsity pattern when the matrix is still empty or a
// reshape was explicitly requested
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag())
{
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
else if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
// A mid-simulation size change is unsupported: fail loudly (the resize
// below is unreachable after KRATOS_ERROR throws)
KRATOS_WATCH(" it should not come here!!!!!!!! ... this is SLOW");
KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permited." << std::endl;
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
// Vectors only need the right size; values are set during the build phase
if (Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize, false);
if (b.size() != BaseType::mEquationSystemSize)
b.resize(BaseType::mEquationSystemSize, false);
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void InitializeSolutionStep(
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
KRATOS_TRY
// No per-step initialization is required by this builder; only trace the call.
std::cout << "Initialize Solution Step in nodal res based " << std::endl;
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void FinalizeSolutionStep(
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
// Intentionally empty: this builder performs no per-step finalization.
}
//**************************************************************************
//**************************************************************************
void CalculateReactions(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
// Reactions are minus the residual: rebuild the RHS without Dirichlet
// treatment and copy the negated entries onto each dof's reaction value.
TSparseSpace::SetToZero(b);
BuildRHSNoDirichlet(pScheme, rModelPart, b);
const int ndofs = static_cast<int>(BaseType::mDofSet.size());
//NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
#pragma omp parallel for firstprivate(ndofs)
for (int k = 0; k < ndofs; k++)
{
auto it_dof = BaseType::mDofSet.begin() + k;
it_dof->GetSolutionStepReactionValue() = -b[it_dof->EquationId()];
}
}
/**
* @brief Applies the dirichlet conditions. This operation may be very heavy or completely
 * inexpensive depending on the implementation chosen and on how the System Matrix is built.
 * @details For an explanation of how it works for a particular implementation the user
 * should refer to the particular Builder And Solver chosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
// scaling_factors[k] == 0 marks a fixed dof, 1 a free one
std::size_t system_size = A.size1();
std::vector<double> scaling_factors(system_size, 0.0f);
const int ndofs = static_cast<int>(BaseType::mDofSet.size());
//NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
#pragma omp parallel for firstprivate(ndofs)
for (int k = 0; k < ndofs; k++)
{
typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
if (dof_iterator->IsFixed())
scaling_factors[k] = 0.0f;
else
scaling_factors[k] = 1.0f;
}
// Raw access to the CSR arrays of the uBLAS compressed matrix
double *Avalues = A.value_data().begin();
std::size_t *Arow_indices = A.index1_data().begin();
std::size_t *Acol_indices = A.index2_data().begin();
//detect if there is a line of all zeros and set the diagonal to a 1 if this happens
#pragma omp parallel for firstprivate(system_size)
for (int k = 0; k < static_cast<int>(system_size); ++k)
{
std::size_t col_begin = Arow_indices[k];
std::size_t col_end = Arow_indices[k + 1];
bool empty = true;
for (std::size_t j = col_begin; j < col_end; ++j)
{
if (Avalues[j] != 0.0)
{
empty = false;
break;
}
}
if (empty == true)
{
// Singular row: pin the unknown to zero (diagonal 1, RHS 0)
A(k, k) = 1.0;
b[k] = 0.0;
}
}
// Row/column elimination for fixed dofs, keeping the diagonal so the
// matrix stays non-singular
#pragma omp parallel for
for (int k = 0; k < static_cast<int>(system_size); ++k)
{
std::size_t col_begin = Arow_indices[k];
std::size_t col_end = Arow_indices[k + 1];
double k_factor = scaling_factors[k];
if (k_factor == 0)
{
// zero out the whole row, except the diagonal
for (std::size_t j = col_begin; j < col_end; ++j)
if (static_cast<int>(Acol_indices[j]) != k)
Avalues[j] = 0.0;
// zero out the RHS
b[k] = 0.0;
}
else
{
// zero out the column which is associated with the zero'ed row
for (std::size_t j = col_begin; j < col_end; ++j)
if (scaling_factors[Acol_indices[j]] == 0)
Avalues[j] = 0.0;
}
}
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
#ifdef _OPENMP
// Destroy every OpenMP lock before releasing the array
for (auto &r_lock : mlock_array)
omp_destroy_lock(&r_lock);
mlock_array.resize(0);
#endif
// Delegate the rest of the cleanup to the base builder-and-solver
BaseType::Clear();
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart &rModelPart) override
{
KRATOS_TRY
// No builder-specific checks are performed; always reports success.
return 0;
KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
#ifdef _OPENMP
std::vector<omp_lock_t> mlock_array;
#endif
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
virtual void ConstructMatrixStructure(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType &A,
ModelPart &rModelPart)
{
std::cout << " ConstructMatrixStructure for Continuity equation " << std::endl;
//filling with zero the matrix (creating the structure)
Timer::Start("MatrixStructure");
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
const std::size_t equation_size = BaseType::mEquationSystemSize;
// One set of column indices per equation (row) of the system
#ifdef USE_GOOGLE_HASH
std::vector<google::dense_hash_set<std::size_t>> indices(equation_size);
const std::size_t empty_key = 2 * equation_size + 10;
#else
std::vector<std::unordered_set<std::size_t>> indices(equation_size);
#endif
#pragma omp parallel for firstprivate(equation_size)
for (int iii = 0; iii < static_cast<int>(equation_size); iii++)
{
#ifdef USE_GOOGLE_HASH
indices[iii].set_empty_key(empty_key);
#else
indices[iii].reserve(40);
#endif
}
Element::EquationIdVectorType EquationId;
/* #pragma omp parallel */
/* { */
// Nodal graph: each node couples with its SFD neighbour nodes through
// the PRESSURE dof (one equation per node)
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
/* VectorType nodalSFDneighboursId=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); */
/* const unsigned int neighSize = nodalSFDneighboursId.size(); */
const unsigned int neighSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER).size();
if (EquationId.size() != neighSize)
EquationId.resize(neighSize, false);
/* const unsigned int xpos = itNode->GetDofPosition(VELOCITY_X); */
const unsigned int xpos = itNode->GetDofPosition(PRESSURE);
// Gather the equation ids of all neighbour PRESSURE dofs
for (unsigned int i = 0; i < neighSize; i++)
{
/* unsigned int idNode=nodalSFDneighboursId[i]; */
unsigned int idNode = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER)[i];
EquationId[i] = rModelPart.Nodes()[idNode].GetDof(PRESSURE, xpos).EquationId();
}
// Insert the dense clique EquationId x EquationId into the graph,
// one locked row at a time
for (std::size_t i = 0; i < EquationId.size(); i++)
{
if (EquationId[i] < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&mlock_array[EquationId[i]]);
#endif
auto &row_indices = indices[EquationId[i]];
for (auto it = EquationId.begin(); it != EquationId.end(); it++)
{
if (*it < BaseType::mEquationSystemSize)
row_indices.insert(*it);
}
#ifdef _OPENMP
omp_unset_lock(&mlock_array[EquationId[i]]);
#endif
}
}
/* for (std::size_t i = 0; i < EquationId.size(); i++) */
/* { */
/* #ifdef _OPENMP */
/* omp_set_lock(&mlock_array[EquationId[i]]); */
/* #endif */
/* auto& row_indices = indices[EquationId[i]]; */
/* row_indices.insert(EquationId.begin(), EquationId.end()); */
/* #ifdef _OPENMP */
/* omp_unset_lock(&mlock_array[EquationId[i]]); */
/* #endif */
/* } */
}
/* } */
// Add the condition contributions to the graph
Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel for firstprivate(nconditions, ids)
for (int iii = 0; iii < nconditions; iii++)
{
typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
pScheme->Condition_EquationId(*(i_condition.base()), ids, CurrentProcessInfo);
for (std::size_t i = 0; i < ids.size(); i++)
{
#ifdef _OPENMP
omp_set_lock(&mlock_array[ids[i]]);
#endif
auto &row_indices = indices[ids[i]];
row_indices.insert(ids.begin(), ids.end());
#ifdef _OPENMP
omp_unset_lock(&mlock_array[ids[i]]);
#endif
}
}
//count the row sizes
unsigned int nnz = 0;
for (unsigned int i = 0; i < indices.size(); i++)
nnz += indices[i].size();
// Allocate the CSR matrix and fill its index arrays by hand
A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);
double *Avalues = A.value_data().begin();
std::size_t *Arow_indices = A.index1_data().begin();
std::size_t *Acol_indices = A.index2_data().begin();
//filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Arow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(A.size1()); i++)
Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();
// Column indices and zero values can be written row-by-row in parallel
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(A.size1()); i++)
{
const unsigned int row_begin = Arow_indices[i];
const unsigned int row_end = Arow_indices[i + 1];
unsigned int k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); it++)
{
Acol_indices[k] = *it;
Avalues[k] = 0.0;
k++;
}
indices[i].clear(); //deallocating the memory
// CSR requires sorted column indices within each row
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
}
A.set_filled(indices.size() + 1, nnz);
Timer::Stop("MatrixStructure");
/* std::cout<<"..... ConstructMatrixStructure for Continuity equation DONE"<<std::endl; */
}
/* virtual void ConstructMatrixStructure( */
/* typename TSchemeType::Pointer pScheme, */
/* TSystemMatrixType& A, */
/* ModelPart& rModelPart) */
/* { */
/* //filling with zero the matrix (creating the structure) */
/* Timer::Start("MatrixStructure"); */
/* // Getting the elements from the model */
/* const int nelements = static_cast<int>(rModelPart.Elements().size()); */
/* // Getting the array of the conditions */
/* const int nconditions = static_cast<int>(rModelPart.Conditions().size()); */
/* ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); */
/* ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin(); */
/* ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); */
/* const std::size_t equation_size = BaseType::mEquationSystemSize; */
/* #ifdef USE_GOOGLE_HASH */
/* std::vector<google::dense_hash_set<std::size_t> > indices(equation_size); */
/* const std::size_t empty_key = 2*equation_size + 10; */
/* #else */
/* std::vector<std::unordered_set<std::size_t> > indices(equation_size); */
/* #endif */
/* #pragma omp parallel for firstprivate(equation_size) */
/* for (int iii = 0; iii < static_cast<int>(equation_size); iii++) */
/* { */
/* #ifdef USE_GOOGLE_HASH */
/* indices[iii].set_empty_key(empty_key); */
/* #else */
/* indices[iii].reserve(40); */
/* #endif */
/* } */
/* Element::EquationIdVectorType ids(3, 0); */
/* #pragma omp parallel for firstprivate(nelements, ids) */
/* for(int iii=0; iii<nelements; iii++) */
/* { */
/* typename ElementsContainerType::iterator i_element = el_begin + iii; */
/* pScheme->EquationId( *(i_element.base()) , ids, CurrentProcessInfo); */
/* for (std::size_t i = 0; i < ids.size(); i++) */
/* { */
/* #ifdef _OPENMP */
/* omp_set_lock(&mlock_array[ids[i]]); */
/* #endif */
/* auto& row_indices = indices[ids[i]]; */
/* row_indices.insert(ids.begin(), ids.end()); */
/* #ifdef _OPENMP */
/* omp_unset_lock(&mlock_array[ids[i]]); */
/* #endif */
/* } */
/* } */
/* #pragma omp parallel for firstprivate(nconditions, ids) */
/* for (int iii = 0; iii<nconditions; iii++) */
/* { */
/* typename ConditionsArrayType::iterator i_condition = cond_begin + iii; */
/* pScheme->Condition_EquationId( *(i_condition.base()), ids, CurrentProcessInfo); */
/* for (std::size_t i = 0; i < ids.size(); i++) */
/* { */
/* #ifdef _OPENMP */
/* omp_set_lock(&mlock_array[ids[i]]); */
/* #endif */
/* auto& row_indices = indices[ids[i]]; */
/* row_indices.insert(ids.begin(), ids.end()); */
/* #ifdef _OPENMP */
/* omp_unset_lock(&mlock_array[ids[i]]); */
/* #endif */
/* } */
/* } */
/* //count the row sizes */
/* unsigned int nnz = 0; */
/* for (unsigned int i = 0; i < indices.size(); i++) */
/* nnz += indices[i].size(); */
/* A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz); */
/* double* Avalues = A.value_data().begin(); */
/* std::size_t* Arow_indices = A.index1_data().begin(); */
/* std::size_t* Acol_indices = A.index2_data().begin(); */
/* //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! */
/* Arow_indices[0] = 0; */
/* for (int i = 0; i < static_cast<int>(A.size1()); i++) */
/* Arow_indices[i+1] = Arow_indices[i] + indices[i].size(); */
/* #pragma omp parallel for */
/* for (int i = 0; i < static_cast<int>(A.size1()); i++) */
/* { */
/* const unsigned int row_begin = Arow_indices[i]; */
/* const unsigned int row_end = Arow_indices[i+1]; */
/* unsigned int k = row_begin; */
/* for (auto it = indices[i].begin(); it != indices[i].end(); it++) */
/* { */
/* Acol_indices[k] = *it; */
/* Avalues[k] = 0.0; */
/* k++; */
/* } */
/* indices[i].clear(); //deallocating the memory */
/* std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); */
/* } */
/* A.set_filled(indices.size()+1, nnz); */
/* Timer::Stop("MatrixStructure"); */
/* } */
//**************************************************************************
void AssembleLHS(
TSystemMatrixType &A,
LocalSystemMatrixType &LHS_Contribution,
Element::EquationIdVectorType &EquationId)
{
// Scatter the square local matrix into the global one.
// NOTE: not thread safe — use Assemble() for parallel assembly.
const unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i = 0; i < local_size; i++)
{
const unsigned int row = EquationId[i];
for (unsigned int j = 0; j < local_size; j++)
{
A(row, EquationId[j]) += LHS_Contribution(i, j);
}
}
}
// Thread-safe scatter of a local LHS/RHS pair into the global system.
// With USE_LOCKS_IN_ASSEMBLY each destination row is protected by an OpenMP
// lock; otherwise the RHS update uses an atomic and the row update relies on
// AssembleRowContribution (defined in the base/elsewhere in this file).
void Assemble(
TSystemMatrixType &A,
TSystemVectorType &b,
const LocalSystemMatrixType &LHS_Contribution,
const LocalSystemVectorType &RHS_Contribution,
Element::EquationIdVectorType &EquationId
#ifdef USE_LOCKS_IN_ASSEMBLY
,
std::vector<omp_lock_t> &lock_array
#endif
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
#ifdef USE_LOCKS_IN_ASSEMBLY
// Hold the row lock for both the RHS and the LHS row update
omp_set_lock(&lock_array[i_global]);
b[i_global] += RHS_Contribution(i_local);
#else
double &r_a = b[i_global];
const double &v_a = RHS_Contribution(i_local);
#pragma omp atomic
r_a += v_a;
#endif
AssembleRowContribution(A, LHS_Contribution, i_global, i_local, EquationId);
#ifdef USE_LOCKS_IN_ASSEMBLY
omp_unset_lock(&lock_array[i_global]);
#endif
//note that computation of reactions is not performed here!
}
}
//**************************************************************************
void AssembleRHS(
TSystemVectorType &b,
LocalSystemVectorType &RHS_Contribution,
Element::EquationIdVectorType &EquationId)
{
// Thread-safe scatter of the local RHS into the global vector: each
// entry is accumulated under an OpenMP atomic update.
const unsigned int local_size = RHS_Contribution.size();
for (unsigned int i = 0; i < local_size; i++)
{
double &global_entry = b[EquationId[i]];
const double &local_entry = RHS_Contribution[i];
#pragma omp atomic
global_entry += local_entry;
}
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// Appends `candidate` to `v` only if it is not already present, preserving
// the vector's insertion order. Linear search is acceptable here because the
// vectors handled are short per-node dof lists.
inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate)
{
    bool already_present = false;
    for (std::vector<std::size_t>::iterator it = v.begin(); it != v.end(); ++it)
    {
        if (*it == candidate)
        {
            already_present = true;
            break;
        }
    }
    if (!already_present)
        v.push_back(candidate);
}
// Builds the global RHS vector b from all active elements and conditions,
// without applying Dirichlet conditions. Element/condition loops run in
// parallel; the actual accumulation into b is made thread-safe by
// AssembleRHS (atomic adds).
//
// BUG FIX: the previous version called LHS_Contribution.resize(0, 0, false)
// INSIDE the omp parallel region on a matrix that was shared (it was not in
// the firstprivate list), so every thread resized the same object
// concurrently — a data race. The matrix is never used in this routine, so
// the declaration and the racy resize have been removed entirely.
void BuildRHSNoDirichlet(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemVectorType &b)
{
    KRATOS_TRY
    //Getting the Elements
    ElementsArrayType &pElements = rModelPart.Elements();
    //getting the array of the conditions
    ConditionsArrayType &ConditionsArray = rModelPart.Conditions();
    ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
    //contributions to the system
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
    //vector containing the localization in the system of the different
    //terms
    Element::EquationIdVectorType EquationId;
    // assemble all elements
    const int nelements = static_cast<int>(pElements.size());
    #pragma omp parallel firstprivate(nelements, RHS_Contribution, EquationId)
    {
        #pragma omp for schedule(guided, 512) nowait
        for (int i = 0; i < nelements; i++)
        {
            typename ElementsArrayType::iterator it = pElements.begin() + i;
            //detect if the element is active or not. If the user did not make any choice the element
            //is active by default
            bool element_is_active = true;
            if ((it)->IsDefined(ACTIVE))
                element_is_active = (it)->Is(ACTIVE);
            if (element_is_active)
            {
                //calculate elemental Right Hand Side Contribution
                pScheme->Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo);
                //assemble the elemental contribution
                AssembleRHS(b, RHS_Contribution, EquationId);
            }
        }
        // RHS_Contribution is firstprivate, so this resize is thread-local.
        RHS_Contribution.resize(0, false);
        // assemble all conditions
        const int nconditions = static_cast<int>(ConditionsArray.size());
        #pragma omp for schedule(guided, 512)
        for (int i = 0; i < nconditions; i++)
        {
            auto it = ConditionsArray.begin() + i;
            //detect if the condition is active or not. If the user did not make any choice the condition
            //is active by default
            bool condition_is_active = true;
            if ((it)->IsDefined(ACTIVE))
                condition_is_active = (it)->Is(ACTIVE);
            if (condition_is_active)
            {
                //calculate condition contribution
                pScheme->Condition_Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo);
                //assemble the condition contribution
                AssembleRHS(b, RHS_Contribution, EquationId);
            }
        }
    }
    KRATOS_CATCH("")
}
//******************************************************************************************
//******************************************************************************************
//******************************************************************************************
//******************************************************************************************
// Splits the index range [0, number_of_rows) into number_of_threads
// contiguous chunks. Any remainder of the integer division is absorbed by
// the last chunk, whose upper bound is pinned to number_of_rows.
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int> &partitions)
{
    partitions.resize(number_of_threads + 1);
    const int chunk_size = number_of_rows / number_of_threads;
    for (unsigned int t = 0; t < number_of_threads; t++)
        partitions[t] = t * chunk_size;
    partitions[number_of_threads] = number_of_rows;
}
// Adds row i_local of the local matrix Alocal into row i of the global CSR
// matrix A. No allocation happens here: the sparsity pattern of A is assumed
// to already contain an entry for every id in EquationId (otherwise the
// linear searches below run off the row).
// Successive columns are located by scanning forward/backward from the
// previously found position, exploiting the locality of consecutive dof ids.
inline void AssembleRowContribution(TSystemMatrixType &A, const Matrix &Alocal, const unsigned int i, const unsigned int i_local, Element::EquationIdVectorType &EquationId)
{
double *values_vector = A.value_data().begin(); // CSR nonzero values
std::size_t *index1_vector = A.index1_data().begin(); // CSR row pointers
std::size_t *index2_vector = A.index2_data().begin(); // CSR column indices
size_t left_limit = index1_vector[i]; // first entry of row i
// size_t right_limit = index1_vector[i+1];
//find the first entry
size_t last_pos = ForwardFind(EquationId[0], left_limit, index2_vector);
size_t last_found = EquationId[0];
#ifndef USE_LOCKS_IN_ASSEMBLY
// Lock-free build: accumulate atomically into the CSR value array.
double &r_a = values_vector[last_pos];
const double &v_a = Alocal(i_local, 0);
#pragma omp atomic
r_a += v_a;
#else
// The caller holds the per-row lock, so a plain add is safe here.
values_vector[last_pos] += Alocal(i_local, 0);
#endif
//now find all of the other entries
size_t pos = 0;
for (unsigned int j = 1; j < EquationId.size(); j++)
{
unsigned int id_to_find = EquationId[j];
// Start the search from the last hit; column ids of a CSR row are sorted,
// so the target lies forward or backward of it depending on the ordering.
if (id_to_find > last_found)
pos = ForwardFind(id_to_find, last_pos + 1, index2_vector);
else
pos = BackwardFind(id_to_find, last_pos - 1, index2_vector);
#ifndef USE_LOCKS_IN_ASSEMBLY
double &r = values_vector[pos];
const double &v = Alocal(i_local, j);
#pragma omp atomic
r += v;
#else
values_vector[pos] += Alocal(i_local, j);
#endif
last_found = id_to_find;
last_pos = pos;
}
}
// Scans forward from `start` in the CSR column-index array until
// `id_to_find` is found and returns its position.
// Precondition: the id must exist at or after `start`.
inline unsigned int ForwardFind(const unsigned int id_to_find,
                                const unsigned int start,
                                const size_t *index_vector)
{
    unsigned int position;
    for (position = start; index_vector[position] != id_to_find; ++position)
    {
        // keep scanning
    }
    return position;
}
// Scans backward from `start` in the CSR column-index array until
// `id_to_find` is found and returns its position.
// Precondition: the id must exist at or before `start`.
inline unsigned int BackwardFind(const unsigned int id_to_find,
                                 const unsigned int start,
                                 const size_t *index_vector)
{
    unsigned int position;
    for (position = start; index_vector[position] != id_to_find; --position)
    {
        // keep scanning
    }
    return position;
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class NodalResidualBasedBlockBuilderAndSolver */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_NODAL_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
/**
* @file ofmo-scf.c
*
* SCF計算プログラムのサンプルコード。
* 通信回数を減らすために、MPI_Allreduce + MPI_Bcastでの
* 実装を行った。
*
* */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "ofmo-def.h"
#include "ofmo-mat.h"
#include "ofmo-integ.h"
#include "ofmo-diis.h"
#include "ofmo-prof.h"
#include "ofmo-twoint.h"
#include "ofmo-scf.h"
#ifdef USE_MPI
#include <mpi.h>
#else
#include "mpi-dummy.h"
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#include "omp-dummy.h"
#endif
#define ZERO 0.e0
#define TWO 2.e0
#define HALF .5e0
#define EPS_PS4 1.e-30
#define EPS_ERI 1.e-15
#define EPS_SCH 1.e-12
#ifdef USE_CUDA
#include "cuda/cudalib.h"
#include "cuda/cuda-integ.h"
#endif
/* SCF convergence-criterion type currently in effect (see ofmo_scf_rhf). */
static enum ofmo_scf_convType convType = scf;
#ifndef MINIMUM_SCF
#define MINIMUM_SCF 0
#endif
/* Minimum number of SCF cycles performed before the convergence test. */
static int minscfcyc = MINIMUM_SCF;
/* Selects the convergence criterion used by ofmo_scf_rhf(). */
void ofmo_scf_set_convType(enum ofmo_scf_convType type)
{
convType = type;
}
/* Sets the minimum number of SCF iterations performed before convergence
 * may be declared. */
void ofmo_scf_set_minimum_scf(int mincyc)
{
minscfcyc = mincyc;
}
// debug
//#include "ofmo-twoint.h"
// debug
/* Debug helper: returns the sum of the entries of a[] whose magnitude is
 * below 100, so that a few huge (diverged) values do not dominate the
 * checksum. */
static double check_sum2( const int n, const double a[] ) {
    const double limit = 1.e2;
    double total = 0.e0;
    int k;
    for ( k = 0; k < n; k++ ) {
        if ( fabs(a[k]) < limit ) {
            total += a[k];
        }
    }
    return total;
}
/* Work buffers shared by the SCF routines below. They are (re)allocated by
 * ofmo_scf_init() and released by ofmo_scf_dealloc(). */
static int RESERVED_NAO = 0; /* number of AOs the buffers are sized for */
static double *_U_ = NULL; /* Cholesky factor of the overlap matrix (square) */
static double *_Dold_ = NULL; /* density matrix of the previous SCF step (packed U) */
static double *_G_ = NULL; /* two-electron Hamiltonian matrix (packed U) */
static double *_F_ = NULL; /* Fock matrix (packed U) */
static double *_dD_ = NULL; /* density difference vs previous step (packed U) */
static double *_dG_ = NULL; /* G built from dD, for incremental Fock (packed U) */
static double *_T_ = NULL; /* scratch square matrix (e.g. for MPI_Allreduce) */
static double *_C_ = NULL; /* MO coefficient matrix (square) */
static double *_ev_ = NULL; /* MO energies / eigenvalues (vector) */
static int *_iv_ = NULL; /* integer work vector (eigenvalue solver etc.) */
static double *_Foda_ = NULL; /* ODA-averaged Fock matrix (packed U) */
static double *_Doda_ = NULL; /* ODA density-difference accumulator (packed U) */
/** Releases the memory used internally by the SCF routines.
 *
 * Registered with atexit() from ofmo_scf_init(), so it runs automatically
 * at program termination; the user does not need to call it explicitly.
 *
 * */
static void ofmo_scf_dealloc() {
Free( _U_ );
Free( _Dold_ );
Free( _G_ );
Free( _dD_ );
Free( _dG_ );
Free( _T_ );
Free( _F_ );
Free( _Foda_ );
Free( _Doda_ );
Free( _C_ );
Free( _ev_ );
Free( _iv_ );
RESERVED_NAO = 0;
_U_ = NULL;
_Dold_ = NULL;
_G_ = NULL;
_dD_ = NULL;
_dG_ = NULL;
_T_ = NULL;
_F_ = NULL;
_Foda_ = NULL;
_Doda_ = NULL;
_C_ = NULL;
_ev_ = NULL;
_iv_ = NULL;
}
/** Allocates (or enlarges) the work arrays used inside the SCF routines.
 *
 * The following arrays are allocated:
 *
 * @arg \c _U_ Cholesky factor of the overlap matrix (square)
 * @arg \c _Dold_ density matrix of the previous SCF step (packed U)
 * @arg \c _G_ two-electron Hamiltonian matrix (packed U)
 * @arg \c _dD_ density-matrix difference vs the previous step (packed U)
 * @arg \c _dG_ two-electron Hamiltonian built from the density difference
 * (packed U); used for incremental Fock-matrix construction
 * @arg \c _T_ scratch square matrix (e.g. for \c MPI_Allreduce )
 * @arg _C_ MO coefficient matrix (square)
 * @arg _ev_ MO energies (eigenvalues, vector)
 * @arg _iv_ integer work vector for the eigenvalue solver etc.
 *
 * @param[in] nao number of AOs
 *
 * @retval 0 normal termination (allocation succeeded)
 * @retval -1 abnormal termination (allocation failed)
 *
 * @ingroup ofmo-rhf
 * */
int ofmo_scf_init( const int nao ) {
    static int called = false;
    if ( nao > RESERVED_NAO ) {
        int nao2, nnao;
        nao2 = nao * (nao+1) / 2;
        nnao = nao * nao;
        ofmo_scf_dealloc();
        _U_ = (double*)malloc( sizeof(double) * nnao );
        _Dold_ = (double*)malloc( sizeof(double) * nao2 );
        _G_ = (double*)malloc( sizeof(double) * nao2 );
        _dD_ = (double*)malloc( sizeof(double) * nao2 );
        _dG_ = (double*)malloc( sizeof(double) * nao2 );
        _T_ = (double*)malloc( sizeof(double) * nnao );
        _F_ = (double*)malloc( sizeof(double) * nao2 );
        _Foda_ = (double*)malloc( sizeof(double) * nao2 );
        _Doda_ = (double*)malloc( sizeof(double) * nao2 );
        _C_ = (double*)malloc( sizeof(double) * nnao );
        _ev_ = (double*)malloc( sizeof(double) * nao );
        _iv_ = (int*)malloc( sizeof(int) * nao );
        /* BUG FIX: all twelve allocations are now checked. Previously
         * _F_, _Foda_, _Doda_, _C_, _ev_ and _iv_ were never tested, so an
         * allocation failure there led to a NULL dereference later on. */
        if ( _U_ == NULL || _Dold_ == NULL || _G_ == NULL ||
             _dD_ == NULL || _dG_ == NULL || _T_ == NULL ||
             _F_ == NULL || _Foda_ == NULL || _Doda_ == NULL ||
             _C_ == NULL || _ev_ == NULL || _iv_ == NULL ) {
            dbg("ERROR : Failure in memory allocation\n");
            ofmo_scf_dealloc();
            return -1;
        }
        RESERVED_NAO = nao;
        // information of memory size
        if ( fp_prof != NULL ) {
            double dsize;
            dsize = (double)(nao2*7 + nnao*3 + nao )*sizeof(double);
            dsize += (double)( nao * sizeof(int) );
            dsize /= (double)(1024*1024); // MB
            fprintf(fp_prof,
                    "== allocd memory size in ofmo-scf.c = %10.3f MB\n",
                    dsize );
        }
        /* Register the cleanup handler exactly once. */
        if ( !called ) {
            atexit( ofmo_scf_dealloc );
            called = true;
        }
    }
    return 0;
}
/** Computes the nuclear repulsion energy.
 *
 * The result is in hartree.
 *
 * @param[in] nat number of atoms
 * @param[in] atomic_number[] atomic number of each atom
 * @param[in] atom_x[] x coordinates of the atoms (bohr)
 * @param[in] atom_y[] y coordinates of the atoms (bohr)
 * @param[in] atom_z[] z coordinates of the atoms (bohr)
 *
 * @return the computed nuclear repulsion energy
 *
 * @ingroup ofmo-rhf
 * */
double ofmo_calc_nuclear_repulsion(
const int nat, const int atomic_number[],
const double atom_x[], const double atom_y[],
const double atom_z[] ) {
double *datomic_number, enuc, rab2;
int i, j;
ofmo_scf_init( nat );
/* Data type conversion of atomic number from int to double.
 * Supposing nao > nat */
datomic_number = _ev_;
for ( i=0; i<nat; i++ ) datomic_number[i] = (double)atomic_number[i];
/* calc. nuclear repulsion energy in au.*/
enuc = 0.e0;
for ( i=0; i<nat; i++ ) {
for ( j=0; j<i; j++ ) {
rab2 = (atom_x[i]-atom_x[j]) * (atom_x[i]-atom_x[j])
+ (atom_y[i]-atom_y[j]) * (atom_y[i]-atom_y[j])
+ (atom_z[i]-atom_z[j]) * (atom_z[i]-atom_z[j]);
/* Z_i * Z_j / R_ij over each unique atom pair (j < i). */
enuc += (datomic_number[i]*datomic_number[j] * sqrt(1.e0/rab2));
}
}
return enuc;
}
/** Builds the closed-shell electron density matrix from the MO coefficients.
 *
 * Computes the closed-shell density matrix from the given MO coefficient
 * matrix: D(i,j) is the dot product, over the nocc occupied MOs, of AO rows
 * i and j of the transposed coefficient matrix.
 *
 * @attention
 * @li functions used internally: \c ofmo_transpose_matrix ,
 * \c ofmo_dot_product
 * @li scratch array used internally: \c _T_
 *
 * @param[in] nao number of AOs
 * @param[in] nocc number of doubly occupied orbitals (half the number of
 * electrons)
 * @param[in] C[nao*nao] array holding the MO coefficient matrix
 * @param[out] D[nao*(nao+1)/2] array receiving the computed density-matrix
 * elements (packed U)
 *
 * @ingroup ofmo-rhf
 * */
int ofmo_scf_make_rhf_density(const int nao, const int nocc,
const double C[], double D[]) {
double *Ct, *ci, *cj;
int i, j, ij;
ofmo_scf_init( nao );
Ct = _T_;
ofmo_transpose_matrix(nao, C, Ct);
ij = 0;
for (i=0, ci=Ct; i<nao; i++, ci+=nao) {
for (j=0, cj=Ct; j<=i; j++, cj+=nao) {
D[ij++] = ofmo_dot_product(nocc, ci, cj);
}
}
return 0;
}
/** Computes the electronic energy from the density, core-Hamiltonian and
 * Fock matrices.
 *
 * All arrays are in packed-U (upper-triangular) storage. The value returned
 * is E = 2 * sum_i D[i] * (H[i] + F[i]), i.e. the trace expression
 * tr{D(H+F)} evaluated over the packed elements.
 *
 * @param[in] nao number of AOs (matrix dimension)
 * @param[in] D[] density matrix (packed U)
 * @param[in] H[] one-electron (core) Hamiltonian matrix (packed U)
 * @param[in] F[] Fock matrix (packed U)
 *
 * @return the computed electronic energy
 *
 * @ingroup ofmo-rhf
 * */
double ofmo_scf_rhf_energy(const int nao, const double D[],
const double H[], const double F[]) {
    const int npacked = nao * (nao+1) / 2;
    double e = 0.e0;
    int k;
    // E = tr{D(H+F)}
    for (k = 0; k < npacked; k++) {
        e += D[k] * (H[k] + F[k]);
    }
    return e * 2.0;
}
/** Performs a Mulliken population analysis.
 *
 * @param[in] nat number of atoms
 * @param[in] nao number of AOs
 * @param[in] maxlqn maximum orbital quantum number
 * @param[in] leading_cs[lqn] first CS number of orbital quantum number \c lqn
 * @param[in] shel_atm[ics] atom index to which CS \c ics belongs
 * @param[in] shel_ini[ics] first AO number of the AOs contained in CS \c ics
 * @param[in] SP[] overlap matrix (packed U)
 * @param[in] DP[] density matrix (packed U)
 * @param[out] aopop[] computed AO populations
 * @param[out] atpop[] computed atomic populations
 *
 * @ingroup ofmo-rhf
 * */
int ofmo_scf_mulliken_population(
const int nat, const int nao, const int maxlqn,
const int leading_cs[], const int shel_atm[], const int shel_ini[],
const double SP[], const double DP[],
double aopop[], double atpop[] ) {
double *D, *S, *DS;
int lqn, ics, ics0, ics1, iao, iao0, iao1, atm, nnao;
/* number of Cartesian AOs per shell for s, p, d, f, g */
int NNAO[] = { 1, 3, 6, 10, 15};
ofmo_scf_init( nao );
D = _U_;
S = _C_;
DS = _T_;
/* Expand the packed matrices and form DS = 2 * D^T S; its diagonal gives
 * the AO populations. */
ofmo_unpack_matrix( nao, SP, S );
ofmo_unpack_matrix( nao, DP, D );
ofmo_dgemm( nao, "T", "N", 2.e0, D, S, 0.e0, DS );
memset( aopop, '\0', sizeof(double) * nao );
memset( atpop, '\0', sizeof(double) * nat );
for ( lqn=0; lqn<=maxlqn; lqn++ ) {
nnao = NNAO[lqn];
ics0 = leading_cs[lqn];
ics1 = leading_cs[lqn+1];
for ( ics=ics0; ics<ics1; ics++ ) {
atm = shel_atm[ics];
iao0 = shel_ini[ics];
iao1 = iao0 + nnao;
/* Atomic population = sum of the AO populations of its shells. */
for ( iao=iao0; iao<iao1; iao++ ) {
aopop[iao] = DS[iao*nao + iao];
atpop[atm] += aopop[iao];
}
}
}
return 0;
}
/** Initial density matrix construction routine.
 *
 * Builds an initial density matrix with an extended-Huckel-type method, in
 * the sorted basis-function ordering.
 *
 * @param[in] nat number of atoms
 * @param[in] ncs number of CSs
 * @param[in] nao number of AOs
 * @param[in] maxlqn maximum orbital quantum number
 * @param[in] nocc number of doubly occupied orbitals (half the number of
 * electrons for a closed-shell system)
 * @param[in] atomic_number[iat] atomic number of atom \c iat
 * ( \f$ 0\le \tt{iat} < \tt{nat} \f$)
 * @param[in] shel_atm[ics] atom index to which the \c ics -th CS belongs
 * @param[in] shel_ini[ics] first AO number of the \c ics -th CS
 * @param[in] leading_cs[lqn] first CS number with orbital quantum number
 * \c lqn
 * @param[in] SP[] sorted packed-U overlap matrix
 * @param[out] DP[] computed sorted packed-U initial density matrix
 *
 * @note
 * Only Cartesian basis functions are supported.
 *
 * @ingroup ofmo-rhf
 * */
int ofmo_scf_init_density_ehuckel(
const int nat, const int ncs, const int nao, const int maxlqn,
const int nocc, const int atomic_number[],
const int leading_cs[], const int shel_atm[], const int shel_ini[],
const double SP[], double DP[], double aopop[], double atpop[] ) {
int lqn;
int *INI;
int NNAO[] = {1, 3, 6, 10, 15};
double *F;
int atm, cs_tmp=0, iatm, iatn;
int ics, ics0, ics1, iao, iao0, iao1, iini, ilqn, i2;
int jcs, jcs0, jcs1, jao, jao0, jao1, jini, jlqn, jcs_max, jao_max;
int ii, ij, jj;
double val, coe;
/* Per-element valence potentials; indexed by atomic number. */
double POTE[] = { 0.0, /* dummy element (index 0 is unused) */
0.50,0.90,0.20,0.34,0.30,0.41,0.53,0.50,
0.64,0.79,0.19,0.28,0.22,0.30,0.39,0.38,0.48,0.58,0.16,0.22,
0.24,0.25,0.25,0.25,0.27,0.29,0.29,0.28,
0.28,0.35,0.22,0.29,0.36,0.36,0.43,0.51,0.15,0.21,
0.23,0.15,0.25,0.26,0.27,0.27,0.27,0.31,
0.28,0.33,0.21,0.27,0.32,0.33,0.38,0.45,
};
ofmo_scf_init( nao );
INI = _iv_;
F = _G_;
// Initialize INI:
// classifies each CS as core shell (0), valence shell (1), or other (>=2).
for ( lqn=0; lqn<=maxlqn; lqn++ ) {
ics0 = leading_cs[lqn];
ics1 = leading_cs[lqn+1];
atm = -1;
for ( ics=ics0; ics<ics1; ics++ ) {
iatm = shel_atm[ics];
if ( iatm != atm ) {
cs_tmp = 0;
atm = iatm;
}
iatn = atomic_number[iatm];
if ( iatn <=2 ) { // H, He
INI[ics] = ( lqn==0 ? 1 : 2 );
} else if ( iatn <=10 ) { // Li - Ne
if ( lqn==0 ) INI[ics] = ( cs_tmp==0 ? 0 : 1 );
else INI[ics] = ( lqn==1 ? 1 : 2 );
} else if ( iatn <=18 ) { // Na - Ar
if ( lqn==0 ) INI[ics] = ( cs_tmp<=1 ? 0 : 1 );
else if ( lqn==1 ) INI[ics] = ( cs_tmp==0 ? 0 : 1 );
else INI[ics] = 2;
} else if ( iatn <= 36 ) { // K - Kr
if ( lqn==0 ) INI[ics] = ( cs_tmp<=2 ? 0 : 1 );
else if ( lqn==1 ) INI[ics] = ( cs_tmp<=1 ? 0 : 1 );
else INI[ics] = 2;
} else {
dbg("atomic number (%d) is not supported, yet\n", iatn );
fflush(stdout);
return -1;
}
cs_tmp++;
}
}
// Build the model Fock matrix (Kikuchi's method, a variant of the
// extended Huckel method).
for ( lqn=0; lqn<=maxlqn; lqn++ ) {
ics0 = leading_cs[lqn];
ics1 = leading_cs[lqn+1];
/* diagonal elements */
for ( ics=ics0; ics<ics1; ics++ ) {
iao0 = shel_ini[ics];
iao1 = iao0 + NNAO[lqn];
iatm = shel_atm[ics];
iatn = atomic_number[iatm];
iini = INI[ics];
if ( iini == 0 ) val = -2.e0;
else if ( iini == 1 ) val = -POTE[iatn];
else val = 1.e3;
for ( iao=iao0; iao<iao1; iao++ )
F[((iao*iao+iao)>>1) + iao ] = val;
}
}
/* off-diagonal elements */
for ( ilqn=0; ilqn<=maxlqn; ilqn++ ) {
ics0 = leading_cs[ilqn];
ics1 = leading_cs[ilqn+1];
for ( jlqn=0; jlqn<=ilqn; jlqn++ ) {
jcs0 = leading_cs[jlqn];
jcs1 = leading_cs[jlqn+1];
for ( ics=ics0; ics<ics1; ics++ ) {
iao0 = shel_ini[ics];
iao1 = iao0 + NNAO[ilqn];
iini = INI[ics];
jcs_max = ( jlqn == ilqn ? (ics+1) : jcs1 );
for ( jcs=jcs0; jcs<jcs_max; jcs++ ) {
jao0 = shel_ini[jcs];
jao1 = jao0 + NNAO[jlqn];
jini = INI[jcs];
/* Coupling coefficient depends on the core/valence class of
 * the two shells. */
if (iini>=2 || jini>=2) coe = 0.0;
else if (iini==1 && jini==1) coe = 2.0;
else if (iini==0 && jini==0) coe = 0.1;
else coe = 0.5;
for ( iao=iao0; iao<iao1; iao++ ) {
i2 = (iao*iao+iao)>>1;
ii = i2 + iao;
jao_max = (jcs==ics ? (iao+1) : jao1);
for ( jao=jao0; jao<jao_max; jao++ ) {
if ( iao == jao ) continue;
ij = i2 + jao;
jj = ((jao*jao+jao)>>1) + jao ;
F[ij] = coe * SP[ij] * ( F[ii] + F[jj] );
}
}
}
}
}
}
// Solve the generalized eigenvalue problem and build the density matrix.
double *U, *EV, *C;
EV = _ev_;
C = _C_;
U = _U_;
ofmo_unpack_matrix(nao, SP, U);
ofmo_chodec(nao, U);
ofmo_unpack_matrix(nao, F, C);
ofmo_solv_GSEP(nao, U, C, EV);
ofmo_scf_make_rhf_density(nao, nocc, C, DP);
ofmo_scf_mulliken_population( nat, nao, maxlqn, leading_cs,
shel_atm, shel_ini, SP, DP, aopop, atpop );
return 0;
}
/** RHF routine for testing
 *
 * Test RHF-SCF routine. To reduce the number of communication steps it is
 * implemented with MPI_Allreduce + MPI_Bcast.
 *
 * @param[in] comm MPI communicator
 * @param[in] maxlqn maximum orbital quantum number
 * @param[in] Enuc nuclear repulsion energy (hartree)
 * @param[in] ncs number of CSs
 * @param[in] nao number of AOs
 * @param[in] leading_cs[lqn] first CS number of orbital quantum number \c lqn
 * @param[in] shel_atm[ics] atom index to which CS \c ics belongs
 * @param[in] shel_ini[ics] first AO number of the AOs contained in CS \c ics
 * @param[in] atom_x[iat] x coordinate of atom \c iat (au)
 * @param[in] atom_y[iat] y coordinate of atom \c iat (au)
 * @param[in] atom_z[iat] z coordinate of atom \c iat (au)
 * @param[in] leading_cs_pair[itype] first CS-pair number of CS-pair type
 * \c itype
 * @param[in] csp_schwarz[icsp] Schwarz integral of CS pair \c icsp
 * @param[in] csp_ics[icsp] first CS number of CS pair \c icsp
 * @param[in] csp_jcs[icsp] second CS number of CS pair \c icsp , where
 * \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$
 * @param[in] csp_leading_ps_pair[icsp] first PS-pair number contained in
 * CS pair \c icsp
 * @param[in] psp_zeta[ipsp] sum of the orbital exponents of PS pair \c ipsp ,
 * \f$ \zeta = \zeta_a + \zeta_b \f$
 * @param[in] psp_dkps[ipsp] linear-combination constant of PS pair \c ipsp
 * \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b}
 * \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b}
 * ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2
 * \right]\f]
 * @param[in] psp_xiza[ipsp] for PS pair \c ipsp ,
 * \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$
 *
 * @param[in] nat number of atoms
 * @param[in] nocc number of doubly occupied orbitals
 *
 * @param[in] S[] overlap matrix (packed U)
 * @param[in] H[] one-electron Hamiltonian matrix (packed U)
 *
 * @param[in] maxscfcyc maximum number of SCF iterations
 * @param[in] scfe energy convergence threshold
 * @param[in] scfd density-matrix convergence threshold
 *
 * @param[in,out] D[] initial density matrix on input / converged density
 * matrix on output (packed U)
 * @param[out] F[] sorted Fock matrix at SCF convergence (packed U)
 * @param[out] C[] sorted MO coefficient matrix at SCF convergence (square)
 * @param[out] moe[] MO energies at SCF convergence (vector)
 * @param[in,out] *Etot total energy at SCF convergence (hartree)
 *
 * @retval 0 normal termination (SCF converged)
 * @retval -1 abnormal termination (SCF did not converge)
 *
 * @ingroup ofmo-rhf
 * */
int ofmo_scf_rhf(
MPI_Comm comm, const int maxlqn, const double Enuc,
const int ncs, const int nao,
const int leading_cs[],
// basis-set data
const int shel_atm[], const int shel_ini[],
const double atom_x[], const double atom_y[],
const double atom_z[],
// cutoff-table data
const int leading_cs_pair[], const double csp_schwarz[],
const int csp_ics[], const int csp_jcs[],
const int csp_leading_ps_pair[],
const double psp_zeta[], const double psp_dkps[],
const double psp_xiza[],
// molecular data
const int nat,
const int nocc,
// integral data
double S[], double H[],
// control parameters
const int maxscfcyc, const double scfe, const double scfd,
// output data
double D[], double C[], double moe[], double *Etot) {
int nao2;
double *U, *Dold, *dG, *dD, *TMP, *F;
double Eold, Enew, deltaD = 1.e100, deltaE;
int itera, myrank, nprocs;
double errdiis=0.0e0;
// threshold-related variables
double tol_diis = 1.e-2;
// various flags
int dodiis=false, flag_scf;
//int doshift=false;
int ierr;
// ODA
int koda;
double *Foda, *Doda, Eoda, lambda1;
// level-shift operator related (disabled)
//double shifto = 1.0, shiftv =1.0;
int cid_gmat;
int tid_init, tid_gmat, tid_allred, tid_diis, tid_diag, tid_bcast;
int tid_tot;
// Screening threshold
#if 0
float eps_ps4 = EPS_PS4;
float eps_eri = EPS_ERI;
float eps_sch = EPS_SCH;
float eps_fac = (scfe>1e-8)? scfe/1e-8: 1.0;
eps_ps4 *= eps_fac;
eps_eri *= eps_fac;
eps_sch *= eps_fac;
if(fp_prof) fprintf(fp_prof, "scfd scfe eps_sch: %e %e %e \n", scfd, scfe, eps_sch);
#else
float eps0_ps4=ofmo_twoint_eps_ps4(0);
float eps0_eri=ofmo_twoint_eps_eri(0);
float eps0_sch=ofmo_twoint_eps_sch(0);
float eps_ps4 = eps0_ps4;
float eps_eri = eps0_eri;
float eps_sch = eps0_sch;
#endif
int maxitera = maxscfcyc;
int minitera = minscfcyc;
cid_gmat = ofmo_create_thread_timer( "GMAT", 0 );
tid_init = ofmo_create_proc_timer( "INIT", 2 );
tid_gmat = ofmo_create_proc_timer( "GMAT", 2 );
tid_allred = ofmo_create_proc_timer( "ALLRED", 2 );
tid_diis = ofmo_create_proc_timer( "DIIS", 2 );
tid_diag = ofmo_create_proc_timer( "DIAG", 2 );
tid_bcast = ofmo_create_proc_timer( "BCAST", 2 );
tid_tot = ofmo_create_proc_timer( "TOT", 2 );
MPI_Comm_rank( comm, &myrank );
MPI_Comm_size( comm, &nprocs );
ofmo_start_proc_timer( tid_init );
ofmo_start_proc_timer( tid_tot );
ofmo_scf_init( nao );
nao2 = nao * (nao+1) / 2;
U = _U_;
Dold = _Dold_;
//G = _G_;
dG = _dG_;
dD = _dD_;
TMP = _T_;
F = _F_;
Foda = _Foda_;
Doda = _Doda_;
// Cholesky decomposition of the overlap matrix
ofmo_unpack_matrix(nao, S, U);
ierr = ofmo_chodec(nao, U);
if (ierr != 0) {
if (myrank == 0)
dbg("ERROR: Failure in Cholesky decomposition of S\n");
return -1;
}
// keep the factor U of S = U'U for DIIS
ofmo_diis_init( nao, S );
// initial setup
//Eold = ZERO;
Eold = *Etot;
memset( Dold, '\0', sizeof(double)*nao2 );
ofmo_dcopy( nao2, D, dD ); /* for incremental Fock */
ofmo_twoint_alloc_Dcs(ncs); /* for Schwarz screening with Density */
int incore = -1;
int last_incore = -1;
#pragma omp parallel
{
int mythread = omp_get_thread_num();
int incore2 = ofmo_twoint_get_last_eri_type( mythread );
#pragma omp critical
if (incore2 > incore) incore = incore2;
}
MPI_Allreduce(&incore, &last_incore, 1, MPI_INT, MPI_MAX, comm);
ofmo_twoint_set_global_last_eri_type(last_incore);
if(fp_prof) fprintf(fp_prof, "g_incore: %d\n", last_incore);
#ifdef USE_CUDA
{
int nblk = -1, nthb = -1;
int ret = 0;
int Lab = maxlqn*(maxlqn+1)/2 + maxlqn;
int ncspair = leading_cs_pair[Lab+1];
int npspair = csp_leading_ps_pair[ncspair];
float *csp_schwarz_f;
csp_schwarz_f = (float*)malloc( sizeof(float) * ncspair );
for (int i=0; i<ncspair; i++) csp_schwarz_f[i]=(float)csp_schwarz[i];
ret = cuda_SCF_Init(ncs, nat, maxlqn, ncspair,npspair, nao,
shel_atm, shel_ini, atom_x, atom_y, atom_z,
leading_cs,
leading_cs_pair, csp_leading_ps_pair, csp_ics, csp_jcs,
psp_zeta, psp_dkps, psp_xiza, csp_schwarz_f,
nblk, nthb);
if (ret<0) exit(ret);
free(csp_schwarz_f);
}
#endif
ofmo_acc_proc_timer( tid_init );
// ODA
koda = 0;
//for (itera=1, flag_scf=false; itera<=maxscfcyc; itera++) {
for (itera=1, flag_scf=false; itera<=maxitera; itera++) {
ofmo_start_proc_timer( tid_gmat );
ofmo_twoint_eps_ps4(eps_ps4);
ofmo_twoint_eps_eri(eps_eri);
ofmo_twoint_eps_sch(eps_sch);
#pragma omp parallel
{
int nworkers, workerid;
int nthreads, mythread;
nthreads = omp_get_num_threads();
mythread = omp_get_thread_num();
nworkers = nthreads * nprocs;
workerid = myrank * nthreads + mythread;
ofmo_start_thread_timer( cid_gmat, mythread );
ofmo_integ_gen_gmat( nworkers, workerid,
maxlqn, shel_atm, shel_ini, atom_x, atom_y, atom_z,
leading_cs_pair, leading_cs,
csp_schwarz, csp_ics, csp_jcs, csp_leading_ps_pair,
psp_zeta, psp_dkps, psp_xiza, nao, dD, dG );
ofmo_acc_thread_timer( cid_gmat, mythread );
}
ofmo_acc_proc_timer( tid_gmat );
ofmo_start_proc_timer( tid_allred );
MPI_Allreduce( dG, TMP, nao2, MPI_DOUBLE, MPI_SUM, comm );
memcpy( dG, TMP, sizeof(double)*nao2 );
ofmo_acc_proc_timer( tid_allred );
/*// debug
if ( itera== 1 && nao > 80 ) {
long nzeri, nzeri0;
nzeri0 = ofmo_get_nonzero_eri();
MPI_Reduce( &nzeri0, &nzeri, 1, MPI_LONG, MPI_SUM, 0, comm );
if ( fp_prof ) {
fprintf( fp_prof, "nzeri= %12ld\n", nzeri );
}
}*/
/* form total Fock matrix */
if ( itera == 1 )
for ( int i=0; i<nao2; i++ ) F[i] = H[i] + dG[i];
else
for ( int i=0; i<nao2; i++ ) F[i] += dG[i];
// compute the Fock matrix and the energy
Enew = ofmo_scf_rhf_energy(nao, D, H, F) + Enuc;
/* decide whether to apply DIIS */
ofmo_start_proc_timer( tid_diis );
ofmo_scale_diag( nao, 2.e0, F );
if ( (errdiis=ofmo_diis_profiling( nao, D, F )) > 0 ) {
if ( dodiis == false && fabs(Enew-Eold)<tol_diis ) {
dodiis = true;
}
} else {
dodiis = false;
}
double *FE = _T_;
if ( dodiis ) ofmo_diis_update( nao, D, F, FE, dodiis );
/* ODA */
if ( dodiis ) {
koda = 0;
} else {
if ( koda == 0 ) {
memcpy( Foda, F, sizeof(double)*nao2 );
Eoda = Enew;
} else {
double s2, c, lambda;
int i;
ofmo_scale_diag( nao, 0.5e0, Doda );
s2 = c = 0.e0;
for ( i=0; i<nao2; i++ ) {
s2 += Foda[i] * Doda[i];
c += (F[i]-Foda[i]) * Doda[i];
}
c *= 2.e0;
ofmo_scale_diag( nao, 2.e0, Doda );
lambda = ( c <= -s2 ? 1.e0 : (-s2/c) );
lambda1 = 1.e0 - lambda;
Eoda += lambda*(s2*2.e0+lambda*c);
for ( i=0; i<nao2; i++ )
Foda[i]=lambda1*Foda[i]+lambda*F[i];
}
koda++;
}
double *Fd;
Fd = ( dodiis ? FE : Foda );
ofmo_acc_proc_timer( tid_diis );
ofmo_unpack_matrix(nao, Fd, C);/* solve generalized symetric
eigenvalue problem */
ofmo_scale_diag( nao, 0.5e0, F );
ofmo_start_proc_timer( tid_diag );
ierr = ofmo_solv_GSEP( nao, U, C, moe );
ofmo_acc_proc_timer( tid_diag );
if ( ierr != 0 ) {
dbg("error in solving GSEP\n");
MPI_Abort( MPI_COMM_WORLD, -1 );
}
/* update density */
ofmo_dcopy( nao2, D, Dold ); /* store previous density matrix */
ofmo_scf_make_rhf_density(nao, nocc, C, D);
// convergence test
deltaE = Enew - Eold;
deltaD = ofmo_max_diff( nao2, D, Dold );
if ( fp_prof ) {
fprintf(fp_prof, "%5d : %17.10f ( %17.10f ) (%10.7f)\n",
itera, Enew, deltaE, deltaD);
}
if (itera>=minitera) {
if (convType==scc) {
if (deltaE<scfe && deltaD<scfd) {
flag_scf = true;
}
} else if (convType==scf) {
// from rhfuhf in GAMESS
int cvdens = false;
int cvengy = false;
int cvdiis = false;
double diistol = scfe*1.0e-2;
cvdens = (deltaD<scfd && fabs(deltaE)<scfe*10)
|| (deltaD<scfd*0.2e0);
cvengy = (fabs(deltaE)<scfe && deltaD<scfd*2);
errdiis = fabs(errdiis);
cvdiis = (dodiis && errdiis<diistol && deltaD<scfd*2);
if (cvdens || cvengy || cvdiis) { flag_scf = true; };
} else {
if (fabs(deltaE)<scfe && deltaD<scfd) {
flag_scf = true;
}
}
}
ofmo_start_proc_timer( tid_bcast );
MPI_Bcast( &flag_scf, 1, MPI_INT, 0, comm );
ofmo_acc_proc_timer( tid_bcast );
if ( flag_scf == true ) break;
// for incremental Fock generation
for ( int i=0; i<nao2; i++ ) dD[i] = D[i] - Dold[i];
Eold = Enew;
/* ODA */
if ( koda == 1 ) {
memcpy( Doda, dD, sizeof(double)*nao2 );
} else if ( koda > 1 ) {
for ( int i=0; i<nao2; i++ )
Doda[i] = dD[i] + lambda1*Doda[i];
}
} // end for (itera)
MPI_Bcast( &Enew, 1, MPI_DOUBLE, 0, comm );
*Etot = Enew;
ofmo_twoint_free_Dcs();
#ifdef USE_CUDA
{
int ret = 0;
ret = cuda_SCF_Finalize();
if (ret<0) exit(ret);
}
#endif
ofmo_acc_proc_timer( tid_tot );
ofmo_twoint_eps_ps4(eps0_ps4);
ofmo_twoint_eps_eri(eps0_eri);
ofmo_twoint_eps_sch(eps0_sch);
// final processing
if (flag_scf == true) {
//if (myrank == 0) printf("==== Allready SCF ====\n");
return 0;
} else {
if (myrank == 0 && fp_prof != NULL)
fprintf(fp_prof, "==== SCF not converged ====\n");
return -1;
}
}
|
ctrsm.c | #include "blas.h"
#include "error.h"
#include <stdio.h>
#include "handle.h"
#include "config.h"
#include "ctrsm.fatbin.c"
static inline size_t min(size_t a, size_t b) { return (a < b) ? a : b; }
static inline size_t max(size_t a, size_t b) { return (a > b) ? a : b; }
/* Asynchronously copies an m-by-n block of elements from host matrix B
 * (element offset (bi,bj), leading dimension ldb) into device matrix A
 * (element offset (ai,aj), leading dimension lda). All column offsets and
 * pitches are converted to bytes with elemSize, as the driver API requires.
 *
 * BUG FIX: the call previously read "cuMemcpy2DAsync(©, stream)" — an HTML
 * entity left over from a bad encoding pass — instead of passing the address
 * of the local descriptor with "&copy". */
static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
                                           const void * B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  /* Positional initialization follows the CUDA_MEMCPY2D field order
   * (srcXInBytes, srcY, srcMemoryType, srcHost, srcDevice, srcArray,
   * srcPitch, then the dst equivalents, then WidthInBytes, Height) —
   * NOTE(review): confirm against the CUDA driver API version in use. */
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}
/* Asynchronously copies an m-by-n block of elements from device matrix B
 * (element offset (bi,bj), leading dimension ldb) into host matrix A
 * (element offset (ai,aj), leading dimension lda). All column offsets and
 * pitches are converted to bytes with elemSize, as the driver API requires.
 *
 * BUG FIX: the call previously read "cuMemcpy2DAsync(©, stream)" — an HTML
 * entity left over from a bad encoding pass — instead of passing the address
 * of the local descriptor with "&copy". */
static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj,
                                           CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  /* Positional initialization follows the CUDA_MEMCPY2D field order —
   * NOTE(review): confirm against the CUDA driver API version in use. */
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}
/* Single-precision complex constants used throughout this translation unit. */
static const float complex zero = 0.0f + 0.0f * I;
static const float complex one = 1.0f + 0.0f * I;
/* Reference (CPU) CTRSM: solves one of
 *   op(A) * X = alpha * B   (side == CBlasLeft)
 *   X * op(A) = alpha * B   (side == CBlasRight)
 * where op(A) is A, A**T or A**H; A is an m-by-m (left) or n-by-n (right)
 * unit or non-unit, upper or lower triangular matrix; and the solution X
 * overwrites B (m-by-n). Matrices are stored column-major with leading
 * dimensions lda/ldb. Column loops are parallelized with OpenMP where the
 * columns of B are independent (left-side cases). */
void ctrsm(CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag,
size_t m, size_t n,
float complex alpha, const float complex * restrict A, size_t lda,
float complex * restrict B, size_t ldb) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return;
}
// quick return
if (m == 0 || n == 0)
return;
// alpha == 0: the result is simply B = 0, no solve needed
if (alpha == zero) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] = zero;
}
return;
}
if (side == CBlasLeft) {
if (transA == CBlasNoTrans) {
// B := alpha * inv(A) * B
if (uplo == CBlasUpper) {
// upper triangular: back substitution from the last row
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
size_t k = m - 1;
do {
if (B[j * ldb + k] != zero) {
if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k];
register float complex temp = B[j * ldb + k];
for (size_t i = 0; i < k; i++)
B[j * ldb + i] -= temp * A[k * lda + i];
}
} while (k-- > 0);
}
}
else {
// lower triangular: forward substitution from the first row
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
for (size_t k = 0; k < m; k++) {
if (B[j * ldb + k] != zero) {
if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k];
register float complex temp = B[j * ldb + k];
for (size_t i = k + 1; i < m; i++)
B[j * ldb + i] -= temp * A[k * lda + i];
}
}
}
}
}
else {
// B := alpha * inv(A**T) * B or alpha * inv(A**H) * B
if (uplo == CBlasUpper) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < m; i++) {
register float complex temp = alpha * B[j * ldb + i];
if (transA == CBlasTrans) {
for (size_t k = 0; k < i; k++)
temp -= A[i * lda + k] * B[j * ldb + k];
if (diag == CBlasNonUnit) temp /= A[i * lda + i];
}
else {
// conjugate transpose: conjugate the A elements
for (size_t k = 0; k < i; k++)
temp -= conjf(A[i * lda + k]) * B[j * ldb + k];
if (diag == CBlasNonUnit) temp /= conjf(A[i * lda + i]);
}
B[j * ldb + i] = temp;
}
}
}
else {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
size_t i = m - 1;
do {
register float complex temp = alpha * B[j * ldb + i];
if (transA == CBlasTrans) {
for (size_t k = i + 1; k < m; k++)
temp -= A[i * lda + k] * B[j * ldb + k];
if (diag == CBlasNonUnit) temp /= A[i * lda + i];
}
else {
for (size_t k = i + 1; k < m; k++)
temp -= conjf(A[i * lda + k]) * B[j * ldb + k];
if (diag == CBlasNonUnit) temp /= conjf(A[i * lda + i]);
}
B[j * ldb + i] = temp;
} while (i-- > 0);
}
}
}
}
else {
// right-side cases update whole columns of B using previously solved
// columns, so the j-loop carries a dependency and runs serially
if (transA == CBlasNoTrans) {
// B := alpha * B * inv(A)
if (uplo == CBlasUpper) {
for (size_t j = 0; j < n; j++) {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
for (size_t k = 0; k < j; k++) {
if (A[j * lda + k] != zero) {
register float complex temp = A[j * lda + k];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (diag == CBlasNonUnit) {
register float complex temp = one / A[j * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= temp;
}
}
}
else {
size_t j = n - 1;
do {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
for (size_t k = j + 1; k < n; k++) {
if (A[j * lda + k] != zero) {
register float complex temp = A[j * lda + k];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (diag == CBlasNonUnit) {
register float complex temp = one / A[j * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= temp;
}
} while (j-- > 0);
}
}
else {
// B := alpha * B * inv(A**T) or alpha * B * inv(A**H)
if (uplo == CBlasUpper) {
size_t k = n - 1;
do {
if (diag == CBlasNonUnit) {
register float complex temp;
if (transA == CBlasTrans)
temp = one / A[k * lda + k];
else
temp = one / conjf(A[k * lda + k]);
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= temp;
}
for (size_t j = 0; j < k; j++) {
if (A[k * lda + j] != zero) {
register float complex temp;
if (transA == CBlasTrans)
temp = A[k * lda + j];
else
temp = conjf(A[k * lda + j]);
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= alpha;
}
} while (k-- > 0);
}
else {
for (size_t k = 0; k < n; k++) {
if (diag == CBlasNonUnit) {
register float complex temp;
if (transA == CBlasTrans)
temp = one / A[k * lda + k];
else
temp = one / conjf(A[k * lda + k]);
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= temp;
}
for (size_t j = k + 1; j < n; j++) {
if (A[k * lda + j] != zero) {
register float complex temp;
if (transA == CBlasTrans)
temp = A[k * lda + j];
else
temp = conjf(A[k * lda + j]);
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= alpha;
}
}
}
}
}
}
/* Single-GPU CTRSM using the CUDA driver API. Validates the arguments like
 * the CPU version, lazily loads the embedded fatbin module into the handle,
 * selects the kernel instance by constructing its C++-mangled template name
 * from the run-time parameters, and launches it asynchronously on `stream`.
 * Returns CUDA_SUCCESS or the first failing driver-API error code. */
CUresult cuCtrsm(CUBLAShandle handle,
CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag,
size_t m, size_t n,
float complex alpha, CUdeviceptr A, size_t lda,
CUdeviceptr B, size_t ldb, CUstream stream) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return CUDA_ERROR_INVALID_VALUE;
}
// quick return
if (m == 0 || n == 0)
return CUDA_SUCCESS;
CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));
// Lazily load the embedded ctrsm fatbin the first time it is needed.
if (handle->ctrsm == NULL)
CU_ERROR_CHECK(cuModuleLoadData(&handle->ctrsm, imageBytes));
// Thread-block shape and tile sizes; the tile orientation depends on which
// side the triangular matrix is on.
const unsigned int bx = 4;
const unsigned int by = 4;
const unsigned int mb = (side == CBlasLeft) ? 4 : 16;
const unsigned int nb = (side == CBlasLeft) ? 16 : 4;
// Build the mangled name of the template instantiation to look up.
char name[112];
snprintf(name, 112,
"_Z5ctrsmIL9CBlasSide%dEL9CBlasUplo%dEL14CBlasTranspose%dEL9CBlasDiag%dELj%uELj%uELj%uELj%uEEvPK6float2PS4_S4_iiii",
side, uplo, transA, diag, mb, nb, bx, by);
CUfunction function;
CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->ctrsm, name));
void * params[] = { &A, &B, &alpha, &lda, &ldb, &m, &n };
// One thread block per (mb x nb) tile of B.
CU_ERROR_CHECK(cuLaunchKernel(function, (unsigned int)(m + mb - 1) / mb, (unsigned int)(n + nb - 1) / nb, 1,
bx, by, 1, 0, stream, params, NULL));
CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));
return CUDA_SUCCESS;
}
/* Multi-GPU complex single-precision triangular solve (BLAS ctrsm
   convention).  Blocked hybrid algorithm: the large rank-k updates of B are
   dispatched to the GPUs via cuMultiGPUCgemm, while the small on-diagonal
   triangular solves are done on the host with the CPU ctrsm.  Each GEMM is
   synchronized before the dependent host solve runs. */
CUresult cuMultiGPUCtrsm(CUmultiGPUBLAShandle handle,
                         CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag,
                         size_t m, size_t n,
                         float complex alpha, const float complex * restrict A, size_t lda,
                         float complex * restrict B, size_t ldb) {
  /* A is m*m when applied from the left, n*n from the right. */
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  /* Argument checks mirror reference BLAS: info is the 1-based position of
     the offending argument. */
  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  /* Quick return for empty matrices. */
  if (m == 0 || n == 0)
    return CUDA_SUCCESS;

  /* alpha == 0 means the result is simply B = 0; a k == 0, beta == 0 cgemm
     writes zeros into B without reading A. */
  if (alpha == zero) {
    cgemm(CBlasNoTrans, CBlasNoTrans, m, n, 0, zero, A, lda, B, ldb, zero, B, ldb);
    return CUDA_SUCCESS;
  }

  /* Panel sizes matched to the cgemm kernels used for the updates. */
  const size_t mb = (transA == CBlasNoTrans) ? CGEMM_N_MB : CGEMM_C_MB;
  const size_t nb = CGEMM_N_NB;

  if (side == CBlasLeft) {
    if (transA == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        /* Upper, no-transpose: sweep row panels bottom-up.  i starts at m
           rounded UP to a multiple of mb so the first (ragged) panel has
           ib = m % mb rows; every later panel is full. */
        size_t r = m % mb;
        size_t i = (r == 0) ? m : m + mb - r;
        do {
          i -= mb;
          const size_t ib = min(mb, m - i);
          /* B[i] -= A[i, i+ib:] * B[i+ib:]  (scaled by alpha on first touch) */
          CU_ERROR_CHECK(cuMultiGPUCgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[(i + ib) * lda + i], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          /* Solve the ib x ib diagonal block on the host. */
          ctrsm(CBlasLeft, CBlasUpper, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        } while (i > 0);
      }
      else {
        /* Lower, no-transpose: sweep row panels top-down. */
        for (size_t i = 0; i < m; i += mb) {
          const size_t ib = min(mb, m - i);
          CU_ERROR_CHECK(cuMultiGPUCgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, i, -one, &A[i], lda, B, ldb, alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ctrsm(CBlasLeft, CBlasLower, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        }
      }
    }
    else {
      if (uplo == CBlasUpper) {
        /* Upper, (conjugate-)transposed: top-down; the panel of A left of
           the diagonal block supplies the update. */
        for (size_t i = 0; i < m; i += mb) {
          const size_t ib = min(mb, m - i);
          CU_ERROR_CHECK(cuMultiGPUCgemm(handle, transA, CBlasNoTrans, ib, n, i, -one, &A[i * lda], lda, B, ldb, alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ctrsm(CBlasLeft, CBlasUpper, transA, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        }
      }
      else {
        /* Lower, (conjugate-)transposed: bottom-up with the same
           round-up-then-decrement panel walk as above. */
        size_t r = m % mb;
        size_t i = (r == 0) ? m : m + mb - r;
        do {
          i -= mb;
          const size_t ib = min(mb, m - i);
          CU_ERROR_CHECK(cuMultiGPUCgemm(handle, transA, CBlasNoTrans, ib, n, m - i - ib, -one, &A[i * lda + i + ib], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ctrsm(CBlasLeft, CBlasLower, transA, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        } while (i > 0);
      }
    }
  }
  else {
    if (transA == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        /* Right side, upper, no-transpose: sweep column panels left-to-right. */
        for (size_t j = 0; j < n; j += nb) {
          const size_t jb = min(nb, n - j);
          CU_ERROR_CHECK(cuMultiGPUCgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, j, -one, B, ldb, &A[j * lda], lda, alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ctrsm(CBlasRight, CBlasUpper, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        }
      }
      else {
        /* Right side, lower, no-transpose: sweep column panels right-to-left. */
        size_t r = n % nb;
        size_t j = (r == 0) ? n : n + nb - r;
        do {
          j -= nb;
          const size_t jb = min(nb, n - j);
          CU_ERROR_CHECK(cuMultiGPUCgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[j * lda + j + jb], lda, alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ctrsm(CBlasRight, CBlasLower, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        } while (j > 0);
      }
    }
    else {
      if (uplo == CBlasUpper) {
        /* Right side, upper, (conjugate-)transposed: right-to-left. */
        size_t r = n % nb;
        size_t j = (r == 0) ? n : n + nb - r;
        do {
          j -= nb;
          const size_t jb = min(nb, n - j);
          CU_ERROR_CHECK(cuMultiGPUCgemm(handle, CBlasNoTrans, transA, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[(j + jb) * lda + j], lda, alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ctrsm(CBlasRight, CBlasUpper, transA, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        } while (j > 0);
      }
      else {
        /* Right side, lower, (conjugate-)transposed: left-to-right. */
        for (size_t j = 0; j < n; j += nb) {
          const size_t jb = min(nb, n - j);
          CU_ERROR_CHECK(cuMultiGPUCgemm(handle, CBlasNoTrans, transA, m, jb, j, -one, B, ldb, &A[j], lda, alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          ctrsm(CBlasRight, CBlasLower, transA, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        }
      }
    }
  }

  return CUDA_SUCCESS;
}
|
targetparallelfor-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
use of omp target
*/
int main(int argc, char* argv[])
{
  /* Array of 1000 ints, initialized to data[idx] = idx. */
  const int length = 1000;
  int data[1000];

  for (int idx = 0; idx < length; idx++)
    data[idx] = idx;

  /* Offload the loop to the device and split its iterations among
     threads; every iteration touches a distinct element, so the
     region is race-free by construction. */
#pragma omp target
#pragma omp parallel for
  for (int idx = 0; idx < length - 1; idx++)
    data[idx] = data[idx] + 1;

  return 0;
}
|
GB_unop__identity_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__(none))
// op(A') function: GB (_unop_tran__identity_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
#if 0
GrB_Info GB (_unop_apply__(none))
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply the identity unary operator.
// The actual transpose loop is textually included from GB_unop_transpose.c,
// specialized through the GB_* macros defined earlier in this file
// (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...).
GrB_Info GB (_unop_tran__identity_fc32_fc32)
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix to transpose
    int64_t *restrict *Workspaces,      // workspaces -- presumably one per
                                        // thread; see GB_unop_transpose.c
    const int64_t *restrict A_slice,    // slicing of A across threads
    int nworkspaces,                    // number of workspaces
    int nthreads                        // number of threads to use
)
{ 
    // GB_DISABLE is set above when GxB_NO_IDENTITY or GxB_NO_FC32 is
    // defined; in that case fall back to the generic (non-generated) path.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
mandelbrot.c | /*
To compile:
gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for OpenMP header file here:
#include <omp.h>
#define MXITER 1000

/* A point in the complex plane. */
typedef struct {
  double r;
  double i;
}complex_t;

/* Count how many iterations of z <- z^2 + c it takes for z to leave the
   radius-2 disk (|z|^2 > 4).  Returns MXITER if the point never escapes,
   i.e. c is taken to be in the Mandelbrot set. */
int testpoint(complex_t c){
  complex_t z = c;
  int iter = 0;

  while (iter < MXITER) {
    /* z^2 + c, computed with the same operation order as the classic
       real/imaginary update so results match bit-for-bit. */
    double re = (z.r * z.r) - (z.i * z.i) + c.r;
    z.i = z.r * z.i * 2. + c.i;
    z.r = re;

    if ((z.r * z.r + z.i * z.i) > 4.0)
      return iter;  /* escaped on this iteration */

    ++iter;
  }

  return iter;  /* == MXITER: assumed inside the set */
}
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
//
// BUG FIX: the original used a bare "#pragma omp parallel", which makes
// EVERY thread execute the whole loop nest while sharing n, m and c --
// a data race and redundant work.  The exercise comment (Q2c) asks for the
// outer loop to be split among threads, which requires "parallel for" with
// per-thread loop indices and a per-thread c.
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  // grid spacing along the real and imaginary axes
  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);

  // Q2c: split the outer for loop amongst threads; n, m, c are declared
  // inside the region so each thread gets private copies.
#pragma omp parallel for
  for(int n=0;n<Nim;++n){
    for(int m=0;m<Nre;++m){
      complex_t c;
      c.r = cmin.r + dr*m;
      c.i = cmin.i + di*n;
      count[m+n*Nre] = testpoint(c);
    }
  }
}
// to create a 4096x4096 pixel image [ last argument sets the number of threads ]
// usage: ./mandelbrot 4096 4096 1
int main(int argc, char **argv){

  // BUG FIX: the original indexed argv[1..2] without checking argc.
  if(argc < 4){
    fprintf(stderr, "usage: %s Nre Nim Nthreads\n", argv[0]);
    return 1;
  }

  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);

  // Q2b: set the number of OpenMP threads to be Nthreads here:
  int Nthreads = atoi(argv[argc-1]);
  omp_set_num_threads(Nthreads);

  // storage for the iteration counts (size_t cast avoids int overflow for
  // very large images)
  float *count = (float*) malloc((size_t)Nre*(size_t)Nim*sizeof(float));
  if(count == NULL){
    fprintf(stderr, "failed to allocate %d x %d count array\n", Nre, Nim);
    return 1;
  }

  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm= .125547;
  const float diam  = 0.151579;

  complex_t cmin;
  complex_t cmax;
  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;

  // Q2d: read time before calling mandelbrot with the OpenMP wall clock
  double start = omp_get_wtime();

  // compute mandelbrot set
  mandelbrot(Nre, Nim, cmin, cmax, count);

  // Q2d: read time after calling mandelbrot using OpenMP wall clock time
  double end = omp_get_wtime();

  // print elapsed time
  printf("elapsed = %g\n", end-start);

  // output mandelbrot to png format image
  FILE *fp = fopen("mandelbrot.png", "w");
  if(fp == NULL){
    fprintf(stderr, "could not open mandelbrot.png for writing\n");
    free(count);
    return 1;
  }
  write_hot_png(fp, Nre, Nim, count, 0, 80);
  // NOTE(review): fp is intentionally not fclose'd here -- write_hot_png may
  // close it internally; confirm against png_util.c before adding fclose.

  free(count);

  // BUG FIX: the original called exit(0) followed by an unreachable
  // "return 0"; a plain return is equivalent and not dead code.
  return 0;
}
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Compute *result = *x - *y for `struct timeval' values.
 *
 * Returns 1 if the difference is negative, 0 otherwise.  Note that *y is
 * normalized in place (whole seconds are carried between its fields), so
 * the caller's y may be modified.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microsecond field until
     y->tv_usec <= x->tv_usec, so the subtraction below cannot go
     negative in the microsecond component. */
  if (x->tv_usec < y->tv_usec)
    {
      int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * carry;
      y->tv_sec += carry;
    }

  /* Conversely, if the microsecond gap exceeds one second, move the
     excess whole seconds from the gap into y's fields. */
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int carry = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_usec += 1000000 * carry;
      y->tv_sec -= carry;
    }

  /* After normalization the per-field differences are the answer and
     tv_usec is certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative overall difference shows up purely in the seconds field. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;

  /* Grid dimensions (interior size + 2*4 ghost layers) and time steps.
     BUG FIX: the original left Nx/Ny/Nz (and Nt) uninitialized when fewer
     than 3 (resp. 4) command-line arguments were given; small defaults are
     provided instead. */
  int Nx = 32 + 8, Ny = 32 + 8, Nz = 32 + 8, Nt = 2;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* allocate the arrays: A[2][Nz][Ny][Nx], ping-ponged over (t)%2/(t+1)%2 */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* 13 axis-symmetric variable-coefficient arrays, same shape as one plane */
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 4;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* Initialize all elements.
     BUG FIX: the original init loops started at index 1, leaving index 0
     uninitialized even though the stencil reads it (i-4 at i==4, etc.). */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  /* num_threads is reported by PRINT_RESULTS below */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
              coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
              coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
              coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
              coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
              coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
              coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
              coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
              coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUG FIX: the original called lowercase min(), which this file never
       defines; MIN is the macro declared at the top of the file. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return;

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays.
     BUG FIX: the original leaked the outer pointer arrays A and coef and
     the tile_size list. */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);

  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  return 0;
}
|
c-decl.c | /* Process declarations and variables for C compiler.
Copyright (C) 1988-2018 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Process declarations and symbol lookup for C front end.
Also constructs types; the standard scalar types at initialization,
and structure, union, array and enum types when they are declared. */
/* ??? not all decl nodes are given the most useful possible
line numbers. For example, the CONST_DECLs for enum values. */
#include "config.h"
#define INCLUDE_UNIQUE_PTR
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "c-tree.h"
#include "timevar.h"
#include "stringpool.h"
#include "cgraph.h"
#include "intl.h"
#include "print-tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "attribs.h"
#include "toplev.h"
#include "debug.h"
#include "c-family/c-objc.h"
#include "c-family/c-pragma.h"
#include "c-family/c-ubsan.h"
#include "c-lang.h"
#include "langhooks.h"
#include "tree-iterator.h"
#include "dumpfile.h"
#include "plugin.h"
#include "c-family/c-ada-spec.h"
#include "builtins.h"
#include "spellcheck-tree.h"
#include "gcc-rich-location.h"
#include "asan.h"
#include "c-family/name-hint.h"
#include "c-family/known-headers.h"
#include "c-family/c-spellcheck.h"
/* In grokdeclarator, distinguish syntactic contexts of declarators. */
enum decl_context
{ NORMAL, /* Ordinary declaration */
FUNCDEF, /* Function definition */
PARM, /* Declaration of parm before function body */
FIELD, /* Declaration inside struct or union */
TYPENAME}; /* Typename (inside cast or sizeof) */
/* States indicating how grokdeclarator() should handle declspecs marked
with __attribute__((deprecated)). An object declared as
__attribute__((deprecated)) suppresses warnings of uses of other
deprecated items. */
enum deprecated_states {
DEPRECATED_NORMAL,
DEPRECATED_SUPPRESS
};
/* Nonzero if we have seen an invalid cross reference
to a struct, union, or enum, but not yet printed the message. */
tree pending_invalid_xref;
/* File and line to appear in the eventual error message. */
location_t pending_invalid_xref_location;
/* The file and line that the prototype came from if this is an
old-style definition; used for diagnostics in
store_parm_decls_oldstyle. */
static location_t current_function_prototype_locus;
/* Whether this prototype was built-in. */
static bool current_function_prototype_built_in;
/* The argument type information of this prototype. */
static tree current_function_prototype_arg_types;
/* The argument information structure for the function currently being
defined. */
static struct c_arg_info *current_function_arg_info;
/* The obstack on which parser and related data structures, which are
not live beyond their top-level declaration or definition, are
allocated. */
struct obstack parser_obstack;
/* The current statement tree. */
static GTY(()) struct stmt_tree_s c_stmt_tree;
/* State saving variables. */
tree c_break_label;
tree c_cont_label;
/* A list of decls to be made automatically visible in each file scope. */
static GTY(()) tree visible_builtins;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
int current_function_returns_abnormally;
/* Set to nonzero by `grokdeclarator' for a function
whose return type is defaulted, if warnings for this are desired. */
static int warn_about_return_type;
/* Nonzero when the current toplevel function contains a declaration
of a nested function which is never defined. */
static bool undef_nested_function;
/* If non-zero, implicit "omp declare target" attribute is added into the
attribute lists. */
int current_omp_declare_target_attribute;
/* Each c_binding structure describes one binding of an identifier to
a decl. All the decls in a scope - irrespective of namespace - are
chained together by the ->prev field, which (as the name implies)
runs in reverse order. All the decls in a given namespace bound to
a given identifier are chained by the ->shadowed field, which runs
from inner to outer scopes.
The ->decl field usually points to a DECL node, but there are two
exceptions. In the namespace of type tags, the bound entity is a
RECORD_TYPE, UNION_TYPE, or ENUMERAL_TYPE node. If an undeclared
identifier is encountered, it is bound to error_mark_node to
suppress further errors about that identifier in the current
function.
The ->u.type field stores the type of the declaration in this scope;
if NULL, the type is the type of the ->decl field. This is only of
relevance for objects with external or internal linkage which may
be redeclared in inner scopes, forming composite types that only
persist for the duration of those scopes. In the external scope,
this stores the composite of all the types declared for this
object, visible or not. The ->inner_comp field (used only at file
scope) stores whether an incomplete array type at file scope was
completed at an inner scope to an array size other than 1.
The ->u.label field is used for labels. It points to a structure
which stores additional information used for warnings.
The depth field is copied from the scope structure that holds this
decl. It is used to preserve the proper ordering of the ->shadowed
field (see bind()) and also for a handful of special-case checks.
Finally, the invisible bit is true for a decl which should be
ignored for purposes of normal name lookup, and the nested bit is
true for a decl that's been bound a second time in an inner scope;
in all such cases, the binding in the outer scope will have its
invisible bit true. */
struct GTY((chain_next ("%h.prev"))) c_binding {
union GTY(()) { /* first so GTY desc can use decl */
tree GTY((tag ("0"))) type; /* the type in this scope */
struct c_label_vars * GTY((tag ("1"))) label; /* for warnings */
} GTY((desc ("TREE_CODE (%0.decl) == LABEL_DECL"))) u;
tree decl; /* the decl bound */
tree id; /* the identifier it's bound to */
struct c_binding *prev; /* the previous decl in this scope */
struct c_binding *shadowed; /* the innermost decl shadowed by this one */
unsigned int depth : 28; /* depth of this scope */
BOOL_BITFIELD invisible : 1; /* normal lookup should ignore this binding */
BOOL_BITFIELD nested : 1; /* do not set DECL_CONTEXT when popping */
BOOL_BITFIELD inner_comp : 1; /* incomplete array completed in inner scope */
BOOL_BITFIELD in_struct : 1; /* currently defined as struct field */
location_t locus; /* location for nested bindings */
};
#define B_IN_SCOPE(b1, b2) ((b1)->depth == (b2)->depth)
#define B_IN_CURRENT_SCOPE(b) ((b)->depth == current_scope->depth)
#define B_IN_FILE_SCOPE(b) ((b)->depth == 1 /*file_scope->depth*/)
#define B_IN_EXTERNAL_SCOPE(b) ((b)->depth == 0 /*external_scope->depth*/)
/* Each C symbol points to three linked lists of c_binding structures.
These describe the values of the identifier in the three different
namespaces defined by the language. */
struct GTY(()) lang_identifier {
struct c_common_identifier common_id;
struct c_binding *symbol_binding; /* vars, funcs, constants, typedefs */
struct c_binding *tag_binding; /* struct/union/enum tags */
struct c_binding *label_binding; /* labels */
};
/* Validate c-lang.c's assumptions. */
extern char C_SIZEOF_STRUCT_LANG_IDENTIFIER_isnt_accurate
[(sizeof(struct lang_identifier) == C_SIZEOF_STRUCT_LANG_IDENTIFIER) ? 1 : -1];
/* The binding oracle; see c-tree.h. */
void (*c_binding_oracle) (enum c_oracle_request, tree identifier);
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's symbol binding. */
#define I_SYMBOL_CHECKED(node) \
(TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (node)))
/* Return the address of NODE's symbol-namespace binding chain, first
   giving the binding oracle (if any) one chance to supply a binding.  */
static inline struct c_binding **
i_symbol_binding (tree node)
{
  struct lang_identifier *id
    = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);

  bool ask_oracle = (id->symbol_binding == NULL
		     && c_binding_oracle != NULL
		     && !I_SYMBOL_CHECKED (node));
  if (ask_oracle)
    {
      /* Mark the identifier as checked before calling out, so a
	 re-entrant lookup from the oracle cannot recurse forever.  */
      I_SYMBOL_CHECKED (node) = 1;
      c_binding_oracle (C_ORACLE_SYMBOL, node);
    }

  return &id->symbol_binding;
}
#define I_SYMBOL_BINDING(node) (*i_symbol_binding (node))
#define I_SYMBOL_DECL(node) \
(I_SYMBOL_BINDING(node) ? I_SYMBOL_BINDING(node)->decl : 0)
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's tag binding. */
#define I_TAG_CHECKED(node) \
(TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (node)))
/* Return the address of NODE's tag-namespace (struct/union/enum) binding
   chain, first giving the binding oracle (if any) one chance to supply a
   binding.  */
static inline struct c_binding **
i_tag_binding (tree node)
{
  struct lang_identifier *id
    = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);

  bool ask_oracle = (id->tag_binding == NULL
		     && c_binding_oracle != NULL
		     && !I_TAG_CHECKED (node));
  if (ask_oracle)
    {
      /* Mark the identifier as checked before calling out, so a
	 re-entrant lookup from the oracle cannot recurse forever.  */
      I_TAG_CHECKED (node) = 1;
      c_binding_oracle (C_ORACLE_TAG, node);
    }

  return &id->tag_binding;
}
#define I_TAG_BINDING(node) (*i_tag_binding (node))
#define I_TAG_DECL(node) \
(I_TAG_BINDING(node) ? I_TAG_BINDING(node)->decl : 0)
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's label binding. */
#define I_LABEL_CHECKED(node) \
(TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (node)))
/* Return the address of NODE's label-namespace binding chain, first
   giving the binding oracle (if any) one chance to supply a binding.  */
static inline struct c_binding **
i_label_binding (tree node)
{
  struct lang_identifier *id
    = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);

  bool ask_oracle = (id->label_binding == NULL
		     && c_binding_oracle != NULL
		     && !I_LABEL_CHECKED (node));
  if (ask_oracle)
    {
      /* Mark the identifier as checked before calling out, so a
	 re-entrant lookup from the oracle cannot recurse forever.  */
      I_LABEL_CHECKED (node) = 1;
      c_binding_oracle (C_ORACLE_LABEL, node);
    }

  return &id->label_binding;
}
#define I_LABEL_BINDING(node) (*i_label_binding (node))
#define I_LABEL_DECL(node) \
(I_LABEL_BINDING(node) ? I_LABEL_BINDING(node)->decl : 0)
/* The resulting tree type. */
union GTY((desc ("TREE_CODE (&%h.generic) == IDENTIFIER_NODE"),
chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node
{
union tree_node GTY ((tag ("0"),
desc ("tree_node_structure (&%h)")))
generic;
struct lang_identifier GTY ((tag ("1"))) identifier;
};
/* Track bindings and other things that matter for goto warnings. For
efficiency, we do not gather all the decls at the point of
definition. Instead, we point into the bindings structure. As
scopes are popped, we update these structures and gather the decls
that matter at that time. */
struct GTY(()) c_spot_bindings {
/* The currently open scope which holds bindings defined when the
label was defined or the goto statement was found. */
struct c_scope *scope;
/* The bindings in the scope field which were defined at the point
of the label or goto. This lets us look at older or newer
bindings in the scope, as appropriate. */
struct c_binding *bindings_in_scope;
/* The number of statement expressions that have started since this
label or goto statement was defined. This is zero if we are at
the same statement expression level. It is positive if we are in
a statement expression started since this spot. It is negative
if this spot was in a statement expression and we have left
it. */
int stmt_exprs;
/* Whether we started in a statement expression but are no longer in
it. This is set to true if stmt_exprs ever goes negative. */
bool left_stmt_expr;
};
/* This structure is used to keep track of bindings seen when a goto
statement is defined. This is only used if we see the goto
statement before we see the label. */
struct GTY(()) c_goto_bindings {
/* The location of the goto statement. */
location_t loc;
/* The bindings of the goto statement. */
struct c_spot_bindings goto_bindings;
};
typedef struct c_goto_bindings *c_goto_bindings_p;
/* The additional information we keep track of for a label binding.
These fields are updated as scopes are popped. */
struct GTY(()) c_label_vars {
/* The shadowed c_label_vars, when one label shadows another (which
can only happen using a __label__ declaration). */
struct c_label_vars *shadowed;
/* The bindings when the label was defined. */
struct c_spot_bindings label_bindings;
/* A list of decls that we care about: decls about which we should
warn if a goto branches to this label from later in the function.
Decls are added to this list as scopes are popped. We only add
the decls that matter. */
vec<tree, va_gc> *decls_in_scope;
/* A list of goto statements to this label. This is only used for
goto statements seen before the label was defined, so that we can
issue appropriate warnings for them. */
vec<c_goto_bindings_p, va_gc> *gotos;
};
/* Each c_scope structure describes the complete contents of one
   scope.  Four scopes are distinguished specially: the innermost or
   current scope, the innermost function scope, the file scope (always
   the second to outermost) and the outermost or external scope.

   Most declarations are recorded in the current scope.

   All normal label declarations are recorded in the innermost
   function scope, as are bindings of undeclared identifiers to
   error_mark_node.  (GCC permits nested functions as an extension,
   hence the 'innermost' qualifier.)  Explicitly declared labels
   (using the __label__ extension) appear in the current scope.

   Being in the file scope (current_scope == file_scope) causes
   special behavior in several places below.  Also, under some
   conditions the Objective-C front end records declarations in the
   file scope even though that isn't the current scope.

   All declarations with external linkage are recorded in the external
   scope, even if they aren't visible there; this models the fact that
   such declarations are visible to the entire program, and (with a
   bit of cleverness, see pushdecl) allows diagnosis of some violations
   of C99 6.2.2p7 and 6.2.7p2:

     If, within the same translation unit, the same identifier appears
     with both internal and external linkage, the behavior is
     undefined.

     All declarations that refer to the same object or function shall
     have compatible type; otherwise, the behavior is undefined.

   Initially only the built-in declarations, which describe compiler
   intrinsic functions plus a subset of the standard library, are in
   this scope.

   The order of the blocks list matters, and it is frequently appended
   to.  To avoid having to walk all the way to the end of the list on
   each insertion, or reverse the list later, we maintain a pointer to
   the last list entry.  (FIXME: It should be feasible to use a reversed
   list here.)

   The bindings list is strictly in reverse order of declarations;
   pop_scope relies on this.  */

struct GTY((chain_next ("%h.outer"))) c_scope {
  /* The scope containing this one.  */
  struct c_scope *outer;

  /* The next outermost function scope.  */
  struct c_scope *outer_function;

  /* All bindings in this scope.  */
  struct c_binding *bindings;

  /* For each scope (except the global one), a chain of BLOCK nodes
     for all the scopes that were entered and exited one level down.  */
  tree blocks;
  /* Tail of the blocks chain; kept so SCOPE_LIST_APPEND is O(1).  */
  tree blocks_last;

  /* The depth of this scope.  Used to keep the ->shadowed chain of
     bindings sorted innermost to outermost.  */
  unsigned int depth : 28;

  /* True if we are currently filling this scope with parameter
     declarations.  */
  BOOL_BITFIELD parm_flag : 1;

  /* True if we saw [*] in this scope.  Used to give an error messages
     if these appears in a function definition.  */
  BOOL_BITFIELD had_vla_unspec : 1;

  /* True if we already complained about forward parameter decls
     in this scope.  This prevents double warnings on
     foo (int a; int b; ...)  */
  BOOL_BITFIELD warned_forward_parm_decls : 1;

  /* True if this is the outermost block scope of a function body.
     This scope contains the parameters, the local variables declared
     in the outermost block, and all the labels (except those in
     nested functions, or declared at block scope with __label__).  */
  BOOL_BITFIELD function_body : 1;

  /* True means make a BLOCK for this scope no matter what.  */
  BOOL_BITFIELD keep : 1;

  /* True means that an unsuffixed float constant is _Decimal64.  */
  BOOL_BITFIELD float_const_decimal64 : 1;

  /* True if this scope has any label bindings.  This is used to speed
     up searching for labels when popping scopes, particularly since
     labels are normally only found at function scope.  */
  BOOL_BITFIELD has_label_bindings : 1;

  /* True if we should issue a warning if a goto statement crosses any
     of the bindings.  We still need to check the list of bindings to
     find the specific ones we need to warn about.  This is true if
     decl_jump_unsafe would return true for any of the bindings.  This
     is used to avoid looping over all the bindings unnecessarily.  */
  BOOL_BITFIELD has_jump_unsafe_decl : 1;
};
/* The scope currently in effect.  */
static GTY(()) struct c_scope *current_scope;

/* The innermost function scope.  Ordinary (not explicitly declared)
   labels, bindings to error_mark_node, and the lazily-created
   bindings of __func__ and its friends get this scope.  */
static GTY(()) struct c_scope *current_function_scope;

/* The C file scope.  This is reset for each input translation unit.  */
static GTY(()) struct c_scope *file_scope;

/* The outermost scope.  This is used for all declarations with
   external linkage, and only these, hence the name.  */
static GTY(()) struct c_scope *external_scope;

/* A chain of c_scope structures awaiting reuse (chained through the
   outer field); GTY((deletable)) lets the GC discard the cache.  */
static GTY((deletable)) struct c_scope *scope_freelist;

/* A chain of c_binding structures awaiting reuse (chained through the
   prev field; see free_binding_and_advance).  */
static GTY((deletable)) struct c_binding *binding_freelist;
/* Append DECL (a BLOCK node) to the chain SCOPE->LIST, using the
   LIST##_last tail pointer so the append is O(1).  */
#define SCOPE_LIST_APPEND(scope, list, decl) do {	\
  struct c_scope *s_ = (scope);				\
  tree d_ = (decl);					\
  if (s_->list##_last)					\
    BLOCK_CHAIN (s_->list##_last) = d_;			\
  else							\
    s_->list = d_;					\
  s_->list##_last = d_;					\
} while (0)
/* Concatenate FROM in scope FSCOPE onto TO in scope TSCOPE, updating
   TO's tail pointer.  FSCOPE's own fields are left as-is; the caller
   (pop_scope) clears the whole structure afterwards.  */
#define SCOPE_LIST_CONCAT(tscope, to, fscope, from) do {	\
  struct c_scope *t_ = (tscope);				\
  struct c_scope *f_ = (fscope);				\
  if (t_->to##_last)						\
    BLOCK_CHAIN (t_->to##_last) = f_->from;			\
  else								\
    t_->to = f_->from;						\
  t_->to##_last = f_->from##_last;				\
} while (0)
/* A c_inline_static structure stores details of a static identifier
   referenced in a definition of a function that may be an inline
   definition if no subsequent declaration of that function uses
   "extern" or does not use "inline".  */
struct GTY((chain_next ("%h.next"))) c_inline_static {
  /* The location for a diagnostic.  */
  location_t location;

  /* The function that may be an inline definition.  */
  tree function;

  /* The object or function referenced.  */
  tree static_decl;

  /* What sort of reference this is.  */
  enum c_inline_static_type type;

  /* The next such structure or NULL.  */
  struct c_inline_static *next;
};

/* List of static identifiers used or referenced in functions that may
   be inline definitions.  Grown by record_inline_static, drained and
   diagnosed by check_inline_statics.  */
static GTY(()) struct c_inline_static *c_inline_statics;
/* True means unconditionally make a BLOCK for the next scope pushed.
   Set by keep_next_level, consumed (and cleared) by push_scope.  */
static bool keep_next_level_flag;

/* True means the next call to push_scope will be the outermost scope
   of a function body, so do not push a new scope, merely cease
   expecting parameter decls.  */
static bool next_is_function_body;

/* A vector of pointers to c_binding structures.  */
typedef struct c_binding *c_binding_ptr;

/* Information that we keep for a struct or union while it is being
   parsed.  All three vectors are only populated under -Wc++-compat.  */
struct c_struct_parse_info
{
  /* If warn_cxx_compat, a list of types defined within this
     struct.  */
  auto_vec<tree> struct_types;
  /* If warn_cxx_compat, a list of field names which have bindings,
     and which are defined in this struct, but which are not defined
     in any enclosing struct.  This is used to clear the in_struct
     field of the c_bindings structure.  */
  auto_vec<c_binding_ptr> fields;
  /* If warn_cxx_compat, a list of typedef names used when defining
     fields in this struct.  */
  auto_vec<tree> typedefs_seen;
};

/* Information for the struct or union currently being parsed, or
   NULL if not parsing a struct or union.  */
static struct c_struct_parse_info *struct_parse_info;
/* Forward declarations; the definitions appear later in this file.  */
static tree lookup_name_in_scope (tree, struct c_scope *);
static tree c_make_fname_decl (location_t, tree, int);
static tree grokdeclarator (const struct c_declarator *,
			    struct c_declspecs *,
			    enum decl_context, bool, tree *, tree *, tree *,
			    bool *, enum deprecated_states);
static tree grokparms (struct c_arg_info *, bool);
static void layout_array_type (tree);
static void warn_defaults_to (location_t, int, const char *, ...)
    ATTRIBUTE_GCC_DIAG(3,4);
/* Append statement T to the current statement-tree and return it.
   This is the C/ObjC version -- C++ has a slightly different version
   of this function.  */

tree
add_stmt (tree t)
{
  enum tree_code code = TREE_CODE (t);

  /* Stamp T with the current source location if it can carry one and
     does not already; labels keep their own location.  */
  if (code != LABEL_EXPR
      && CAN_HAVE_LOCATION_P (t)
      && !EXPR_HAS_LOCATION (t))
    SET_EXPR_LOCATION (t, input_location);

  if (code == CASE_LABEL_EXPR || code == LABEL_EXPR)
    STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1;

  /* Make sure a statement list exists; even statements without side
     effects must be recorded during statement expressions.  */
  if (!building_stmt_list_p ())
    push_stmt_list ();
  append_to_statement_list_force (t, &cur_stmt_list);

  return t;
}
/* Build a pointer type to TO_TYPE, honoring the pointer mode selected
   for the C front end.  */

static tree
c_build_pointer_type (tree to_type)
{
  addr_space_t as;
  machine_mode mode;

  as = (to_type == error_mark_node
	? ADDR_SPACE_GENERIC : TYPE_ADDR_SPACE (to_type));

  /* The user-selected default pointer mode applies only to the
     generic address space; everything else asks the target.  */
  if (as == ADDR_SPACE_GENERIC && c_default_pointer_mode != VOIDmode)
    mode = c_default_pointer_mode;
  else
    mode = targetm.addr_space.pointer_mode (as);

  return build_pointer_type_for_mode (to_type, mode, false);
}
/* Return true if we will want to say something if a goto statement
   crosses DECL.  */

static bool
decl_jump_unsafe (tree decl)
{
  if (error_operand_p (decl))
    return false;

  /* Crossing a variably modified type is always worth a diagnostic.  */
  if ((TREE_CODE (decl) == TYPE_DECL || VAR_P (decl))
      && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
    return true;

  /* Otherwise warn only under -Wjump-misses-init, and only for an
     automatic variable carrying an initializer.  */
  return (warn_jump_misses_init
	  && VAR_P (decl)
	  && !TREE_STATIC (decl)
	  && DECL_INITIAL (decl) != NULL_TREE);
}
/* Dump the bindings (symbol, tag, label, reserved-word id) of
   identifier NODE to FILE at the given INDENT.  */
void
c_print_identifier (FILE *file, tree node, int indent)
{
  /* Temporarily hide any binding oracle.  Without this, calls to
     debug_tree from the debugger will end up calling into the oracle,
     making for a confusing debug session.  As the oracle isn't needed
     here for normal operation, it's simplest to suppress it.  */
  void (*saved_oracle) (enum c_oracle_request, tree identifier)
    = c_binding_oracle;
  c_binding_oracle = NULL;

  print_node (file, "symbol", I_SYMBOL_DECL (node), indent + 4);
  print_node (file, "tag", I_TAG_DECL (node), indent + 4);
  print_node (file, "label", I_LABEL_DECL (node), indent + 4);

  if (C_IS_RESERVED_WORD (node) && C_RID_CODE (node) != RID_CXX_COMPAT_WARN)
    {
      tree rid = ridpointers[C_RID_CODE (node)];
      indent_to (file, indent + 4);
      fprintf (file, "rid " HOST_PTR_PRINTF " \"%s\"",
	       (void *) rid, IDENTIFIER_POINTER (rid));
    }

  c_binding_oracle = saved_oracle;
}
/* Establish a binding between NAME, an IDENTIFIER_NODE, and DECL,
   which may be any of several kinds of DECL or TYPE or error_mark_node,
   in the scope SCOPE.  INVISIBLE, NESTED and LOCUS are simply recorded
   on the binding here; their interpretation lives with the lookup code
   (presumably pushdecl and friends -- not visible in this function).  */
static void
bind (tree name, tree decl, struct c_scope *scope, bool invisible,
      bool nested, location_t locus)
{
  struct c_binding *b, **here;

  /* Take a recycled binding from the freelist if one is available,
     otherwise allocate a fresh one from the GC heap.  */
  if (binding_freelist)
    {
      b = binding_freelist;
      binding_freelist = b->prev;
    }
  else
    b = ggc_alloc<c_binding> ();

  b->shadowed = 0;
  b->decl = decl;
  b->id = name;
  b->depth = scope->depth;
  b->invisible = invisible;
  b->nested = nested;
  b->inner_comp = 0;
  b->in_struct = 0;
  b->locus = locus;

  b->u.type = NULL;

  /* Push onto the front of the scope's binding list (kept in reverse
     declaration order -- pop_scope relies on this).  */
  b->prev = scope->bindings;
  scope->bindings = b;

  /* Flag the scope so later goto checks can skip scopes that hold
     nothing a jump could unsafely cross.  */
  if (decl_jump_unsafe (decl))
    scope->has_jump_unsafe_decl = 1;

  /* Anonymous bindings (e.g. tagless types) belong to no identifier
     chain; we are done.  */
  if (!name)
    return;

  /* Pick the identifier chain matching DECL's namespace: labels,
     tags, or ordinary symbols.  */
  switch (TREE_CODE (decl))
    {
    case LABEL_DECL:     here = &I_LABEL_BINDING (name);   break;
    case ENUMERAL_TYPE:
    case UNION_TYPE:
    case RECORD_TYPE:    here = &I_TAG_BINDING (name);     break;
    case VAR_DECL:
    case FUNCTION_DECL:
    case TYPE_DECL:
    case CONST_DECL:
    case PARM_DECL:
    case ERROR_MARK:     here = &I_SYMBOL_BINDING (name);  break;
    default:
      gcc_unreachable ();
    }

  /* Locate the appropriate place in the chain of shadowed decls
     to insert this binding.  Normally, scope == current_scope and
     this does nothing.  */
  while (*here && (*here)->depth > scope->depth)
    here = &(*here)->shadowed;

  b->shadowed = *here;
  *here = b;
}
/* Clear the binding structure B, stick it on the binding_freelist,
   and return the former value of b->prev.  This is used by pop_scope
   and get_parm_info to iterate destructively over all the bindings
   from a given scope.  */

static struct c_binding *
free_binding_and_advance (struct c_binding *b)
{
  struct c_binding *next = b->prev;

  /* Scrub the record and push it onto the freelist for reuse.  */
  memset (b, 0, sizeof (struct c_binding));
  b->prev = binding_freelist;
  binding_freelist = b;

  return next;
}
/* Bind a label.  Like bind, but skip fields which aren't used for
   labels, and add the LABEL_VARS value.  */
static void
bind_label (tree name, tree label, struct c_scope *scope,
	    struct c_label_vars *label_vars)
{
  struct c_binding *b;

  /* bind pushes the new binding onto the front of SCOPE->bindings;
     we rely on that just below to find it again.  */
  bind (name, label, scope, /*invisible=*/false, /*nested=*/false,
	UNKNOWN_LOCATION);

  /* Lets pop_scope/update_label_decls know this scope has labels.  */
  scope->has_label_bindings = true;

  b = scope->bindings;
  gcc_assert (b->decl == label);
  /* Chain up any label this one shadows (only possible via the
     __label__ extension) and attach the per-label bookkeeping.  */
  label_vars->shadowed = b->u.label;
  b->u.label = label_vars;
}
/* Hook called at end of compilation to assume 1 elt
   for a file-scope tentative array defn that wasn't complete before.  */

void
c_finish_incomplete_decl (tree decl)
{
  if (!VAR_P (decl))
    return;

  /* Only a non-external array whose bound was never supplied gets
     the one-element treatment.  */
  tree type = TREE_TYPE (decl);
  if (type == error_mark_node
      || TREE_CODE (type) != ARRAY_TYPE
      || DECL_EXTERNAL (decl)
      || TYPE_DOMAIN (type) != NULL_TREE)
    return;

  warning_at (DECL_SOURCE_LOCATION (decl),
	      0, "array %q+D assumed to have one element", decl);

  complete_array_type (&TREE_TYPE (decl), NULL_TREE, true);

  relayout_decl (decl);
}
/* Record that inline function FUNC contains a reference (location
   LOC) to static DECL (file-scope or function-local according to
   TYPE).  */

void
record_inline_static (location_t loc, tree func, tree decl,
		      enum c_inline_static_type type)
{
  c_inline_static *entry = ggc_alloc<c_inline_static> ();

  entry->location = loc;
  entry->function = func;
  entry->static_decl = decl;
  entry->type = type;

  /* Push onto the front of the global list; check_inline_statics
     walks and drains it at end of translation unit.  */
  entry->next = c_inline_statics;
  c_inline_statics = entry;
}
/* Check for references to static declarations in inline functions at
the end of the translation unit and diagnose them if the functions
are still inline definitions. */
static void
check_inline_statics (void)
{
struct c_inline_static *csi;
for (csi = c_inline_statics; csi; csi = csi->next)
{
if (DECL_EXTERNAL (csi->function))
switch (csi->type)
{
case csi_internal:
pedwarn (csi->location, 0,
"%qD is static but used in inline function %qD "
"which is not static", csi->static_decl, csi->function);
break;
case csi_modifiable:
pedwarn (csi->location, 0,
"%q+D is static but declared in inline function %qD "
"which is not static", csi->static_decl, csi->function);
break;
default:
gcc_unreachable ();
}
}
c_inline_statics = NULL;
}
/* Fill in a c_spot_bindings structure.  If DEFINING is true, set it
   for the current state, otherwise set it to uninitialized.  */

static void
set_spot_bindings (struct c_spot_bindings *p, bool defining)
{
  if (!defining)
    {
      p->scope = NULL;
      p->bindings_in_scope = NULL;
    }
  else
    {
      /* Snapshot the current scope and its binding list head.  */
      p->scope = current_scope;
      p->bindings_in_scope = current_scope->bindings;
    }
  p->stmt_exprs = 0;
  p->left_stmt_expr = false;
}
/* Update spot bindings P as we pop out of SCOPE.  Return true if we
   should push decls for a label.  */

static bool
update_spot_bindings (struct c_scope *scope, struct c_spot_bindings *p)
{
  /* Nothing to do unless the spot was recorded in exactly the scope
     now being popped; spots in other scopes -- including not-yet-
     defined labels, whose scope is NULL -- are unaffected.  */
  if (p->scope != scope)
    return false;

  /* Retarget the spot at the enclosing scope and whatever bindings it
     currently holds.  */
  p->scope = scope->outer;
  p->bindings_in_scope = p->scope->bindings;

  return true;
}
/* The Objective-C front-end often needs to determine the current scope.
   Returned as void * since struct c_scope is opaque to callers.  */
void *
objc_get_current_scope (void)
{
  return current_scope;
}
/* The following function is used only by Objective-C. It needs to live here
because it accesses the innards of c_scope. */
void
objc_mark_locals_volatile (void *enclosing_blk)
{
struct c_scope *scope;
struct c_binding *b;
for (scope = current_scope;
scope && scope != enclosing_blk;
scope = scope->outer)
{
for (b = scope->bindings; b; b = b->prev)
objc_volatilize_decl (b->decl);
/* Do not climb up past the current function. */
if (scope->function_body)
break;
}
}
/* Return true if we are in the global binding level.  For C, the file
   scope is the global binding level.  */
bool
global_bindings_p (void)
{
  return current_scope == file_scope;
}
/* Force a BLOCK node to be created for the next scope pushed; the
   flag is consumed and cleared by push_scope.  */
void
keep_next_level (void)
{
  keep_next_level_flag = true;
}
/* Set the flag for the FLOAT_CONST_DECIMAL64 pragma being ON: in the
   current scope an unsuffixed float constant is _Decimal64.  */
void
set_float_const_decimal64 (void)
{
  current_scope->float_const_decimal64 = true;
}
/* Clear the flag for the FLOAT_CONST_DECIMAL64 pragma: restore the
   default meaning of an unsuffixed float constant in this scope.  */
void
clear_float_const_decimal64 (void)
{
  current_scope->float_const_decimal64 = false;
}
/* Return nonzero if an unsuffixed float constant is _Decimal64 in the
   current scope (see the FLOAT_CONST_DECIMAL64 pragma above).  */
bool
float_const_decimal64_p (void)
{
  return current_scope->float_const_decimal64;
}
/* Identify this scope as currently being filled with parameters.
   The flag is cleared again by push_scope when the function body
   begins.  */
void
declare_parm_level (void)
{
  current_scope->parm_flag = true;
}
/* Enter a new scope, either by converting the current parameter scope
   into the function-body scope (when next_is_function_body is set) or
   by pushing a fresh c_scope, recycled from scope_freelist when
   possible.  */
void
push_scope (void)
{
  if (next_is_function_body)
    {
      /* This is the transition from the parameters to the top level
	 of the function body.  These are the same scope
	 (C99 6.2.1p4,6) so we do not push another scope structure.
	 next_is_function_body is set only by store_parm_decls, which
	 in turn is called when and only when we are about to
	 encounter the opening curly brace for the function body.

	 The outermost block of a function always gets a BLOCK node,
	 because the debugging output routines expect that each
	 function has at least one BLOCK.  */
      current_scope->parm_flag = false;
      current_scope->function_body = true;
      current_scope->keep = true;
      current_scope->outer_function = current_function_scope;
      current_function_scope = current_scope;

      keep_next_level_flag = false;
      next_is_function_body = false;

      /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes.  */
      if (current_scope->outer)
	current_scope->float_const_decimal64
	  = current_scope->outer->float_const_decimal64;
      else
	current_scope->float_const_decimal64 = false;
    }
  else
    {
      struct c_scope *scope;

      /* Reuse a cached scope structure if one is available, otherwise
	 allocate a zeroed one from the GC heap.  */
      if (scope_freelist)
	{
	  scope = scope_freelist;
	  scope_freelist = scope->outer;
	}
      else
	scope = ggc_cleared_alloc<c_scope> ();

      /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes.  */
      if (current_scope)
	scope->float_const_decimal64 = current_scope->float_const_decimal64;
      else
	scope->float_const_decimal64 = false;

      scope->keep          = keep_next_level_flag;
      scope->outer         = current_scope;
      scope->depth	   = current_scope ? (current_scope->depth + 1) : 0;

      /* Check for scope depth overflow.  Unlikely (2^28 == 268,435,456) but
	 possible.  */
      if (current_scope && scope->depth == 0)
	{
	  /* The decrement wraps the 28-bit bitfield back to its
	     maximum value, which is also the count quoted in the
	     diagnostic below.  */
	  scope->depth--;
	  sorry ("GCC supports only %u nested scopes", scope->depth);
	}

      current_scope        = scope;
      keep_next_level_flag = false;
    }
}
/* This is called when we are leaving SCOPE.  For each label defined
   in SCOPE, add any appropriate decls to its decls_in_scope fields.
   These are the decls whose initialization will be skipped by a goto
   later in the function.  */

static void
update_label_decls (struct c_scope *scope)
{
  struct c_scope *s;

  s = scope;
  while (s != NULL)
    {
      if (s->has_label_bindings)
	{
	  struct c_binding *b;

	  for (b = s->bindings; b != NULL; b = b->prev)
	    {
	      struct c_label_vars *label_vars;
	      struct c_binding *b1;
	      bool hjud;
	      unsigned int ix;
	      struct c_goto_bindings *g;

	      if (TREE_CODE (b->decl) != LABEL_DECL)
		continue;
	      label_vars = b->u.label;

	      /* Snapshot the label's recorded bindings, and whether its
		 defining scope held any jump-unsafe decls, BEFORE
		 update_spot_bindings rewrites them below.  */
	      b1 = label_vars->label_bindings.bindings_in_scope;
	      if (label_vars->label_bindings.scope == NULL)
		hjud = false;
	      else
		hjud = label_vars->label_bindings.scope->has_jump_unsafe_decl;
	      if (update_spot_bindings (scope, &label_vars->label_bindings))
		{
		  /* This label is defined in this scope.  */
		  if (hjud)
		    {
		      for (; b1 != NULL; b1 = b1->prev)
			{
			  /* A goto from later in the function to this
			     label will never see the initialization
			     of B1, if any.  Save it to issue a
			     warning if needed.  */
			  if (decl_jump_unsafe (b1->decl))
			    vec_safe_push(label_vars->decls_in_scope, b1->decl);
			}
		    }
		}

	      /* Update the bindings of any goto statements associated
		 with this label.  */
	      FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
		update_spot_bindings (scope, &g->goto_bindings);
	    }
	}

      /* Don't search beyond the current function.  */
      if (s == current_function_scope)
	break;

      s = s->outer;
    }
}
/* Set the TYPE_CONTEXT of TYPE -- and of every variant in its variant
   chain -- to CONTEXT.  */

static void
set_type_context (tree type, tree context)
{
  tree variant = TYPE_MAIN_VARIANT (type);
  while (variant != NULL_TREE)
    {
      TYPE_CONTEXT (variant) = context;
      variant = TYPE_NEXT_VARIANT (variant);
    }
}
/* Exit a scope.  Restore the state of the identifier-decl mappings
   that were in effect when this scope was entered.  Return a BLOCK
   node containing all the DECLs in this scope that are of interest
   to debug info generation.  */

tree
pop_scope (void)
{
  struct c_scope *scope = current_scope;
  tree block, context, p;
  struct c_binding *b;

  bool functionbody = scope->function_body;
  bool keep = functionbody || scope->keep || scope->bindings;

  /* Record goto/label information for this scope before its bindings
     are destructively freed below.  */
  update_label_decls (scope);

  /* If appropriate, create a BLOCK to record the decls for the life
     of this function.  */
  block = NULL_TREE;
  if (keep)
    {
      block = make_node (BLOCK);
      BLOCK_SUBBLOCKS (block) = scope->blocks;
      TREE_USED (block) = 1;

      /* In each subblock, record that this is its superior.  */
      for (p = scope->blocks; p; p = BLOCK_CHAIN (p))
	BLOCK_SUPERCONTEXT (p) = block;

      BLOCK_VARS (block) = NULL_TREE;
    }

  /* The TYPE_CONTEXTs for all of the tagged types belonging to this
     scope must be set so that they point to the appropriate
     construct, i.e. either to the current FUNCTION_DECL node, or
     else to the BLOCK node we just constructed.

     Note that for tagged types whose scope is just the formal
     parameter list for some function type specification, we can't
     properly set their TYPE_CONTEXTs here, because we don't have a
     pointer to the appropriate FUNCTION_TYPE node readily available
     to us.  For those cases, the TYPE_CONTEXTs of the relevant tagged
     type nodes get set in `grokdeclarator' as soon as we have created
     the FUNCTION_TYPE node which will represent the "scope" for these
     "parameter list local" tagged types.  */
  if (scope->function_body)
    context = current_function_decl;
  else if (scope == file_scope)
    {
      tree file_decl
	= build_translation_unit_decl (get_identifier (main_input_filename));
      context = file_decl;
      debug_hooks->register_main_translation_unit (file_decl);
    }
  else
    context = block;

  /* Clear all bindings in this scope.  The loop frees each binding
     back onto binding_freelist as it advances.  */
  for (b = scope->bindings; b; b = free_binding_and_advance (b))
    {
      p = b->decl;
      switch (TREE_CODE (p))
	{
	case LABEL_DECL:
	  /* Warnings for unused labels, errors for undefined labels.  */
	  if (TREE_USED (p) && !DECL_INITIAL (p))
	    {
	      error ("label %q+D used but not defined", p);
	      DECL_INITIAL (p) = error_mark_node;
	    }
	  else
	    warn_for_unused_label (p);

	  /* Labels go in BLOCK_VARS.  */
	  DECL_CHAIN (p) = BLOCK_VARS (block);
	  BLOCK_VARS (block) = p;
	  gcc_assert (I_LABEL_BINDING (b->id) == b);
	  I_LABEL_BINDING (b->id) = b->shadowed;

	  /* Also pop back to the shadowed label_vars.  */
	  release_tree_vector (b->u.label->decls_in_scope);
	  b->u.label = b->u.label->shadowed;
	  break;

	case ENUMERAL_TYPE:
	case UNION_TYPE:
	case RECORD_TYPE:
	  set_type_context (p, context);

	  /* Types may not have tag-names, in which case the type
	     appears in the bindings list with b->id NULL.  */
	  if (b->id)
	    {
	      gcc_assert (I_TAG_BINDING (b->id) == b);
	      I_TAG_BINDING (b->id) = b->shadowed;
	    }
	  break;

	case FUNCTION_DECL:
	  /* Propagate TREE_ADDRESSABLE from nested functions to their
	     containing functions.  */
	  if (!TREE_ASM_WRITTEN (p)
	      && DECL_INITIAL (p) != NULL_TREE
	      && TREE_ADDRESSABLE (p)
	      && DECL_ABSTRACT_ORIGIN (p) != NULL_TREE
	      && DECL_ABSTRACT_ORIGIN (p) != p)
	    TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (p)) = 1;
	  if (!TREE_PUBLIC (p)
	      && !DECL_INITIAL (p)
	      && !b->nested
	      && scope != file_scope
	      && scope != external_scope)
	    {
	      error ("nested function %q+D declared but never defined", p);
	      undef_nested_function = true;
	    }
	  else if (DECL_DECLARED_INLINE_P (p)
		   && TREE_PUBLIC (p)
		   && !DECL_INITIAL (p))
	    {
	      /* C99 6.7.4p6: "a function with external linkage... declared
		 with an inline function specifier ... shall also be defined
		 in the same translation unit."  */
	      if (!flag_gnu89_inline
		  && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (p))
		  && scope == external_scope)
		pedwarn (input_location, 0,
			 "inline function %q+D declared but never defined", p);
	      DECL_EXTERNAL (p) = 1;
	    }

	  goto common_symbol;

	case VAR_DECL:
	  /* Warnings for unused variables.  */
	  if ((!TREE_USED (p) || !DECL_READ_P (p))
	      && !TREE_NO_WARNING (p)
	      && !DECL_IN_SYSTEM_HEADER (p)
	      && DECL_NAME (p)
	      && !DECL_ARTIFICIAL (p)
	      && scope != file_scope
	      && scope != external_scope)
	    {
	      if (!TREE_USED (p))
		warning (OPT_Wunused_variable, "unused variable %q+D", p);
	      else if (DECL_CONTEXT (p) == current_function_decl)
		warning_at (DECL_SOURCE_LOCATION (p),
			    OPT_Wunused_but_set_variable,
			    "variable %qD set but not used", p);
	    }

	  if (b->inner_comp)
	    {
	      error ("type of array %q+D completed incompatibly with"
		     " implicit initialization", p);
	    }

	  /* Fall through.  */
	case TYPE_DECL:
	case CONST_DECL:
	common_symbol:
	  /* All of these go in BLOCK_VARS, but only if this is the
	     binding in the home scope.  */
	  if (!b->nested)
	    {
	      DECL_CHAIN (p) = BLOCK_VARS (block);
	      BLOCK_VARS (block) = p;
	    }
	  else if (VAR_OR_FUNCTION_DECL_P (p) && scope != file_scope)
	    {
	      /* For block local externs add a special
		 DECL_EXTERNAL decl for debug info generation.  */
	      tree extp = copy_node (p);

	      DECL_EXTERNAL (extp) = 1;
	      TREE_STATIC (extp) = 0;
	      TREE_PUBLIC (extp) = 1;
	      DECL_INITIAL (extp) = NULL_TREE;
	      DECL_LANG_SPECIFIC (extp) = NULL;
	      DECL_CONTEXT (extp) = current_function_decl;
	      if (TREE_CODE (p) == FUNCTION_DECL)
		{
		  DECL_RESULT (extp) = NULL_TREE;
		  DECL_SAVED_TREE (extp) = NULL_TREE;
		  DECL_STRUCT_FUNCTION (extp) = NULL;
		}
	      if (b->locus != UNKNOWN_LOCATION)
		DECL_SOURCE_LOCATION (extp) = b->locus;
	      DECL_CHAIN (extp) = BLOCK_VARS (block);
	      BLOCK_VARS (block) = extp;
	    }
	  /* If this is the file scope set DECL_CONTEXT of each decl to
	     the TRANSLATION_UNIT_DECL.  This makes same_translation_unit_p
	     work.  */
	  if (scope == file_scope)
	    {
	      DECL_CONTEXT (p) = context;
	      if (TREE_CODE (p) == TYPE_DECL
		  && TREE_TYPE (p) != error_mark_node)
		set_type_context (TREE_TYPE (p), context);
	    }

	  gcc_fallthrough ();
	  /* Parameters go in DECL_ARGUMENTS, not BLOCK_VARS, and have
	     already been put there by store_parm_decls.  Unused-
	     parameter warnings are handled by function.c.
	     error_mark_node obviously does not go in BLOCK_VARS and
	     does not get unused-variable warnings.  */
	case PARM_DECL:
	case ERROR_MARK:
	  /* It is possible for a decl not to have a name.  We get
	     here with b->id NULL in this case.  */
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	      if (b->shadowed && b->shadowed->u.type)
		TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Dispose of the block that we just made inside some higher level.  */
  if ((scope->function_body || scope == file_scope) && context)
    {
      DECL_INITIAL (context) = block;
      BLOCK_SUPERCONTEXT (block) = context;
    }
  else if (scope->outer)
    {
      if (block)
	SCOPE_LIST_APPEND (scope->outer, blocks, block);
      /* If we did not make a block for the scope just exited, any
	 blocks made for inner scopes must be carried forward so they
	 will later become subblocks of something else.  */
      else if (scope->blocks)
	SCOPE_LIST_CONCAT (scope->outer, blocks, scope, blocks);
    }

  /* Pop the current scope, and free the structure for reuse.  */
  current_scope = scope->outer;
  if (scope->function_body)
    current_function_scope = scope->outer_function;

  /* Scrub the structure and cache it on scope_freelist.  */
  memset (scope, 0, sizeof (struct c_scope));
  scope->outer = scope_freelist;
  scope_freelist = scope;

  return block;
}
/* Enter the file scope of a translation unit, creating it on first
   use, and make the visible built-in declarations available in it.  */
void
push_file_scope (void)
{
  tree decl;

  /* Idempotent: the file scope persists for the whole unit.  */
  if (file_scope)
    return;

  push_scope ();
  file_scope = current_scope;

  start_fname_decls ();

  /* Builtins are bound with nested=true so the file scope is not
     treated as their home scope (see the !b->nested check in
     pop_scope).  */
  for (decl = visible_builtins; decl; decl = DECL_CHAIN (decl))
    bind (DECL_NAME (decl), decl, file_scope,
	  /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl));
}
/* Leave the file scope at the end of a translation unit: unwind any
   unterminated scopes, finish deferred diagnostics, optionally write
   a PCH, then tear the file scope down.  */
void
pop_file_scope (void)
{
  /* In case there were missing closebraces, get us back to the global
     binding level.  */
  while (current_scope != file_scope)
    pop_scope ();

  /* __FUNCTION__ is defined at file scope ("").  This
     call may not be necessary as my tests indicate it
     still works without it.  */
  finish_fname_decls ();

  check_inline_statics ();

  /* This is the point to write out a PCH if we're doing that.
     In that case we do not want to do anything else.  */
  if (pch_file)
    {
      c_common_write_pch ();
      /* Ensure even the callers don't try to finalize the CU.  */
      flag_syntax_only = 1;
      return;
    }

  /* Pop off the file scope and close this translation unit.  */
  pop_scope ();
  file_scope = 0;

  maybe_apply_pending_pragma_weaks ();
}
/* Adjust the bindings for the start of a statement expression: bump
   the statement-expression depth of every label binding, and of every
   goto recorded against a label, in all enclosing scopes; likewise
   for SWITCH_BINDINGS if given.  */

void
c_bindings_start_stmt_expr (struct c_spot_bindings* switch_bindings)
{
  for (struct c_scope *scope = current_scope;
       scope != NULL; scope = scope->outer)
    {
      if (!scope->has_label_bindings)
	continue;

      for (struct c_binding *b = scope->bindings; b != NULL; b = b->prev)
	{
	  if (TREE_CODE (b->decl) != LABEL_DECL)
	    continue;

	  struct c_label_vars *label_vars = b->u.label;
	  ++label_vars->label_bindings.stmt_exprs;

	  unsigned int ix;
	  struct c_goto_bindings *g;
	  FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
	    ++g->goto_bindings.stmt_exprs;
	}
    }

  if (switch_bindings != NULL)
    ++switch_bindings->stmt_exprs;
}
/* Adjust the bindings for the end of a statement expression: undo the
   increments made by c_bindings_start_stmt_expr.  A counter dropping
   below zero means the spot was recorded inside a statement expression
   that has now been left; note that fact and clamp back to zero.  */

void
c_bindings_end_stmt_expr (struct c_spot_bindings *switch_bindings)
{
  for (struct c_scope *scope = current_scope;
       scope != NULL; scope = scope->outer)
    {
      if (!scope->has_label_bindings)
	continue;

      for (struct c_binding *b = scope->bindings; b != NULL; b = b->prev)
	{
	  if (TREE_CODE (b->decl) != LABEL_DECL)
	    continue;

	  struct c_label_vars *label_vars = b->u.label;
	  if (--label_vars->label_bindings.stmt_exprs < 0)
	    {
	      label_vars->label_bindings.left_stmt_expr = true;
	      label_vars->label_bindings.stmt_exprs = 0;
	    }

	  unsigned int ix;
	  struct c_goto_bindings *g;
	  FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
	    if (--g->goto_bindings.stmt_exprs < 0)
	      {
		g->goto_bindings.left_stmt_expr = true;
		g->goto_bindings.stmt_exprs = 0;
	      }
	}
    }

  if (switch_bindings != NULL)
    {
      /* The switch itself can never outlive its statement expression,
	 so its counter must not go negative.  */
      --switch_bindings->stmt_exprs;
      gcc_assert (switch_bindings->stmt_exprs >= 0);
    }
}
/* Push a definition or a declaration of struct, union or enum tag "name".
   "type" should be the type node.
   We assume that the tag "name" is not already defined, and has a location
   of LOC.

   Note that the definition may really be just a forward reference.
   In that case, the TYPE_SIZE will be zero.  */

static void
pushtag (location_t loc, tree name, tree type)
{
  /* Record the identifier as the type's name if it has none.  */
  if (name && !TYPE_NAME (type))
    TYPE_NAME (type) = name;
  bind (name, type, current_scope, /*invisible=*/false, /*nested=*/false, loc);

  /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the
     tagged type we just added to the current scope.  This fake
     NULL-named TYPE_DECL node helps dwarfout.c to know when it needs
     to output a representation of a tagged type, and it also gives
     us a convenient place to record the "scope start" address for the
     tagged type.  */
  TYPE_STUB_DECL (type) = pushdecl (build_decl (loc,
						TYPE_DECL, NULL_TREE, type));

  /* An approximation for now, so we can tell this is a function-scope tag.
     This will be updated in pop_scope.  */
  TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type));

  /* Under -Wc++-compat, warn when the same identifier already names a
     different type via typedef: C++ would reject that combination.  */
  if (warn_cxx_compat && name != NULL_TREE)
    {
      struct c_binding *b = I_SYMBOL_BINDING (name);

      if (b != NULL
	  && b->decl != NULL_TREE
	  && TREE_CODE (b->decl) == TYPE_DECL
	  && (B_IN_CURRENT_SCOPE (b)
	      || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
	  && (TYPE_MAIN_VARIANT (TREE_TYPE (b->decl))
	      != TYPE_MAIN_VARIANT (type)))
	{
	  if (warning_at (loc, OPT_Wc___compat,
			  ("using %qD as both a typedef and a tag is "
			   "invalid in C++"), b->decl)
	      && b->locus != UNKNOWN_LOCATION)
	    inform (b->locus, "originally defined here");
	}
    }
}
/* An exported interface to pushtag.  This is used by the gdb plugin's
   binding oracle to introduce a new tag binding.  Thin wrapper: all
   the work is done by pushtag.  */

void
c_pushtag (location_t loc, tree name, tree type)
{
  pushtag (loc, name, type);
}
/* An exported interface to bind a declaration.  LOC is the location
   to use.  DECL is the declaration to bind.  The decl's name is used
   to determine how it is bound.  If DECL is a VAR_DECL, then
   IS_GLOBAL determines whether the decl is put into the global (file
   and external) scope or the current function's scope; if DECL is not
   a VAR_DECL then it is always put into the file scope.  */

void
c_bind (location_t loc, tree decl, bool is_global)
{
  bool nested = false;
  struct c_scope *scope;

  if (VAR_P (decl) && current_function_scope != NULL)
    {
      if (is_global)
	{
	  /* Bind it into the external scope as well as the file
	     scope; the file-scope binding is then "nested".  */
	  bind (DECL_NAME (decl), decl, external_scope, true, false, loc);
	  nested = true;
	  TREE_PUBLIC (decl) = 1;
	  DECL_EXTERNAL (decl) = 1;
	  scope = file_scope;
	}
      else
	{
	  /* A function-local variable.  */
	  DECL_CONTEXT (decl) = current_function_decl;
	  TREE_PUBLIC (decl) = 0;
	  scope = current_function_scope;
	}
    }
  else
    {
      /* Types and functions are always considered to be global.  */
      TREE_PUBLIC (decl) = 1;
      DECL_EXTERNAL (decl) = 1;
      scope = file_scope;
    }

  bind (DECL_NAME (decl), decl, scope, false, nested, loc);
}
/* Subroutine of compare_decls.  Allow harmless mismatches in return
   and argument types provided that the type modes match.  This function
   return a unified type given a suitable match, and 0 otherwise.  */
static tree
match_builtin_function_types (tree newtype, tree oldtype)
{
  /* Accept the return type of the new declaration if it has the same
     mode as the old one.  */
  tree oldret = TREE_TYPE (oldtype);
  tree newret = TREE_TYPE (newtype);
  if (TYPE_MODE (oldret) != TYPE_MODE (newret))
    return NULL_TREE;

  /* Walk both argument lists in lockstep; every pair must agree in
     mode and neither list may be shorter than the other.  */
  tree oldargs = TYPE_ARG_TYPES (oldtype);
  tree newargs = TYPE_ARG_TYPES (newtype);
  tree tryargs = newargs;
  for (; oldargs || newargs;
       oldargs = TREE_CHAIN (oldargs), newargs = TREE_CHAIN (newargs))
    {
      if (!oldargs
	  || !newargs
	  || !TREE_VALUE (oldargs)
	  || !TREE_VALUE (newargs)
	  || (TYPE_MODE (TREE_VALUE (oldargs))
	      != TYPE_MODE (TREE_VALUE (newargs))))
	return NULL_TREE;
    }

  tree unified = build_function_type (newret, tryargs);

  /* Allow declaration to change transaction_safe attribute: the
     unified type carries the old attributes, adjusted to the new
     declaration's transaction_safe state.  */
  tree old_attrs = TYPE_ATTRIBUTES (oldtype);
  tree old_tsafe = lookup_attribute ("transaction_safe", old_attrs);
  tree new_attrs = TYPE_ATTRIBUTES (newtype);
  tree new_tsafe = lookup_attribute ("transaction_safe", new_attrs);
  if (old_tsafe && !new_tsafe)
    old_attrs = remove_attribute ("transaction_safe", old_attrs);
  else if (new_tsafe && !old_tsafe)
    old_attrs = tree_cons (get_identifier ("transaction_safe"),
			   NULL_TREE, old_attrs);

  return build_type_attribute_variant (unified, old_attrs);
}
/* Subroutine of diagnose_mismatched_decls.  Check for function type
   mismatch involving an empty arglist vs a nonempty one and give clearer
   diagnostics.  */
static void
diagnose_arglist_conflict (tree newdecl, tree olddecl,
			   tree newtype, tree oldtype)
{
  /* Only relevant for functions whose return types agree.  */
  if (TREE_CODE (olddecl) != FUNCTION_DECL
      || !comptypes (TREE_TYPE (oldtype), TREE_TYPE (newtype)))
    return;

  /* One of the two must be an unprototyped non-definition, i.e. an
     empty parameter name list declaration.  */
  bool old_empty_list
    = !prototype_p (oldtype) && DECL_INITIAL (olddecl) == NULL_TREE;
  bool new_empty_list
    = !prototype_p (newtype) && DECL_INITIAL (newdecl) == NULL_TREE;
  if (!old_empty_list && !new_empty_list)
    return;

  /* Walk whichever side actually has an argument list.  */
  tree args = TYPE_ARG_TYPES (oldtype);
  if (args == NULL_TREE)
    args = TYPE_ARG_TYPES (newtype);
  for (; args; args = TREE_CHAIN (args))
    {
      tree argtype = TREE_VALUE (args);
      /* A list not terminated by void is a varargs prototype; that
	 can never match an empty parameter name list.  */
      if (TREE_CHAIN (args) == NULL_TREE
	  && TYPE_MAIN_VARIANT (argtype) != void_type_node)
	{
	  inform (input_location, "a parameter list with an ellipsis can%'t match "
		  "an empty parameter name list declaration");
	  return;
	}
      /* Nor can an argument subject to default promotion.  */
      if (c_type_promotes_to (argtype) != argtype)
	{
	  inform (input_location, "an argument type that has a default promotion can%'t match "
		  "an empty parameter name list declaration");
	  return;
	}
    }
}
/* Another subroutine of diagnose_mismatched_decls.  OLDDECL is an
   old-style function definition, NEWDECL is a prototype declaration.
   Diagnose inconsistencies in the argument list.  Returns TRUE if
   the prototype is compatible, FALSE if not.  */
static bool
validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype)
{
  tree oldargs = TYPE_ACTUAL_ARG_TYPES (oldtype);
  tree newargs = TYPE_ARG_TYPES (newtype);

  for (int argnum = 1; ; oldargs = TREE_CHAIN (oldargs),
			 newargs = TREE_CHAIN (newargs), argnum++)
    {
      tree oldargtype = TREE_VALUE (oldargs);
      tree newargtype = TREE_VALUE (newargs);
      if (oldargtype == error_mark_node || newargtype == error_mark_node)
	return false;

      /* Compare main variants, but preserve the _Atomic qualifier
	 since it is significant for compatibility.  */
      if (TYPE_ATOMIC (oldargtype))
	oldargtype = c_build_qualified_type (TYPE_MAIN_VARIANT (oldargtype),
					     TYPE_QUAL_ATOMIC);
      else
	oldargtype = TYPE_MAIN_VARIANT (oldargtype);
      if (TYPE_ATOMIC (newargtype))
	newargtype = c_build_qualified_type (TYPE_MAIN_VARIANT (newargtype),
					     TYPE_QUAL_ATOMIC);
      else
	newargtype = TYPE_MAIN_VARIANT (newargtype);

      /* Both argument lists are terminated by a trailing void.  */
      bool old_at_end = (oldargtype == void_type_node);
      bool new_at_end = (newargtype == void_type_node);

      if (old_at_end && new_at_end)
	break;

      /* Reaching the end of just one list means the two decls don't
	 agree on the number of arguments.  */
      if (old_at_end)
	{
	  error ("prototype for %q+D declares more arguments "
		 "than previous old-style definition", newdecl);
	  return false;
	}
      if (new_at_end)
	{
	  error ("prototype for %q+D declares fewer arguments "
		 "than previous old-style definition", newdecl);
	  return false;
	}

      /* Type for passing arg must be consistent with that declared
	 for the arg.  */
      if (!comptypes (oldargtype, newargtype))
	{
	  error ("prototype for %q+D declares argument %d"
		 " with incompatible type",
		 newdecl, argnum);
	  return false;
	}
    }

  /* If we get here, no errors were found, but do issue a warning
     for this poor-style construct.  */
  warning (0, "prototype for %q+D follows non-prototype definition",
	   newdecl);
  return true;
}
/* Subroutine of diagnose_mismatched_decls.  Report the location of DECL,
   the first in a pair of mismatched declarations.  */
static void
locate_old_decl (tree decl)
{
  /* Say nothing for an undeclared builtin: there is no user-visible
     previous declaration to point at.  */
  if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl)
      && !C_DECL_DECLARED_BUILTIN (decl))
    return;

  if (DECL_INITIAL (decl))
    inform (input_location, "previous definition of %q+D was here", decl);
  else if (C_DECL_IMPLICIT (decl))
    inform (input_location, "previous implicit declaration of %q+D was here", decl);
  else
    inform (input_location, "previous declaration of %q+D was here", decl);
}
/* Subroutine of duplicate_decls.  Compare NEWDECL to OLDDECL.
   Returns true if the caller should proceed to merge the two, false
   if OLDDECL should simply be discarded.  As a side effect, issues
   all necessary diagnostics for invalid or poor-style combinations.
   If it returns true, writes the types of NEWDECL and OLDDECL to
   *NEWTYPEP and *OLDTYPEP - these may have been adjusted from
   TREE_TYPE (NEWDECL, OLDDECL) respectively.

   The checks run in a fixed cascade: error-node bailouts, symbol-kind
   mismatch, enumerator redeclaration, type compatibility (with several
   builtin/implicit-int escape hatches), then per-kind checks for
   TYPE_DECL, FUNCTION_DECL and VAR_DECL, and finally purely stylistic
   warnings.  Each hard error returns false early.  */
static bool
diagnose_mismatched_decls (tree newdecl, tree olddecl,
			   tree *newtypep, tree *oldtypep)
{
  tree newtype, oldtype;
  bool pedwarned = false;
  bool warned = false;
  bool retval = true;
  /* Shorthand: a gnu89-style "extern inline" declaration.  */
#define DECL_EXTERN_INLINE(DECL) (DECL_DECLARED_INLINE_P (DECL)  \
				  && DECL_EXTERNAL (DECL))
  /* If we have error_mark_node for either decl or type, just discard
     the previous decl - we're in an error cascade already.  */
  if (olddecl == error_mark_node || newdecl == error_mark_node)
    return false;
  *oldtypep = oldtype = TREE_TYPE (olddecl);
  *newtypep = newtype = TREE_TYPE (newdecl);
  if (oldtype == error_mark_node || newtype == error_mark_node)
    return false;
  /* Two different categories of symbol altogether.  This is an error
     unless OLDDECL is a builtin.  OLDDECL will be discarded in any case.  */
  if (TREE_CODE (olddecl) != TREE_CODE (newdecl))
    {
      if (!(TREE_CODE (olddecl) == FUNCTION_DECL
	    && DECL_BUILT_IN (olddecl)
	    && !C_DECL_DECLARED_BUILTIN (olddecl)))
	{
	  error ("%q+D redeclared as different kind of symbol", newdecl);
	  locate_old_decl (olddecl);
	}
      else if (TREE_PUBLIC (newdecl))
	warning (OPT_Wbuiltin_declaration_mismatch,
		 "built-in function %q+D declared as non-function",
		 newdecl);
      else
	warning (OPT_Wshadow, "declaration of %q+D shadows "
		 "a built-in function", newdecl);
      return false;
    }
  /* Enumerators have no linkage, so may only be declared once in a
     given scope.  */
  if (TREE_CODE (olddecl) == CONST_DECL)
    {
      error ("redeclaration of enumerator %q+D", newdecl);
      locate_old_decl (olddecl);
      return false;
    }
  /* Incompatible types.  A handful of special cases below rescue the
     declaration; otherwise it is a hard error.  */
  if (!comptypes (oldtype, newtype))
    {
      if (TREE_CODE (olddecl) == FUNCTION_DECL
	  && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl))
	{
	  /* Accept harmless mismatch in function types.
	     This is for the ffs and fprintf builtins.  */
	  tree trytype = match_builtin_function_types (newtype, oldtype);
	  if (trytype && comptypes (newtype, trytype))
	    *oldtypep = oldtype = trytype;
	  else
	    {
	      /* If types don't match for a built-in, throw away the
		 built-in.  No point in calling locate_old_decl here, it
		 won't print anything.  */
	      warning (OPT_Wbuiltin_declaration_mismatch,
		       "conflicting types for built-in function %q+D",
		       newdecl);
	      return false;
	    }
	}
      else if (TREE_CODE (olddecl) == FUNCTION_DECL
	       && DECL_IS_BUILTIN (olddecl))
	{
	  /* A conflicting function declaration for a predeclared
	     function that isn't actually built in.  Objective C uses
	     these.  The new declaration silently overrides everything
	     but the volatility (i.e. noreturn) indication.  See also
	     below.  FIXME: Make Objective C use normal builtins.  */
	  TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
	  return false;
	}
      /* Permit void foo (...) to match int foo (...) if the latter is
	 the definition and implicit int was used.  See
	 c-torture/compile/920625-2.c.  */
      else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl)
	       && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == void_type_node
	       && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == integer_type_node
	       && C_FUNCTION_IMPLICIT_INT (newdecl) && !DECL_INITIAL (olddecl))
	{
	  pedwarned = pedwarn (input_location, 0,
			       "conflicting types for %q+D", newdecl);
	  /* Make sure we keep void as the return type.  */
	  TREE_TYPE (newdecl) = *newtypep = newtype = oldtype;
	  C_FUNCTION_IMPLICIT_INT (newdecl) = 0;
	}
      /* Permit void foo (...) to match an earlier call to foo (...) with
	 no declared type (thus, implicitly int).  */
      else if (TREE_CODE (newdecl) == FUNCTION_DECL
	       && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == void_type_node
	       && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == integer_type_node
	       && C_DECL_IMPLICIT (olddecl) && !DECL_INITIAL (olddecl))
	{
	  pedwarned = pedwarn (input_location, 0,
			       "conflicting types for %q+D", newdecl);
	  /* Make sure we keep void as the return type.  */
	  TREE_TYPE (olddecl) = *oldtypep = oldtype = newtype;
	}
      else
	{
	  /* No escape hatch applied: report the conflict as precisely
	     as possible (address space vs. qualifiers vs. full type).  */
	  int new_quals = TYPE_QUALS (newtype);
	  int old_quals = TYPE_QUALS (oldtype);
	  if (new_quals != old_quals)
	    {
	      addr_space_t new_addr = DECODE_QUAL_ADDR_SPACE (new_quals);
	      addr_space_t old_addr = DECODE_QUAL_ADDR_SPACE (old_quals);
	      if (new_addr != old_addr)
		{
		  if (ADDR_SPACE_GENERIC_P (new_addr))
		    error ("conflicting named address spaces (generic vs %s) "
			   "for %q+D",
			   c_addr_space_name (old_addr), newdecl);
		  else if (ADDR_SPACE_GENERIC_P (old_addr))
		    error ("conflicting named address spaces (%s vs generic) "
			   "for %q+D",
			   c_addr_space_name (new_addr), newdecl);
		  else
		    error ("conflicting named address spaces (%s vs %s) "
			   "for %q+D",
			   c_addr_space_name (new_addr),
			   c_addr_space_name (old_addr),
			   newdecl);
		}
	      if (CLEAR_QUAL_ADDR_SPACE (new_quals)
		  != CLEAR_QUAL_ADDR_SPACE (old_quals))
		error ("conflicting type qualifiers for %q+D", newdecl);
	    }
	  else
	    error ("conflicting types for %q+D", newdecl);
	  diagnose_arglist_conflict (newdecl, olddecl, newtype, oldtype);
	  locate_old_decl (olddecl);
	  return false;
	}
    }
  /* Redeclaration of a type is a constraint violation (6.7.2.3p1),
     but silently ignore the redeclaration if either is in a system
     header.  (Conflicting redeclarations were handled above.)  This
     is allowed for C11 if the types are the same, not just
     compatible.  */
  if (TREE_CODE (newdecl) == TYPE_DECL)
    {
      bool types_different = false;
      int comptypes_result;
      comptypes_result
	= comptypes_check_different_types (oldtype, newtype, &types_different);
      if (comptypes_result != 1 || types_different)
	{
	  error ("redefinition of typedef %q+D with different type", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}
      if (DECL_IN_SYSTEM_HEADER (newdecl)
	  || DECL_IN_SYSTEM_HEADER (olddecl)
	  || TREE_NO_WARNING (newdecl)
	  || TREE_NO_WARNING (olddecl))
	return true;  /* Allow OLDDECL to continue in use.  */
      if (variably_modified_type_p (newtype, NULL))
	{
	  error ("redefinition of typedef %q+D with variably modified type",
		 newdecl);
	  locate_old_decl (olddecl);
	}
      else if (pedwarn_c99 (input_location, OPT_Wpedantic,
			    "redefinition of typedef %q+D", newdecl))
	locate_old_decl (olddecl);
      return true;
    }
  /* Function declarations can either be 'static' or 'extern' (no
     qualifier is equivalent to 'extern' - C99 6.2.2p5) and therefore
     can never conflict with each other on account of linkage
     (6.2.2p4).  Multiple definitions are not allowed (6.9p3,5) but
     gnu89 mode permits two definitions if one is 'extern inline' and
     one is not.  The non- extern-inline definition supersedes the
     extern-inline definition.  */
  else if (TREE_CODE (newdecl) == FUNCTION_DECL)
    {
      /* If you declare a built-in function name as static, or
	 define the built-in with an old-style definition (so we
	 can't validate the argument list) the built-in definition is
	 overridden, but optionally warn this was a bad choice of name.  */
      if (DECL_BUILT_IN (olddecl)
	  && !C_DECL_DECLARED_BUILTIN (olddecl)
	  && (!TREE_PUBLIC (newdecl)
	      || (DECL_INITIAL (newdecl)
		  && !prototype_p (TREE_TYPE (newdecl)))))
	{
	  warning (OPT_Wshadow, "declaration of %q+D shadows "
		   "a built-in function", newdecl);
	  /* Discard the old built-in function.  */
	  return false;
	}
      if (DECL_INITIAL (newdecl))
	{
	  if (DECL_INITIAL (olddecl))
	    {
	      /* If both decls are in the same TU and the new declaration
		 isn't overriding an extern inline reject the new decl.
		 In c99, no overriding is allowed in the same translation
		 unit.  */
	      if ((!DECL_EXTERN_INLINE (olddecl)
		   || DECL_EXTERN_INLINE (newdecl)
		   || (!flag_gnu89_inline
		       && (!DECL_DECLARED_INLINE_P (olddecl)
			   || !lookup_attribute ("gnu_inline",
						 DECL_ATTRIBUTES (olddecl)))
		       && (!DECL_DECLARED_INLINE_P (newdecl)
			   || !lookup_attribute ("gnu_inline",
						 DECL_ATTRIBUTES (newdecl))))
		  )
		  && same_translation_unit_p (newdecl, olddecl))
		{
		  error ("redefinition of %q+D", newdecl);
		  locate_old_decl (olddecl);
		  return false;
		}
	    }
	}
      /* If we have a prototype after an old-style function definition,
	 the argument types must be checked specially.  */
      else if (DECL_INITIAL (olddecl)
	       && !prototype_p (oldtype) && prototype_p (newtype)
	       && TYPE_ACTUAL_ARG_TYPES (oldtype)
	       && !validate_proto_after_old_defn (newdecl, newtype, oldtype))
	{
	  locate_old_decl (olddecl);
	  return false;
	}
      /* A non-static declaration (even an "extern") followed by a
	 static declaration is undefined behavior per C99 6.2.2p3-5,7.
	 The same is true for a static forward declaration at block
	 scope followed by a non-static declaration/definition at file
	 scope.  Static followed by non-static at the same scope is
	 not undefined behavior, and is the most convenient way to get
	 some effects (see e.g.  what unwind-dw2-fde-glibc.c does to
	 the definition of _Unwind_Find_FDE in unwind-dw2-fde.c), but
	 we do diagnose it if -Wtraditional.  */
      if (TREE_PUBLIC (olddecl) && !TREE_PUBLIC (newdecl))
	{
	  /* Two exceptions to the rule.  If olddecl is an extern
	     inline, or a predeclared function that isn't actually
	     built in, newdecl silently overrides olddecl.  The latter
	     occur only in Objective C; see also above.  (FIXME: Make
	     Objective C use normal builtins.)  */
	  if (!DECL_IS_BUILTIN (olddecl)
	      && !DECL_EXTERN_INLINE (olddecl))
	    {
	      error ("static declaration of %q+D follows "
		     "non-static declaration", newdecl);
	      locate_old_decl (olddecl);
	    }
	  return false;
	}
      else if (TREE_PUBLIC (newdecl) && !TREE_PUBLIC (olddecl))
	{
	  if (DECL_CONTEXT (olddecl))
	    {
	      error ("non-static declaration of %q+D follows "
		     "static declaration", newdecl);
	      locate_old_decl (olddecl);
	      return false;
	    }
	  else if (warn_traditional)
	    {
	      warned |= warning (OPT_Wtraditional,
				 "non-static declaration of %q+D "
				 "follows static declaration", newdecl);
	    }
	}
      /* Make sure gnu_inline attribute is either not present, or
	 present on all inline decls.  */
      if (DECL_DECLARED_INLINE_P (olddecl)
	  && DECL_DECLARED_INLINE_P (newdecl))
	{
	  bool newa = lookup_attribute ("gnu_inline",
					DECL_ATTRIBUTES (newdecl)) != NULL;
	  bool olda = lookup_attribute ("gnu_inline",
					DECL_ATTRIBUTES (olddecl)) != NULL;
	  if (newa != olda)
	    {
	      error_at (input_location, "%<gnu_inline%> attribute present on %q+D",
			newa ? newdecl : olddecl);
	      error_at (DECL_SOURCE_LOCATION (newa ? olddecl : newdecl),
			"but not here");
	    }
	}
    }
  else if (VAR_P (newdecl))
    {
      /* Only variables can be thread-local, and all declarations must
	 agree on this property.  */
      if (C_DECL_THREADPRIVATE_P (olddecl) && !DECL_THREAD_LOCAL_P (newdecl))
	{
	  /* Nothing to check.  Since OLDDECL is marked threadprivate
	     and NEWDECL does not have a thread-local attribute, we
	     will merge the threadprivate attribute into NEWDECL.  */
	  ;
	}
      else if (DECL_THREAD_LOCAL_P (newdecl) != DECL_THREAD_LOCAL_P (olddecl))
	{
	  if (DECL_THREAD_LOCAL_P (newdecl))
	    error ("thread-local declaration of %q+D follows "
		   "non-thread-local declaration", newdecl);
	  else
	    error ("non-thread-local declaration of %q+D follows "
		   "thread-local declaration", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}
      /* Multiple initialized definitions are not allowed (6.9p3,5).  */
      if (DECL_INITIAL (newdecl) && DECL_INITIAL (olddecl))
	{
	  error ("redefinition of %q+D", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}
      /* Objects declared at file scope: if the first declaration had
	 external linkage (even if it was an external reference) the
	 second must have external linkage as well, or the behavior is
	 undefined.  If the first declaration had internal linkage, then
	 the second must too, or else be an external reference (in which
	 case the composite declaration still has internal linkage).
	 As for function declarations, we warn about the static-then-
	 extern case only for -Wtraditional.  See generally 6.2.2p3-5,7.  */
      if (DECL_FILE_SCOPE_P (newdecl)
	  && TREE_PUBLIC (newdecl) != TREE_PUBLIC (olddecl))
	{
	  if (DECL_EXTERNAL (newdecl))
	    {
	      if (!DECL_FILE_SCOPE_P (olddecl))
		{
		  error ("extern declaration of %q+D follows "
			 "declaration with no linkage", newdecl);
		  locate_old_decl (olddecl);
		  return false;
		}
	      else if (warn_traditional)
		{
		  warned |= warning (OPT_Wtraditional,
				     "non-static declaration of %q+D "
				     "follows static declaration", newdecl);
		}
	    }
	  else
	    {
	      if (TREE_PUBLIC (newdecl))
		error ("non-static declaration of %q+D follows "
		       "static declaration", newdecl);
	      else
		error ("static declaration of %q+D follows "
		       "non-static declaration", newdecl);
	      locate_old_decl (olddecl);
	      return false;
	    }
	}
      /* Two objects with the same name declared at the same block
	 scope must both be external references (6.7p3).  */
      else if (!DECL_FILE_SCOPE_P (newdecl))
	{
	  if (DECL_EXTERNAL (newdecl))
	    {
	      /* Extern with initializer at block scope, which will
		 already have received an error.  */
	    }
	  else if (DECL_EXTERNAL (olddecl))
	    {
	      error ("declaration of %q+D with no linkage follows "
		     "extern declaration", newdecl);
	      locate_old_decl (olddecl);
	    }
	  else
	    {
	      error ("redeclaration of %q+D with no linkage", newdecl);
	      locate_old_decl (olddecl);
	    }
	  return false;
	}
      /* C++ does not permit a decl to appear multiple times at file
	 scope.  */
      if (warn_cxx_compat
	  && DECL_FILE_SCOPE_P (newdecl)
	  && !DECL_EXTERNAL (newdecl)
	  && !DECL_EXTERNAL (olddecl))
	warned |= warning_at (DECL_SOURCE_LOCATION (newdecl),
			      OPT_Wc___compat,
			      ("duplicate declaration of %qD is "
			       "invalid in C++"),
			      newdecl);
    }
  /* From here on, only stylistic warnings; the merge will proceed.  */
  /* All decls must agree on a visibility.  */
  if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS)
      && DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY_SPECIFIED (olddecl)
      && DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl))
    {
      warned |= warning (0, "redeclaration of %q+D with different visibility "
			 "(old visibility preserved)", newdecl);
    }
  if (TREE_CODE (newdecl) == FUNCTION_DECL)
    warned |= diagnose_mismatched_attributes (olddecl, newdecl);
  else /* PARM_DECL, VAR_DECL */
    {
      /* Redeclaration of a parameter is a constraint violation (this is
	 not explicitly stated, but follows from C99 6.7p3 [no more than
	 one declaration of the same identifier with no linkage in the
	 same scope, except type tags] and 6.2.2p6 [parameters have no
	 linkage]).  We must check for a forward parameter declaration,
	 indicated by TREE_ASM_WRITTEN on the old declaration - this is
	 an extension, the mandatory diagnostic for which is handled by
	 mark_forward_parm_decls.  */
      if (TREE_CODE (newdecl) == PARM_DECL
	  && (!TREE_ASM_WRITTEN (olddecl) || TREE_ASM_WRITTEN (newdecl)))
	{
	  error ("redefinition of parameter %q+D", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}
    }
  /* Optional warning for completely redundant decls.  */
  if (!warned && !pedwarned
      && warn_redundant_decls
      /* Don't warn about a function declaration followed by a
	 definition.  */
      && !(TREE_CODE (newdecl) == FUNCTION_DECL
	   && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl))
      /* Don't warn about redundant redeclarations of builtins.  */
      && !(TREE_CODE (newdecl) == FUNCTION_DECL
	   && !DECL_BUILT_IN (newdecl)
	   && DECL_BUILT_IN (olddecl)
	   && !C_DECL_DECLARED_BUILTIN (olddecl))
      /* Don't warn about an extern followed by a definition.  */
      && !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl))
      /* Don't warn about forward parameter decls.  */
      && !(TREE_CODE (newdecl) == PARM_DECL
	   && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
      /* Don't warn about a variable definition following a declaration.  */
      && !(VAR_P (newdecl)
	   && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl)))
    {
      warned = warning (OPT_Wredundant_decls, "redundant redeclaration of %q+D",
			newdecl);
    }
  /* Report location of previous decl/defn.  */
  if (warned || pedwarned)
    locate_old_decl (olddecl);
#undef DECL_EXTERN_INLINE
  return retval;
}
/* Subroutine of duplicate_decls. NEWDECL has been found to be
consistent with OLDDECL, but carries new information. Merge the
new information into OLDDECL. This function issues no
diagnostics. */
static void
merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype)
{
bool new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL
&& DECL_INITIAL (newdecl) != NULL_TREE);
bool new_is_prototype = (TREE_CODE (newdecl) == FUNCTION_DECL
&& prototype_p (TREE_TYPE (newdecl)));
bool old_is_prototype = (TREE_CODE (olddecl) == FUNCTION_DECL
&& prototype_p (TREE_TYPE (olddecl)));
/* For real parm decl following a forward decl, rechain the old decl
in its new location and clear TREE_ASM_WRITTEN (it's not a
forward decl anymore). */
if (TREE_CODE (newdecl) == PARM_DECL
&& TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
{
struct c_binding *b, **here;
for (here = ¤t_scope->bindings; *here; here = &(*here)->prev)
if ((*here)->decl == olddecl)
goto found;
gcc_unreachable ();
found:
b = *here;
*here = b->prev;
b->prev = current_scope->bindings;
current_scope->bindings = b;
TREE_ASM_WRITTEN (olddecl) = 0;
}
DECL_ATTRIBUTES (newdecl)
= targetm.merge_decl_attributes (olddecl, newdecl);
/* For typedefs use the old type, as the new type's DECL_NAME points
at newdecl, which will be ggc_freed. */
if (TREE_CODE (newdecl) == TYPE_DECL)
{
/* But NEWTYPE might have an attribute, honor that. */
tree tem = newtype;
newtype = oldtype;
if (TYPE_USER_ALIGN (tem))
{
if (TYPE_ALIGN (tem) > TYPE_ALIGN (newtype))
SET_TYPE_ALIGN (newtype, TYPE_ALIGN (tem));
TYPE_USER_ALIGN (newtype) = true;
}
/* And remove the new type from the variants list. */
if (TYPE_NAME (TREE_TYPE (newdecl)) == newdecl)
{
tree remove = TREE_TYPE (newdecl);
for (tree t = TYPE_MAIN_VARIANT (remove); ;
t = TYPE_NEXT_VARIANT (t))
if (TYPE_NEXT_VARIANT (t) == remove)
{
TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (remove);
break;
}
}
}
/* Merge the data types specified in the two decls. */
TREE_TYPE (newdecl)
= TREE_TYPE (olddecl)
= composite_type (newtype, oldtype);
/* Lay the type out, unless already done. */
if (!comptypes (oldtype, TREE_TYPE (newdecl)))
{
if (TREE_TYPE (newdecl) != error_mark_node)
layout_type (TREE_TYPE (newdecl));
if (TREE_CODE (newdecl) != FUNCTION_DECL
&& TREE_CODE (newdecl) != TYPE_DECL
&& TREE_CODE (newdecl) != CONST_DECL)
layout_decl (newdecl, 0);
}
else
{
/* Since the type is OLDDECL's, make OLDDECL's size go with. */
DECL_SIZE (newdecl) = DECL_SIZE (olddecl);
DECL_SIZE_UNIT (newdecl) = DECL_SIZE_UNIT (olddecl);
SET_DECL_MODE (newdecl, DECL_MODE (olddecl));
if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl))
{
SET_DECL_ALIGN (newdecl, DECL_ALIGN (olddecl));
DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl);
}
if (DECL_WARN_IF_NOT_ALIGN (olddecl)
> DECL_WARN_IF_NOT_ALIGN (newdecl))
SET_DECL_WARN_IF_NOT_ALIGN (newdecl,
DECL_WARN_IF_NOT_ALIGN (olddecl));
}
/* Keep the old rtl since we can safely use it. */
if (HAS_RTL_P (olddecl))
COPY_DECL_RTL (olddecl, newdecl);
/* Merge the type qualifiers. */
if (TREE_READONLY (newdecl))
TREE_READONLY (olddecl) = 1;
if (TREE_THIS_VOLATILE (newdecl))
TREE_THIS_VOLATILE (olddecl) = 1;
/* Merge deprecatedness. */
if (TREE_DEPRECATED (newdecl))
TREE_DEPRECATED (olddecl) = 1;
/* If a decl is in a system header and the other isn't, keep the one on the
system header. Otherwise, keep source location of definition rather than
declaration and of prototype rather than non-prototype unless that
prototype is built-in. */
if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)
&& DECL_IN_SYSTEM_HEADER (olddecl)
&& !DECL_IN_SYSTEM_HEADER (newdecl) )
DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl);
else if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)
&& DECL_IN_SYSTEM_HEADER (newdecl)
&& !DECL_IN_SYSTEM_HEADER (olddecl))
DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (newdecl);
else if ((DECL_INITIAL (newdecl) == NULL_TREE
&& DECL_INITIAL (olddecl) != NULL_TREE)
|| (old_is_prototype && !new_is_prototype
&& !C_DECL_BUILTIN_PROTOTYPE (olddecl)))
DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl);
/* Merge the initialization information. */
if (DECL_INITIAL (newdecl) == NULL_TREE)
DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
/* Merge the threadprivate attribute. */
if (VAR_P (olddecl) && C_DECL_THREADPRIVATE_P (olddecl))
C_DECL_THREADPRIVATE_P (newdecl) = 1;
if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS))
{
/* Copy the assembler name.
Currently, it can only be defined in the prototype. */
COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl);
/* Use visibility of whichever declaration had it specified */
if (DECL_VISIBILITY_SPECIFIED (olddecl))
{
DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl);
DECL_VISIBILITY_SPECIFIED (newdecl) = 1;
}
if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl);
DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl);
DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl);
DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl)
|= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl);
TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl);
DECL_IS_OPERATOR_NEW (newdecl) |= DECL_IS_OPERATOR_NEW (olddecl);
TREE_READONLY (newdecl) |= TREE_READONLY (olddecl);
DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl);
DECL_IS_NOVOPS (newdecl) |= DECL_IS_NOVOPS (olddecl);
}
/* Merge the storage class information. */
merge_weak (newdecl, olddecl);
/* For functions, static overrides non-static. */
if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl);
/* This is since we don't automatically
copy the attributes of NEWDECL into OLDDECL. */
TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
/* If this clears `static', clear it in the identifier too. */
if (!TREE_PUBLIC (olddecl))
TREE_PUBLIC (DECL_NAME (olddecl)) = 0;
}
}
/* In c99, 'extern' declaration before (or after) 'inline' means this
function is not DECL_EXTERNAL, unless 'gnu_inline' attribute
is present. */
if (TREE_CODE (newdecl) == FUNCTION_DECL
&& !flag_gnu89_inline
&& (DECL_DECLARED_INLINE_P (newdecl)
|| DECL_DECLARED_INLINE_P (olddecl))
&& (!DECL_DECLARED_INLINE_P (newdecl)
|| !DECL_DECLARED_INLINE_P (olddecl)
|| !DECL_EXTERNAL (olddecl))
&& DECL_EXTERNAL (newdecl)
&& !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl))
&& !current_function_decl)
DECL_EXTERNAL (newdecl) = 0;
/* An inline definition following a static declaration is not
DECL_EXTERNAL. */
if (new_is_definition
&& (DECL_DECLARED_INLINE_P (newdecl)
|| DECL_DECLARED_INLINE_P (olddecl))
&& !TREE_PUBLIC (olddecl))
DECL_EXTERNAL (newdecl) = 0;
if (DECL_EXTERNAL (newdecl))
{
TREE_STATIC (newdecl) = TREE_STATIC (olddecl);
DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl);
/* An extern decl does not override previous storage class. */
TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl);
if (!DECL_EXTERNAL (newdecl))
{
DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl);
DECL_COMMON (newdecl) = DECL_COMMON (olddecl);
}
}
else
{
TREE_STATIC (olddecl) = TREE_STATIC (newdecl);
TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
}
if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
/* If we're redefining a function previously defined as extern
inline, make sure we emit debug info for the inline before we
throw it away, in case it was inlined into a function that
hasn't been written out yet. */
if (new_is_definition && DECL_INITIAL (olddecl))
/* The new defn must not be inline. */
DECL_UNINLINABLE (newdecl) = 1;
else
{
/* If either decl says `inline', this fn is inline, unless
its definition was passed already. */
if (DECL_DECLARED_INLINE_P (newdecl)
|| DECL_DECLARED_INLINE_P (olddecl))
DECL_DECLARED_INLINE_P (newdecl) = 1;
DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl)
= (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl));
DECL_DISREGARD_INLINE_LIMITS (newdecl)
= DECL_DISREGARD_INLINE_LIMITS (olddecl)
= (DECL_DISREGARD_INLINE_LIMITS (newdecl)
|| DECL_DISREGARD_INLINE_LIMITS (olddecl));
}
if (DECL_BUILT_IN (olddecl))
{
/* If redeclaring a builtin function, it stays built in.
But it gets tagged as having been declared. */
DECL_BUILT_IN_CLASS (newdecl) = DECL_BUILT_IN_CLASS (olddecl);
DECL_FUNCTION_CODE (newdecl) = DECL_FUNCTION_CODE (olddecl);
C_DECL_DECLARED_BUILTIN (newdecl) = 1;
if (new_is_prototype)
{
C_DECL_BUILTIN_PROTOTYPE (newdecl) = 0;
if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL)
{
enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl);
switch (fncode)
{
/* If a compatible prototype of these builtin functions
is seen, assume the runtime implements it with the
expected semantics. */
case BUILT_IN_STPCPY:
if (builtin_decl_explicit_p (fncode))
set_builtin_decl_implicit_p (fncode, true);
break;
default:
if (builtin_decl_explicit_p (fncode))
set_builtin_decl_declared_p (fncode, true);
break;
}
copy_attributes_to_builtin (newdecl);
}
}
else
C_DECL_BUILTIN_PROTOTYPE (newdecl)
= C_DECL_BUILTIN_PROTOTYPE (olddecl);
}
/* Preserve function specific target and optimization options */
if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl)
&& !DECL_FUNCTION_SPECIFIC_TARGET (newdecl))
DECL_FUNCTION_SPECIFIC_TARGET (newdecl)
= DECL_FUNCTION_SPECIFIC_TARGET (olddecl);
if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl)
&& !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl))
DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)
= DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl);
/* Also preserve various other info from the definition. */
if (!new_is_definition)
{
tree t;
DECL_RESULT (newdecl) = DECL_RESULT (olddecl);
DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl);
DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl);
DECL_ARGUMENTS (newdecl) = copy_list (DECL_ARGUMENTS (olddecl));
for (t = DECL_ARGUMENTS (newdecl); t ; t = DECL_CHAIN (t))
DECL_CONTEXT (t) = newdecl;
/* See if we've got a function to instantiate from. */
if (DECL_SAVED_TREE (olddecl))
DECL_ABSTRACT_ORIGIN (newdecl)
= DECL_ABSTRACT_ORIGIN (olddecl);
}
}
/* Merge the USED information. */
if (TREE_USED (olddecl))
TREE_USED (newdecl) = 1;
else if (TREE_USED (newdecl))
TREE_USED (olddecl) = 1;
if (VAR_P (olddecl) || TREE_CODE (olddecl) == PARM_DECL)
DECL_READ_P (newdecl) |= DECL_READ_P (olddecl);
if (DECL_PRESERVE_P (olddecl))
DECL_PRESERVE_P (newdecl) = 1;
else if (DECL_PRESERVE_P (newdecl))
DECL_PRESERVE_P (olddecl) = 1;
/* Merge DECL_COMMON */
if (VAR_P (olddecl) && VAR_P (newdecl)
&& !lookup_attribute ("common", DECL_ATTRIBUTES (newdecl))
&& !lookup_attribute ("nocommon", DECL_ATTRIBUTES (newdecl)))
DECL_COMMON (newdecl) = DECL_COMMON (newdecl) && DECL_COMMON (olddecl);
/* Copy most of the decl-specific fields of NEWDECL into OLDDECL.
But preserve OLDDECL's DECL_UID, DECL_CONTEXT and
DECL_ARGUMENTS (if appropriate). */
{
unsigned olddecl_uid = DECL_UID (olddecl);
tree olddecl_context = DECL_CONTEXT (olddecl);
tree olddecl_arguments = NULL;
if (TREE_CODE (olddecl) == FUNCTION_DECL)
olddecl_arguments = DECL_ARGUMENTS (olddecl);
memcpy ((char *) olddecl + sizeof (struct tree_common),
(char *) newdecl + sizeof (struct tree_common),
sizeof (struct tree_decl_common) - sizeof (struct tree_common));
DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl);
switch (TREE_CODE (olddecl))
{
case FUNCTION_DECL:
case VAR_DECL:
{
struct symtab_node *snode = olddecl->decl_with_vis.symtab_node;
memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
(char *) newdecl + sizeof (struct tree_decl_common),
tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common));
olddecl->decl_with_vis.symtab_node = snode;
if ((DECL_EXTERNAL (olddecl)
|| TREE_PUBLIC (olddecl)
|| TREE_STATIC (olddecl))
&& DECL_SECTION_NAME (newdecl) != NULL)
set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl));
/* This isn't quite correct for something like
int __thread x attribute ((tls_model ("local-exec")));
extern int __thread x;
as we'll lose the "local-exec" model. */
if (VAR_P (olddecl) && DECL_THREAD_LOCAL_P (newdecl))
set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl));
break;
}
case FIELD_DECL:
case PARM_DECL:
case LABEL_DECL:
case RESULT_DECL:
case CONST_DECL:
case TYPE_DECL:
memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
(char *) newdecl + sizeof (struct tree_decl_common),
tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common));
break;
default:
memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
(char *) newdecl + sizeof (struct tree_decl_common),
sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common));
}
DECL_UID (olddecl) = olddecl_uid;
DECL_CONTEXT (olddecl) = olddecl_context;
if (TREE_CODE (olddecl) == FUNCTION_DECL)
DECL_ARGUMENTS (olddecl) = olddecl_arguments;
}
/* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl
so that encode_section_info has a chance to look at the new decl
flags and attributes. */
if (DECL_RTL_SET_P (olddecl)
&& (TREE_CODE (olddecl) == FUNCTION_DECL
|| (VAR_P (olddecl) && TREE_STATIC (olddecl))))
make_decl_rtl (olddecl);
}
/* Handle when a new declaration NEWDECL has the same name as an old
   one OLDDECL in the same binding contour.  Prints an error message
   if appropriate.

   If safely possible, alter OLDDECL to look like NEWDECL, and return
   true.  Otherwise, return false.  */
static bool
duplicate_decls (tree newdecl, tree olddecl)
{
  tree merged_newtype = NULL;
  tree merged_oldtype = NULL;

  /* Diagnose any mismatch first; on failure the two decls are
     incompatible and NEWDECL silently replaces OLDDECL.  */
  if (!diagnose_mismatched_decls (newdecl, olddecl,
				  &merged_newtype, &merged_oldtype))
    {
      /* Avoid `unused variable' and other warnings for OLDDECL.  */
      TREE_NO_WARNING (olddecl) = 1;
      return false;
    }

  merge_decls (newdecl, olddecl, merged_newtype, merged_oldtype);

  /* NEWDECL is now redundant and will be released.  Before freeing
     it, detach state shared with OLDDECL: the struct function (shared
     between the two decls, so it must not be freed along with
     NEWDECL) and any symbol table node that may have been inserted to
     record a comdat group.  */
  if (TREE_CODE (newdecl) == FUNCTION_DECL)
    DECL_STRUCT_FUNCTION (newdecl) = NULL;
  if (VAR_OR_FUNCTION_DECL_P (newdecl))
    {
      struct symtab_node *snode = symtab_node::get (newdecl);
      if (snode)
	snode->remove ();
    }
  ggc_free (newdecl);
  return true;
}
/* Check whether decl-node NEW_DECL shadows an existing declaration. */
static void
warn_if_shadowing (tree new_decl)
{
struct c_binding *b;
/* Shadow warnings wanted? */
if (!(warn_shadow
|| warn_shadow_local
|| warn_shadow_compatible_local)
/* No shadow warnings for internally generated vars. */
|| DECL_IS_BUILTIN (new_decl)
/* No shadow warnings for vars made for inlining. */
|| DECL_FROM_INLINE (new_decl))
return;
/* Is anything being shadowed? Invisible decls do not count. */
for (b = I_SYMBOL_BINDING (DECL_NAME (new_decl)); b; b = b->shadowed)
if (b->decl && b->decl != new_decl && !b->invisible
&& (b->decl == error_mark_node
|| diagnostic_report_warnings_p (global_dc,
DECL_SOURCE_LOCATION (b->decl))))
{
tree old_decl = b->decl;
bool warned = false;
if (old_decl == error_mark_node)
{
warning (OPT_Wshadow, "declaration of %q+D shadows previous "
"non-variable", new_decl);
break;
}
else if (TREE_CODE (old_decl) == PARM_DECL)
{
enum opt_code warning_code;
/* If '-Wshadow=compatible-local' is specified without other
-Wshadow= flags, we will warn only when the types of the
shadowing variable (i.e. new_decl) and the shadowed variable
(old_decl) are compatible. */
if (warn_shadow)
warning_code = OPT_Wshadow;
else if (comptypes (TREE_TYPE (old_decl), TREE_TYPE (new_decl)))
warning_code = OPT_Wshadow_compatible_local;
else
warning_code = OPT_Wshadow_local;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), warning_code,
"declaration of %qD shadows a parameter",
new_decl);
}
else if (DECL_FILE_SCOPE_P (old_decl))
{
/* Do not warn if a variable shadows a function, unless
the variable is a function or a pointer-to-function. */
if (TREE_CODE (old_decl) == FUNCTION_DECL
&& TREE_CODE (new_decl) != FUNCTION_DECL
&& !FUNCTION_POINTER_TYPE_P (TREE_TYPE (new_decl)))
continue;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), OPT_Wshadow,
"declaration of %qD shadows a global "
"declaration",
new_decl);
}
else if (TREE_CODE (old_decl) == FUNCTION_DECL
&& DECL_BUILT_IN (old_decl))
{
warning (OPT_Wshadow, "declaration of %q+D shadows "
"a built-in function", new_decl);
break;
}
else
{
enum opt_code warning_code;
/* If '-Wshadow=compatible-local' is specified without other
-Wshadow= flags, we will warn only when the types of the
shadowing variable (i.e. new_decl) and the shadowed variable
(old_decl) are compatible. */
if (warn_shadow)
warning_code = OPT_Wshadow;
else if (comptypes (TREE_TYPE (old_decl), TREE_TYPE (new_decl)))
warning_code = OPT_Wshadow_compatible_local;
else
warning_code = OPT_Wshadow_local;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), warning_code,
"declaration of %qD shadows a previous local",
new_decl);
}
if (warned)
inform (DECL_SOURCE_LOCATION (old_decl),
"shadowed declaration is here");
break;
}
}
/* Record a decl-node X as belonging to the current lexical scope.
   Check for errors (such as an incompatible declaration for the same
   name already seen in the same scope).

   Returns either X or an old decl for the same name.
   If an old decl is returned, it may have been smashed
   to agree with what X says.  */
tree
pushdecl (tree x)
{
  tree name = DECL_NAME (x);
  struct c_scope *scope = current_scope;
  struct c_binding *b;
  bool nested = false;
  location_t locus = DECL_SOURCE_LOCATION (x);

  /* Must set DECL_CONTEXT for everything not at file scope or
     DECL_FILE_SCOPE_P won't work.  Local externs don't count
     unless they have initializers (which generate code).  */
  if (current_function_decl
      && (!VAR_OR_FUNCTION_DECL_P (x)
	  || DECL_INITIAL (x) || !DECL_EXTERNAL (x)))
    DECL_CONTEXT (x) = current_function_decl;

  /* Anonymous decls are just inserted in the scope.  */
  if (!name)
    {
      bind (name, x, scope, /*invisible=*/false, /*nested=*/false,
	    locus);
      return x;
    }

  /* First, see if there is another declaration with the same name in
     the current scope.  If there is, duplicate_decls may do all the
     work for us.  If duplicate_decls returns false, that indicates
     two incompatible decls in the same scope; we are to silently
     replace the old one (duplicate_decls has issued all appropriate
     diagnostics).  In particular, we should not consider possible
     duplicates in the external scope, or shadowing.  */
  b = I_SYMBOL_BINDING (name);
  if (b && B_IN_SCOPE (b, scope))
    {
      struct c_binding *b_ext, *b_use;
      tree type = TREE_TYPE (x);
      tree visdecl = b->decl;
      tree vistype = TREE_TYPE (visdecl);

      /* A complete array declaration supersedes any earlier
	 incomplete completion recorded for this binding.  */
      if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
	  && COMPLETE_TYPE_P (TREE_TYPE (x)))
	b->inner_comp = false;
      b_use = b;
      b_ext = b;
      /* If this is an external linkage declaration, we should check
	 for compatibility with the type in the external scope before
	 setting the type at this scope based on the visible
	 information only.  */
      if (TREE_PUBLIC (x) && TREE_PUBLIC (visdecl))
	{
	  /* Walk down the shadow chain to the external-scope binding,
	     and temporarily restore its saved (full composite) type
	     so duplicate_decls checks against it.  */
	  while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
	    b_ext = b_ext->shadowed;
	  if (b_ext)
	    {
	      b_use = b_ext;
	      if (b_use->u.type)
		TREE_TYPE (b_use->decl) = b_use->u.type;
	    }
	}
      if (duplicate_decls (x, b_use->decl))
	{
	  if (b_use != b)
	    {
	      /* Save the updated type in the external scope and
		 restore the proper type for this scope.  */
	      tree thistype;
	      if (comptypes (vistype, type))
		thistype = composite_type (vistype, type);
	      else
		thistype = TREE_TYPE (b_use->decl);
	      b_use->u.type = TREE_TYPE (b_use->decl);
	      /* Built-ins carry attributes on their type; keep them
		 when narrowing back to the visible type.  */
	      if (TREE_CODE (b_use->decl) == FUNCTION_DECL
		  && DECL_BUILT_IN (b_use->decl))
		thistype
		  = build_type_attribute_variant (thistype,
						  TYPE_ATTRIBUTES
						  (b_use->u.type));
	      TREE_TYPE (b_use->decl) = thistype;
	    }
	  return b_use->decl;
	}
      else
	goto skip_external_and_shadow_checks;
    }

  /* All declarations with external linkage, and all external
     references, go in the external scope, no matter what scope is
     current.  However, the binding in that scope is ignored for
     purposes of normal name lookup.  A separate binding structure is
     created in the requested scope; this governs the normal
     visibility of the symbol.

     The binding in the externals scope is used exclusively for
     detecting duplicate declarations of the same object, no matter
     what scope they are in; this is what we do here.  (C99 6.2.7p2:
     All declarations that refer to the same object or function shall
     have compatible type; otherwise, the behavior is undefined.)  */
  if (DECL_EXTERNAL (x) || scope == file_scope)
    {
      tree type = TREE_TYPE (x);
      tree vistype = NULL_TREE;
      tree visdecl = NULL_TREE;
      bool type_saved = false;

      /* Remember the innermost visible file-scope declaration, if
	 any, so the visible type can be restored after the external
	 scope consistency checks below.  */
      if (b && !B_IN_EXTERNAL_SCOPE (b)
	  && VAR_OR_FUNCTION_DECL_P (b->decl)
	  && DECL_FILE_SCOPE_P (b->decl))
	{
	  visdecl = b->decl;
	  vistype = TREE_TYPE (visdecl);
	}
      if (scope != file_scope
	  && !DECL_IN_SYSTEM_HEADER (x))
	warning_at (locus, OPT_Wnested_externs,
		    "nested extern declaration of %qD", x);

      while (b && !B_IN_EXTERNAL_SCOPE (b))
	{
	  /* If this decl might be modified, save its type.  This is
	     done here rather than when the decl is first bound
	     because the type may change after first binding, through
	     being completed or through attributes being added.  If we
	     encounter multiple such decls, only the first should have
	     its type saved; the others will already have had their
	     proper types saved and the types will not have changed as
	     their scopes will not have been re-entered.  */
	  if (DECL_P (b->decl) && DECL_FILE_SCOPE_P (b->decl) && !type_saved)
	    {
	      b->u.type = TREE_TYPE (b->decl);
	      type_saved = true;
	    }
	  if (B_IN_FILE_SCOPE (b)
	      && VAR_P (b->decl)
	      && TREE_STATIC (b->decl)
	      && TREE_CODE (TREE_TYPE (b->decl)) == ARRAY_TYPE
	      && !TYPE_DOMAIN (TREE_TYPE (b->decl))
	      && TREE_CODE (type) == ARRAY_TYPE
	      && TYPE_DOMAIN (type)
	      && TYPE_MAX_VALUE (TYPE_DOMAIN (type))
	      && !integer_zerop (TYPE_MAX_VALUE (TYPE_DOMAIN (type))))
	    {
	      /* Array type completed in inner scope, which should be
		 diagnosed if the completion does not have size 1 and
		 it does not get completed in the file scope.  */
	      b->inner_comp = true;
	    }
	  b = b->shadowed;
	}

      /* If a matching external declaration has been found, set its
	 type to the composite of all the types of that declaration.
	 After the consistency checks, it will be reset to the
	 composite of the visible types only.  */
      if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
	  && b->u.type)
	TREE_TYPE (b->decl) = b->u.type;

      /* The point of the same_translation_unit_p check here is,
	 we want to detect a duplicate decl for a construct like
	 foo() { extern bar(); } ... static bar();  but not if
	 they are in different translation units.  In any case,
	 the static does not go in the externals scope.  */
      if (b
	  && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
	  && duplicate_decls (x, b->decl))
	{
	  tree thistype;
	  if (vistype)
	    {
	      if (comptypes (vistype, type))
		thistype = composite_type (vistype, type);
	      else
		thistype = TREE_TYPE (b->decl);
	    }
	  else
	    thistype = type;
	  b->u.type = TREE_TYPE (b->decl);
	  /* Keep the attributes carried by a built-in's type when
	     restoring the visible type.  */
	  if (TREE_CODE (b->decl) == FUNCTION_DECL && DECL_BUILT_IN (b->decl))
	    thistype
	      = build_type_attribute_variant (thistype,
					      TYPE_ATTRIBUTES (b->u.type));
	  TREE_TYPE (b->decl) = thistype;
	  bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true,
		locus);
	  return b->decl;
	}
      else if (TREE_PUBLIC (x))
	{
	  if (visdecl && !b && duplicate_decls (x, visdecl))
	    {
	      /* An external declaration at block scope referring to a
		 visible entity with internal linkage.  The composite
		 type will already be correct for this scope, so we
		 just need to fall through to make the declaration in
		 this scope.  */
	      nested = true;
	      x = visdecl;
	    }
	  else
	    {
	      bind (name, x, external_scope, /*invisible=*/true,
		    /*nested=*/false, locus);
	      nested = true;
	    }
	}
    }

  if (TREE_CODE (x) != PARM_DECL)
    warn_if_shadowing (x);
 skip_external_and_shadow_checks:
  if (TREE_CODE (x) == TYPE_DECL)
    {
      /* So this is a typedef, set its underlying type.  */
      set_underlying_type (x);

      /* If X is a typedef defined in the current function, record it
	 for the purpose of implementing the -Wunused-local-typedefs
	 warning.  */
      record_locally_defined_typedef (x);
    }

  bind (name, x, scope, /*invisible=*/false, nested, locus);

  /* If x's type is incomplete because it's based on a
     structure or union which has not yet been fully declared,
     attach it to that structure or union type, so we can go
     back and complete the variable declaration later, if the
     structure or union gets fully declared.

     If the input is erroneous, we can have error_mark in the type
     slot (e.g. "f(void a, ...)") - that doesn't count as an
     incomplete type.  */
  if (TREE_TYPE (x) != error_mark_node
      && !COMPLETE_TYPE_P (TREE_TYPE (x)))
    {
      tree element = TREE_TYPE (x);

      /* Strip array layers to reach the element type being waited on.  */
      while (TREE_CODE (element) == ARRAY_TYPE)
	element = TREE_TYPE (element);
      element = TYPE_MAIN_VARIANT (element);

      if (RECORD_OR_UNION_TYPE_P (element)
	  && (TREE_CODE (x) != TYPE_DECL
	      || TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE)
	  && !COMPLETE_TYPE_P (element))
	C_TYPE_INCOMPLETE_VARS (element)
	  = tree_cons (NULL_TREE, x, C_TYPE_INCOMPLETE_VARS (element));
    }
  return x;
}
/* Issue a warning about implicit function declaration.  ID is the function
   identifier, OLDDECL is a declaration of the function in a different scope,
   or NULL_TREE.  */
static void
implicit_decl_warning (location_t loc, tree id, tree olddecl)
{
  if (!warn_implicit_function_declaration)
    return;

  bool warned;
  name_hint hint;
  /* Only look for a spelling suggestion when this is the first
     declaration of the function we have seen.  */
  if (!olddecl)
    hint = lookup_name_fuzzy (id, FUZZY_LOOKUP_FUNCTION_NAME, loc);

  if (hint)
    {
      /* A plausible alternative name exists: attach a fix-it hint
	 replacing the misspelled identifier.  In C99 mode this is a
	 pedwarn, otherwise a plain warning.  */
      gcc_rich_location richloc (loc);
      richloc.add_fixit_replace (hint.suggestion ());
      if (flag_isoc99)
	warned = pedwarn (&richloc, OPT_Wimplicit_function_declaration,
			  "implicit declaration of function %qE;"
			  " did you mean %qs?",
			  id, hint.suggestion ());
      else
	warned = warning_at
	  (&richloc, OPT_Wimplicit_function_declaration,
	   G_("implicit declaration of function %qE; did you mean %qs?"),
	   id, hint.suggestion ());
    }
  else if (flag_isoc99)
    warned = pedwarn (loc, OPT_Wimplicit_function_declaration,
		      "implicit declaration of function %qE", id);
  else
    warned = warning_at (loc, OPT_Wimplicit_function_declaration,
			 G_("implicit declaration of function %qE"), id);

  if (olddecl && warned)
    locate_old_decl (olddecl);

  /* If the diagnostic was suppressed, keep the hint machinery from
     offering the suggestion elsewhere.  */
  if (!warned)
    hint.suppress ();
}
/* This function represents mapping of a function code FCODE
   to its respective header.  Returns the header name as a quoted
   string (e.g. "<math.h>"), or NULL if no standard header is
   associated with FCODE.  Used to suggest a missing #include when an
   implicit declaration of a built-in is diagnosed.  */
static const char *
header_for_builtin_fn (enum built_in_function fcode)
{
  switch (fcode)
    {
    /* Functions declared in <math.h>.  */
    CASE_FLT_FN (BUILT_IN_ACOS):
    CASE_FLT_FN (BUILT_IN_ACOSH):
    CASE_FLT_FN (BUILT_IN_ASIN):
    CASE_FLT_FN (BUILT_IN_ASINH):
    CASE_FLT_FN (BUILT_IN_ATAN):
    CASE_FLT_FN (BUILT_IN_ATANH):
    CASE_FLT_FN (BUILT_IN_ATAN2):
    CASE_FLT_FN (BUILT_IN_CBRT):
    CASE_FLT_FN (BUILT_IN_CEIL):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_CEIL):
    CASE_FLT_FN (BUILT_IN_COPYSIGN):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
    CASE_FLT_FN (BUILT_IN_COS):
    CASE_FLT_FN (BUILT_IN_COSH):
    CASE_FLT_FN (BUILT_IN_ERF):
    CASE_FLT_FN (BUILT_IN_ERFC):
    CASE_FLT_FN (BUILT_IN_EXP):
    CASE_FLT_FN (BUILT_IN_EXP2):
    CASE_FLT_FN (BUILT_IN_EXPM1):
    CASE_FLT_FN (BUILT_IN_FABS):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FABS):
    CASE_FLT_FN (BUILT_IN_FDIM):
    CASE_FLT_FN (BUILT_IN_FLOOR):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FLOOR):
    CASE_FLT_FN (BUILT_IN_FMA):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMA):
    CASE_FLT_FN (BUILT_IN_FMAX):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMAX):
    CASE_FLT_FN (BUILT_IN_FMIN):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMIN):
    CASE_FLT_FN (BUILT_IN_FMOD):
    CASE_FLT_FN (BUILT_IN_FREXP):
    CASE_FLT_FN (BUILT_IN_HYPOT):
    CASE_FLT_FN (BUILT_IN_ILOGB):
    CASE_FLT_FN (BUILT_IN_LDEXP):
    CASE_FLT_FN (BUILT_IN_LGAMMA):
    CASE_FLT_FN (BUILT_IN_LLRINT):
    CASE_FLT_FN (BUILT_IN_LLROUND):
    CASE_FLT_FN (BUILT_IN_LOG):
    CASE_FLT_FN (BUILT_IN_LOG10):
    CASE_FLT_FN (BUILT_IN_LOG1P):
    CASE_FLT_FN (BUILT_IN_LOG2):
    CASE_FLT_FN (BUILT_IN_LOGB):
    CASE_FLT_FN (BUILT_IN_LRINT):
    CASE_FLT_FN (BUILT_IN_LROUND):
    CASE_FLT_FN (BUILT_IN_MODF):
    CASE_FLT_FN (BUILT_IN_NAN):
    CASE_FLT_FN (BUILT_IN_NEARBYINT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_NEARBYINT):
    CASE_FLT_FN (BUILT_IN_NEXTAFTER):
    CASE_FLT_FN (BUILT_IN_NEXTTOWARD):
    CASE_FLT_FN (BUILT_IN_POW):
    CASE_FLT_FN (BUILT_IN_REMAINDER):
    CASE_FLT_FN (BUILT_IN_REMQUO):
    CASE_FLT_FN (BUILT_IN_RINT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_RINT):
    CASE_FLT_FN (BUILT_IN_ROUND):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_ROUND):
    CASE_FLT_FN (BUILT_IN_SCALBLN):
    CASE_FLT_FN (BUILT_IN_SCALBN):
    CASE_FLT_FN (BUILT_IN_SIN):
    CASE_FLT_FN (BUILT_IN_SINH):
    CASE_FLT_FN (BUILT_IN_SINCOS):
    CASE_FLT_FN (BUILT_IN_SQRT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT):
    CASE_FLT_FN (BUILT_IN_TAN):
    CASE_FLT_FN (BUILT_IN_TANH):
    CASE_FLT_FN (BUILT_IN_TGAMMA):
    CASE_FLT_FN (BUILT_IN_TRUNC):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_TRUNC):
    case BUILT_IN_ISINF:
    case BUILT_IN_ISNAN:
      return "<math.h>";
    /* Functions declared in <complex.h>.  */
    CASE_FLT_FN (BUILT_IN_CABS):
    CASE_FLT_FN (BUILT_IN_CACOS):
    CASE_FLT_FN (BUILT_IN_CACOSH):
    CASE_FLT_FN (BUILT_IN_CARG):
    CASE_FLT_FN (BUILT_IN_CASIN):
    CASE_FLT_FN (BUILT_IN_CASINH):
    CASE_FLT_FN (BUILT_IN_CATAN):
    CASE_FLT_FN (BUILT_IN_CATANH):
    CASE_FLT_FN (BUILT_IN_CCOS):
    CASE_FLT_FN (BUILT_IN_CCOSH):
    CASE_FLT_FN (BUILT_IN_CEXP):
    CASE_FLT_FN (BUILT_IN_CIMAG):
    CASE_FLT_FN (BUILT_IN_CLOG):
    CASE_FLT_FN (BUILT_IN_CONJ):
    CASE_FLT_FN (BUILT_IN_CPOW):
    CASE_FLT_FN (BUILT_IN_CPROJ):
    CASE_FLT_FN (BUILT_IN_CREAL):
    CASE_FLT_FN (BUILT_IN_CSIN):
    CASE_FLT_FN (BUILT_IN_CSINH):
    CASE_FLT_FN (BUILT_IN_CSQRT):
    CASE_FLT_FN (BUILT_IN_CTAN):
    CASE_FLT_FN (BUILT_IN_CTANH):
      return "<complex.h>";
    /* Functions declared in <string.h>.  */
    case BUILT_IN_MEMCHR:
    case BUILT_IN_MEMCMP:
    case BUILT_IN_MEMCPY:
    case BUILT_IN_MEMMOVE:
    case BUILT_IN_MEMSET:
    case BUILT_IN_STRCAT:
    case BUILT_IN_STRCHR:
    case BUILT_IN_STRCMP:
    case BUILT_IN_STRCPY:
    case BUILT_IN_STRCSPN:
    case BUILT_IN_STRLEN:
    case BUILT_IN_STRNCAT:
    case BUILT_IN_STRNCMP:
    case BUILT_IN_STRNCPY:
    case BUILT_IN_STRPBRK:
    case BUILT_IN_STRRCHR:
    case BUILT_IN_STRSPN:
    case BUILT_IN_STRSTR:
      return "<string.h>";
    /* Functions declared in <stdio.h>.  */
    case BUILT_IN_FPRINTF:
    case BUILT_IN_PUTC:
    case BUILT_IN_FPUTC:
    case BUILT_IN_FPUTS:
    case BUILT_IN_FSCANF:
    case BUILT_IN_FWRITE:
    case BUILT_IN_PRINTF:
    case BUILT_IN_PUTCHAR:
    case BUILT_IN_PUTS:
    case BUILT_IN_SCANF:
    case BUILT_IN_SNPRINTF:
    case BUILT_IN_SPRINTF:
    case BUILT_IN_SSCANF:
    case BUILT_IN_VFPRINTF:
    case BUILT_IN_VFSCANF:
    case BUILT_IN_VPRINTF:
    case BUILT_IN_VSCANF:
    case BUILT_IN_VSNPRINTF:
    case BUILT_IN_VSPRINTF:
    case BUILT_IN_VSSCANF:
      return "<stdio.h>";
    /* Functions declared in <ctype.h>.  */
    case BUILT_IN_ISALNUM:
    case BUILT_IN_ISALPHA:
    case BUILT_IN_ISBLANK:
    case BUILT_IN_ISCNTRL:
    case BUILT_IN_ISDIGIT:
    case BUILT_IN_ISGRAPH:
    case BUILT_IN_ISLOWER:
    case BUILT_IN_ISPRINT:
    case BUILT_IN_ISPUNCT:
    case BUILT_IN_ISSPACE:
    case BUILT_IN_ISUPPER:
    case BUILT_IN_ISXDIGIT:
    case BUILT_IN_TOLOWER:
    case BUILT_IN_TOUPPER:
      return "<ctype.h>";
    /* Functions declared in <wctype.h>.  */
    case BUILT_IN_ISWALNUM:
    case BUILT_IN_ISWALPHA:
    case BUILT_IN_ISWBLANK:
    case BUILT_IN_ISWCNTRL:
    case BUILT_IN_ISWDIGIT:
    case BUILT_IN_ISWGRAPH:
    case BUILT_IN_ISWLOWER:
    case BUILT_IN_ISWPRINT:
    case BUILT_IN_ISWPUNCT:
    case BUILT_IN_ISWSPACE:
    case BUILT_IN_ISWUPPER:
    case BUILT_IN_ISWXDIGIT:
    case BUILT_IN_TOWLOWER:
    case BUILT_IN_TOWUPPER:
      return "<wctype.h>";
    /* Functions declared in <stdlib.h>.  */
    case BUILT_IN_ABORT:
    case BUILT_IN_ABS:
    case BUILT_IN_CALLOC:
    case BUILT_IN_EXIT:
    case BUILT_IN_FREE:
    case BUILT_IN_LABS:
    case BUILT_IN_LLABS:
    case BUILT_IN_MALLOC:
    case BUILT_IN_REALLOC:
    case BUILT_IN__EXIT2:
    case BUILT_IN_ALIGNED_ALLOC:
      return "<stdlib.h>";
    case BUILT_IN_IMAXABS:
      return "<inttypes.h>";
    case BUILT_IN_STRFTIME:
      return "<time.h>";
    default:
      /* No standard header known for this builtin; callers emit no
	 include suggestion in that case.  */
      return NULL;
    }
}
/* Generate an implicit declaration for identifier FUNCTIONID at LOC as a
   function of type int ().  Returns the (possibly recycled) decl for the
   function, after binding it in the appropriate scope.  */
tree
implicitly_declare (location_t loc, tree functionid)
{
  struct c_binding *b;
  tree decl = NULL_TREE;
  tree asmspec_tree;

  /* Look for an existing declaration of this name in the external
     scope; only such a declaration may be recycled.  */
  for (b = I_SYMBOL_BINDING (functionid); b; b = b->shadowed)
    {
      if (B_IN_SCOPE (b, external_scope))
	{
	  decl = b->decl;
	  break;
	}
    }

  if (decl)
    {
      /* A non-function with the same name exists externally; leave it
	 alone and let the caller diagnose.  */
      if (TREE_CODE (decl) != FUNCTION_DECL)
	return decl;

      /* FIXME: Objective-C has weird not-really-builtin functions
	 which are supposed to be visible automatically.  They wind up
	 in the external scope because they're pushed before the file
	 scope gets created.  Catch this here and rebind them into the
	 file scope.  */
      if (!DECL_BUILT_IN (decl) && DECL_IS_BUILTIN (decl))
	{
	  bind (functionid, decl, file_scope,
		/*invisible=*/false, /*nested=*/true,
		DECL_SOURCE_LOCATION (decl));
	  return decl;
	}
      else
	{
	  tree newtype = default_function_type;
	  if (b->u.type)
	    TREE_TYPE (decl) = b->u.type;
	  /* Implicit declaration of a function already declared
	     (somehow) in a different scope, or as a built-in.
	     If this is the first time this has happened, warn;
	     then recycle the old declaration but with the new type.  */
	  if (!C_DECL_IMPLICIT (decl))
	    {
	      implicit_decl_warning (loc, functionid, decl);
	      C_DECL_IMPLICIT (decl) = 1;
	    }
	  if (DECL_BUILT_IN (decl))
	    {
	      /* Carry over the attributes on the built-in's type so
		 the comparison below is against the attributed type.  */
	      newtype = build_type_attribute_variant (newtype,
						      TYPE_ATTRIBUTES
						      (TREE_TYPE (decl)));
	      if (!comptypes (newtype, TREE_TYPE (decl)))
		{
		  bool warned = warning_at (loc, 0, "incompatible implicit "
					    "declaration of built-in "
					    "function %qD", decl);
		  /* See if we can hint which header to include.  */
		  const char *header
		    = header_for_builtin_fn (DECL_FUNCTION_CODE (decl));
		  if (header != NULL && warned)
		    {
		      rich_location richloc (line_table, loc);
		      maybe_add_include_fixit (&richloc, header);
		      inform (&richloc,
			      "include %qs or provide a declaration of %qD",
			      header, decl);
		    }
		  /* Keep the built-in's own type rather than the
		     incompatible implicit one.  */
		  newtype = TREE_TYPE (decl);
		}
	    }
	  else
	    {
	      if (!comptypes (newtype, TREE_TYPE (decl)))
		{
		  error_at (loc, "incompatible implicit declaration of "
			    "function %qD", decl);
		  locate_old_decl (decl);
		}
	    }
	  /* Save the full type in the external binding and install
	     the implicit type for this use.  */
	  b->u.type = TREE_TYPE (decl);
	  TREE_TYPE (decl) = newtype;
	  bind (functionid, decl, current_scope,
		/*invisible=*/false, /*nested=*/true,
		DECL_SOURCE_LOCATION (decl));
	  return decl;
	}
    }

  /* Not seen before.  */
  decl = build_decl (loc, FUNCTION_DECL, functionid, default_function_type);
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;
  C_DECL_IMPLICIT (decl) = 1;
  implicit_decl_warning (loc, functionid, 0);
  asmspec_tree = maybe_apply_renaming_pragma (decl, /*asmname=*/NULL);
  if (asmspec_tree)
    set_user_assembler_name (decl, TREE_STRING_POINTER (asmspec_tree));

  /* C89 says implicit declarations are in the innermost block.
     So we record the decl in the standard fashion.  */
  decl = pushdecl (decl);

  /* No need to call objc_check_decl here - it's a function type.  */
  rest_of_decl_compilation (decl, 0, 0);

  /* Write a record describing this implicit function declaration
     to the prototypes file (if requested).  */
  gen_aux_info_record (decl, 0, 1, 0);

  /* Possibly apply some default attributes to this implicit declaration.  */
  decl_attributes (&decl, NULL_TREE, 0);

  return decl;
}
/* Issue an error message for a reference to an undeclared variable
   ID, including a reference to a builtin outside of function-call
   context.  Establish a binding of the identifier to error_mark_node
   in an appropriate scope, which will suppress further errors for the
   same identifier.  The error message should be given location LOC.  */
void
undeclared_variable (location_t loc, tree id)
{
  /* NOTE(review): `already' is function-static and never reset in
     this function, so the "reported only once for each function" note
     below appears at most once overall here — confirm any reset
     happens elsewhere in the file.  */
  static bool already = false;
  struct c_scope *scope;

  if (current_function_decl == NULL_TREE)
    {
      /* File-scope reference: offer a spelling suggestion with a
	 fix-it hint when a plausible candidate exists.  */
      name_hint guessed_id = lookup_name_fuzzy (id, FUZZY_LOOKUP_NAME, loc);
      if (guessed_id)
	{
	  gcc_rich_location richloc (loc);
	  richloc.add_fixit_replace (guessed_id.suggestion ());
	  error_at (&richloc,
		    "%qE undeclared here (not in a function);"
		    " did you mean %qs?",
		    id, guessed_id.suggestion ());
	}
      else
	error_at (loc, "%qE undeclared here (not in a function)", id);
      scope = current_scope;
    }
  else
    {
      /* Inside a function: let Objective-C diagnose private ivars
	 first; otherwise emit the standard diagnostic (with an
	 optional spelling suggestion).  */
      if (!objc_diagnose_private_ivar (id))
	{
	  name_hint guessed_id = lookup_name_fuzzy (id, FUZZY_LOOKUP_NAME, loc);
	  if (guessed_id)
	    {
	      gcc_rich_location richloc (loc);
	      richloc.add_fixit_replace (guessed_id.suggestion ());
	      error_at (&richloc,
			"%qE undeclared (first use in this function);"
			" did you mean %qs?",
			id, guessed_id.suggestion ());
	    }
	  else
	    error_at (loc, "%qE undeclared (first use in this function)", id);
	}
      if (!already)
	{
	  inform (loc, "each undeclared identifier is reported only"
		  " once for each function it appears in");
	  already = true;
	}

      /* If we are parsing old-style parameter decls, current_function_decl
	 will be nonnull but current_function_scope will be null.  */
      scope = current_function_scope ? current_function_scope : current_scope;
    }

  /* Bind to error_mark_node so subsequent uses stay silent.  */
  bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false,
	UNKNOWN_LOCATION);
}
/* Subroutine of lookup_label, declare_label, define_label: construct a
   LABEL_DECL with all the proper frills.  Also create a struct
   c_label_vars initialized for the current scope, returned through
   *P_LABEL_VARS.  DEFINING is passed on to set_spot_bindings.  */
static tree
make_label (location_t location, tree name, bool defining,
	    struct c_label_vars **p_label_vars)
{
  tree label = build_decl (location, LABEL_DECL, name, void_type_node);
  SET_DECL_MODE (label, VOIDmode);
  DECL_CONTEXT (label) = current_function_decl;

  c_label_vars *vars = ggc_alloc<c_label_vars> ();
  vars->shadowed = NULL;
  vars->gotos = NULL;
  vars->decls_in_scope = make_tree_vector ();
  set_spot_bindings (&vars->label_bindings, defining);
  *p_label_vars = vars;

  return label;
}
/* Get the LABEL_DECL corresponding to identifier NAME as a label.
   Create one if none exists so far for the current function.
   This is called when a label is used in a goto expression or
   has its address taken.  */
tree
lookup_label (tree name)
{
  /* Labels only make sense inside a function.  */
  if (current_function_scope == 0)
    {
      error ("label %qE referenced outside of any function", name);
      return NULL_TREE;
    }

  /* Use a label already defined or ref'd with this name, but not if
     it is inherited from a containing function and wasn't declared
     using __label__.  */
  tree label = I_LABEL_DECL (name);
  if (label
      && (DECL_CONTEXT (label) == current_function_decl
	  || C_DECLARED_LABEL_FLAG (label)))
    {
      /* If the label has only been declared, update its apparent
	 location to point here, for better diagnostics if it
	 turns out not to have been defined.  */
      if (DECL_INITIAL (label) == NULL_TREE)
	DECL_SOURCE_LOCATION (label) = input_location;
      return label;
    }

  /* No label binding for that identifier; make one and bind it in the
     function scope (ordinary labels are function-scoped).  */
  struct c_label_vars *label_vars;
  label = make_label (input_location, name, false, &label_vars);
  bind_label (name, label, current_function_scope, label_vars);
  return label;
}
/* Issue a warning about DECL for a goto statement at GOTO_LOC going
   to LABEL.  Jumps into the scope of a variably modified type are a
   hard error; other skipped initializations get -Wjump-misses-init.
   Both diagnostics are followed by notes locating the label and the
   skipped declaration.  */
static void
warn_about_goto (location_t goto_loc, tree label, tree decl)
{
  bool vm_type_p = variably_modified_type_p (TREE_TYPE (decl), NULL_TREE);
  if (vm_type_p)
    error_at (goto_loc,
	      "jump into scope of identifier with variably modified type");
  else
    warning_at (goto_loc, OPT_Wjump_misses_init,
		"jump skips variable initialization");
  inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
  inform (DECL_SOURCE_LOCATION (decl), "%qD declared here", decl);
}
/* Look up a label because of a goto statement.  This is like
   lookup_label, but also issues any appropriate warnings.  LOC is the
   location of the goto.  Returns the LABEL_DECL, or NULL_TREE on
   error.  */
tree
lookup_label_for_goto (location_t loc, tree name)
{
  tree label;
  struct c_label_vars *label_vars;
  unsigned int ix;
  tree decl;

  label = lookup_label (name);
  if (label == NULL_TREE)
    return NULL_TREE;

  /* If we are jumping to a different function, we can't issue any
     useful warnings.  */
  if (DECL_CONTEXT (label) != current_function_decl)
    {
      /* Only a __label__-declared label can legitimately belong to an
	 enclosing function here.  */
      gcc_assert (C_DECLARED_LABEL_FLAG (label));
      return label;
    }

  label_vars = I_LABEL_BINDING (name)->u.label;

  /* If the label has not yet been defined, then push this goto on a
     list for possible later warnings (checked when the label is
     defined, in check_earlier_gotos).  */
  if (label_vars->label_bindings.scope == NULL)
    {
      c_goto_bindings *g = ggc_alloc<c_goto_bindings> ();

      g->loc = loc;
      set_spot_bindings (&g->goto_bindings, true);
      vec_safe_push (label_vars->gotos, g);
      return label;
    }

  /* If there are any decls in label_vars->decls_in_scope, then this
     goto has missed the declaration of the decl.  This happens for a
     case like
       int i = 1;
      lab:
       ...
       goto lab;
     Issue a warning or error.  */
  FOR_EACH_VEC_SAFE_ELT (label_vars->decls_in_scope, ix, decl)
    warn_about_goto (loc, label, decl);

  /* A backward jump into a statement expression is always an error.  */
  if (label_vars->label_bindings.left_stmt_expr)
    {
      error_at (loc, "jump into statement expression");
      inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
    }

  return label;
}
/* Make a label named NAME in the current function, shadowing silently
   any that may be inherited from containing functions or containing
   scopes.  This is called for __label__ declarations.  */
tree
declare_label (tree name)
{
  struct c_binding *old_binding = I_LABEL_BINDING (name);

  /* A second declaration of the same label in the same scope is an
     error; report it and reuse the previous declaration.  */
  if (old_binding && B_IN_CURRENT_SCOPE (old_binding))
    {
      error ("duplicate label declaration %qE", name);
      locate_old_decl (old_binding->decl);
      return old_binding->decl;
    }

  struct c_label_vars *label_vars;
  tree label = make_label (input_location, name, false, &label_vars);
  C_DECLARED_LABEL_FLAG (label) = 1;

  /* Declared labels go in the current scope.  */
  bind_label (name, label, current_scope, label_vars);
  return label;
}
/* When we define a label, issue any appropriate warnings if there are
   any gotos earlier in the function which jump to this label.  LABEL
   is the just-defined LABEL_DECL; LABEL_VARS carries the pending
   forward-goto records and the label's own binding snapshot.  */
static void
check_earlier_gotos (tree label, struct c_label_vars* label_vars)
{
  unsigned int ix;
  struct c_goto_bindings *g;

  FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
    {
      struct c_binding *b;
      struct c_scope *scope;

      /* We have a goto to this label.  The goto is going forward.  In
	 g->scope, the goto is going to skip any binding which was
	 defined after g->bindings_in_scope.  */
      if (g->goto_bindings.scope->has_jump_unsafe_decl)
	{
	  /* Walk the bindings added to the goto's scope since the
	     goto itself; each jump-unsafe one was skipped.  */
	  for (b = g->goto_bindings.scope->bindings;
	       b != g->goto_bindings.bindings_in_scope;
	       b = b->prev)
	    {
	      if (decl_jump_unsafe (b->decl))
		warn_about_goto (g->loc, label, b->decl);
	    }
	}

      /* We also need to warn about decls defined in any scopes
	 between the scope of the label and the scope of the goto.  */
      for (scope = label_vars->label_bindings.scope;
	   scope != g->goto_bindings.scope;
	   scope = scope->outer)
	{
	  /* The goto's scope must enclose the label's scope.  */
	  gcc_assert (scope != NULL);
	  if (scope->has_jump_unsafe_decl)
	    {
	      /* In the label's own scope only the bindings present at
		 the label count; in intermediate scopes, all of them.  */
	      if (scope == label_vars->label_bindings.scope)
		b = label_vars->label_bindings.bindings_in_scope;
	      else
		b = scope->bindings;
	      for (; b != NULL; b = b->prev)
		{
		  if (decl_jump_unsafe (b->decl))
		    warn_about_goto (g->loc, label, b->decl);
		}
	    }
	}

      /* A forward jump into a statement expression is an error.  */
      if (g->goto_bindings.stmt_exprs > 0)
	{
	  error_at (g->loc, "jump into statement expression");
	  inform (DECL_SOURCE_LOCATION (label), "label %qD defined here",
		  label);
	}
    }

  /* Now that the label is defined, we will issue warnings about
     subsequent gotos to this label when we see them.  */
  vec_safe_truncate (label_vars->gotos, 0);
  label_vars->gotos = NULL;
}
/* Define a label, specifying the location in the source file.
   Return the LABEL_DECL node for the label, if the definition is valid.
   Otherwise return NULL_TREE.  */
tree
define_label (location_t location, tree name)
{
  /* Find any preexisting label with this name.  It is an error
     if that label has already been defined in this function, or
     if there is a containing function with a declared label with
     the same name.  */
  tree label = I_LABEL_DECL (name);

  if (label
      && ((DECL_CONTEXT (label) == current_function_decl
	   && DECL_INITIAL (label) != NULL_TREE)
	  || (DECL_CONTEXT (label) != current_function_decl
	      && C_DECLARED_LABEL_FLAG (label))))
    {
      error_at (location, "duplicate label %qD", label);
      locate_old_decl (label);
      return NULL_TREE;
    }
  else if (label && DECL_CONTEXT (label) == current_function_decl)
    {
      struct c_label_vars *label_vars = I_LABEL_BINDING (name)->u.label;

      /* The label has been used or declared already in this function,
	 but not defined.  Update its location to point to this
	 definition.  */
      DECL_SOURCE_LOCATION (label) = location;
      set_spot_bindings (&label_vars->label_bindings, true);

      /* Issue warnings as required about any goto statements from
	 earlier in the function.  */
      check_earlier_gotos (label, label_vars);
    }
  else
    {
      struct c_label_vars *label_vars;

      /* No label binding for that identifier; make one.  */
      label = make_label (location, name, true, &label_vars);

      /* Ordinary labels go in the current function scope.  */
      bind_label (name, label, current_function_scope, label_vars);
    }

  /* Traditional C had a single namespace for labels and ordinary
     identifiers; warn under -Wtraditional when they would clash.  */
  if (!in_system_header_at (input_location) && lookup_name (name))
    warning_at (location, OPT_Wtraditional,
		"traditional C lacks a separate namespace "
		"for labels, identifier %qE conflicts", name);

  /* Mark label as having been defined.  */
  DECL_INITIAL (label) = error_mark_node;
  return label;
}
/* Allocate and fill in the spot bindings for a new switch statement.
   The caller later hands these to c_check_switch_jump_warnings at each
   case/default label so jumps into unsafe scopes can be diagnosed.  */
struct c_spot_bindings *
c_get_switch_bindings (void)
{
  struct c_spot_bindings *ret = XNEW (struct c_spot_bindings);
  set_spot_bindings (ret, true);
  return ret;
}
/* Free the spot bindings allocated by c_get_switch_bindings once the
   switch statement is complete.  By then the switch must not be in the
   middle of a statement expression.  */
void
c_release_switch_bindings (struct c_spot_bindings *bindings)
{
  gcc_assert (!bindings->left_stmt_expr && bindings->stmt_exprs == 0);
  XDELETE (bindings);
}
/* Called at a case or default label to diagnose declarations that the
   jump from the switch would bypass.  Returns true if a hard error (as
   opposed to just a warning) was reported.  */
bool
c_check_switch_jump_warnings (struct c_spot_bindings *switch_bindings,
			      location_t switch_loc, location_t case_loc)
{
  bool found_error = false;

  /* Walk every scope between the label and the switch itself; any
     jump-unsafe declaration in those scopes is being skipped over.  */
  for (struct c_scope *s = current_scope;
       s != switch_bindings->scope;
       s = s->outer)
    {
      gcc_assert (s != NULL);
      if (!s->has_jump_unsafe_decl)
	continue;
      for (struct c_binding *b = s->bindings; b != NULL; b = b->prev)
	{
	  if (!decl_jump_unsafe (b->decl))
	    continue;
	  /* Jumping into the scope of a variably modified type is a
	     constraint violation; skipping an initializer only merits
	     a warning.  */
	  if (variably_modified_type_p (TREE_TYPE (b->decl), NULL_TREE))
	    {
	      found_error = true;
	      error_at (case_loc,
			("switch jumps into scope of identifier with "
			 "variably modified type"));
	    }
	  else
	    warning_at (case_loc, OPT_Wjump_misses_init,
			"switch jumps over variable initialization");
	  inform (switch_loc, "switch starts here");
	  inform (DECL_SOURCE_LOCATION (b->decl), "%qD declared here",
		  b->decl);
	}
    }

  /* Jumping into a statement expression is always an error.  */
  if (switch_bindings->stmt_exprs > 0)
    {
      found_error = true;
      error_at (case_loc, "switch jumps into statement expression");
      inform (switch_loc, "switch starts here");
    }

  return found_error;
}
/* Look up the struct, union or enum definition bound to NAME.
   CODE is the kind the caller expects (RECORD_TYPE, UNION_TYPE or
   ENUMERAL_TYPE).  With THISLEVEL_ONLY, only a binding in the current
   scope qualifies.  On success, if PLOC is non-null, *PLOC receives the
   location where the tag was defined.  A tag of the wrong kind queues a
   deferred cross-reference error (issued immediately when it would
   shadow a same-scope tag).  */
static tree
lookup_tag (enum tree_code code, tree name, bool thislevel_only,
	    location_t *ploc)
{
  struct c_binding *b = I_TAG_BINDING (name);
  if (b == NULL || b->decl == NULL_TREE)
    return NULL_TREE;

  /* Work out whether the binding counts as "this level".  Only needed
     when THISLEVEL_ONLY was given or the kinds might clash.  A tag in
     the external scope is treated like one in the file scope (this
     matters for Objective-C's builtin structure tags, pushed before the
     file scope exists).  */
  bool thislevel = false;
  if (thislevel_only || TREE_CODE (b->decl) != code)
    thislevel = (B_IN_CURRENT_SCOPE (b)
		 || (current_scope == file_scope
		     && B_IN_EXTERNAL_SCOPE (b)));

  if (thislevel_only && !thislevel)
    return NULL_TREE;

  if (TREE_CODE (b->decl) != code)
    {
      /* Wrong kind of tag.  Record a pending cross-reference error;
	 it is issued right away only when the new tag would otherwise
	 shadow one of a different kind in the same scope (e.g.
	 "struct foo; union foo;").  */
      pending_invalid_xref = name;
      pending_invalid_xref_location = input_location;
      if (thislevel)
	pending_xref_error ();
    }

  if (ploc != NULL)
    *ploc = b->locus;
  return b->decl;
}
/* Return true if NAME is bound to a tag whose definition has tree code
   CODE.  */
bool
tag_exists_p (enum tree_code code, tree name)
{
  struct c_binding *b = I_TAG_BINDING (name);
  return (b != NULL
	  && b->decl != NULL_TREE
	  && TREE_CODE (b->decl) == code);
}
/* Emit the deferred error for a recent invalid struct/union/enum cross
   reference, if one is pending, and clear it.  The error is deferred
   because such references are legitimate when used in the
   `struct foo;' shadowing construct.  */
void
pending_xref_error (void)
{
  if (pending_invalid_xref == NULL_TREE)
    return;
  error_at (pending_invalid_xref_location, "%qE defined as wrong kind of tag",
	    pending_invalid_xref);
  pending_invalid_xref = NULL_TREE;
}
/* Search the current scope and its enclosing scopes for NAME in the
   ordinary namespace (variables, functions, typedefs).  Return the
   ..._DECL for its definition, or NULL_TREE if there is none visible.  */
tree
lookup_name (tree name)
{
  struct c_binding *b = I_SYMBOL_BINDING (name);
  if (b == NULL || b->invisible)
    return NULL_TREE;
  maybe_record_typedef_use (b->decl);
  return b->decl;
}
/* Like lookup_name, but consider only bindings belonging to SCOPE,
   walking the chain of shadowed bindings to find one.  */
static tree
lookup_name_in_scope (tree name, struct c_scope *scope)
{
  struct c_binding *b = I_SYMBOL_BINDING (name);
  while (b != NULL)
    {
      if (B_IN_SCOPE (b, scope))
	return b->decl;
      b = b->shadowed;
    }
  return NULL_TREE;
}
/* Look for the closest match for NAME within the currently valid
   scopes.

   This finds the identifier with the lowest Levenshtein distance to
   NAME.  If there are multiple candidates with equal minimal distance,
   the first one found is returned.  Scopes are searched from innermost
   outwards, and within a scope in reverse order of declaration, thus
   benefiting candidates "near" to the current scope.

   The function also looks for similar macro names to NAME, since a
   misspelled macro name will not be expanded, and hence looks like an
   identifier to the C frontend.

   It also looks for start_typename keywords, to detect "singed" vs "signed"
   typos.

   Use LOC for any deferred diagnostics.  */
name_hint
lookup_name_fuzzy (tree name, enum lookup_name_fuzzy_kind kind, location_t loc)
{
  gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);

  /* First, try some well-known names in the C standard library, in case
     the user forgot a #include.  */
  const char *header_hint
    = get_c_stdlib_header_for_name (IDENTIFIER_POINTER (name));

  if (header_hint)
    return name_hint (NULL,
		      new suggest_missing_header (loc,
						  IDENTIFIER_POINTER (name),
						  header_hint));

  /* Only suggest names reserved for the implementation if NAME begins
     with an underscore.  */
  bool consider_implementation_names = (IDENTIFIER_POINTER (name)[0] == '_');

  best_match<tree, tree> bm (name);

  /* Look within currently valid scopes.  */
  for (c_scope *scope = current_scope; scope; scope = scope->outer)
    for (c_binding *binding = scope->bindings; binding; binding = binding->prev)
      {
	if (!binding->id || binding->invisible)
	  continue;
	if (binding->decl == error_mark_node)
	  continue;
	/* Don't use bindings from implicitly declared functions,
	   as they were likely misspellings themselves.  */
	if (TREE_CODE (binding->decl) == FUNCTION_DECL)
	  if (C_DECL_IMPLICIT (binding->decl))
	    continue;
	/* Don't suggest names that are reserved for use by the
	   implementation, unless NAME began with an underscore.  */
	if (!consider_implementation_names)
	  {
	    const char *suggestion_str = IDENTIFIER_POINTER (binding->id);
	    if (name_reserved_for_implementation_p (suggestion_str))
	      continue;
	  }
	/* Filter candidates by the kind of name the caller wants.  */
	switch (kind)
	  {
	  case FUZZY_LOOKUP_TYPENAME:
	    if (TREE_CODE (binding->decl) != TYPE_DECL)
	      continue;
	    break;
	  case FUZZY_LOOKUP_FUNCTION_NAME:
	    if (TREE_CODE (binding->decl) != FUNCTION_DECL)
	      {
		/* Allow function pointers.  */
		if ((VAR_P (binding->decl)
		     || TREE_CODE (binding->decl) == PARM_DECL)
		    && TREE_CODE (TREE_TYPE (binding->decl)) == POINTER_TYPE
		    && (TREE_CODE (TREE_TYPE (TREE_TYPE (binding->decl)))
			== FUNCTION_TYPE))
		  break;
		continue;
	      }
	    break;
	  default:
	    break;
	  }
	/* Record this identifier as a candidate; bm keeps the best
	   (lowest edit distance) seen so far.  */
	bm.consider (binding->id);
      }

  /* Consider macros: if the user misspelled a macro name e.g. "SOME_MACRO"
     as:
       x = SOME_OTHER_MACRO (y);
     then "SOME_OTHER_MACRO" will survive to the frontend and show up
     as a misspelled identifier.

     Use the best distance so far so that a candidate is only set if
     a macro is better than anything so far.  This allows early rejection
     (without calculating the edit distance) of macro names that must have
     distance >= bm.get_best_distance (), and means that we only get a
     non-NULL result for best_macro_match if it's better than any of
     the identifiers already checked, which avoids needless creation
     of identifiers for macro hashnodes.  */
  best_macro_match bmm (name, bm.get_best_distance (), parse_in);
  cpp_hashnode *best_macro = bmm.get_best_meaningful_candidate ();
  /* If a macro is the closest so far to NAME, use it, creating an
     identifier tree node for it.  */
  if (best_macro)
    {
      const char *id = (const char *)best_macro->ident.str;
      tree macro_as_identifier
	= get_identifier_with_length (id, best_macro->ident.len);
      bm.set_best_so_far (macro_as_identifier,
			  bmm.get_best_distance (),
			  bmm.get_best_candidate_length ());
    }

  /* Try the "start_typename" keywords to detect
     "singed" vs "signed" typos.  */
  if (kind == FUZZY_LOOKUP_TYPENAME)
    {
      for (unsigned i = 0; i < num_c_common_reswords; i++)
	{
	  const c_common_resword *resword = &c_common_reswords[i];
	  /* Only reserved words that can begin a type name are
	     plausible suggestions here.  */
	  if (!c_keyword_starts_typename (resword->rid))
	    continue;
	  tree resword_identifier = ridpointers [resword->rid];
	  if (!resword_identifier)
	    continue;
	  gcc_assert (TREE_CODE (resword_identifier) == IDENTIFIER_NODE);
	  bm.consider (resword_identifier);
	}
    }

  /* A "meaningful" candidate is one close enough to NAME to be worth
     suggesting; otherwise return an empty hint.  */
  tree best = bm.get_best_meaningful_candidate ();
  if (best)
    return name_hint (IDENTIFIER_POINTER (best), NULL);
  else
    return name_hint (NULL, NULL);
}
/* Create the predefined scalar types of C,
   and some nodes representing standard constants (0, 1, (void *) 0).
   Initialize the global scope.
   Make definitions for built-in primitive functions.  */
void
c_init_decl_processing (void)
{
  /* Save the current location so it can be restored after the builtin
     declarations (which are given BUILTINS_LOCATION below).  */
  location_t save_loc = input_location;

  /* Initialize reserved words for parser.  */
  c_parse_init ();

  current_function_decl = NULL_TREE;

  gcc_obstack_init (&parser_obstack);

  /* Make the externals scope.  */
  push_scope ();
  external_scope = current_scope;

  /* Declarations from c_common_nodes_and_builtins must not be associated
     with this input file, lest we get differences between using and not
     using preprocessed headers.  */
  input_location = BUILTINS_LOCATION;

  c_common_nodes_and_builtins ();

  /* In C, comparisons and TRUTH_* expressions have type int.  */
  truthvalue_type_node = integer_type_node;
  truthvalue_true_node = integer_one_node;
  truthvalue_false_node = integer_zero_node;

  /* Even in C99, which has a real boolean type.  */
  pushdecl (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier ("_Bool"),
			boolean_type_node));

  input_location = save_loc;

  /* Register the callback used to build __FUNCTION__ etc. decls, and
     set up the stack of pending __func__-style declarations.  */
  make_fname_decl = c_make_fname_decl;
  start_fname_decls ();
}
/* Create the VAR_DECL at LOC for __FUNCTION__ etc. ID is the name to
   give the decl, NAME is the initialization string and TYPE_DEP
   indicates whether NAME depended on the type of the function.  As we
   don't yet implement delayed emission of static data, we mark the
   decl as emitted so it is not placed in the output.  Anything using
   it must therefore pull out the STRING_CST initializer directly.
   FIXME.  */
static tree
c_make_fname_decl (location_t loc, tree id, int type_dep)
{
  const char *name = fname_as_string (type_dep);
  tree decl, type, init;
  size_t length = strlen (name);

  /* The decl's type is a const array of char just big enough for the
     string (index type covers 0..length, i.e. length+1 elements).  */
  type = build_array_type (char_type_node,
			   build_index_type (size_int (length)));
  type = c_build_qualified_type (type, TYPE_QUAL_CONST);

  decl = build_decl (loc, VAR_DECL, id, type);

  TREE_STATIC (decl) = 1;
  TREE_READONLY (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;

  /* length + 1 accounts for the terminating NUL.  NAME was allocated
     by fname_as_string, so release it once copied into the STRING_CST.  */
  init = build_string (length + 1, name);
  free (CONST_CAST (char *, name));
  TREE_TYPE (init) = type;
  DECL_INITIAL (decl) = init;

  TREE_USED (decl) = 1;

  if (current_function_decl
      /* For invalid programs like this:

        void foo()
        const char* p = __FUNCTION__;

	 the __FUNCTION__ is believed to appear in K&R style function
	 parameter declarator.  In that case we still don't have
	 function_scope.  */
      && current_function_scope)
    {
      DECL_CONTEXT (decl) = current_function_decl;
      bind (id, decl, current_function_scope,
	    /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
    }

  finish_decl (decl, loc, init, NULL_TREE, NULL_TREE);

  return decl;
}
/* Register the builtin function DECL in the external scope as an
   invisible binding (it only becomes visible once declared, except for
   implementation-namespace names).  Returns DECL.  */
tree
c_builtin_function (tree decl)
{
  tree id = DECL_NAME (decl);
  const char *name = IDENTIFIER_POINTER (id);
  C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (TREE_TYPE (decl));

  /* Should never be called on a symbol with a preexisting meaning.  */
  gcc_assert (!I_SYMBOL_BINDING (id));

  bind (id, decl, external_scope, /*invisible=*/true, /*nested=*/false,
	UNKNOWN_LOCATION);

  /* Builtins in the implementation namespace are made visible without
     needing to be explicitly declared.  See push_file_scope.  */
  if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
    {
      DECL_CHAIN (decl) = visible_builtins;
      visible_builtins = decl;
    }

  return decl;
}
/* Register the builtin function DECL in the external scope as a
   visible binding (unlike c_builtin_function, no prior declaration is
   required for it to be found).  Returns DECL.  */
tree
c_builtin_function_ext_scope (tree decl)
{
  tree id = DECL_NAME (decl);
  const char *name = IDENTIFIER_POINTER (id);
  C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (TREE_TYPE (decl));

  if (external_scope)
    bind (id, decl, external_scope, /*invisible=*/false, /*nested=*/false,
	  UNKNOWN_LOCATION);

  /* Builtins in the implementation namespace are made visible without
     needing to be explicitly declared.  See push_file_scope.  */
  if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
    {
      DECL_CHAIN (decl) = visible_builtins;
      visible_builtins = decl;
    }

  return decl;
}
/* Handle a declaration that declares no names.  If its type refers to
   a struct, union or enum tag inherited from an outer scope, shadow
   that tag in the current scope with a forward reference.  A
   declaration that defines a new named tag is valid and needs no work;
   anything else is diagnosed.  Delegates to shadow_tag_warned with no
   prior diagnostics recorded.  */
void
shadow_tag (const struct c_declspecs *declspecs)
{
  shadow_tag_warned (declspecs, 0);
}
/* WARNED is 1 if we have done a pedwarn, 2 if we have done a warning,
   but no pedwarn.  */
void
shadow_tag_warned (const struct c_declspecs *declspecs, int warned)
{
  bool found_tag = false;

  if (declspecs->type && !declspecs->default_int_p && !declspecs->typedef_p)
    {
      tree value = declspecs->type;
      enum tree_code code = TREE_CODE (value);

      if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE)
	/* Used to test also that TYPE_SIZE (value) != 0.
	   That caused warning for `struct foo;' at top level in the file.  */
	{
	  tree name = TYPE_NAME (value);
	  tree t;

	  found_tag = true;

	  if (declspecs->restrict_p)
	    {
	      error ("invalid use of %<restrict%>");
	      warned = 1;
	    }

	  if (name == NULL_TREE)
	    {
	      /* An unnamed struct/union that declares nothing is
		 useless; an empty unnamed enum is tolerated.  */
	      if (warned != 1 && code != ENUMERAL_TYPE)
		/* Empty unnamed enum OK */
		{
		  pedwarn (input_location, 0,
			   "unnamed struct/union that defines no instances");
		  warned = 1;
		}
	    }
	  else if (declspecs->typespec_kind != ctsk_tagdef
                   && declspecs->typespec_kind != ctsk_tagfirstref
		   && declspecs->storage_class != csc_none)
	    {
	      /* E.g. "static struct foo;" — the storage class makes
		 this an empty declaration, not a tag redeclaration.  */
	      if (warned != 1)
		pedwarn (input_location, 0,
			 "empty declaration with storage class specifier "
			 "does not redeclare tag");
	      warned = 1;
	      pending_xref_error ();
	    }
	  else if (declspecs->typespec_kind != ctsk_tagdef
                   && declspecs->typespec_kind != ctsk_tagfirstref
		   && (declspecs->const_p
		       || declspecs->volatile_p
		       || declspecs->atomic_p
		       || declspecs->restrict_p
		       || declspecs->address_space))
	    {
	      /* E.g. "const struct foo;" — likewise for qualifiers.  */
	      if (warned != 1)
		pedwarn (input_location, 0,
			 "empty declaration with type qualifier "
			  "does not redeclare tag");
	      warned = 1;
	      pending_xref_error ();
	    }
	  else if (declspecs->typespec_kind != ctsk_tagdef
                   && declspecs->typespec_kind != ctsk_tagfirstref
		   && declspecs->alignas_p)
	    {
	      /* E.g. "_Alignas (8) struct foo;".  */
	      if (warned != 1)
		pedwarn (input_location, 0,
			 "empty declaration with %<_Alignas%> "
			  "does not redeclare tag");
	      warned = 1;
	      pending_xref_error ();
	    }
	  else
	    {
	      /* The valid shadowing case: bind a forward reference for
		 the tag in the current scope if it is not already
		 bound here.  */
	      pending_invalid_xref = NULL_TREE;
	      t = lookup_tag (code, name, true, NULL);

	      if (t == NULL_TREE)
		{
		  t = make_node (code);
		  pushtag (input_location, name, t);
		}
	    }
	}
      else
	{
	  if (warned != 1 && !in_system_header_at (input_location))
	    {
	      pedwarn (input_location, 0,
		       "useless type name in empty declaration");
	      warned = 1;
	    }
	}
    }
  else if (warned != 1 && !in_system_header_at (input_location)
	   && declspecs->typedef_p)
    {
      pedwarn (input_location, 0, "useless type name in empty declaration");
      warned = 1;
    }

  pending_invalid_xref = NULL_TREE;

  /* The remaining checks diagnose specifiers that are meaningless in a
     declaration that declares nothing; hard errors first, then
     warnings (which only fire if nothing was pedwarned above).  */
  if (declspecs->inline_p)
    {
      error ("%<inline%> in empty declaration");
      warned = 1;
    }

  if (declspecs->noreturn_p)
    {
      error ("%<_Noreturn%> in empty declaration");
      warned = 1;
    }

  if (current_scope == file_scope && declspecs->storage_class == csc_auto)
    {
      error ("%<auto%> in file-scope empty declaration");
      warned = 1;
    }

  if (current_scope == file_scope && declspecs->storage_class == csc_register)
    {
      error ("%<register%> in file-scope empty declaration");
      warned = 1;
    }

  if (!warned && !in_system_header_at (input_location)
      && declspecs->storage_class != csc_none)
    {
      warning (0, "useless storage class specifier in empty declaration");
      warned = 2;
    }

  if (!warned && !in_system_header_at (input_location) && declspecs->thread_p)
    {
      warning (0, "useless %qs in empty declaration",
	       declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
      warned = 2;
    }

  if (!warned
      && !in_system_header_at (input_location)
      && (declspecs->const_p
	  || declspecs->volatile_p
	  || declspecs->atomic_p
	  || declspecs->restrict_p
	  || declspecs->address_space))
    {
      warning (0, "useless type qualifier in empty declaration");
      warned = 2;
    }

  if (!warned && !in_system_header_at (input_location)
      && declspecs->alignas_p)
    {
      warning (0, "useless %<_Alignas%> in empty declaration");
      warned = 2;
    }

  if (warned != 1)
    {
      /* If no tag was found and nothing was pedwarned yet, the whole
	 declaration is empty.  */
      if (!found_tag)
	pedwarn (input_location, 0, "empty declaration");
    }
}
/* Convert the qualifiers in SPECS into a TYPE_QUAL_* bitmask.  SPECS
   comes from grammar positions that permit only type qualifiers and
   attributes, which the assertion below verifies.  */
int
quals_from_declspecs (const struct c_declspecs *specs)
{
  int quals = ENCODE_QUAL_ADDR_SPACE (specs->address_space);
  if (specs->const_p)
    quals |= TYPE_QUAL_CONST;
  if (specs->volatile_p)
    quals |= TYPE_QUAL_VOLATILE;
  if (specs->restrict_p)
    quals |= TYPE_QUAL_RESTRICT;
  if (specs->atomic_p)
    quals |= TYPE_QUAL_ATOMIC;

  /* Nothing other than qualifiers may be present in SPECS here.  */
  gcc_assert (!specs->type
	      && !specs->decl_attr
	      && specs->typespec_word == cts_none
	      && specs->storage_class == csc_none
	      && !specs->typedef_p
	      && !specs->explicit_signed_p
	      && !specs->deprecated_p
	      && !specs->long_p
	      && !specs->long_long_p
	      && !specs->short_p
	      && !specs->signed_p
	      && !specs->unsigned_p
	      && !specs->complex_p
	      && !specs->inline_p
	      && !specs->noreturn_p
	      && !specs->thread_p);

  return quals;
}
/* Build an array declarator.  LOC is the location of the opening
   bracket.  EXPR is the dimension expression inside [], or NULL_TREE.
   QUALS holds any type qualifiers written inside the [] (applied to
   the pointer a parameter array decays to).  STATIC_P is true when
   "static" appears inside the [].  VLA_UNSPEC_P is true for a [*]
   declarator — a VLA of unspecified length that is nonetheless a
   complete type.  The contained declarator is filled in later by
   set_array_declarator_inner.  */
struct c_declarator *
build_array_declarator (location_t loc,
			tree expr, struct c_declspecs *quals, bool static_p,
			bool vla_unspec_p)
{
  struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
  ret->id_loc = loc;
  ret->kind = cdk_array;
  ret->declarator = 0;
  ret->u.array.dimen = expr;
  if (quals != NULL)
    {
      ret->u.array.attrs = quals->attrs;
      ret->u.array.quals = quals_from_declspecs (quals);
    }
  else
    {
      ret->u.array.attrs = NULL_TREE;
      ret->u.array.quals = 0;
    }
  ret->u.array.static_p = static_p;
  ret->u.array.vla_unspec_p = vla_unspec_p;

  /* Both "static" and qualifiers inside [] are C99 features.  */
  if (static_p || quals != NULL)
    pedwarn_c90 (loc, OPT_Wpedantic,
		 "ISO C90 does not support %<static%> or type "
		 "qualifiers in parameter array declarators");

  if (vla_unspec_p)
    {
      pedwarn_c90 (loc, OPT_Wpedantic,
		   "ISO C90 does not support %<[*]%> array declarators");
      if (!current_scope->parm_flag)
	{
	  /* C99 6.7.5.2p4 */
	  error_at (loc, "%<[*]%> not allowed in other than "
		    "function prototype scope");
	  ret->u.array.vla_unspec_p = false;
	  return NULL;
	}
      current_scope->had_vla_unspec = true;
    }

  return ret;
}
/* Attach INNER (the declarator appearing to the left of the []) as the
   contained declarator of DECL, an array declarator previously built
   by build_array_declarator, and return DECL.  */
struct c_declarator *
set_array_declarator_inner (struct c_declarator *decl,
			    struct c_declarator *inner)
{
  decl->declarator = inner;
  return decl;
}
/* INIT is the constructor initializing DECL.  If its last element
   initializes a flexible array member, grow DECL's recorded size by
   the size that initializer implies.  */
static void
add_flexible_array_elts_to_size (tree decl, tree init)
{
  if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init)))
    return;

  tree elt = CONSTRUCTOR_ELTS (init)->last ().value;
  tree type = TREE_TYPE (elt);

  /* A flexible array member is an incomplete array type with a domain
     but no maximum value; anything else leaves DECL's size alone.  */
  if (TREE_CODE (type) != ARRAY_TYPE
      || TYPE_SIZE (type) != NULL_TREE
      || TYPE_DOMAIN (type) == NULL_TREE
      || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != NULL_TREE)
    return;

  complete_array_type (&type, elt, false);
  DECL_SIZE (decl)
    = size_binop (PLUS_EXPR, DECL_SIZE (decl), TYPE_SIZE (type));
  DECL_SIZE_UNIT (decl)
    = size_binop (PLUS_EXPR, DECL_SIZE_UNIT (decl), TYPE_SIZE_UNIT (type));
}
/* Decode a "typename" such as "int **" into a ..._TYPE node.  If EXPR
   is non-null, set *EXPR to any expression that must be evaluated
   before the type name; if EXPR_CONST_OPERANDS is non-null, set it to
   whether the type name may appear in a constant expression.  */
tree
groktypename (struct c_type_name *type_name, tree *expr,
	      bool *expr_const_operands)
{
  /* Detach the attributes so grokdeclarator does not consume them;
     they are applied to the finished type below.  */
  tree attrs = type_name->specs->attrs;
  type_name->specs->attrs = NULL_TREE;

  tree type = grokdeclarator (type_name->declarator, type_name->specs,
			      TYPENAME, false, NULL, &attrs, expr,
			      expr_const_operands, DEPRECATED_NORMAL);

  /* Apply attributes.  */
  decl_attributes (&type, attrs, 0);

  return type;
}
/* Wrapper around decl_attributes that prepends implicit attributes to
   VAR_DECLs and FUNCTION_DECLs before delegating.  */
static tree
c_decl_attributes (tree *node, tree attributes, int flags)
{
  /* Inside an "omp declare target" region, global variables and
     functions implicitly receive the corresponding attribute; variables
     of non-mappable type get the "implicit" variant.  */
  if (current_omp_declare_target_attribute
      && ((VAR_P (*node) && is_global_var (*node))
	  || TREE_CODE (*node) == FUNCTION_DECL))
    {
      bool mappable = (!VAR_P (*node)
		       || lang_hooks.types.omp_mappable_type
			    (TREE_TYPE (*node)));
      tree attr_id
	= get_identifier (mappable
			  ? "omp declare target"
			  : "omp declare target implicit");
      attributes = tree_cons (attr_id, NULL_TREE, attributes);
    }

  /* Look up the current declaration with all the attributes merged
     so far so that attributes on the current declaration that's
     about to be pushed that conflict with the former can be detected,
     diagnosed, and rejected as appropriate.  */
  tree last_decl = lookup_name (DECL_NAME (*node));
  if (last_decl == NULL_TREE)
    last_decl = lookup_name_in_scope (DECL_NAME (*node), external_scope);
  return decl_attributes (node, attributes, flags, last_decl);
}
/* Decode a declarator in an ordinary declaration or data definition.
   This is called as soon as the type information and variable name
   have been parsed, before parsing the initializer if any.
   Here we create the ..._DECL node, fill in its type,
   and put it on the list of decls for the current context.
   The ..._DECL node is returned as the value.

   Exception: for arrays where the length is not specified,
   the type is left null, to be filled in by `finish_decl'.

   Function definitions do not come here; they go to start_function
   instead.  However, external and forward declarations of functions
   do go through here.  Structure field declarations are done by
   grokfield and not through here.  */

tree
start_decl (struct c_declarator *declarator, struct c_declspecs *declspecs,
	    bool initialized, tree attributes)
{
  tree decl;
  tree tem;
  tree expr = NULL_TREE;
  enum deprecated_states deprecated_state = DEPRECATED_NORMAL;

  /* An object declared as __attribute__((deprecated)) suppresses
     warnings of uses of other deprecated items.  */
  if (lookup_attribute ("deprecated", attributes))
    deprecated_state = DEPRECATED_SUPPRESS;

  decl = grokdeclarator (declarator, declspecs,
			 NORMAL, initialized, NULL, &attributes, &expr, NULL,
			 deprecated_state);
  if (!decl || decl == error_mark_node)
    return NULL_TREE;

  /* EXPR is a side-effecting expression produced while decoding the
     declarator (e.g. a VLA size); emit it now so it is evaluated
     before the declaration.  */
  if (expr)
    add_stmt (fold_convert (void_type_node, expr));

  if (TREE_CODE (decl) != FUNCTION_DECL && MAIN_NAME_P (DECL_NAME (decl)))
    warning (OPT_Wmain, "%q+D is usually a function", decl);

  if (initialized)
    /* Is it valid for this decl to have an initializer at all?
       If not, set INITIALIZED to zero, which will indirectly
       tell 'finish_decl' to ignore the initializer once it is parsed.  */
    switch (TREE_CODE (decl))
      {
      case TYPE_DECL:
	error ("typedef %qD is initialized (use __typeof__ instead)", decl);
	initialized = false;
	break;

      case FUNCTION_DECL:
	error ("function %qD is initialized like a variable", decl);
	initialized = false;
	break;

      case PARM_DECL:
	/* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE.  */
	error ("parameter %qD is initialized", decl);
	initialized = false;
	break;

      default:
	/* Don't allow initializations for incomplete types except for
	   arrays which might be completed by the initialization.  */

	/* This can happen if the array size is an undefined macro.
	   We already gave a warning, so we don't need another one.  */
	if (TREE_TYPE (decl) == error_mark_node)
	  initialized = false;
	else if (COMPLETE_TYPE_P (TREE_TYPE (decl)))
	  {
	    /* A complete type is ok if size is fixed.  */

	    if (TREE_CODE (TYPE_SIZE (TREE_TYPE (decl))) != INTEGER_CST
		|| C_DECL_VARIABLE_SIZE (decl))
	      {
		error ("variable-sized object may not be initialized");
		initialized = false;
	      }
	  }
	else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
	  {
	    error ("variable %qD has initializer but incomplete type", decl);
	    initialized = false;
	  }
	else if (C_DECL_VARIABLE_SIZE (decl))
	  {
	    /* Although C99 is unclear about whether incomplete arrays
	       of VLAs themselves count as VLAs, it does not make
	       sense to permit them to be initialized given that
	       ordinary VLAs may not be initialized.  */
	    error ("variable-sized object may not be initialized");
	    initialized = false;
	  }
      }

  if (initialized)
    {
      if (current_scope == file_scope)
	TREE_STATIC (decl) = 1;

      /* Tell 'pushdecl' this is an initialized decl
	 even though we don't yet have the initializer expression.
	 Also tell 'finish_decl' it may store the real initializer.  */
      DECL_INITIAL (decl) = error_mark_node;
    }

  /* If this is a function declaration, write a record describing it to the
     prototypes file (if requested).  */

  if (TREE_CODE (decl) == FUNCTION_DECL)
    gen_aux_info_record (decl, 0, 0, prototype_p (TREE_TYPE (decl)));

  /* ANSI specifies that a tentative definition which is not merged with
     a non-tentative definition behaves exactly like a definition with an
     initializer equal to zero.  (Section 3.7.2)

     -fno-common gives strict ANSI behavior, though this tends to break
     a large body of code that grew up without this rule.

     Thread-local variables are never common, since there's no entrenched
     body of code to break, and it allows more efficient variable references
     in the presence of dynamic linking.  */

  if (VAR_P (decl)
      && !initialized
      && TREE_PUBLIC (decl)
      && !DECL_THREAD_LOCAL_P (decl)
      && !flag_no_common)
    DECL_COMMON (decl) = 1;

  /* Set attributes here so if duplicate decl, will have proper attributes.  */
  c_decl_attributes (&decl, attributes, 0);

  /* Handle gnu_inline attribute.  */
  if (declspecs->inline_p
      && !flag_gnu89_inline
      && TREE_CODE (decl) == FUNCTION_DECL
      && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl))
	  || current_function_decl))
    {
      /* A block-scope "auto inline" declaration is left alone; any
	 other gnu_inline declaration toggles DECL_EXTERNAL so the
	 gnu89 inline semantics are emulated under C99 rules.  */
      if (declspecs->storage_class == csc_auto && current_scope != file_scope)
	;
      else if (declspecs->storage_class != csc_static)
	DECL_EXTERNAL (decl) = !DECL_EXTERNAL (decl);
    }

  if (TREE_CODE (decl) == FUNCTION_DECL
      && targetm.calls.promote_prototypes (TREE_TYPE (decl)))
    {
      struct c_declarator *ce = declarator;

      if (ce->kind == cdk_pointer)
	ce = declarator->declarator;
      if (ce->kind == cdk_function)
	{
	  tree args = ce->u.arg_info->parms;
	  for (; args; args = DECL_CHAIN (args))
	    {
	      tree type = TREE_TYPE (args);
	      if (type && INTEGRAL_TYPE_P (type)
		  && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
		DECL_ARG_TYPE (args) = c_type_promotes_to (type);
	    }
	}
    }

  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DECLARED_INLINE_P (decl)
      && DECL_UNINLINABLE (decl)
      && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl)))
    warning (OPT_Wattributes, "inline function %q+D given attribute noinline",
	     decl);

  /* C99 6.7.4p3: An inline definition of a function with external
     linkage shall not contain a definition of a modifiable object
     with static storage duration...  */
  if (VAR_P (decl)
      && current_scope != file_scope
      && TREE_STATIC (decl)
      && !TREE_READONLY (decl)
      && DECL_DECLARED_INLINE_P (current_function_decl)
      && DECL_EXTERNAL (current_function_decl))
    record_inline_static (input_location, current_function_decl,
			  decl, csi_modifiable);

  if (c_dialect_objc ()
      && VAR_OR_FUNCTION_DECL_P (decl))
      objc_check_global_decl (decl);

  /* Add this decl to the current scope.
     TEM may equal DECL or it may be a previous decl of the same name.  */
  tem = pushdecl (decl);

  /* An initialized declaration of a previously "extern" symbol becomes
     a definition with static storage.  */
  if (initialized && DECL_EXTERNAL (tem))
    {
      DECL_EXTERNAL (tem) = 0;
      TREE_STATIC (tem) = 1;
    }

  return tem;
}
/* Helper for finish_decl.  TYPE is the type of the uninitialized
   object DECL (or its non-array element type for an array).  Warn, for
   C++ compatibility, about any const member of TYPE left
   uninitialized, recursing into nested structs/unions.  */
static void
diagnose_uninitialized_cst_member (tree decl, tree type)
{
  for (tree field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
	continue;

      tree field_type = strip_array_types (TREE_TYPE (field));
      if (TYPE_QUALS (field_type) & TYPE_QUAL_CONST)
	{
	  warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
		      "uninitialized const member in %qT is invalid in C++",
		      strip_array_types (TREE_TYPE (decl)));
	  inform (DECL_SOURCE_LOCATION (field), "%qD should be initialized", field);
	}

      /* Members of aggregate type may themselves contain const
	 members; check them too.  */
      if (RECORD_OR_UNION_TYPE_P (field_type))
	diagnose_uninitialized_cst_member (decl, field_type);
    }
}
/* Finish processing of a declaration;
   install its initial value.
   If ORIGTYPE is not NULL_TREE, it is the original type of INIT.
   If the length of an array type is not known before,
   it must be determined now, from the initial value, or it is an error.
   INIT_LOC is the location of the initial value.  */
void
finish_decl (tree decl, location_t init_loc, tree init,
	     tree origtype, tree asmspec_tree)
{
  tree type;
  /* Remember whether the size was still unknown on entry; used at the
     end to recompute layout for local arrays completed by INIT.  */
  bool was_incomplete = (DECL_SIZE (decl) == NULL_TREE);
  const char *asmspec = 0;
  /* If a name was specified, get the string.  */
  if (VAR_OR_FUNCTION_DECL_P (decl)
      && DECL_FILE_SCOPE_P (decl))
    asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree);
  if (asmspec_tree)
    asmspec = TREE_STRING_POINTER (asmspec_tree);
  if (VAR_P (decl)
      && TREE_STATIC (decl)
      && global_bindings_p ())
    /* So decl is a global variable.  Record the types it uses
       so that we can decide later to emit debug info for them.  */
    record_types_used_by_current_var_decl (decl);
  /* If `start_decl' didn't like having an initialization, ignore it now.  */
  if (init != NULL_TREE && DECL_INITIAL (decl) == NULL_TREE)
    init = NULL_TREE;
  /* Don't crash if parm is initialized.  */
  if (TREE_CODE (decl) == PARM_DECL)
    init = NULL_TREE;
  if (init)
    store_init_value (init_loc, decl, init, origtype);
  if (c_dialect_objc () && (VAR_OR_FUNCTION_DECL_P (decl)
			    || TREE_CODE (decl) == FIELD_DECL))
    objc_check_decl (decl);
  type = TREE_TYPE (decl);
  /* Deduce size of array from initialization, if not already known.  */
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == NULL_TREE
      && TREE_CODE (decl) != TYPE_DECL)
    {
      bool do_default
	= (TREE_STATIC (decl)
	   /* Even if pedantic, an external linkage array
	      may have incomplete type at first.  */
	   ? pedantic && !TREE_PUBLIC (decl)
	   : !DECL_EXTERNAL (decl));
      int failure
	= complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl),
			       do_default);
      /* Get the completed type made by complete_array_type.  */
      type = TREE_TYPE (decl);
      /* FAILURE codes: 0 = success, 1 = no size deducible from the
	 initializer, 2 = no initializer at all, 3 = bad size.  */
      switch (failure)
	{
	case 1:
	  error ("initializer fails to determine size of %q+D", decl);
	  break;
	case 2:
	  if (do_default)
	    error ("array size missing in %q+D", decl);
	  /* If a `static' var's size isn't known,
	     make it extern as well as static, so it does not get
	     allocated.
	     If it is not `static', then do not mark extern;
	     finish_incomplete_decl will give it a default size
	     and it will get allocated.  */
	  else if (!pedantic && TREE_STATIC (decl) && !TREE_PUBLIC (decl))
	    DECL_EXTERNAL (decl) = 1;
	  break;
	case 3:
	  error ("zero or negative size array %q+D", decl);
	  break;
	case 0:
	  /* For global variables, update the copy of the type that
	     exists in the binding.  */
	  if (TREE_PUBLIC (decl))
	    {
	      struct c_binding *b_ext = I_SYMBOL_BINDING (DECL_NAME (decl));
	      while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
		b_ext = b_ext->shadowed;
	      if (b_ext && TREE_CODE (decl) == TREE_CODE (b_ext->decl))
		{
		  if (b_ext->u.type && comptypes (b_ext->u.type, type))
		    b_ext->u.type = composite_type (b_ext->u.type, type);
		  else
		    b_ext->u.type = type;
		}
	    }
	  break;
	default:
	  gcc_unreachable ();
	}
      if (DECL_INITIAL (decl))
	TREE_TYPE (DECL_INITIAL (decl)) = type;
      relayout_decl (decl);
    }
  if (VAR_P (decl))
    {
      if (init && TREE_CODE (init) == CONSTRUCTOR)
	add_flexible_array_elts_to_size (decl, init);
      /* Complete the layout now that the type may have been completed.  */
      if (DECL_SIZE (decl) == NULL_TREE && TREE_TYPE (decl) != error_mark_node
	  && COMPLETE_TYPE_P (TREE_TYPE (decl)))
	layout_decl (decl, 0);
      if (DECL_SIZE (decl) == NULL_TREE
	  /* Don't give an error if we already gave one earlier.  */
	  && TREE_TYPE (decl) != error_mark_node
	  && (TREE_STATIC (decl)
	      /* A static variable with an incomplete type
		 is an error if it is initialized.
		 Also if it is not file scope.
		 Otherwise, let it through, but if it is not `extern'
		 then it may cause an error message later.  */
	      ? (DECL_INITIAL (decl) != NULL_TREE
		 || !DECL_FILE_SCOPE_P (decl))
	      /* An automatic variable with an incomplete type
		 is an error.  */
	      : !DECL_EXTERNAL (decl)))
	{
	  error ("storage size of %q+D isn%'t known", decl);
	  TREE_TYPE (decl) = error_mark_node;
	}
      /* Remember statics with still-incomplete aggregate type; they may
	 be completed later in the translation unit.  */
      if ((RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl))
	   || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
	  && DECL_SIZE (decl) == NULL_TREE
	  && TREE_STATIC (decl))
	incomplete_record_decls.safe_push (decl);
      if (is_global_var (decl) && DECL_SIZE (decl) != NULL_TREE)
	{
	  if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
	    constant_expression_warning (DECL_SIZE (decl));
	  else
	    {
	      error ("storage size of %q+D isn%'t constant", decl);
	      TREE_TYPE (decl) = error_mark_node;
	    }
	}
      /* Propagate use of the type to the decl itself, so we don't warn
	 about it being unused.  */
      if (TREE_USED (type))
	{
	  TREE_USED (decl) = 1;
	  DECL_READ_P (decl) = 1;
	}
    }
  /* If this is a function and an assembler name is specified, reset DECL_RTL
     so we can give it its new name.  Also, update builtin_decl if it
     was a normal built-in.  */
  if (TREE_CODE (decl) == FUNCTION_DECL && asmspec)
    {
      if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
	set_builtin_user_assembler_name (decl, asmspec);
      set_user_assembler_name (decl, asmspec);
    }
  /* If #pragma weak was used, mark the decl weak now.  */
  maybe_apply_pragma_weak (decl);
  /* Output the assembler code and/or RTL code for variables and functions,
     unless the type is an undefined structure or union.
     If not, it will get done when the type is completed.  */
  if (VAR_OR_FUNCTION_DECL_P (decl))
    {
      /* Determine the ELF visibility.  */
      if (TREE_PUBLIC (decl))
	c_determine_visibility (decl);
      /* This is a no-op in c-lang.c or something real in objc-act.c.  */
      if (c_dialect_objc ())
	objc_check_decl (decl);
      if (asmspec)
	{
	  /* If this is not a static variable, issue a warning.
	     It doesn't make any sense to give an ASMSPEC for an
	     ordinary, non-register local variable.  Historically,
	     GCC has accepted -- but ignored -- the ASMSPEC in
	     this case.  */
	  if (!DECL_FILE_SCOPE_P (decl)
	      && VAR_P (decl)
	      && !C_DECL_REGISTER (decl)
	      && !TREE_STATIC (decl))
	    warning (0, "ignoring asm-specifier for non-static local "
		     "variable %q+D", decl);
	  else
	    set_user_assembler_name (decl, asmspec);
	}
      if (DECL_FILE_SCOPE_P (decl))
	{
	  if (DECL_INITIAL (decl) == NULL_TREE
	      || DECL_INITIAL (decl) == error_mark_node)
	    /* Don't output anything
	       when a tentative file-scope definition is seen.
	       But at end of compilation, do output code for them.  */
	    DECL_DEFER_OUTPUT (decl) = 1;
	  if (asmspec && VAR_P (decl) && C_DECL_REGISTER (decl))
	    DECL_HARD_REGISTER (decl) = 1;
	  rest_of_decl_compilation (decl, true, 0);
	}
      else
	{
	  /* In conjunction with an ASMSPEC, the `register'
	     keyword indicates that we should place the variable
	     in a particular register.  */
	  if (asmspec && C_DECL_REGISTER (decl))
	    {
	      DECL_HARD_REGISTER (decl) = 1;
	      /* This cannot be done for a structure with volatile
		 fields, on which DECL_REGISTER will have been
		 reset.  */
	      if (!DECL_REGISTER (decl))
		error ("cannot put object with volatile field into register");
	    }
	  if (TREE_CODE (decl) != FUNCTION_DECL)
	    {
	      /* If we're building a variable sized type, and we might be
		 reachable other than via the top of the current binding
		 level, then create a new BIND_EXPR so that we deallocate
		 the object at the right time.  */
	      /* Note that DECL_SIZE can be null due to errors.  */
	      if (DECL_SIZE (decl)
		  && !TREE_CONSTANT (DECL_SIZE (decl))
		  && STATEMENT_LIST_HAS_LABEL (cur_stmt_list))
		{
		  tree bind;
		  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
		  TREE_SIDE_EFFECTS (bind) = 1;
		  add_stmt (bind);
		  BIND_EXPR_BODY (bind) = push_stmt_list ();
		}
	      add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl),
				    DECL_EXPR, decl));
	    }
	}
      if (!DECL_FILE_SCOPE_P (decl))
	{
	  /* Recompute the RTL of a local array now
	     if it used to be an incomplete type.  */
	  if (was_incomplete && !is_global_var (decl))
	    {
	      /* If we used it already as memory, it must stay in memory.  */
	      TREE_ADDRESSABLE (decl) = TREE_USED (decl);
	      /* If it's still incomplete now, no init will save it.  */
	      if (DECL_SIZE (decl) == NULL_TREE)
		DECL_INITIAL (decl) = NULL_TREE;
	    }
	}
    }
  if (TREE_CODE (decl) == TYPE_DECL)
    {
      /* Variably modified typedefs need a DECL_EXPR so the size
	 expressions are evaluated at the point of declaration.  */
      if (!DECL_FILE_SCOPE_P (decl)
	  && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
	add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl));
      rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl), 0);
    }
  /* Install a cleanup (aka destructor) if one was given.  */
  if (VAR_P (decl) && !TREE_STATIC (decl))
    {
      tree attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl));
      if (attr)
	{
	  tree cleanup_id = TREE_VALUE (TREE_VALUE (attr));
	  tree cleanup_decl = lookup_name (cleanup_id);
	  tree cleanup;
	  vec<tree, va_gc> *v;
	  /* Build "cleanup(&decl)" for the destructor.  */
	  cleanup = build_unary_op (input_location, ADDR_EXPR, decl, false);
	  vec_alloc (v, 1);
	  v->quick_push (cleanup);
	  cleanup = c_build_function_call_vec (DECL_SOURCE_LOCATION (decl),
					       vNULL, cleanup_decl, v, NULL);
	  vec_free (v);
	  /* Don't warn about decl unused; the cleanup uses it.  */
	  TREE_USED (decl) = 1;
	  TREE_USED (cleanup_decl) = 1;
	  DECL_READ_P (decl) = 1;
	  push_cleanup (decl, cleanup, false);
	}
    }
  /* -Wc++-compat: an uninitialized const object (or aggregate with a
     const member) would be ill-formed in C++.  */
  if (warn_cxx_compat
      && VAR_P (decl)
      && !DECL_EXTERNAL (decl)
      && DECL_INITIAL (decl) == NULL_TREE)
    {
      type = strip_array_types (type);
      if (TREE_READONLY (decl))
	warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
		    "uninitialized const %qD is invalid in C++", decl);
      else if (RECORD_OR_UNION_TYPE_P (type)
	       && C_TYPE_FIELDS_READONLY (type))
	diagnose_uninitialized_cst_member (decl, type);
    }
  /* Turn the internal "omp declare target implicit" marker into the
     real "omp declare target" attribute, if the type is mappable.  */
  if (flag_openmp
      && VAR_P (decl)
      && lookup_attribute ("omp declare target implicit",
			   DECL_ATTRIBUTES (decl)))
    {
      DECL_ATTRIBUTES (decl)
	= remove_attribute ("omp declare target implicit",
			    DECL_ATTRIBUTES (decl));
      if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (decl)))
	error ("%q+D in declare target directive does not have mappable type",
	       decl);
      else if (!lookup_attribute ("omp declare target",
				  DECL_ATTRIBUTES (decl))
	       && !lookup_attribute ("omp declare target link",
				     DECL_ATTRIBUTES (decl)))
	DECL_ATTRIBUTES (decl)
	  = tree_cons (get_identifier ("omp declare target"),
		       NULL_TREE, DECL_ATTRIBUTES (decl));
    }
  invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl);
}
/* Given a parsed parameter declaration, decode it into a PARM_DECL.
   EXPR is NULL or a pointer to an expression that needs to be
   evaluated for the side effects of array size expressions in the
   parameters.  */
tree
grokparm (const struct c_parm *parm, tree *expr)
{
  tree parm_attrs = parm->attrs;
  tree parm_decl;

  /* Build the PARM_DECL; grokdeclarator may adjust PARM_ATTRS.  */
  parm_decl = grokdeclarator (parm->declarator, parm->specs, PARM, false,
			      NULL, &parm_attrs, expr, NULL,
			      DEPRECATED_NORMAL);
  decl_attributes (&parm_decl, parm_attrs, 0);

  return parm_decl;
}
/* Given a parsed parameter declaration, decode it into a PARM_DECL
   and push that on the current scope.  EXPR is a pointer to an
   expression that needs to be evaluated for the side effects of array
   size expressions in the parameters.  */
void
push_parm_decl (const struct c_parm *parm, tree *expr)
{
  tree parm_attrs = parm->attrs;
  tree parm_decl = grokdeclarator (parm->declarator, parm->specs, PARM,
				   false, NULL, &parm_attrs, expr, NULL,
				   DEPRECATED_NORMAL);

  /* Point the decl at the parameter's own location, not wherever the
     parser happens to be now.  */
  if (parm_decl && DECL_P (parm_decl))
    DECL_SOURCE_LOCATION (parm_decl) = parm->loc;

  decl_attributes (&parm_decl, parm_attrs, 0);
  parm_decl = pushdecl (parm_decl);
  finish_decl (parm_decl, input_location, NULL_TREE, NULL_TREE, NULL_TREE);
}
/* Mark all the parameter declarations to date as forward decls.
Also diagnose use of this extension. */
void
mark_forward_parm_decls (void)
{
struct c_binding *b;
if (pedantic && !current_scope->warned_forward_parm_decls)
{
pedwarn (input_location, OPT_Wpedantic,
"ISO C forbids forward parameter declarations");
current_scope->warned_forward_parm_decls = true;
}
for (b = current_scope->bindings; b; b = b->prev)
if (TREE_CODE (b->decl) == PARM_DECL)
TREE_ASM_WRITTEN (b->decl) = 1;
}
/* Build a COMPOUND_LITERAL_EXPR.  TYPE is the type given in the compound
   literal, which may be an incomplete array type completed by the
   initializer; INIT is a CONSTRUCTOR at LOC that initializes the compound
   literal.  NON_CONST is true if the initializers contain something
   that cannot occur in a constant expression.  If ALIGNAS_ALIGN is nonzero,
   it is the (valid) alignment for this compound literal, as specified
   with _Alignas.  */
tree
build_compound_literal (location_t loc, tree type, tree init, bool non_const,
			unsigned int alignas_align)
{
  /* We do not use start_decl here because we have a type, not a declarator;
     and do not use finish_decl because the decl should be stored inside
     the COMPOUND_LITERAL_EXPR rather than added elsewhere as a DECL_EXPR.  */
  tree decl;
  tree complit;
  tree stmt;
  if (type == error_mark_node
      || init == error_mark_node)
    return error_mark_node;
  /* Build an anonymous VAR_DECL to hold the literal's storage.  A
     compound literal at file scope has static storage duration.  */
  decl = build_decl (loc, VAR_DECL, NULL_TREE, type);
  DECL_EXTERNAL (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  TREE_STATIC (decl) = (current_scope == file_scope);
  DECL_CONTEXT (decl) = current_function_decl;
  /* Mark used/read up front so no "unused variable" style warnings are
     issued for the synthesized decl.  */
  TREE_USED (decl) = 1;
  DECL_READ_P (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 1;
  TREE_TYPE (decl) = type;
  c_apply_type_quals_to_decl (TYPE_QUALS (strip_array_types (type)), decl);
  if (alignas_align)
    {
      SET_DECL_ALIGN (decl, alignas_align * BITS_PER_UNIT);
      DECL_USER_ALIGN (decl) = 1;
    }
  store_init_value (loc, decl, init, NULL_TREE);
  /* An incomplete array type gets its bounds from the initializer,
     e.g. (int[]){1, 2, 3}.  */
  if (TREE_CODE (type) == ARRAY_TYPE && !COMPLETE_TYPE_P (type))
    {
      int failure = complete_array_type (&TREE_TYPE (decl),
					 DECL_INITIAL (decl), true);
      /* If complete_array_type returns 3, it means that the
	 initial value of the compound literal is empty.  Allow it.  */
      gcc_assert (failure == 0 || failure == 3);
      type = TREE_TYPE (decl);
      TREE_TYPE (DECL_INITIAL (decl)) = type;
    }
  if (type == error_mark_node || !COMPLETE_TYPE_P (type))
    {
      c_incomplete_type_error (loc, NULL_TREE, type);
      return error_mark_node;
    }
  /* Wrap the decl in a DECL_EXPR inside the COMPOUND_LITERAL_EXPR.  */
  stmt = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl);
  complit = build1 (COMPOUND_LITERAL_EXPR, type, stmt);
  TREE_SIDE_EFFECTS (complit) = 1;
  layout_decl (decl, 0);
  if (TREE_STATIC (decl))
    {
      /* This decl needs a name for the assembler output.  */
      set_compound_literal_name (decl);
      DECL_DEFER_OUTPUT (decl) = 1;
      DECL_COMDAT (decl) = 1;
      pushdecl (decl);
      rest_of_decl_compilation (decl, 1, 0);
    }
  /* Record non-constantness so later constant-expression checks can
     diagnose use of this literal where a constant is required.  */
  if (non_const)
    {
      complit = build2 (C_MAYBE_CONST_EXPR, type, NULL, complit);
      C_MAYBE_CONST_EXPR_NON_CONST (complit) = 1;
    }
  return complit;
}
/* Check the type of a compound literal.  Here we just check that it
   is valid for C++.  */
void
check_compound_literal_type (location_t loc, struct c_type_name *type_name)
{
  if (!warn_cxx_compat)
    return;
  /* Defining or first-referencing a tag inside the literal's type name
     is not valid C++.  */
  if (type_name->specs->typespec_kind != ctsk_tagdef
      && type_name->specs->typespec_kind != ctsk_tagfirstref)
    return;
  warning_at (loc, OPT_Wc___compat,
	      "defining a type in a compound literal is invalid in C++");
}
/* Determine whether TYPE is a structure with a flexible array member,
   or a union containing such a structure (possibly recursively).  */
static bool
flexible_array_type_p (tree type)
{
  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
      {
	/* Only the final member of a struct may be a flexible array,
	   so walk to the end of the field chain.  */
	tree last = TYPE_FIELDS (type);
	if (last == NULL_TREE)
	  return false;
	while (DECL_CHAIN (last) != NULL_TREE)
	  last = DECL_CHAIN (last);
	tree last_type = TREE_TYPE (last);
	/* A flexible array member: array type with no size and an
	   (empty) domain with no upper bound.  */
	return (TREE_CODE (last_type) == ARRAY_TYPE
		&& TYPE_SIZE (last_type) == NULL_TREE
		&& TYPE_DOMAIN (last_type) != NULL_TREE
		&& TYPE_MAX_VALUE (TYPE_DOMAIN (last_type)) == NULL_TREE);
      }
    case UNION_TYPE:
      /* Any member qualifying makes the union qualify.  */
      for (tree field = TYPE_FIELDS (type); field != NULL_TREE;
	   field = DECL_CHAIN (field))
	if (flexible_array_type_p (TREE_TYPE (field)))
	  return true;
      return false;
    default:
      return false;
    }
}
/* Performs sanity checks on the TYPE and WIDTH of the bit-field NAME,
   replacing with appropriate values if they are invalid.  */
static void
check_bitfield_type_and_width (location_t loc, tree *type, tree *width,
			       tree orig_name)
{
  tree type_mv;
  unsigned int max_width;
  unsigned HOST_WIDE_INT w;
  /* Anonymous bit-fields get a translated placeholder name in
     diagnostics.  */
  const char *name = (orig_name
		      ? identifier_to_locale (IDENTIFIER_POINTER (orig_name))
		      : _("<anonymous>"));
  /* Detect and ignore out of range field width and process valid
     field widths.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (*width)))
    {
      error_at (loc, "bit-field %qs width not an integer constant", name);
      /* Recover with width 1 so parsing can continue.  */
      *width = integer_one_node;
    }
  else
    {
      if (TREE_CODE (*width) != INTEGER_CST)
	{
	  /* Try folding; a width that folds to a constant but is not an
	     integer constant expression is a pedwarn, not an error.  */
	  *width = c_fully_fold (*width, false, NULL);
	  if (TREE_CODE (*width) == INTEGER_CST)
	    pedwarn (loc, OPT_Wpedantic,
		     "bit-field %qs width not an integer constant expression",
		     name);
	}
      if (TREE_CODE (*width) != INTEGER_CST)
	{
	  error_at (loc, "bit-field %qs width not an integer constant", name);
	  *width = integer_one_node;
	}
      constant_expression_warning (*width);
      if (tree_int_cst_sgn (*width) < 0)
	{
	  error_at (loc, "negative width in bit-field %qs", name);
	  *width = integer_one_node;
	}
      /* Zero width is only valid for unnamed bit-fields (padding).  */
      else if (integer_zerop (*width) && orig_name)
	{
	  error_at (loc, "zero width for bit-field %qs", name);
	  *width = integer_one_node;
	}
    }
  /* Detect invalid bit-field type.  */
  if (TREE_CODE (*type) != INTEGER_TYPE
      && TREE_CODE (*type) != BOOLEAN_TYPE
      && TREE_CODE (*type) != ENUMERAL_TYPE)
    {
      error_at (loc, "bit-field %qs has invalid type", name);
      /* Recover with a plausible type.  */
      *type = unsigned_type_node;
    }
  if (TYPE_WARN_IF_NOT_ALIGN (*type))
    {
      error_at (loc, "cannot declare bit-field %qs with %<warn_if_not_aligned%> type",
		name);
      *type = unsigned_type_node;
    }
  /* C90 only allows int/unsigned int/signed int (and _Bool via C99)
     bit-field types; others are a GCC extension.  */
  type_mv = TYPE_MAIN_VARIANT (*type);
  if (!in_system_header_at (input_location)
      && type_mv != integer_type_node
      && type_mv != unsigned_type_node
      && type_mv != boolean_type_node)
    pedwarn_c90 (loc, OPT_Wpedantic,
		 "type of bit-field %qs is a GCC extension", name);
  /* Clamp widths that exceed the precision of the type.  */
  max_width = TYPE_PRECISION (*type);
  if (compare_tree_int (*width, max_width) > 0)
    {
      error_at (loc, "width of %qs exceeds its type", name);
      w = max_width;
      *width = build_int_cst (integer_type_node, w);
    }
  else
    w = tree_to_uhwi (*width);
  /* For enum bit-fields, warn if the field cannot represent all the
     enumerators' values.  */
  if (TREE_CODE (*type) == ENUMERAL_TYPE)
    {
      struct lang_type *lt = TYPE_LANG_SPECIFIC (*type);
      if (!lt
	  || w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type))
	  || w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type)))
	warning_at (loc, 0, "%qs is narrower than values of its type", name);
    }
}
/* Print warning about variable length array if necessary.  */
static void
warn_variable_length_array (tree name, tree size)
{
  /* Choose between the named and unnamed forms of the diagnostic,
     then between "size can't be evaluated" (constant-folded but not a
     constant expression) and the plain VLA wording.  */
  if (name)
    {
      if (TREE_CONSTANT (size))
	pedwarn_c90 (input_location, OPT_Wvla,
		     "ISO C90 forbids array %qE whose size "
		     "can%'t be evaluated", name);
      else
	pedwarn_c90 (input_location, OPT_Wvla,
		     "ISO C90 forbids variable length array %qE", name);
    }
  else
    {
      if (TREE_CONSTANT (size))
	pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array "
		     "whose size can%'t be evaluated");
      else
	pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable "
		     "length array");
    }
}
/* Print warning about defaulting to int if necessary.
   LOCATION is the diagnostic location, OPT the controlling warning
   option, and GMSGID a printf-style diagnostic format string followed
   by its arguments.  In C99 and later modes implicit int is a
   constraint violation, so the diagnostic is a pedwarn there and a
   plain warning otherwise.  */
static void
warn_defaults_to (location_t location, int opt, const char *gmsgid, ...)
{
  diagnostic_info diagnostic;
  va_list ap;
  rich_location richloc (line_table, location);
  va_start (ap, gmsgid);
  diagnostic_set_info (&diagnostic, gmsgid, &ap, &richloc,
		       flag_isoc99 ? DK_PEDWARN : DK_WARNING);
  diagnostic.option_index = opt;
  diagnostic_report_diagnostic (global_dc, &diagnostic);
  va_end (ap);
}
/* Returns the smallest location != UNKNOWN_LOCATION in LOCATIONS,
   considering only those c_declspec_words found in LIST, which
   must be terminated by cdw_number_of_elements.  */
static location_t
smallest_type_quals_location (const location_t *locations,
			      const c_declspec_word *list)
{
  location_t best = UNKNOWN_LOCATION;

  for (const c_declspec_word *p = list; *p != cdw_number_of_elements; p++)
    {
      location_t candidate = locations[*p];
      /* Take the first known location, then any earlier known one.  */
      if (best == UNKNOWN_LOCATION
	  || (candidate != UNKNOWN_LOCATION && candidate < best))
	best = candidate;
    }

  return best;
}
/* Given declspecs and a declarator,
determine the name and type of the object declared
and construct a ..._DECL node for it.
(In one case we can return a ..._TYPE node instead.
For invalid input we sometimes return NULL_TREE.)
DECLSPECS is a c_declspecs structure for the declaration specifiers.
DECL_CONTEXT says which syntactic context this declaration is in:
NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL.
FUNCDEF for a function definition. Like NORMAL but a few different
error messages in each case. Return value may be zero meaning
this definition is too screwy to try to parse.
PARM for a parameter declaration (either within a function prototype
or before a function body). Make a PARM_DECL, or return void_type_node.
TYPENAME if for a typename (in a cast or sizeof).
Don't make a DECL node; just return the ..._TYPE node.
FIELD for a struct or union field; make a FIELD_DECL.
INITIALIZED is true if the decl has an initializer.
WIDTH is non-NULL for bit-fields, and is a pointer to an INTEGER_CST node
representing the width of the bit-field.
DECL_ATTRS points to the list of attributes that should be added to this
decl. Any nested attributes that belong on the decl itself will be
added to this list.
If EXPR is not NULL, any expressions that need to be evaluated as
part of evaluating variably modified types will be stored in *EXPR.
If EXPR_CONST_OPERANDS is not NULL, *EXPR_CONST_OPERANDS will be
set to indicate whether operands in *EXPR can be used in constant
expressions.
DEPRECATED_STATE is a deprecated_states value indicating whether
deprecation warnings should be suppressed.
In the TYPENAME case, DECLARATOR is really an absolute declarator.
It may also be so in the PARM case, for a prototype where the
argument type is specified but not the name.
This function is where the complicated C meanings of `static'
and `extern' are interpreted. */
static tree
grokdeclarator (const struct c_declarator *declarator,
struct c_declspecs *declspecs,
enum decl_context decl_context, bool initialized, tree *width,
tree *decl_attrs, tree *expr, bool *expr_const_operands,
enum deprecated_states deprecated_state)
{
tree type = declspecs->type;
bool threadp = declspecs->thread_p;
enum c_storage_class storage_class = declspecs->storage_class;
int constp;
int restrictp;
int volatilep;
int atomicp;
int type_quals = TYPE_UNQUALIFIED;
tree name = NULL_TREE;
bool funcdef_flag = false;
bool funcdef_syntax = false;
bool size_varies = false;
tree decl_attr = declspecs->decl_attr;
int array_ptr_quals = TYPE_UNQUALIFIED;
tree array_ptr_attrs = NULL_TREE;
bool array_parm_static = false;
bool array_parm_vla_unspec_p = false;
tree returned_attrs = NULL_TREE;
bool bitfield = width != NULL;
tree element_type;
tree orig_qual_type = NULL;
size_t orig_qual_indirect = 0;
struct c_arg_info *arg_info = 0;
addr_space_t as1, as2, address_space;
location_t loc = UNKNOWN_LOCATION;
tree expr_dummy;
bool expr_const_operands_dummy;
enum c_declarator_kind first_non_attr_kind;
unsigned int alignas_align = 0;
if (TREE_CODE (type) == ERROR_MARK)
return error_mark_node;
if (expr == NULL)
{
expr = &expr_dummy;
expr_dummy = NULL_TREE;
}
if (expr_const_operands == NULL)
expr_const_operands = &expr_const_operands_dummy;
if (declspecs->expr)
{
if (*expr)
*expr = build2 (COMPOUND_EXPR, TREE_TYPE (declspecs->expr), *expr,
declspecs->expr);
else
*expr = declspecs->expr;
}
*expr_const_operands = declspecs->expr_const_operands;
if (decl_context == FUNCDEF)
funcdef_flag = true, decl_context = NORMAL;
/* Look inside a declarator for the name being declared
and get it as an IDENTIFIER_NODE, for an error message. */
{
const struct c_declarator *decl = declarator;
first_non_attr_kind = cdk_attrs;
while (decl)
switch (decl->kind)
{
case cdk_array:
loc = decl->id_loc;
/* FALL THRU. */
case cdk_function:
case cdk_pointer:
funcdef_syntax = (decl->kind == cdk_function);
if (first_non_attr_kind == cdk_attrs)
first_non_attr_kind = decl->kind;
decl = decl->declarator;
break;
case cdk_attrs:
decl = decl->declarator;
break;
case cdk_id:
loc = decl->id_loc;
if (decl->u.id)
name = decl->u.id;
if (first_non_attr_kind == cdk_attrs)
first_non_attr_kind = decl->kind;
decl = 0;
break;
default:
gcc_unreachable ();
}
if (name == NULL_TREE)
{
gcc_assert (decl_context == PARM
|| decl_context == TYPENAME
|| (decl_context == FIELD
&& declarator->kind == cdk_id));
gcc_assert (!initialized);
}
}
/* A function definition's declarator must have the form of
a function declarator. */
if (funcdef_flag && !funcdef_syntax)
return NULL_TREE;
/* If this looks like a function definition, make it one,
even if it occurs where parms are expected.
Then store_parm_decls will reject it and not use it as a parm. */
if (decl_context == NORMAL && !funcdef_flag && current_scope->parm_flag)
decl_context = PARM;
if (declspecs->deprecated_p && deprecated_state != DEPRECATED_SUPPRESS)
warn_deprecated_use (declspecs->type, declspecs->decl_attr);
if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope
&& variably_modified_type_p (type, NULL_TREE))
{
if (name)
error_at (loc, "variably modified %qE at file scope", name);
else
error_at (loc, "variably modified field at file scope");
type = integer_type_node;
}
size_varies = C_TYPE_VARIABLE_SIZE (type) != 0;
/* Diagnose defaulting to "int". */
if (declspecs->default_int_p && !in_system_header_at (input_location))
{
/* Issue a warning if this is an ISO C 99 program or if
-Wreturn-type and this is a function, or if -Wimplicit;
prefer the former warning since it is more explicit. */
if ((warn_implicit_int || warn_return_type || flag_isoc99)
&& funcdef_flag)
warn_about_return_type = 1;
else
{
if (name)
warn_defaults_to (loc, OPT_Wimplicit_int,
"type defaults to %<int%> in declaration "
"of %qE", name);
else
warn_defaults_to (loc, OPT_Wimplicit_int,
"type defaults to %<int%> in type name");
}
}
/* Adjust the type if a bit-field is being declared,
-funsigned-bitfields applied and the type is not explicitly
"signed". */
if (bitfield && !flag_signed_bitfields && !declspecs->explicit_signed_p
&& TREE_CODE (type) == INTEGER_TYPE)
type = unsigned_type_for (type);
/* Figure out the type qualifiers for the declaration. There are
two ways a declaration can become qualified. One is something
like `const int i' where the `const' is explicit. Another is
something like `typedef const int CI; CI i' where the type of the
declaration contains the `const'. A third possibility is that
there is a type qualifier on the element type of a typedefed
array type, in which case we should extract that qualifier so
that c_apply_type_quals_to_decl receives the full list of
qualifiers to work with (C90 is not entirely clear about whether
duplicate qualifiers should be diagnosed in this case, but it
seems most appropriate to do so). */
element_type = strip_array_types (type);
constp = declspecs->const_p + TYPE_READONLY (element_type);
restrictp = declspecs->restrict_p + TYPE_RESTRICT (element_type);
volatilep = declspecs->volatile_p + TYPE_VOLATILE (element_type);
atomicp = declspecs->atomic_p + TYPE_ATOMIC (element_type);
as1 = declspecs->address_space;
as2 = TYPE_ADDR_SPACE (element_type);
address_space = ADDR_SPACE_GENERIC_P (as1)? as2 : as1;
if (constp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<const%>");
if (restrictp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<restrict%>");
if (volatilep > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<volatile%>");
if (atomicp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<_Atomic%>");
if (!ADDR_SPACE_GENERIC_P (as1) && !ADDR_SPACE_GENERIC_P (as2) && as1 != as2)
error_at (loc, "conflicting named address spaces (%s vs %s)",
c_addr_space_name (as1), c_addr_space_name (as2));
if ((TREE_CODE (type) == ARRAY_TYPE
|| first_non_attr_kind == cdk_array)
&& TYPE_QUALS (element_type))
{
orig_qual_type = type;
type = TYPE_MAIN_VARIANT (type);
}
type_quals = ((constp ? TYPE_QUAL_CONST : 0)
| (restrictp ? TYPE_QUAL_RESTRICT : 0)
| (volatilep ? TYPE_QUAL_VOLATILE : 0)
| (atomicp ? TYPE_QUAL_ATOMIC : 0)
| ENCODE_QUAL_ADDR_SPACE (address_space));
if (type_quals != TYPE_QUALS (element_type))
orig_qual_type = NULL_TREE;
/* Applying the _Atomic qualifier to an array type (through the use
of typedefs or typeof) must be detected here. If the qualifier
is introduced later, any appearance of applying it to an array is
actually applying it to an element of that array. */
if (declspecs->atomic_p && TREE_CODE (type) == ARRAY_TYPE)
error_at (loc, "%<_Atomic%>-qualified array type");
/* Warn about storage classes that are invalid for certain
kinds of declarations (parameters, typenames, etc.). */
if (funcdef_flag
&& (threadp
|| storage_class == csc_auto
|| storage_class == csc_register
|| storage_class == csc_typedef))
{
if (storage_class == csc_auto)
pedwarn (loc,
(current_scope == file_scope) ? 0 : OPT_Wpedantic,
"function definition declared %<auto%>");
if (storage_class == csc_register)
error_at (loc, "function definition declared %<register%>");
if (storage_class == csc_typedef)
error_at (loc, "function definition declared %<typedef%>");
if (threadp)
error_at (loc, "function definition declared %qs",
declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
threadp = false;
if (storage_class == csc_auto
|| storage_class == csc_register
|| storage_class == csc_typedef)
storage_class = csc_none;
}
else if (decl_context != NORMAL && (storage_class != csc_none || threadp))
{
if (decl_context == PARM && storage_class == csc_register)
;
else
{
switch (decl_context)
{
case FIELD:
if (name)
error_at (loc, "storage class specified for structure "
"field %qE", name);
else
error_at (loc, "storage class specified for structure field");
break;
case PARM:
if (name)
error_at (loc, "storage class specified for parameter %qE",
name);
else
error_at (loc, "storage class specified for unnamed parameter");
break;
default:
error_at (loc, "storage class specified for typename");
break;
}
storage_class = csc_none;
threadp = false;
}
}
else if (storage_class == csc_extern
&& initialized
&& !funcdef_flag)
{
/* 'extern' with initialization is invalid if not at file scope. */
if (current_scope == file_scope)
{
/* It is fine to have 'extern const' when compiling at C
and C++ intersection. */
if (!(warn_cxx_compat && constp))
warning_at (loc, 0, "%qE initialized and declared %<extern%>",
name);
}
else
error_at (loc, "%qE has both %<extern%> and initializer", name);
}
else if (current_scope == file_scope)
{
if (storage_class == csc_auto)
error_at (loc, "file-scope declaration of %qE specifies %<auto%>",
name);
if (pedantic && storage_class == csc_register)
pedwarn (input_location, OPT_Wpedantic,
"file-scope declaration of %qE specifies %<register%>", name);
}
else
{
if (storage_class == csc_extern && funcdef_flag)
error_at (loc, "nested function %qE declared %<extern%>", name);
else if (threadp && storage_class == csc_none)
{
error_at (loc, "function-scope %qE implicitly auto and declared "
"%qs", name,
declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
threadp = false;
}
}
/* Now figure out the structure of the declarator proper.
Descend through it, creating more complex types, until we reach
the declared identifier (or NULL_TREE, in an absolute declarator).
At each stage we maintain an unqualified version of the type
together with any qualifiers that should be applied to it with
c_build_qualified_type; this way, array types including
multidimensional array types are first built up in unqualified
form and then the qualified form is created with
TYPE_MAIN_VARIANT pointing to the unqualified form. */
while (declarator && declarator->kind != cdk_id)
{
if (type == error_mark_node)
{
declarator = declarator->declarator;
continue;
}
/* Each level of DECLARATOR is either a cdk_array (for ...[..]),
a cdk_pointer (for *...),
a cdk_function (for ...(...)),
a cdk_attrs (for nested attributes),
or a cdk_id (for the name being declared
or the place in an absolute declarator
where the name was omitted).
For the last case, we have just exited the loop.
At this point, TYPE is the type of elements of an array,
or for a function to return, or for a pointer to point to.
After this sequence of ifs, TYPE is the type of the
array or function or pointer, and DECLARATOR has had its
outermost layer removed. */
if (array_ptr_quals != TYPE_UNQUALIFIED
|| array_ptr_attrs != NULL_TREE
|| array_parm_static)
{
/* Only the innermost declarator (making a parameter be of
array type which is converted to pointer type)
may have static or type qualifiers. */
error_at (loc, "static or type qualifiers in non-parameter array declarator");
array_ptr_quals = TYPE_UNQUALIFIED;
array_ptr_attrs = NULL_TREE;
array_parm_static = false;
}
switch (declarator->kind)
{
case cdk_attrs:
{
/* A declarator with embedded attributes. */
tree attrs = declarator->u.attrs;
const struct c_declarator *inner_decl;
int attr_flags = 0;
declarator = declarator->declarator;
inner_decl = declarator;
while (inner_decl->kind == cdk_attrs)
inner_decl = inner_decl->declarator;
if (inner_decl->kind == cdk_id)
attr_flags |= (int) ATTR_FLAG_DECL_NEXT;
else if (inner_decl->kind == cdk_function)
attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT;
else if (inner_decl->kind == cdk_array)
attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT;
returned_attrs = decl_attributes (&type,
chainon (returned_attrs, attrs),
attr_flags);
break;
}
case cdk_array:
{
tree itype = NULL_TREE;
tree size = declarator->u.array.dimen;
/* The index is a signed object `sizetype' bits wide. */
tree index_type = c_common_signed_type (sizetype);
array_ptr_quals = declarator->u.array.quals;
array_ptr_attrs = declarator->u.array.attrs;
array_parm_static = declarator->u.array.static_p;
array_parm_vla_unspec_p = declarator->u.array.vla_unspec_p;
declarator = declarator->declarator;
/* Check for some types that there cannot be arrays of. */
if (VOID_TYPE_P (type))
{
if (name)
error_at (loc, "declaration of %qE as array of voids", name);
else
error_at (loc, "declaration of type name as array of voids");
type = error_mark_node;
}
if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (name)
error_at (loc, "declaration of %qE as array of functions",
name);
else
error_at (loc, "declaration of type name as array of "
"functions");
type = error_mark_node;
}
if (pedantic && !in_system_header_at (input_location)
&& flexible_array_type_p (type))
pedwarn (loc, OPT_Wpedantic,
"invalid use of structure with flexible array member");
if (size == error_mark_node)
type = error_mark_node;
if (type == error_mark_node)
continue;
/* If size was specified, set ITYPE to a range-type for
that size. Otherwise, ITYPE remains null. finish_decl
may figure it out from an initial value. */
if (size)
{
bool size_maybe_const = true;
bool size_int_const = (TREE_CODE (size) == INTEGER_CST
&& !TREE_OVERFLOW (size));
bool this_size_varies = false;
/* Strip NON_LVALUE_EXPRs since we aren't using as an
lvalue. */
STRIP_TYPE_NOPS (size);
if (!INTEGRAL_TYPE_P (TREE_TYPE (size)))
{
if (name)
error_at (loc, "size of array %qE has non-integer type",
name);
else
error_at (loc,
"size of unnamed array has non-integer type");
size = integer_one_node;
}
/* This can happen with enum forward declaration. */
else if (!COMPLETE_TYPE_P (TREE_TYPE (size)))
{
if (name)
error_at (loc, "size of array %qE has incomplete type",
name);
else
error_at (loc, "size of unnamed array has incomplete "
"type");
size = integer_one_node;
}
size = c_fully_fold (size, false, &size_maybe_const);
if (pedantic && size_maybe_const && integer_zerop (size))
{
if (name)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids zero-size array %qE", name);
else
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids zero-size array");
}
if (TREE_CODE (size) == INTEGER_CST && size_maybe_const)
{
constant_expression_warning (size);
if (tree_int_cst_sgn (size) < 0)
{
if (name)
error_at (loc, "size of array %qE is negative", name);
else
error_at (loc, "size of unnamed array is negative");
size = integer_one_node;
}
/* Handle a size folded to an integer constant but
not an integer constant expression. */
if (!size_int_const)
{
/* If this is a file scope declaration of an
ordinary identifier, this is invalid code;
diagnosing it here and not subsequently
treating the type as variable-length avoids
more confusing diagnostics later. */
if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope)
pedwarn (input_location, 0,
"variably modified %qE at file scope",
name);
else
this_size_varies = size_varies = true;
warn_variable_length_array (name, size);
}
}
else if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope)
{
error_at (loc, "variably modified %qE at file scope", name);
size = integer_one_node;
}
else
{
/* Make sure the array size remains visibly
nonconstant even if it is (eg) a const variable
with known value. */
this_size_varies = size_varies = true;
warn_variable_length_array (name, size);
if (sanitize_flags_p (SANITIZE_VLA)
&& current_function_decl != NULL_TREE
&& decl_context == NORMAL)
{
/* Evaluate the array size only once. */
size = save_expr (size);
size = c_fully_fold (size, false, NULL);
size = fold_build2 (COMPOUND_EXPR, TREE_TYPE (size),
ubsan_instrument_vla (loc, size),
size);
}
}
if (integer_zerop (size) && !this_size_varies)
{
/* A zero-length array cannot be represented with
an unsigned index type, which is what we'll
get with build_index_type. Create an
open-ended range instead. */
itype = build_range_type (sizetype, size, NULL_TREE);
}
else
{
/* Arrange for the SAVE_EXPR on the inside of the
MINUS_EXPR, which allows the -1 to get folded
with the +1 that happens when building TYPE_SIZE. */
if (size_varies)
size = save_expr (size);
if (this_size_varies && TREE_CODE (size) == INTEGER_CST)
size = build2 (COMPOUND_EXPR, TREE_TYPE (size),
integer_zero_node, size);
/* Compute the maximum valid index, that is, size
- 1. Do the calculation in index_type, so that
if it is a variable the computations will be
done in the proper mode. */
itype = fold_build2_loc (loc, MINUS_EXPR, index_type,
convert (index_type, size),
convert (index_type,
size_one_node));
/* The above overflows when size does not fit
in index_type.
??? While a size of INT_MAX+1 technically shouldn't
cause an overflow (because we subtract 1), handling
this case seems like an unnecessary complication. */
if (TREE_CODE (size) == INTEGER_CST
&& !int_fits_type_p (size, index_type))
{
if (name)
error_at (loc, "size of array %qE is too large",
name);
else
error_at (loc, "size of unnamed array is too large");
type = error_mark_node;
continue;
}
itype = build_index_type (itype);
}
if (this_size_varies)
{
if (*expr)
*expr = build2 (COMPOUND_EXPR, TREE_TYPE (size),
*expr, size);
else
*expr = size;
*expr_const_operands &= size_maybe_const;
}
}
else if (decl_context == FIELD)
{
bool flexible_array_member = false;
if (array_parm_vla_unspec_p)
/* Field names can in fact have function prototype
scope so [*] is disallowed here through making
the field variably modified, not through being
something other than a declaration with function
prototype scope. */
size_varies = true;
else
{
const struct c_declarator *t = declarator;
while (t->kind == cdk_attrs)
t = t->declarator;
flexible_array_member = (t->kind == cdk_id);
}
if (flexible_array_member
&& !in_system_header_at (input_location))
pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not "
"support flexible array members");
/* ISO C99 Flexible array members are effectively
identical to GCC's zero-length array extension. */
if (flexible_array_member || array_parm_vla_unspec_p)
itype = build_range_type (sizetype, size_zero_node,
NULL_TREE);
}
else if (decl_context == PARM)
{
if (array_parm_vla_unspec_p)
{
itype = build_range_type (sizetype, size_zero_node, NULL_TREE);
size_varies = true;
}
}
else if (decl_context == TYPENAME)
{
if (array_parm_vla_unspec_p)
{
/* C99 6.7.5.2p4 */
warning (0, "%<[*]%> not in a declaration");
/* We use this to avoid messing up with incomplete
array types of the same type, that would
otherwise be modified below. */
itype = build_range_type (sizetype, size_zero_node,
NULL_TREE);
size_varies = true;
}
}
/* Complain about arrays of incomplete types. */
if (!COMPLETE_TYPE_P (type))
{
error_at (loc, "array type has incomplete element type %qT",
type);
/* See if we can be more helpful. */
if (TREE_CODE (type) == ARRAY_TYPE)
{
if (name)
inform (loc, "declaration of %qE as multidimensional "
"array must have bounds for all dimensions "
"except the first", name);
else
inform (loc, "declaration of multidimensional array "
"must have bounds for all dimensions except "
"the first");
}
type = error_mark_node;
}
else
/* When itype is NULL, a shared incomplete array type is
returned for all array of a given type. Elsewhere we
make sure we don't complete that type before copying
it, but here we want to make sure we don't ever
modify the shared type, so we gcc_assert (itype)
below. */
{
addr_space_t as = DECODE_QUAL_ADDR_SPACE (type_quals);
if (!ADDR_SPACE_GENERIC_P (as) && as != TYPE_ADDR_SPACE (type))
type = build_qualified_type (type,
ENCODE_QUAL_ADDR_SPACE (as));
type = build_array_type (type, itype);
}
if (type != error_mark_node)
{
if (size_varies)
{
/* It is ok to modify type here even if itype is
NULL: if size_varies, we're in a
multi-dimensional array and the inner type has
variable size, so the enclosing shared array type
must too. */
if (size && TREE_CODE (size) == INTEGER_CST)
type
= build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
C_TYPE_VARIABLE_SIZE (type) = 1;
}
/* The GCC extension for zero-length arrays differs from
ISO flexible array members in that sizeof yields
zero. */
if (size && integer_zerop (size))
{
gcc_assert (itype);
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_SIZE (type) = bitsize_zero_node;
TYPE_SIZE_UNIT (type) = size_zero_node;
SET_TYPE_STRUCTURAL_EQUALITY (type);
}
if (array_parm_vla_unspec_p)
{
gcc_assert (itype);
/* The type is complete. C99 6.7.5.2p4 */
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_SIZE (type) = bitsize_zero_node;
TYPE_SIZE_UNIT (type) = size_zero_node;
SET_TYPE_STRUCTURAL_EQUALITY (type);
}
if (!valid_array_size_p (loc, type, name))
type = error_mark_node;
}
if (decl_context != PARM
&& (array_ptr_quals != TYPE_UNQUALIFIED
|| array_ptr_attrs != NULL_TREE
|| array_parm_static))
{
error_at (loc, "static or type qualifiers in non-parameter "
"array declarator");
array_ptr_quals = TYPE_UNQUALIFIED;
array_ptr_attrs = NULL_TREE;
array_parm_static = false;
}
orig_qual_indirect++;
break;
}
case cdk_function:
{
/* Say it's a definition only for the declarator closest
to the identifier, apart possibly from some
attributes. */
bool really_funcdef = false;
tree arg_types;
orig_qual_type = NULL_TREE;
if (funcdef_flag)
{
const struct c_declarator *t = declarator->declarator;
while (t->kind == cdk_attrs)
t = t->declarator;
really_funcdef = (t->kind == cdk_id);
}
/* Declaring a function type. Make sure we have a valid
type for the function to return. */
if (type == error_mark_node)
continue;
size_varies = false;
/* Warn about some types functions can't return. */
if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (name)
error_at (loc, "%qE declared as function returning a "
"function", name);
else
error_at (loc, "type name declared as function "
"returning a function");
type = integer_type_node;
}
if (TREE_CODE (type) == ARRAY_TYPE)
{
if (name)
error_at (loc, "%qE declared as function returning an array",
name);
else
error_at (loc, "type name declared as function returning "
"an array");
type = integer_type_node;
}
/* Construct the function type and go to the next
inner layer of declarator. */
arg_info = declarator->u.arg_info;
arg_types = grokparms (arg_info, really_funcdef);
/* Type qualifiers before the return type of the function
qualify the return type, not the function type. */
if (type_quals)
{
const enum c_declspec_word ignored_quals_list[] =
{
cdw_const, cdw_volatile, cdw_restrict, cdw_address_space,
cdw_atomic, cdw_number_of_elements
};
location_t specs_loc
= smallest_type_quals_location (declspecs->locations,
ignored_quals_list);
if (specs_loc == UNKNOWN_LOCATION)
specs_loc = declspecs->locations[cdw_typedef];
if (specs_loc == UNKNOWN_LOCATION)
specs_loc = loc;
/* Type qualifiers on a function return type are
normally permitted by the standard but have no
effect, so give a warning at -Wreturn-type.
Qualifiers on a void return type are banned on
function definitions in ISO C; GCC used to used
them for noreturn functions. The resolution of C11
DR#423 means qualifiers (other than _Atomic) are
actually removed from the return type when
determining the function type. */
int quals_used = type_quals;
if (flag_isoc11)
quals_used &= TYPE_QUAL_ATOMIC;
if (quals_used && VOID_TYPE_P (type) && really_funcdef)
pedwarn (specs_loc, 0,
"function definition has qualified void return type");
else
warning_at (specs_loc, OPT_Wignored_qualifiers,
"type qualifiers ignored on function return type");
/* Ensure an error for restrict on invalid types; the
DR#423 resolution is not entirely clear about
this. */
if (flag_isoc11
&& (type_quals & TYPE_QUAL_RESTRICT)
&& (!POINTER_TYPE_P (type)
|| !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type))))
error_at (loc, "invalid use of %<restrict%>");
if (quals_used)
type = c_build_qualified_type (type, quals_used);
}
type_quals = TYPE_UNQUALIFIED;
type = build_function_type (type, arg_types);
declarator = declarator->declarator;
/* Set the TYPE_CONTEXTs for each tagged type which is local to
the formal parameter list of this FUNCTION_TYPE to point to
the FUNCTION_TYPE node itself. */
{
c_arg_tag *tag;
unsigned ix;
FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
TYPE_CONTEXT (tag->type) = type;
}
break;
}
case cdk_pointer:
{
/* Merge any constancy or volatility into the target type
for the pointer. */
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
orig_qual_type = NULL_TREE;
size_varies = false;
/* When the pointed-to type involves components of variable size,
care must be taken to ensure that the size evaluation code is
emitted early enough to dominate all the possible later uses
and late enough for the variables on which it depends to have
been assigned.
This is expected to happen automatically when the pointed-to
type has a name/declaration of it's own, but special attention
is required if the type is anonymous.
We attach an artificial TYPE_DECL to such pointed-to type
and arrange for it to be included in a DECL_EXPR. This
forces the sizes evaluation at a safe point and ensures it
is not deferred until e.g. within a deeper conditional context.
PARM contexts have no enclosing statement list that
can hold the DECL_EXPR, so we need to use a BIND_EXPR
instead, and add it to the list of expressions that
need to be evaluated.
TYPENAME contexts do have an enclosing statement list,
but it would be incorrect to use it, as the size should
only be evaluated if the containing expression is
evaluated. We might also be in the middle of an
expression with side effects on the pointed-to type size
"arguments" prior to the pointer declaration point and
the fake TYPE_DECL in the enclosing context would force
the size evaluation prior to the side effects. We therefore
use BIND_EXPRs in TYPENAME contexts too. */
if (!TYPE_NAME (type)
&& variably_modified_type_p (type, NULL_TREE))
{
tree bind = NULL_TREE;
if (decl_context == TYPENAME || decl_context == PARM)
{
bind = build3 (BIND_EXPR, void_type_node, NULL_TREE,
NULL_TREE, NULL_TREE);
TREE_SIDE_EFFECTS (bind) = 1;
BIND_EXPR_BODY (bind) = push_stmt_list ();
push_scope ();
}
tree decl = build_decl (loc, TYPE_DECL, NULL_TREE, type);
DECL_ARTIFICIAL (decl) = 1;
pushdecl (decl);
finish_decl (decl, loc, NULL_TREE, NULL_TREE, NULL_TREE);
TYPE_NAME (type) = decl;
if (bind)
{
pop_scope ();
BIND_EXPR_BODY (bind)
= pop_stmt_list (BIND_EXPR_BODY (bind));
if (*expr)
*expr = build2 (COMPOUND_EXPR, void_type_node, *expr,
bind);
else
*expr = bind;
}
}
type = c_build_pointer_type (type);
/* Process type qualifiers (such as const or volatile)
that were given inside the `*'. */
type_quals = declarator->u.pointer_quals;
declarator = declarator->declarator;
break;
}
default:
gcc_unreachable ();
}
}
*decl_attrs = chainon (returned_attrs, *decl_attrs);
/* Now TYPE has the actual type, apart from any qualifiers in
TYPE_QUALS. */
/* Warn about address space used for things other than static memory or
pointers. */
address_space = DECODE_QUAL_ADDR_SPACE (type_quals);
if (!ADDR_SPACE_GENERIC_P (address_space))
{
if (decl_context == NORMAL)
{
switch (storage_class)
{
case csc_auto:
error ("%qs combined with %<auto%> qualifier for %qE",
c_addr_space_name (address_space), name);
break;
case csc_register:
error ("%qs combined with %<register%> qualifier for %qE",
c_addr_space_name (address_space), name);
break;
case csc_none:
if (current_function_scope)
{
error ("%qs specified for auto variable %qE",
c_addr_space_name (address_space), name);
break;
}
break;
case csc_static:
case csc_extern:
case csc_typedef:
break;
default:
gcc_unreachable ();
}
}
else if (decl_context == PARM && TREE_CODE (type) != ARRAY_TYPE)
{
if (name)
error ("%qs specified for parameter %qE",
c_addr_space_name (address_space), name);
else
error ("%qs specified for unnamed parameter",
c_addr_space_name (address_space));
}
else if (decl_context == FIELD)
{
if (name)
error ("%qs specified for structure field %qE",
c_addr_space_name (address_space), name);
else
error ("%qs specified for structure field",
c_addr_space_name (address_space));
}
}
/* Check the type and width of a bit-field. */
if (bitfield)
{
check_bitfield_type_and_width (loc, &type, width, name);
/* C11 makes it implementation-defined (6.7.2.1#5) whether
atomic types are permitted for bit-fields; we have no code to
make bit-field accesses atomic, so disallow them. */
if (type_quals & TYPE_QUAL_ATOMIC)
{
if (name)
error_at (loc, "bit-field %qE has atomic type", name);
else
error_at (loc, "bit-field has atomic type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
}
/* Reject invalid uses of _Alignas. */
if (declspecs->alignas_p)
{
if (storage_class == csc_typedef)
error_at (loc, "alignment specified for typedef %qE", name);
else if (storage_class == csc_register)
error_at (loc, "alignment specified for %<register%> object %qE",
name);
else if (decl_context == PARM)
{
if (name)
error_at (loc, "alignment specified for parameter %qE", name);
else
error_at (loc, "alignment specified for unnamed parameter");
}
else if (bitfield)
{
if (name)
error_at (loc, "alignment specified for bit-field %qE", name);
else
error_at (loc, "alignment specified for unnamed bit-field");
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
error_at (loc, "alignment specified for function %qE", name);
else if (declspecs->align_log != -1 && TYPE_P (type))
{
alignas_align = 1U << declspecs->align_log;
if (alignas_align < min_align_of_type (type))
{
if (name)
error_at (loc, "%<_Alignas%> specifiers cannot reduce "
"alignment of %qE", name);
else
error_at (loc, "%<_Alignas%> specifiers cannot reduce "
"alignment of unnamed field");
alignas_align = 0;
}
}
}
/* If this is declaring a typedef name, return a TYPE_DECL. */
if (storage_class == csc_typedef)
{
tree decl;
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
decl = build_decl (declarator->id_loc,
TYPE_DECL, declarator->u.id, type);
if (declspecs->explicit_signed_p)
C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1;
if (declspecs->inline_p)
pedwarn (loc, 0,"typedef %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0,"typedef %q+D declared %<_Noreturn%>", decl);
if (warn_cxx_compat && declarator->u.id != NULL_TREE)
{
struct c_binding *b = I_TAG_BINDING (declarator->u.id);
if (b != NULL
&& b->decl != NULL_TREE
&& (B_IN_CURRENT_SCOPE (b)
|| (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
&& TYPE_MAIN_VARIANT (b->decl) != TYPE_MAIN_VARIANT (type))
{
if (warning_at (declarator->id_loc, OPT_Wc___compat,
("using %qD as both a typedef and a tag is "
"invalid in C++"), decl)
&& b->locus != UNKNOWN_LOCATION)
inform (b->locus, "originally defined here");
}
}
return decl;
}
/* If this is a type name (such as, in a cast or sizeof),
compute the type and return it now. */
if (decl_context == TYPENAME)
{
/* Note that the grammar rejects storage classes in typenames
and fields. */
gcc_assert (storage_class == csc_none && !threadp
&& !declspecs->inline_p && !declspecs->noreturn_p);
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids const or volatile function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
return type;
}
if (pedantic && decl_context == FIELD
&& variably_modified_type_p (type, NULL_TREE))
{
/* C99 6.7.2.1p8 */
pedwarn (loc, OPT_Wpedantic, "a member of a structure or union cannot "
"have a variably modified type");
}
/* Aside from typedefs and type names (handle above),
`void' at top level (not within pointer)
is allowed only in public variables.
We don't complain about parms either, but that is because
a better error message can be made later. */
if (VOID_TYPE_P (type) && decl_context != PARM
&& !((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE)
&& (storage_class == csc_extern
|| (current_scope == file_scope
&& !(storage_class == csc_static
|| storage_class == csc_register)))))
{
error_at (loc, "variable or field %qE declared void", name);
type = integer_type_node;
}
/* Now create the decl, which may be a VAR_DECL, a PARM_DECL
or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */
{
tree decl;
if (decl_context == PARM)
{
tree promoted_type;
bool array_parameter_p = false;
/* A parameter declared as an array of T is really a pointer to T.
One declared as a function is really a pointer to a function. */
if (TREE_CODE (type) == ARRAY_TYPE)
{
/* Transfer const-ness of array into that of type pointed to. */
type = TREE_TYPE (type);
if (orig_qual_type != NULL_TREE)
{
if (orig_qual_indirect == 0)
orig_qual_type = TREE_TYPE (orig_qual_type);
else
orig_qual_indirect--;
}
if (type_quals)
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
type = c_build_pointer_type (type);
type_quals = array_ptr_quals;
if (type_quals)
type = c_build_qualified_type (type, type_quals);
/* We don't yet implement attributes in this context. */
if (array_ptr_attrs != NULL_TREE)
warning_at (loc, OPT_Wattributes,
"attributes in parameter array declarator ignored");
size_varies = false;
array_parameter_p = true;
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (type_quals & TYPE_QUAL_ATOMIC)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals);
type = c_build_pointer_type (type);
type_quals = TYPE_UNQUALIFIED;
}
else if (type_quals)
type = c_build_qualified_type (type, type_quals);
decl = build_decl (declarator->id_loc,
PARM_DECL, declarator->u.id, type);
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
C_ARRAY_PARAMETER (decl) = array_parameter_p;
/* Compute the type actually passed in the parmlist,
for the case where there is no prototype.
(For example, shorts and chars are passed as ints.)
When there is a prototype, this is overridden later. */
if (type == error_mark_node)
promoted_type = type;
else
promoted_type = c_type_promotes_to (type);
DECL_ARG_TYPE (decl) = promoted_type;
if (declspecs->inline_p)
pedwarn (loc, 0, "parameter %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0, "parameter %q+D declared %<_Noreturn%>", decl);
}
else if (decl_context == FIELD)
{
/* Note that the grammar rejects storage classes in typenames
and fields. */
gcc_assert (storage_class == csc_none && !threadp
&& !declspecs->inline_p && !declspecs->noreturn_p);
/* Structure field. It may not be a function. */
if (TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc, "field %qE declared as a function", name);
type = build_pointer_type (type);
}
else if (TREE_CODE (type) != ERROR_MARK
&& !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type))
{
if (name)
error_at (loc, "field %qE has incomplete type", name);
else
error_at (loc, "unnamed field has incomplete type");
type = error_mark_node;
}
else if (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_DOMAIN (type) == NULL_TREE)
{
/* We have a flexible array member through a typedef.
Set suitable range. Whether this is a correct position
for a flexible array member will be determined elsewhere. */
if (!in_system_header_at (input_location))
pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not "
"support flexible array members");
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_DOMAIN (type) = build_range_type (sizetype, size_zero_node,
NULL_TREE);
if (orig_qual_indirect == 0)
orig_qual_type = NULL_TREE;
}
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
decl = build_decl (declarator->id_loc,
FIELD_DECL, declarator->u.id, type);
DECL_NONADDRESSABLE_P (decl) = bitfield;
if (bitfield && !declarator->u.id)
{
TREE_NO_WARNING (decl) = 1;
DECL_PADDING_P (decl) = 1;
}
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (storage_class == csc_register || threadp)
{
error_at (loc, "invalid storage class for function %qE", name);
}
else if (current_scope != file_scope)
{
/* Function declaration not at file scope. Storage
classes other than `extern' are not allowed, C99
6.7.1p5, and `extern' makes no difference. However,
GCC allows 'auto', perhaps with 'inline', to support
nested functions. */
if (storage_class == csc_auto)
pedwarn (loc, OPT_Wpedantic,
"invalid storage class for function %qE", name);
else if (storage_class == csc_static)
{
error_at (loc, "invalid storage class for function %qE", name);
if (funcdef_flag)
storage_class = declspecs->storage_class = csc_none;
else
return NULL_TREE;
}
}
decl = build_decl (declarator->id_loc,
FUNCTION_DECL, declarator->u.id, type);
decl = build_decl_attribute_variant (decl, decl_attr);
if (type_quals & TYPE_QUAL_ATOMIC)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && type_quals && !DECL_IN_SYSTEM_HEADER (decl))
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
/* Every function declaration is an external reference
(DECL_EXTERNAL) except for those which are not at file
scope and are explicitly declared "auto". This is
forbidden by standard C (C99 6.7.1p5) and is interpreted by
GCC to signify a forward declaration of a nested function. */
if (storage_class == csc_auto && current_scope != file_scope)
DECL_EXTERNAL (decl) = 0;
/* In C99, a function which is declared 'inline' with 'extern'
is not an external reference (which is confusing). It
means that the later definition of the function must be output
in this file, C99 6.7.4p6. In GNU C89, a function declared
'extern inline' is an external reference. */
else if (declspecs->inline_p && storage_class != csc_static)
DECL_EXTERNAL (decl) = ((storage_class == csc_extern)
== flag_gnu89_inline);
else
DECL_EXTERNAL (decl) = !initialized;
/* Record absence of global scope for `static' or `auto'. */
TREE_PUBLIC (decl)
= !(storage_class == csc_static || storage_class == csc_auto);
/* For a function definition, record the argument information
block where store_parm_decls will look for it. */
if (funcdef_flag)
current_function_arg_info = arg_info;
if (declspecs->default_int_p)
C_FUNCTION_IMPLICIT_INT (decl) = 1;
/* Record presence of `inline' and `_Noreturn', if it is
reasonable. */
if (flag_hosted && MAIN_NAME_P (declarator->u.id))
{
if (declspecs->inline_p)
pedwarn (loc, 0, "cannot inline function %<main%>");
if (declspecs->noreturn_p)
pedwarn (loc, 0, "%<main%> declared %<_Noreturn%>");
}
else
{
if (declspecs->inline_p)
/* Record that the function is declared `inline'. */
DECL_DECLARED_INLINE_P (decl) = 1;
if (declspecs->noreturn_p)
{
if (flag_isoc99)
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C99 does not support %<_Noreturn%>");
else
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C90 does not support %<_Noreturn%>");
TREE_THIS_VOLATILE (decl) = 1;
}
}
}
else
{
/* It's a variable. */
/* An uninitialized decl with `extern' is a reference. */
int extern_ref = !initialized && storage_class == csc_extern;
type = c_build_qualified_type (type, type_quals, orig_qual_type,
orig_qual_indirect);
/* C99 6.2.2p7: It is invalid (compile-time undefined
behavior) to create an 'extern' declaration for a
variable if there is a global declaration that is
'static' and the global declaration is not visible.
(If the static declaration _is_ currently visible,
the 'extern' declaration is taken to refer to that decl.) */
if (extern_ref && current_scope != file_scope)
{
tree global_decl = identifier_global_value (declarator->u.id);
tree visible_decl = lookup_name (declarator->u.id);
if (global_decl
&& global_decl != visible_decl
&& VAR_P (global_decl)
&& !TREE_PUBLIC (global_decl))
error_at (loc, "variable previously declared %<static%> "
"redeclared %<extern%>");
}
decl = build_decl (declarator->id_loc,
VAR_DECL, declarator->u.id, type);
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
if (declspecs->inline_p)
pedwarn (loc, 0, "variable %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0, "variable %q+D declared %<_Noreturn%>", decl);
/* At file scope, an initialized extern declaration may follow
a static declaration. In that case, DECL_EXTERNAL will be
reset later in start_decl. */
DECL_EXTERNAL (decl) = (storage_class == csc_extern);
/* At file scope, the presence of a `static' or `register' storage
class specifier, or the absence of all storage class specifiers
makes this declaration a definition (perhaps tentative). Also,
the absence of `static' makes it public. */
if (current_scope == file_scope)
{
TREE_PUBLIC (decl) = storage_class != csc_static;
TREE_STATIC (decl) = !extern_ref;
}
/* Not at file scope, only `static' makes a static definition. */
else
{
TREE_STATIC (decl) = (storage_class == csc_static);
TREE_PUBLIC (decl) = extern_ref;
}
if (threadp)
set_decl_tls_model (decl, decl_default_tls_model (decl));
}
if ((storage_class == csc_extern
|| (storage_class == csc_none
&& TREE_CODE (type) == FUNCTION_TYPE
&& !funcdef_flag))
&& variably_modified_type_p (type, NULL_TREE))
{
/* C99 6.7.5.2p2 */
if (TREE_CODE (type) == FUNCTION_TYPE)
error_at (loc, "non-nested function with variably modified type");
else
error_at (loc, "object with variably modified type must have "
"no linkage");
}
/* Record `register' declaration for warnings on &
and in case doing stupid register allocation. */
if (storage_class == csc_register)
{
C_DECL_REGISTER (decl) = 1;
DECL_REGISTER (decl) = 1;
}
/* Record constancy and volatility. */
c_apply_type_quals_to_decl (type_quals, decl);
/* Apply _Alignas specifiers. */
if (alignas_align)
{
SET_DECL_ALIGN (decl, alignas_align * BITS_PER_UNIT);
DECL_USER_ALIGN (decl) = 1;
}
/* If a type has volatile components, it should be stored in memory.
Otherwise, the fact that those components are volatile
will be ignored, and would even crash the compiler.
Of course, this only makes sense on VAR,PARM, and RESULT decl's. */
if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl))
&& (VAR_P (decl) || TREE_CODE (decl) == PARM_DECL
|| TREE_CODE (decl) == RESULT_DECL))
{
/* It is not an error for a structure with volatile fields to
be declared register, but reset DECL_REGISTER since it
cannot actually go in a register. */
int was_reg = C_DECL_REGISTER (decl);
C_DECL_REGISTER (decl) = 0;
DECL_REGISTER (decl) = 0;
c_mark_addressable (decl);
C_DECL_REGISTER (decl) = was_reg;
}
/* This is the earliest point at which we might know the assembler
name of a variable. Thus, if it's known before this, die horribly. */
gcc_assert (!HAS_DECL_ASSEMBLER_NAME_P (decl)
|| !DECL_ASSEMBLER_NAME_SET_P (decl));
if (warn_cxx_compat
&& VAR_P (decl)
&& TREE_PUBLIC (decl)
&& TREE_STATIC (decl)
&& (RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl))
|| TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
&& TYPE_NAME (TREE_TYPE (decl)) == NULL_TREE)
warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
("non-local variable %qD with anonymous type is "
"questionable in C++"),
decl);
return decl;
}
}
/* Decode the parameter-list info for a function type or function definition.
The argument is the value returned by `get_parm_info' (or made in c-parse.c
if there is an identifier list instead of a parameter decl list).
These two functions are separate because when a function returns
or receives functions then each is called multiple times but the order
of calls is different. The last call to `grokparms' is always the one
that contains the formal parameter names of a function definition.
Return a list of arg types to use in the FUNCTION_TYPE for this function.
FUNCDEF_FLAG is true for a function definition, false for
a mere declaration. A nonempty identifier-list gets an error message
when FUNCDEF_FLAG is false. */
/* Decode the parameter-type information collected in ARG_INFO and
   return the list of argument types to store in TYPE_ARG_TYPES of the
   FUNCTION_TYPE, or NULL_TREE when no prototype information should be
   recorded (old-style identifier list, or erroneous parameters).
   FUNCDEF_FLAG is true when this is a function definition rather than
   a mere declaration; diagnostics differ between the two cases.  */
static tree
grokparms (struct c_arg_info *arg_info, bool funcdef_flag)
{
  tree types = arg_info->types;

  if (funcdef_flag && arg_info->had_vla_unspec)
    {
      /* A function definition isn't function prototype scope C99 6.2.1p4.  */
      /* C99 6.7.5.2p4 */
      error ("%<[*]%> not allowed in other than function prototype scope");
    }

  /* An empty parameter list in a declaration is an unprototyped
     function; warn unless it came from a system header.  */
  if (types == NULL_TREE && !funcdef_flag
      && !in_system_header_at (input_location))
    warning (OPT_Wstrict_prototypes,
	     "function declaration isn%'t a prototype");

  if (types == error_mark_node)
    /* Don't set TYPE_ARG_TYPES in this case.  */
    return NULL_TREE;

  if (types && TREE_CODE (TREE_VALUE (types)) == IDENTIFIER_NODE)
    {
      /* Old-style identifier list: legal only on a definition, where
	 the identifiers become the parameter declarations.  */
      if (funcdef_flag)
	arg_info->parms = arg_info->types;
      else
	{
	  pedwarn (input_location, 0, "parameter names (without types) in "
		   "function declaration");
	  arg_info->parms = NULL_TREE;
	}
      arg_info->types = NULL_TREE;
      return NULL_TREE;
    }

  /* Prototyped parameter list.  If there is a parameter of incomplete
     type in a definition, this is an error.  In a declaration this is
     valid, and a struct or union type may be completed later, before
     any calls or definition of the function.  In the case where the
     tag was first declared within the parameter list, a warning has
     already been given.  If a parameter has void type, then however
     the function cannot be defined or called, so warn.  */
  tree parm_decl;
  tree type_link;
  unsigned int parm_index;
  for (parm_decl = arg_info->parms, type_link = types, parm_index = 1;
       parm_decl;
       parm_decl = DECL_CHAIN (parm_decl),
	 type_link = TREE_CHAIN (type_link), parm_index++)
    {
      tree parm_type = TREE_VALUE (type_link);
      if (parm_type == error_mark_node)
	continue;

      if (!COMPLETE_TYPE_P (parm_type))
	{
	  if (funcdef_flag)
	    {
	      /* Incomplete parameter type in a definition: hard error,
		 and poison both the type list and the decl.  */
	      if (DECL_NAME (parm_decl))
		error_at (input_location,
			  "parameter %u (%q+D) has incomplete type",
			  parm_index, parm_decl);
	      else
		error_at (DECL_SOURCE_LOCATION (parm_decl),
			  "parameter %u has incomplete type",
			  parm_index);
	      TREE_VALUE (type_link) = error_mark_node;
	      TREE_TYPE (parm_decl) = error_mark_node;
	      types = NULL_TREE;
	    }
	  else if (VOID_TYPE_P (parm_type))
	    {
	      /* Declaration with a void parameter: the function can
		 never be defined or called, so warn only.  */
	      if (DECL_NAME (parm_decl))
		warning_at (input_location, 0,
			    "parameter %u (%q+D) has void type",
			    parm_index, parm_decl);
	      else
		warning_at (DECL_SOURCE_LOCATION (parm_decl), 0,
			    "parameter %u has void type",
			    parm_index);
	    }
	}

      if (DECL_NAME (parm_decl) && TREE_USED (parm_decl))
	warn_if_shadowing (parm_decl);
    }
  return types;
}
/* Allocate and initialize a c_arg_info structure from the parser's
obstack. */
struct c_arg_info *
build_arg_info (void)
{
  /* Obstack-allocate so the structure lives as long as the parser needs.  */
  struct c_arg_info *info = XOBNEW (&parser_obstack, struct c_arg_info);
  info->parms = NULL_TREE;
  info->tags = NULL;
  info->types = NULL_TREE;
  info->others = NULL_TREE;
  info->pending_sizes = NULL;
  info->had_vla_unspec = 0;
  return info;
}
/* Take apart the current scope and return a c_arg_info structure with
info on a parameter list just parsed.
This structure is later fed to 'grokparms' and 'store_parm_decls'.
ELLIPSIS being true means the argument list ended in '...' so don't
append a sentinel (void_list_node) to the end of the type-list.
EXPR is NULL or an expression that needs to be evaluated for the
side effects of array size expressions in the parameters. */
struct c_arg_info *
get_parm_info (bool ellipsis, tree expr)
{
  struct c_binding *b = current_scope->bindings;
  struct c_arg_info *arg_info = build_arg_info ();
  tree parms = NULL_TREE;
  vec<c_arg_tag, va_gc> *tags = NULL;
  tree types = NULL_TREE;
  tree others = NULL_TREE;
  /* Set once "void must be the only parameter" has been issued, so a
     parameter list such as (void, void) draws the error only once.  */
  bool gave_void_only_once_err = false;
  arg_info->had_vla_unspec = current_scope->had_vla_unspec;
  /* The bindings in this scope must not get put into a block.
     We will take care of deleting the binding nodes.  */
  current_scope->bindings = 0;
  /* This function is only called if there was *something* on the
     parameter list.  */
  gcc_assert (b);
  /* A parameter list consisting solely of 'void' indicates that the
     function takes no arguments.  But if the 'void' is qualified
     (by 'const' or 'volatile'), or has a storage class specifier
     ('register'), then the behavior is undefined; issue an error.
     Typedefs for 'void' are OK (see DR#157).  */
  if (b->prev == 0 /* one binding */
      && TREE_CODE (b->decl) == PARM_DECL /* which is a parameter */
      && !DECL_NAME (b->decl) /* anonymous */
      && VOID_TYPE_P (TREE_TYPE (b->decl))) /* of void type */
    {
      if (TYPE_QUALS (TREE_TYPE (b->decl)) != TYPE_UNQUALIFIED
	  || C_DECL_REGISTER (b->decl))
	error_at (b->locus, "%<void%> as only parameter may not be qualified");
      /* There cannot be an ellipsis.  */
      if (ellipsis)
	error_at (b->locus, "%<void%> must be the only parameter");
      arg_info->types = void_list_node;
      return arg_info;
    }
  /* Without an ellipsis, terminate the type list with void_list_node
     so the resulting prototype is fixed-arity.  */
  if (!ellipsis)
    types = void_list_node;
  /* Break up the bindings list into parms, tags, types, and others;
     apply sanity checks; purge the name-to-decl bindings.  */
  while (b)
    {
      tree decl = b->decl;
      tree type = TREE_TYPE (decl);
      c_arg_tag tag;
      const char *keyword;
      switch (TREE_CODE (decl))
	{
	case PARM_DECL:
	  /* Pop this parameter's name binding, restoring whatever it
	     shadowed in an outer scope.  */
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	    }
	  /* Check for forward decls that never got their actual decl.  */
	  if (TREE_ASM_WRITTEN (decl))
	    error_at (b->locus,
		      "parameter %q+D has just a forward declaration", decl);
	  /* Check for (..., void, ...) and issue an error.  */
	  else if (VOID_TYPE_P (type) && !DECL_NAME (decl))
	    {
	      if (!gave_void_only_once_err)
		{
		  error_at (b->locus, "%<void%> must be the only parameter");
		  gave_void_only_once_err = true;
		}
	    }
	  else
	    {
	      /* Valid parameter, add it to the list.  */
	      DECL_CHAIN (decl) = parms;
	      parms = decl;
	      /* Since there is a prototype, args are passed in their
		 declared types.  The back end may override this later.  */
	      DECL_ARG_TYPE (decl) = type;
	      types = tree_cons (0, type, types);
	    }
	  break;
	case ENUMERAL_TYPE: keyword = "enum"; goto tag;
	case UNION_TYPE: keyword = "union"; goto tag;
	case RECORD_TYPE: keyword = "struct"; goto tag;
	tag:
	  /* Types may not have tag-names, in which case the type
	     appears in the bindings list with b->id NULL.  */
	  if (b->id)
	    {
	      gcc_assert (I_TAG_BINDING (b->id) == b);
	      I_TAG_BINDING (b->id) = b->shadowed;
	    }
	  /* Warn about any struct, union or enum tags defined in a
	     parameter list.  The scope of such types is limited to
	     the parameter list, which is rarely if ever desirable
	     (it's impossible to call such a function with type-
	     correct arguments).  An anonymous union parm type is
	     meaningful as a GNU extension, so don't warn for that.  */
	  if (TREE_CODE (decl) != UNION_TYPE || b->id != NULL_TREE)
	    {
	      if (b->id)
		/* The %s will be one of 'struct', 'union', or 'enum'.  */
		warning_at (b->locus, 0,
			    "%<%s %E%> declared inside parameter list"
			    " will not be visible outside of this definition or"
			    " declaration", keyword, b->id);
	      else
		/* The %s will be one of 'struct', 'union', or 'enum'.  */
		warning_at (b->locus, 0,
			    "anonymous %s declared inside parameter list"
			    " will not be visible outside of this definition or"
			    " declaration", keyword);
	    }
	  /* Record the tag so it can be re-entered in the scope of the
	     function body.  */
	  tag.id = b->id;
	  tag.type = decl;
	  vec_safe_push (tags, tag);
	  break;
	case FUNCTION_DECL:
	  /* FUNCTION_DECLs appear when there is an implicit function
	     declaration in the parameter list.  */
	  gcc_assert (b->nested || seen_error ());
	  goto set_shadowed;
	case CONST_DECL:
	case TYPE_DECL:
	  /* CONST_DECLs appear here when we have an embedded enum,
	     and TYPE_DECLs appear here when we have an embedded struct
	     or union.  No warnings for this - we already warned about the
	     type itself.  */
	  /* When we reinsert this decl in the function body, we need
	     to reconstruct whether it was marked as nested.  */
	  gcc_assert (!b->nested);
	  DECL_CHAIN (decl) = others;
	  others = decl;
	  /* fall through */
	case ERROR_MARK:
	set_shadowed:
	  /* error_mark_node appears here when we have an undeclared
	     variable.  Just throw it away.  */
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	    }
	  break;
	  /* Other things that might be encountered.  */
	case LABEL_DECL:
	case VAR_DECL:
	default:
	  gcc_unreachable ();
	}
      b = free_binding_and_advance (b);
    }
  arg_info->parms = parms;
  arg_info->tags = tags;
  arg_info->types = types;
  arg_info->others = others;
  arg_info->pending_sizes = expr;
  return arg_info;
}
/* Get the struct, enum or union (CODE says which) with tag NAME.
Define the tag as a forward-reference with location LOC if it is
not defined. Return a c_typespec structure for the type
specifier. */
struct c_typespec
parser_xref_tag (location_t loc, enum tree_code code, tree name)
{
  struct c_typespec result;
  location_t refloc;

  result.expr = NULL_TREE;
  result.expr_const_operands = true;

  /* If a cross reference is requested, look up the type
     already defined for this tag and return it.  */
  tree t = lookup_tag (code, name, false, &refloc);
  result.kind = t ? ctsk_tagref : ctsk_tagfirstref;

  /* If this is the right type of tag, return what we found.
     (This reference will be shadowed by shadow_tag later if appropriate.)
     If this is the wrong type of tag, do not return it.  If it was the
     wrong type in the same scope, we will have had an error
     message already; if in a different scope and declaring
     a name, pending_xref_error will give an error message; but if in a
     different scope and not declaring a name, this tag should
     shadow the previous declaration of a different type of tag, and
     this would not work properly if we return the reference found.
     (For example, with "struct foo" in an outer scope, "union foo;"
     must shadow that tag with a new one of union type.)  */
  if (t != NULL_TREE && TREE_CODE (t) == code)
    {
      /* -Wc++-compat: a type defined inside a struct/union is not
         visible outside it in C++.  */
      if (warn_cxx_compat
          && C_TYPE_DEFINED_IN_STRUCT (t)
          && loc != UNKNOWN_LOCATION)
        switch (code)
          {
          case ENUMERAL_TYPE:
            warning_at (loc, OPT_Wc___compat,
                        ("enum type defined in struct or union "
                         "is not visible in C++"));
            inform (refloc, "enum type defined here");
            break;
          case RECORD_TYPE:
            warning_at (loc, OPT_Wc___compat,
                        ("struct defined in struct or union "
                         "is not visible in C++"));
            inform (refloc, "struct defined here");
            break;
          case UNION_TYPE:
            warning_at (loc, OPT_Wc___compat,
                        ("union defined in struct or union "
                         "is not visible in C++"));
            inform (refloc, "union defined here");
            break;
          default:
            gcc_unreachable();
          }
      result.spec = t;
      return result;
    }

  /* If no such tag is yet defined, create a forward-reference node
     and record it as the "definition".
     When a real declaration of this type is found,
     the forward-reference will be altered into a real type.  */
  t = make_node (code);
  if (code == ENUMERAL_TYPE)
    {
      /* Give the type a default layout like unsigned int
         to avoid crashing if it does not get defined.  */
      SET_TYPE_MODE (t, TYPE_MODE (unsigned_type_node));
      SET_TYPE_ALIGN (t, TYPE_ALIGN (unsigned_type_node));
      TYPE_USER_ALIGN (t) = 0;
      TYPE_UNSIGNED (t) = 1;
      TYPE_PRECISION (t) = TYPE_PRECISION (unsigned_type_node);
      TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (unsigned_type_node);
      TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (unsigned_type_node);
    }
  pushtag (loc, name, t);
  result.spec = t;
  return result;
}
/* Get the struct, enum or union (CODE says which) with tag NAME.
Define the tag as a forward-reference if it is not defined.
Return a tree for the type. */
tree
xref_tag (enum tree_code code, tree name)
{
  /* Delegate to parser_xref_tag at the current input location and
     keep only the type specifier.  */
  struct c_typespec ts = parser_xref_tag (input_location, code, name);
  return ts.spec;
}
/* Make sure that the tag NAME is defined *in the current scope*
at least as a forward reference.
LOC is the location of the struct's definition.
CODE says which kind of tag NAME ought to be.
This stores the current value of the file static STRUCT_PARSE_INFO
in *ENCLOSING_STRUCT_PARSE_INFO, and points STRUCT_PARSE_INFO at a
new c_struct_parse_info structure. The old value of
STRUCT_PARSE_INFO is restored in finish_struct. */
tree
start_struct (location_t loc, enum tree_code code, tree name,
	      struct c_struct_parse_info **enclosing_struct_parse_info)
{
  /* If there is already a tag defined at this scope
     (as a forward reference), just return it.  */
  tree ref = NULL_TREE;
  location_t refloc = UNKNOWN_LOCATION;
  if (name != NULL_TREE)
    ref = lookup_tag (code, name, true, &refloc);
  if (ref && TREE_CODE (ref) == code)
    {
      /* Prefer the location of the stub decl for "originally defined
	 here" notes, when one exists.  */
      if (TYPE_STUB_DECL (ref))
	refloc = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (ref));
      /* A nonnull TYPE_SIZE means the type was already completed, so
	 this is a redefinition.  */
      if (TYPE_SIZE (ref))
	{
	  if (code == UNION_TYPE)
	    error_at (loc, "redefinition of %<union %E%>", name);
	  else
	    error_at (loc, "redefinition of %<struct %E%>", name);
	  if (refloc != UNKNOWN_LOCATION)
	    inform (refloc, "originally defined here");
	  /* Don't create structures using a name already in use.  */
	  ref = NULL_TREE;
	}
      else if (C_TYPE_BEING_DEFINED (ref))
	{
	  if (code == UNION_TYPE)
	    error_at (loc, "nested redefinition of %<union %E%>", name);
	  else
	    error_at (loc, "nested redefinition of %<struct %E%>", name);
	  /* Don't bother to report "originally defined here" for a
	     nested redefinition; the original definition should be
	     obvious.  */
	  /* Don't create structures that contain themselves.  */
	  ref = NULL_TREE;
	}
    }
  /* Otherwise create a forward-reference just so the tag is in scope.  */
  if (ref == NULL_TREE || TREE_CODE (ref) != code)
    {
      ref = make_node (code);
      pushtag (loc, name, ref);
    }
  /* Mark the type as in the middle of its definition, so a nested
     redefinition can be detected (see above).  finish_struct clears
     this via completing the type.  */
  C_TYPE_BEING_DEFINED (ref) = 1;
  /* -fpack-struct applies to every variant of the type.  */
  for (tree v = TYPE_MAIN_VARIANT (ref); v; v = TYPE_NEXT_VARIANT (v))
    TYPE_PACKED (v) = flag_pack_struct;
  /* Save the enclosing struct's parse state and start a fresh one;
     finish_struct restores it.  */
  *enclosing_struct_parse_info = struct_parse_info;
  struct_parse_info = new c_struct_parse_info ();
  /* FIXME: This will issue a warning for a use of a type defined
     within a statement expr used within sizeof, et. al.  This is not
     terribly serious as C++ doesn't permit statement exprs within
     sizeof anyhow.  */
  if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof))
    warning_at (loc, OPT_Wc___compat,
		"defining type in %qs expression is invalid in C++",
		(in_sizeof
		 ? "sizeof"
		 : (in_typeof ? "typeof" : "alignof")));
  return ref;
}
/* Process the specs, declarator and width (NULL if omitted)
of a structure component, returning a FIELD_DECL node.
WIDTH is non-NULL for bit-fields only, and is an INTEGER_CST node.
DECL_ATTRS is as for grokdeclarator.
LOC is the location of the structure component.
This is done during the parsing of the struct declaration.
The FIELD_DECL nodes are chained together and the lot of them
are ultimately passed to `build_struct' to make the RECORD_TYPE node. */
tree
grokfield (location_t loc,
	   struct c_declarator *declarator, struct c_declspecs *declspecs,
	   tree width, tree *decl_attrs)
{
  tree value;
  /* An id-less declarator with no bit-field width: decide whether it
     is a valid anonymous struct/union member or a vacuous declaration.  */
  if (declarator->kind == cdk_id && declarator->u.id == NULL_TREE
      && width == NULL_TREE)
    {
      /* This is an unnamed decl.
	 If we have something of the form "union { list } ;" then this
	 is the anonymous union extension.  Similarly for struct.
	 If this is something of the form "struct foo;", then
	   If MS or Plan 9 extensions are enabled, this is handled as
	     an anonymous struct.
	   Otherwise this is a forward declaration of a structure tag.
	 If this is something of the form "foo;" and foo is a TYPE_DECL, then
	   If foo names a structure or union without a tag, then this
	     is an anonymous struct (this is permitted by C11).
	   If MS or Plan 9 extensions are enabled and foo names a
	     structure, then again this is an anonymous struct.
	   Otherwise this is an error.
	 Oh what a horrid tangled web we weave.  I wonder if MS consciously
	 took this from Plan 9 or if it was an accident of implementation
	 that took root before someone noticed the bug...  */
      tree type = declspecs->type;
      bool ok = false;
      if (RECORD_OR_UNION_TYPE_P (type)
	  && (flag_ms_extensions
	      || flag_plan9_extensions
	      || !declspecs->typedef_p))
	{
	  if (flag_ms_extensions || flag_plan9_extensions)
	    ok = true;
	  else if (TYPE_NAME (type) == NULL)
	    ok = true;
	  else
	    ok = false;
	}
      if (!ok)
	{
	  pedwarn (loc, 0, "declaration does not declare anything");
	  return NULL_TREE;
	}
      /* Anonymous structs/unions predate C11; pedwarn against the
	 older standard in use.  */
      if (flag_isoc99)
	pedwarn_c99 (loc, OPT_Wpedantic,
		     "ISO C99 doesn%'t support unnamed structs/unions");
      else
	pedwarn_c99 (loc, OPT_Wpedantic,
		     "ISO C90 doesn%'t support unnamed structs/unions");
    }
  /* Build the FIELD_DECL itself; WIDTH is passed only for bit-fields.  */
  value = grokdeclarator (declarator, declspecs, FIELD, false,
			  width ? &width : NULL, decl_attrs, NULL, NULL,
			  DEPRECATED_NORMAL);
  finish_decl (value, loc, NULL_TREE, NULL_TREE, NULL_TREE);
  /* Stash the bit-field width in DECL_INITIAL; finish_struct converts
     it into DECL_SIZE and clears DECL_INITIAL again.  */
  DECL_INITIAL (value) = width;
  if (width)
    SET_DECL_C_BIT_FIELD (value);
  if (warn_cxx_compat && DECL_NAME (value) != NULL_TREE)
    {
      /* If we currently have a binding for this field, set the
	 in_struct field in the binding, so that we warn about lookups
	 which find it.  */
      struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (value));
      if (b != NULL)
	{
	  /* If the in_struct field is not yet set, push it on a list
	     to be cleared when this struct is finished.  */
	  if (!b->in_struct)
	    {
	      struct_parse_info->fields.safe_push (b);
	      b->in_struct = 1;
	    }
	}
    }
  return value;
}
/* Subroutine of detect_field_duplicates: return whether X and Y,
which are both fields in the same struct, have duplicate field
names. */
static bool
is_duplicate_field (tree x, tree y)
{
if (DECL_NAME (x) != NULL_TREE && DECL_NAME (x) == DECL_NAME (y))
return true;
/* When using -fplan9-extensions, an anonymous field whose name is a
typedef can duplicate a field name. */
if (flag_plan9_extensions
&& (DECL_NAME (x) == NULL_TREE || DECL_NAME (y) == NULL_TREE))
{
tree xt, xn, yt, yn;
xt = TREE_TYPE (x);
if (DECL_NAME (x) != NULL_TREE)
xn = DECL_NAME (x);
else if (RECORD_OR_UNION_TYPE_P (xt)
&& TYPE_NAME (xt) != NULL_TREE
&& TREE_CODE (TYPE_NAME (xt)) == TYPE_DECL)
xn = DECL_NAME (TYPE_NAME (xt));
else
xn = NULL_TREE;
yt = TREE_TYPE (y);
if (DECL_NAME (y) != NULL_TREE)
yn = DECL_NAME (y);
else if (RECORD_OR_UNION_TYPE_P (yt)
&& TYPE_NAME (yt) != NULL_TREE
&& TREE_CODE (TYPE_NAME (yt)) == TYPE_DECL)
yn = DECL_NAME (TYPE_NAME (yt));
else
yn = NULL_TREE;
if (xn != NULL_TREE && xn == yn)
return true;
}
return false;
}
/* Subroutine of detect_field_duplicates: add the fields of FIELDLIST
to HTAB, giving errors for any duplicates. */
static void
detect_field_duplicates_hash (tree fieldlist,
			      hash_table<nofree_ptr_hash <tree_node> > *htab)
{
  for (tree field = fieldlist; field != NULL_TREE; field = DECL_CHAIN (field))
    {
      tree name = DECL_NAME (field);
      if (name != NULL_TREE)
	{
	  tree_node **slot = htab->find_slot (name, INSERT);
	  if (*slot)
	    {
	      error ("duplicate member %q+D", field);
	      DECL_NAME (field) = NULL_TREE;
	    }
	  *slot = name;
	}
      else if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (field)))
	{
	  /* Recurse into an anonymous struct/union member; its fields
	     share this struct's namespace.  */
	  detect_field_duplicates_hash (TYPE_FIELDS (TREE_TYPE (field)),
					htab);
	  /* When using -fplan9-extensions, an anonymous field whose
	     name is a typedef can duplicate a field name.  */
	  if (flag_plan9_extensions
	      && TYPE_NAME (TREE_TYPE (field)) != NULL_TREE
	      && TREE_CODE (TYPE_NAME (TREE_TYPE (field))) == TYPE_DECL)
	    {
	      tree tdname = DECL_NAME (TYPE_NAME (TREE_TYPE (field)));
	      tree_node **slot = htab->find_slot (tdname, INSERT);
	      if (*slot)
		error ("duplicate member %q+D", TYPE_NAME (TREE_TYPE (field)));
	      *slot = tdname;
	    }
	}
    }
}
/* Generate an error for any duplicate field names in FIELDLIST. Munge
the list such that this does not present a problem later. */
static void
detect_field_duplicates (tree fieldlist)
{
  /* If the struct is the list of instance variables of an Objective-C
     class, then we need to check all the instance variables of
     superclasses when checking for duplicates (since you can't have
     an instance variable in a subclass with the same name as an
     instance variable in a superclass).  We pass on this job to the
     Objective-C compiler.  objc_detect_field_duplicates() will return
     false if we are not checking the list of instance variables and
     the C frontend should proceed with the standard field duplicate
     checks.  If we are checking the list of instance variables, the
     ObjC frontend will do the check, emit the errors if needed, and
     then return true.  */
  if (c_dialect_objc () && objc_detect_field_duplicates (false))
    return;

  /* Zero or one field cannot contain a duplicate.  */
  if (!fieldlist || !DECL_CHAIN (fieldlist))
    return;

  /* Scan up to ten leading fields; an anonymous struct/union member
     forces the hash-based path immediately by zeroing the budget.  */
  int budget = 10;
  tree x = fieldlist;
  do
    {
      budget--;
      if (DECL_NAME (x) == NULL_TREE
	  && RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
	budget = 0;
      x = DECL_CHAIN (x);
    }
  while (budget > 0 && x);

  if (budget > 0)
    {
      /* Few fields and no anonymous aggregates: a quadratic scan
	 avoids the overhead of allocating a hash table.  */
      for (x = DECL_CHAIN (fieldlist); x; x = DECL_CHAIN (x))
	{
	  /* When using -fplan9-extensions, we can have duplicates
	     between typedef names and fields.  */
	  bool check = DECL_NAME (x) != NULL_TREE;
	  if (!check
	      && flag_plan9_extensions
	      && RECORD_OR_UNION_TYPE_P (TREE_TYPE (x))
	      && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE
	      && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL)
	    check = true;
	  if (!check)
	    continue;
	  for (tree y = fieldlist; y != x; y = TREE_CHAIN (y))
	    if (is_duplicate_field (y, x))
	      {
		error ("duplicate member %q+D", x);
		DECL_NAME (x) = NULL_TREE;
	      }
	}
    }
  else
    {
      hash_table<nofree_ptr_hash <tree_node> > htab (37);
      detect_field_duplicates_hash (fieldlist, &htab);
    }
}
/* Finish up struct info used by -Wc++-compat. */
static void
warn_cxx_compat_finish_struct (tree fieldlist, enum tree_code code,
			       location_t record_loc)
{
  unsigned int i;
  tree elt;
  struct c_binding *binding;

  /* An empty aggregate has size 0 in C but size 1 in C++.  */
  if (fieldlist == NULL_TREE)
    {
      if (code == RECORD_TYPE)
	warning_at (record_loc, OPT_Wc___compat,
		    "empty struct has size 0 in C, size 1 in C++");
      else
	warning_at (record_loc, OPT_Wc___compat,
		    "empty union has size 0 in C, size 1 in C++");
    }

  /* Set the C_TYPE_DEFINED_IN_STRUCT flag for each type defined in
     the current struct.  We do this now at the end of the struct
     because the flag is used to issue visibility warnings, and we
     only want to issue those warnings if the type is referenced
     outside of the struct declaration.  */
  FOR_EACH_VEC_ELT (struct_parse_info->struct_types, i, elt)
    C_TYPE_DEFINED_IN_STRUCT (elt) = 1;

  /* The TYPEDEFS_SEEN field of STRUCT_PARSE_INFO is a list of
     typedefs used when declaring fields in this struct.  If the name
     of any of the fields is also a typedef name then the struct would
     not parse in C++, because the C++ lookup rules say that the
     typedef name would be looked up in the context of the struct, and
     would thus be the field rather than the typedef.  */
  if (fieldlist != NULL_TREE
      && !struct_parse_info->typedefs_seen.is_empty ())
    {
      /* Collect the typedef names in a hash_set; identifiers are
	 interned, so pointer identity suffices.  */
      hash_set<tree> tdnames;
      FOR_EACH_VEC_ELT (struct_parse_info->typedefs_seen, i, elt)
	tdnames.add (DECL_NAME (elt));
      for (tree field = fieldlist; field != NULL_TREE;
	   field = DECL_CHAIN (field))
	if (DECL_NAME (field) != NULL_TREE
	    && tdnames.contains (DECL_NAME (field)))
	  /* FIXME: It would be nice to report the location where
	     the typedef name is used.  */
	  warning_at (DECL_SOURCE_LOCATION (field), OPT_Wc___compat,
		      ("using %qD as both field and typedef name is "
		       "invalid in C++"),
		      field);
    }

  /* For each field which has a binding and which was not defined in
     an enclosing struct, clear the in_struct field.  */
  FOR_EACH_VEC_ELT (struct_parse_info->fields, i, binding)
    binding->in_struct = 0;
}
/* Function to help qsort sort FIELD_DECLs by name order. */
static int
field_decl_cmp (const void *x_p, const void *y_p)
{
const tree *const x = (const tree *) x_p;
const tree *const y = (const tree *) y_p;
if (DECL_NAME (*x) == DECL_NAME (*y))
/* A nontype is "greater" than a type. */
return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL);
if (DECL_NAME (*x) == NULL_TREE)
return -1;
if (DECL_NAME (*y) == NULL_TREE)
return 1;
if (DECL_NAME (*x) < DECL_NAME (*y))
return -1;
return 1;
}
/* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T.
LOC is the location of the RECORD_TYPE or UNION_TYPE's definition.
FIELDLIST is a chain of FIELD_DECL nodes for the fields.
ATTRIBUTES are attributes to be applied to the structure.
ENCLOSING_STRUCT_PARSE_INFO is the value of STRUCT_PARSE_INFO when
the struct was started. */
tree
finish_struct (location_t loc, tree t, tree fieldlist, tree attributes,
	       struct c_struct_parse_info *enclosing_struct_parse_info)
{
  tree x;
  bool toplevel = file_scope == current_scope;
  /* If this type was previously laid out as a forward reference,
     make sure we lay it out again.  */
  TYPE_SIZE (t) = NULL_TREE;
  decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
  /* Pedantically diagnose aggregates with no (named) members.  An
     anonymous struct/union member counts as named only in C11 mode.  */
  if (pedantic)
    {
      for (x = fieldlist; x; x = DECL_CHAIN (x))
	{
	  if (DECL_NAME (x) != NULL_TREE)
	    break;
	  if (flag_isoc11 && RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
	    break;
	}
      if (x == NULL_TREE)
	{
	  if (TREE_CODE (t) == UNION_TYPE)
	    {
	      if (fieldlist)
		pedwarn (loc, OPT_Wpedantic, "union has no named members");
	      else
		pedwarn (loc, OPT_Wpedantic, "union has no members");
	    }
	  else
	    {
	      if (fieldlist)
		pedwarn (loc, OPT_Wpedantic, "struct has no named members");
	      else
		pedwarn (loc, OPT_Wpedantic, "struct has no members");
	    }
	}
    }
  /* Install struct as DECL_CONTEXT of each field decl.
     Also process specified field sizes, found in the DECL_INITIAL,
     storing 0 there after the type has been changed to precision equal
     to its width, rather than the precision of the specified standard
     type.  (Correct layout requires the original type to have been preserved
     until now.)  */
  /* Tracks whether a named member (or, for this purpose, an anonymous
     struct/union member) precedes a flexible array member.  */
  bool saw_named_field = false;
  for (x = fieldlist; x; x = DECL_CHAIN (x))
    {
      if (TREE_TYPE (x) == error_mark_node)
	continue;
      DECL_CONTEXT (x) = t;
      /* If any field is const, the structure type is pseudo-const.  */
      if (TREE_READONLY (x))
	C_TYPE_FIELDS_READONLY (t) = 1;
      else
	{
	  /* A field that is pseudo-const makes the structure likewise.  */
	  tree t1 = strip_array_types (TREE_TYPE (x));
	  if (RECORD_OR_UNION_TYPE_P (t1) && C_TYPE_FIELDS_READONLY (t1))
	    C_TYPE_FIELDS_READONLY (t) = 1;
	}
      /* Any field that is volatile means variables of this type must be
	 treated in some ways as volatile.  */
      if (TREE_THIS_VOLATILE (x))
	C_TYPE_FIELDS_VOLATILE (t) = 1;
      /* Any field of nominal variable size implies structure is too.  */
      if (C_DECL_VARIABLE_SIZE (x))
	C_TYPE_VARIABLE_SIZE (t) = 1;
      /* grokfield stashed the bit-field width in DECL_INITIAL.  */
      if (DECL_C_BIT_FIELD (x))
	{
	  unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x));
	  DECL_SIZE (x) = bitsize_int (width);
	  DECL_BIT_FIELD (x) = 1;
	}
      if (TYPE_PACKED (t)
	  && (DECL_BIT_FIELD (x)
	      || TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT))
	DECL_PACKED (x) = 1;
      /* Detect flexible array member in an invalid context.  */
      if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
	  && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE
	  && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE)
	{
	  if (TREE_CODE (t) == UNION_TYPE)
	    {
	      error_at (DECL_SOURCE_LOCATION (x),
			"flexible array member in union");
	      TREE_TYPE (x) = error_mark_node;
	    }
	  else if (DECL_CHAIN (x) != NULL_TREE)
	    {
	      error_at (DECL_SOURCE_LOCATION (x),
			"flexible array member not at end of struct");
	      TREE_TYPE (x) = error_mark_node;
	    }
	  else if (!saw_named_field)
	    {
	      error_at (DECL_SOURCE_LOCATION (x),
			"flexible array member in a struct with no named "
			"members");
	      TREE_TYPE (x) = error_mark_node;
	    }
	}
      if (pedantic && TREE_CODE (t) == RECORD_TYPE
	  && flexible_array_type_p (TREE_TYPE (x)))
	pedwarn (DECL_SOURCE_LOCATION (x), OPT_Wpedantic,
		 "invalid use of structure with flexible array member");
      if (DECL_NAME (x)
	  || RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
	saw_named_field = true;
    }
  detect_field_duplicates (fieldlist);
  /* Now we have the nearly final fieldlist.  Record it,
     then lay out the structure or union (including the fields).  */
  TYPE_FIELDS (t) = fieldlist;
  maybe_apply_pragma_scalar_storage_order (t);
  layout_type (t);
  if (TYPE_SIZE_UNIT (t)
      && TREE_CODE (TYPE_SIZE_UNIT (t)) == INTEGER_CST
      && !TREE_OVERFLOW (TYPE_SIZE_UNIT (t))
      && !valid_constant_size_p (TYPE_SIZE_UNIT (t)))
    error ("type %qT is too large", t);
  /* Give bit-fields their proper types and rewrite the type of array fields
     with scalar component if the enclosing type has reverse storage order.  */
  for (tree field = fieldlist; field; field = DECL_CHAIN (field))
    {
      /* DECL_INITIAL still holding a width identifies a bit-field
	 (set in grokfield); it is cleared once consumed.  */
      if (TREE_CODE (field) == FIELD_DECL
	  && DECL_INITIAL (field)
	  && TREE_TYPE (field) != error_mark_node)
	{
	  unsigned HOST_WIDE_INT width
	    = tree_to_uhwi (DECL_INITIAL (field));
	  tree type = TREE_TYPE (field);
	  if (width != TYPE_PRECISION (type))
	    {
	      TREE_TYPE (field)
		= c_build_bitfield_integer_type (width, TYPE_UNSIGNED (type));
	      SET_DECL_MODE (field, TYPE_MODE (TREE_TYPE (field)));
	    }
	  DECL_INITIAL (field) = NULL_TREE;
	}
      else if (TYPE_REVERSE_STORAGE_ORDER (t)
	       && TREE_CODE (field) == FIELD_DECL
	       && TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE)
	{
	  tree ftype = TREE_TYPE (field);
	  tree ctype = strip_array_types (ftype);
	  if (!RECORD_OR_UNION_TYPE_P (ctype) && TYPE_MODE (ctype) != QImode)
	    {
	      /* Copy each level of the array type and mark it with the
		 reverse storage order, down to the element type.  */
	      tree fmain_type = TYPE_MAIN_VARIANT (ftype);
	      tree *typep = &fmain_type;
	      do {
		*typep = build_distinct_type_copy (*typep);
		TYPE_REVERSE_STORAGE_ORDER (*typep) = 1;
		typep = &TREE_TYPE (*typep);
	      } while (TREE_CODE (*typep) == ARRAY_TYPE);
	      TREE_TYPE (field)
		= c_build_qualified_type (fmain_type, TYPE_QUALS (ftype));
	    }
	}
    }
  /* Now we have the truly final field list.
     Store it in this type and in the variants.  */
  TYPE_FIELDS (t) = fieldlist;
  /* If there are lots of fields, sort so we can look through them fast.
     We arbitrarily consider 16 or more elts to be "a lot".  */
  {
    int len = 0;
    for (x = fieldlist; x; x = DECL_CHAIN (x))
      {
	if (len > 15 || DECL_NAME (x) == NULL)
	  break;
	len += 1;
      }
    if (len > 15)
      {
	tree *field_array;
	struct lang_type *space;
	struct sorted_fields_type *space2;
	len += list_length (x);
	/* Use the same allocation policy here that make_node uses, to
	   ensure that this lives as long as the rest of the struct decl.
	   All decls in an inline function need to be saved.  */
	space = ggc_cleared_alloc<struct lang_type> ();
	space2 = (sorted_fields_type *) ggc_internal_alloc
	  (sizeof (struct sorted_fields_type) + len * sizeof (tree));
	len = 0;
	space->s = space2;
	field_array = &space2->elts[0];
	for (x = fieldlist; x; x = DECL_CHAIN (x))
	  {
	    field_array[len++] = x;
	    /* If there is anonymous struct or union, break out of the loop.  */
	    if (DECL_NAME (x) == NULL)
	      break;
	  }
	/* Found no anonymous struct/union.  Add the TYPE_LANG_SPECIFIC.  */
	if (x == NULL)
	  {
	    TYPE_LANG_SPECIFIC (t) = space;
	    TYPE_LANG_SPECIFIC (t)->s->len = len;
	    field_array = TYPE_LANG_SPECIFIC (t)->s->elts;
	    qsort (field_array, len, sizeof (tree), field_decl_cmp);
	  }
      }
  }
  /* Note: C_TYPE_INCOMPLETE_VARS overloads TYPE_VFIELD which is used
     in dwarf2out via rest_of_decl_compilation below and means
     something totally different.  Since we will be clearing
     C_TYPE_INCOMPLETE_VARS shortly after we iterate through them,
     clear it ahead of time and avoid problems in dwarf2out.  Ideally,
     C_TYPE_INCOMPLETE_VARS should use some language specific
     node.  */
  tree incomplete_vars = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t));
  /* Propagate the computed flags and field list to every variant.  */
  for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x))
    {
      TYPE_FIELDS (x) = TYPE_FIELDS (t);
      TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t);
      C_TYPE_FIELDS_READONLY (x) = C_TYPE_FIELDS_READONLY (t);
      C_TYPE_FIELDS_VOLATILE (x) = C_TYPE_FIELDS_VOLATILE (t);
      C_TYPE_VARIABLE_SIZE (x) = C_TYPE_VARIABLE_SIZE (t);
      C_TYPE_INCOMPLETE_VARS (x) = NULL_TREE;
    }
  /* If this was supposed to be a transparent union, but we can't
     make it one, warn and turn off the flag.  */
  if (TREE_CODE (t) == UNION_TYPE
      && TYPE_TRANSPARENT_AGGR (t)
      && (!TYPE_FIELDS (t) || TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t))))
    {
      TYPE_TRANSPARENT_AGGR (t) = 0;
      warning_at (loc, 0, "union cannot be made transparent");
    }
  /* Update type location to the one of the definition, instead of e.g.
     a forward declaration.  */
  if (TYPE_STUB_DECL (t))
    DECL_SOURCE_LOCATION (TYPE_STUB_DECL (t)) = loc;
  /* Finish debugging output for this type.  */
  rest_of_type_compilation (t, toplevel);
  /* If this structure or union completes the type of any previous
     variable declaration, lay it out and output its rtl.  */
  for (x = incomplete_vars; x; x = TREE_CHAIN (x))
    {
      tree decl = TREE_VALUE (x);
      if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	layout_array_type (TREE_TYPE (decl));
      if (TREE_CODE (decl) != TYPE_DECL)
	{
	  layout_decl (decl, 0);
	  if (c_dialect_objc ())
	    objc_check_decl (decl);
	  rest_of_decl_compilation (decl, toplevel, 0);
	}
    }
  /* If we're inside a function proper, i.e. not file-scope and not still
     parsing parameters, then arrange for the size of a variable sized type
     to be bound now.  */
  if (building_stmt_list_p () && variably_modified_type_p (t, NULL_TREE))
    add_stmt (build_stmt (loc,
			  DECL_EXPR, build_decl (loc, TYPE_DECL, NULL, t)));
  if (warn_cxx_compat)
    warn_cxx_compat_finish_struct (fieldlist, TREE_CODE (t), loc);
  /* Restore the parse state of the enclosing struct saved by
     start_struct.  */
  delete struct_parse_info;
  struct_parse_info = enclosing_struct_parse_info;
  /* If this struct is defined inside a struct, add it to
     struct_types.  */
  if (warn_cxx_compat
      && struct_parse_info != NULL
      && !in_sizeof && !in_typeof && !in_alignof)
    struct_parse_info->struct_types.safe_push (t);
  return t;
}
/* Shared state for resort_field_decl_cmp: the pointer-update operation
   and its cookie, stashed here by resort_sorted_fields because qsort
   comparators take no extra argument.  */
static struct {
  gt_pointer_operator new_value;
  void *cookie;
} resort_data;
/* This routine compares two fields like field_decl_cmp but using the
pointer operator in resort_data. */
static int
resort_field_decl_cmp (const void *x_p, const void *y_p)
{
const tree *const x = (const tree *) x_p;
const tree *const y = (const tree *) y_p;
if (DECL_NAME (*x) == DECL_NAME (*y))
/* A nontype is "greater" than a type. */
return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL);
if (DECL_NAME (*x) == NULL_TREE)
return -1;
if (DECL_NAME (*y) == NULL_TREE)
return 1;
{
tree d1 = DECL_NAME (*x);
tree d2 = DECL_NAME (*y);
resort_data.new_value (&d1, resort_data.cookie);
resort_data.new_value (&d2, resort_data.cookie);
if (d1 < d2)
return -1;
}
return 1;
}
/* Resort DECL_SORTED_FIELDS because pointers have been reordered. */
void
resort_sorted_fields (void *obj,
		      void * ARG_UNUSED (orig_obj),
		      gt_pointer_operator new_value,
		      void *cookie)
{
  struct sorted_fields_type *fields = (struct sorted_fields_type *) obj;
  /* Stash the relocation callback where the comparator can reach it.  */
  resort_data.new_value = new_value;
  resort_data.cookie = cookie;
  qsort (&fields->elts[0], fields->len, sizeof (tree),
	 resort_field_decl_cmp);
}
/* Lay out array type T, laying out its element type first (recursing
   through arrays of arrays, innermost first) so that T's own layout
   can rely on a completed element type.  */
static void
layout_array_type (tree t)
{
  tree elt_type = TREE_TYPE (t);
  if (TREE_CODE (elt_type) == ARRAY_TYPE)
    layout_array_type (elt_type);
  layout_type (t);
}
/* Begin compiling the definition of an enumeration type.
   NAME is its name (or null if anonymous).
   LOC is the enum's location.
   THE_ENUM receives the running state (next value, overflow flag) used
   by build_enumerator as individual enumerators are parsed.
   Returns the type object, as yet incomplete.
   Also records info about it so that build_enumerator
   may be used to declare the individual values as they are read.  */
tree
start_enum (location_t loc, struct c_enum_contents *the_enum, tree name)
{
  tree enumtype = NULL_TREE;
  location_t enumloc = UNKNOWN_LOCATION;
  /* If this is the real definition for a previous forward reference,
     fill in the contents in the same object that used to be the
     forward reference.  */
  if (name != NULL_TREE)
    enumtype = lookup_tag (ENUMERAL_TYPE, name, true, &enumloc);
  /* No usable previous tag: create a fresh ENUMERAL_TYPE node and
     register it in the current scope.  */
  if (enumtype == NULL_TREE || TREE_CODE (enumtype) != ENUMERAL_TYPE)
    {
      enumtype = make_node (ENUMERAL_TYPE);
      pushtag (loc, name, enumtype);
    }
  /* Update type location to the one of the definition, instead of e.g.
     a forward declaration.  */
  else if (TYPE_STUB_DECL (enumtype))
    {
      enumloc = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype));
      DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype)) = loc;
    }
  /* Diagnose "enum e { ... enum e { ... } ... }".  */
  if (C_TYPE_BEING_DEFINED (enumtype))
    error_at (loc, "nested redefinition of %<enum %E%>", name);
  C_TYPE_BEING_DEFINED (enumtype) = 1;
  if (TYPE_VALUES (enumtype) != NULL_TREE)
    {
      /* This enum is a named one that has been declared already.  */
      error_at (loc, "redeclaration of %<enum %E%>", name);
      if (enumloc != UNKNOWN_LOCATION)
	inform (enumloc, "originally defined here");
      /* Completely replace its old definition.
	 The old enumerators remain defined, however.  */
      TYPE_VALUES (enumtype) = NULL_TREE;
    }
  /* Reset the per-enum parsing state: values start from zero.  */
  the_enum->enum_next_value = integer_zero_node;
  the_enum->enum_overflow = 0;
  /* -fshort-enums: mark every variant packed so finish_enum picks the
     narrowest representation.  */
  if (flag_short_enums)
    for (tree v = TYPE_MAIN_VARIANT (enumtype); v; v = TYPE_NEXT_VARIANT (v))
      TYPE_PACKED (v) = 1;
  /* FIXME: This will issue a warning for a use of a type defined
     within sizeof in a statement expr.  This is not terribly serious
     as C++ doesn't permit statement exprs within sizeof anyhow.  */
  if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof))
    warning_at (loc, OPT_Wc___compat,
		"defining type in %qs expression is invalid in C++",
		(in_sizeof
		 ? "sizeof"
		 : (in_typeof ? "typeof" : "alignof")));
  return enumtype;
}
/* After processing and defining all the values of an enumeration type,
   install their decls in the enumeration type and finish it off.
   ENUMTYPE is the type object, VALUES a list of decl-value pairs,
   and ATTRIBUTES are the specified attributes.
   Returns ENUMTYPE.  */
tree
finish_enum (tree enumtype, tree values, tree attributes)
{
  tree pair, tem;
  tree minnode = NULL_TREE, maxnode = NULL_TREE;
  int precision;
  signop sign;
  bool toplevel = (file_scope == current_scope);
  struct lang_type *lt;
  decl_attributes (&enumtype, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
  /* Calculate the maximum value of any enumerator in this type.  */
  if (values == error_mark_node)
    minnode = maxnode = integer_zero_node;
  else
    {
      minnode = maxnode = TREE_VALUE (values);
      for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair))
	{
	  tree value = TREE_VALUE (pair);
	  if (tree_int_cst_lt (maxnode, value))
	    maxnode = value;
	  if (tree_int_cst_lt (value, minnode))
	    minnode = value;
	}
    }
  /* Construct the final type of this enumeration.  It is the same
     as one of the integral types - the narrowest one that fits, except
     that normally we only go as narrow as int - and signed iff any of
     the values are negative.  */
  sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED;
  precision = MAX (tree_int_cst_min_precision (minnode, sign),
		   tree_int_cst_min_precision (maxnode, sign));
  /* If the precision of the type was specified with an attribute and it
     was too small, give an error.  Otherwise, use it.  */
  if (TYPE_PRECISION (enumtype) && lookup_attribute ("mode", attributes))
    {
      if (precision > TYPE_PRECISION (enumtype))
	{
	  TYPE_PRECISION (enumtype) = 0;
	  error ("specified mode too small for enumeral values");
	}
      else
	precision = TYPE_PRECISION (enumtype);
    }
  else
    TYPE_PRECISION (enumtype) = 0;
  /* Pick the integer type TEM whose layout the enum will copy: a
     sized type when packed, wider than int, or a mode was given;
     otherwise plain (unsigned) int.  */
  if (TYPE_PACKED (enumtype)
      || precision > TYPE_PRECISION (integer_type_node)
      || TYPE_PRECISION (enumtype))
    {
      tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0);
      if (tem == NULL)
	{
	  /* No integer type is wide enough; fall back to long long.  */
	  warning (0, "enumeration values exceed range of largest integer");
	  tem = long_long_integer_type_node;
	}
    }
  else
    tem = sign == UNSIGNED ? unsigned_type_node : integer_type_node;
  /* Copy TEM's layout properties onto the enum, then lay it out.
     TYPE_SIZE is cleared first so layout_type recomputes it.  */
  TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem);
  TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem);
  TYPE_UNSIGNED (enumtype) = TYPE_UNSIGNED (tem);
  SET_TYPE_ALIGN (enumtype, TYPE_ALIGN (tem));
  TYPE_SIZE (enumtype) = NULL_TREE;
  TYPE_PRECISION (enumtype) = TYPE_PRECISION (tem);
  layout_type (enumtype);
  if (values != error_mark_node)
    {
      /* Change the type of the enumerators to be the enum type.  We
	 need to do this irrespective of the size of the enum, for
	 proper type checking.  Replace the DECL_INITIALs of the
	 enumerators, and the value slots of the list, with copies
	 that have the enum type; they cannot be modified in place
	 because they may be shared (e.g.  integer_zero_node) Finally,
	 change the purpose slots to point to the names of the decls.  */
      for (pair = values; pair; pair = TREE_CHAIN (pair))
	{
	  tree enu = TREE_PURPOSE (pair);
	  tree ini = DECL_INITIAL (enu);
	  TREE_TYPE (enu) = enumtype;
	  /* The ISO C Standard mandates enumerators to have type int,
	     even though the underlying type of an enum type is
	     unspecified.  However, GCC allows enumerators of any
	     integer type as an extensions.  build_enumerator()
	     converts any enumerators that fit in an int to type int,
	     to avoid promotions to unsigned types when comparing
	     integers with enumerators that fit in the int range.
	     When -pedantic is given, build_enumerator() would have
	     already warned about those that don't fit.  Here we
	     convert the rest to the enumerator type.  */
	  if (TREE_TYPE (ini) != integer_type_node)
	    ini = convert (enumtype, ini);
	  DECL_INITIAL (enu) = ini;
	  TREE_PURPOSE (pair) = DECL_NAME (enu);
	  TREE_VALUE (pair) = ini;
	}
      TYPE_VALUES (enumtype) = values;
    }
  /* Record the min/max values so that we can warn about bit-field
     enumerations that are too small for the values.  */
  lt = ggc_cleared_alloc<struct lang_type> ();
  lt->enum_min = minnode;
  lt->enum_max = maxnode;
  TYPE_LANG_SPECIFIC (enumtype) = lt;
  /* Fix up all variant types of this enum type.  */
  for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem))
    {
      if (tem == enumtype)
	continue;
      TYPE_VALUES (tem) = TYPE_VALUES (enumtype);
      TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype);
      TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype);
      TYPE_SIZE (tem) = TYPE_SIZE (enumtype);
      TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype);
      SET_TYPE_MODE (tem, TYPE_MODE (enumtype));
      TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype);
      SET_TYPE_ALIGN (tem, TYPE_ALIGN (enumtype));
      TYPE_USER_ALIGN (tem) = TYPE_USER_ALIGN (enumtype);
      TYPE_UNSIGNED (tem) = TYPE_UNSIGNED (enumtype);
      TYPE_LANG_SPECIFIC (tem) = TYPE_LANG_SPECIFIC (enumtype);
    }
  /* Finish debugging output for this type.  */
  rest_of_type_compilation (enumtype, toplevel);
  /* If this enum is defined inside a struct, add it to
     struct_types.  */
  if (warn_cxx_compat
      && struct_parse_info != NULL
      && !in_sizeof && !in_typeof && !in_alignof)
    struct_parse_info->struct_types.safe_push (enumtype);
  return enumtype;
}
/* Build and install a CONST_DECL for one value of the
   current enumeration type (one that was begun with start_enum).
   DECL_LOC is the location of the enumerator.
   LOC is the location of the '=' operator if any, DECL_LOC otherwise.
   THE_ENUM holds the running "next value" / overflow state.
   NAME is the enumerator's identifier; VALUE its explicit value, or
   NULL_TREE to take the next sequential value.
   Return a tree-list containing the CONST_DECL and its value.
   Assignment of sequential values by default is handled here.  */
tree
build_enumerator (location_t decl_loc, location_t loc,
		  struct c_enum_contents *the_enum, tree name, tree value)
{
  tree decl, type;
  /* Validate and default VALUE.  */
  if (value != NULL_TREE)
    {
      /* Don't issue more errors for error_mark_node (i.e. an
	 undeclared identifier) - just ignore the value expression.  */
      if (value == error_mark_node)
	value = NULL_TREE;
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
	{
	  error_at (loc, "enumerator value for %qE is not an integer constant",
		    name);
	  value = NULL_TREE;
	}
      else
	{
	  if (TREE_CODE (value) != INTEGER_CST)
	    {
	      /* Try constant-folding; if that yields a constant the
		 expression was merely not a constant *expression*,
		 which is only a pedwarn.  */
	      value = c_fully_fold (value, false, NULL);
	      if (TREE_CODE (value) == INTEGER_CST)
		pedwarn (loc, OPT_Wpedantic,
			 "enumerator value for %qE is not an integer "
			 "constant expression", name);
	    }
	  if (TREE_CODE (value) != INTEGER_CST)
	    {
	      error ("enumerator value for %qE is not an integer constant",
		     name);
	      value = NULL_TREE;
	    }
	  else
	    {
	      value = default_conversion (value);
	      constant_expression_warning (value);
	    }
	}
    }
  /* Default based on previous value.  */
  /* It should no longer be possible to have NON_LVALUE_EXPR
     in the default.  */
  if (value == NULL_TREE)
    {
      value = the_enum->enum_next_value;
      if (the_enum->enum_overflow)
	error_at (loc, "overflow in enumeration values");
    }
  /* Even though the underlying type of an enum is unspecified, the
     type of enumeration constants is explicitly defined as int
     (6.4.4.3/2 in the C99 Standard).  GCC allows any integer type as
     an extension.  */
  else if (!int_fits_type_p (value, integer_type_node))
    pedwarn (loc, OPT_Wpedantic,
	     "ISO C restricts enumerator values to range of %<int%>");
  /* The ISO C Standard mandates enumerators to have type int, even
     though the underlying type of an enum type is unspecified.
     However, GCC allows enumerators of any integer type as an
     extensions.  Here we convert any enumerators that fit in an int
     to type int, to avoid promotions to unsigned types when comparing
     integers with enumerators that fit in the int range.  When
     -pedantic is given, we would have already warned about those that
     don't fit.  We have to do this here rather than in finish_enum
     because this value may be used to define more enumerators.  */
  if (int_fits_type_p (value, integer_type_node))
    value = convert (integer_type_node, value);
  /* Set basis for default for next value.  */
  the_enum->enum_next_value
    = build_binary_op (EXPR_LOC_OR_LOC (value, input_location),
		       PLUS_EXPR, value, integer_one_node, false);
  /* Overflow occurred iff VALUE + 1 wrapped around below VALUE.  */
  the_enum->enum_overflow = tree_int_cst_lt (the_enum->enum_next_value, value);
  /* Now create a declaration for the enum value name.  The decl's type
     is at least as wide as int, unsigned only if the value's type is
     at least int-wide and unsigned.  */
  type = TREE_TYPE (value);
  type = c_common_type_for_size (MAX (TYPE_PRECISION (type),
				      TYPE_PRECISION (integer_type_node)),
				 (TYPE_PRECISION (type)
				  >= TYPE_PRECISION (integer_type_node)
				  && TYPE_UNSIGNED (type)));
  decl = build_decl (decl_loc, CONST_DECL, name, type);
  DECL_INITIAL (decl) = convert (type, value);
  pushdecl (decl);
  return tree_cons (decl, value, NULL_TREE);
}
/* Create the FUNCTION_DECL for a function definition.
   DECLSPECS, DECLARATOR and ATTRIBUTES are the parts of
   the declaration; they describe the function's name and the type it returns,
   but twisted together in a fashion that parallels the syntax of C.
   This function creates a binding context for the function body
   as well as setting up the FUNCTION_DECL in current_function_decl.
   Returns true on success.  If the DECLARATOR is not suitable for a function
   (it defines a datum instead), we return false to report a parse error.  */
bool
start_function (struct c_declspecs *declspecs, struct c_declarator *declarator,
		tree attributes)
{
  tree decl1, old_decl;
  tree restype, resdecl;
  location_t loc;
  /* Reset the per-function bookkeeping for the new definition.  */
  current_function_returns_value = 0;  /* Assume, until we see it does.  */
  current_function_returns_null = 0;
  current_function_returns_abnormally = 0;
  warn_about_return_type = 0;
  c_switch_stack = NULL;
  /* Indicate no valid break/continue context by setting these variables
     to some non-null, non-label value.  We'll notice and emit the proper
     error message in c_finish_bc_stmt.  */
  c_break_label = c_cont_label = size_zero_node;
  decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, true, NULL,
			  &attributes, NULL, NULL, DEPRECATED_NORMAL);
  invoke_plugin_callbacks (PLUGIN_START_PARSE_FUNCTION, decl1);
  /* If the declarator is not suitable for a function definition,
     cause a syntax error.  */
  if (decl1 == NULL_TREE
      || TREE_CODE (decl1) != FUNCTION_DECL)
    return false;
  loc = DECL_SOURCE_LOCATION (decl1);
  c_decl_attributes (&decl1, attributes, 0);
  /* Inline plus noinline are contradictory; warn.  */
  if (DECL_DECLARED_INLINE_P (decl1)
      && DECL_UNINLINABLE (decl1)
      && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl1)))
    warning_at (loc, OPT_Wattributes,
		"inline function %qD given attribute noinline",
		decl1);
  /* Handle gnu_inline attribute.  */
  if (declspecs->inline_p
      && !flag_gnu89_inline
      && TREE_CODE (decl1) == FUNCTION_DECL
      && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl1))
	  || current_function_decl))
    {
      if (declspecs->storage_class != csc_static)
	DECL_EXTERNAL (decl1) = !DECL_EXTERNAL (decl1);
    }
  announce_function (decl1);
  /* A function cannot return an incomplete (non-void) type; recover by
     pretending it returns void.  */
  if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl1))))
    {
      error_at (loc, "return type is an incomplete type");
      /* Make it return void instead.  */
      TREE_TYPE (decl1)
	= build_function_type (void_type_node,
			       TYPE_ARG_TYPES (TREE_TYPE (decl1)));
    }
  if (warn_about_return_type)
    warn_defaults_to (loc, flag_isoc99 ? OPT_Wimplicit_int
			   : (warn_return_type ? OPT_Wreturn_type
			      : OPT_Wimplicit_int),
		      "return type defaults to %<int%>");
  /* Make the init_value nonzero so pushdecl knows this is not tentative.
     error_mark_node is replaced below (in pop_scope) with the BLOCK.  */
  DECL_INITIAL (decl1) = error_mark_node;
  /* A nested function is not global.  */
  if (current_function_decl != NULL_TREE)
    TREE_PUBLIC (decl1) = 0;
  /* If this definition isn't a prototype and we had a prototype declaration
     before, copy the arg type info from that prototype.  */
  old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope);
  if (old_decl && TREE_CODE (old_decl) != FUNCTION_DECL)
    old_decl = NULL_TREE;
  current_function_prototype_locus = UNKNOWN_LOCATION;
  current_function_prototype_built_in = false;
  current_function_prototype_arg_types = NULL_TREE;
  if (!prototype_p (TREE_TYPE (decl1)))
    {
      /* Old-style definition: merge in a compatible prototype from the
	 same scope, if any, and remember where it came from.  */
      if (old_decl != NULL_TREE
	  && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE
	  && comptypes (TREE_TYPE (TREE_TYPE (decl1)),
			TREE_TYPE (TREE_TYPE (old_decl))))
	{
	  if (stdarg_p (TREE_TYPE (old_decl)))
	    {
	      warning_at (loc, 0, "%q+D defined as variadic function "
			  "without prototype", decl1);
	      locate_old_decl (old_decl);
	    }
	  TREE_TYPE (decl1) = composite_type (TREE_TYPE (old_decl),
					      TREE_TYPE (decl1));
	  current_function_prototype_locus = DECL_SOURCE_LOCATION (old_decl);
	  current_function_prototype_built_in
	    = C_DECL_BUILTIN_PROTOTYPE (old_decl);
	  current_function_prototype_arg_types
	    = TYPE_ARG_TYPES (TREE_TYPE (decl1));
	}
      if (TREE_PUBLIC (decl1))
	{
	  /* If there is an external prototype declaration of this
	     function, record its location but do not copy information
	     to this decl.  This may be an invisible declaration
	     (built-in or in a scope which has finished) or simply
	     have more refined argument types than any declaration
	     found above.  */
	  struct c_binding *b;
	  for (b = I_SYMBOL_BINDING (DECL_NAME (decl1)); b; b = b->shadowed)
	    if (B_IN_SCOPE (b, external_scope))
	      break;
	  if (b)
	    {
	      tree ext_decl, ext_type;
	      ext_decl = b->decl;
	      ext_type = b->u.type ? b->u.type : TREE_TYPE (ext_decl);
	      if (TREE_CODE (ext_type) == FUNCTION_TYPE
		  && comptypes (TREE_TYPE (TREE_TYPE (decl1)),
				TREE_TYPE (ext_type)))
		{
		  current_function_prototype_locus
		    = DECL_SOURCE_LOCATION (ext_decl);
		  current_function_prototype_built_in
		    = C_DECL_BUILTIN_PROTOTYPE (ext_decl);
		  current_function_prototype_arg_types
		    = TYPE_ARG_TYPES (ext_type);
		}
	    }
	}
    }
  /* Optionally warn of old-fashioned def with no previous prototype.  */
  if (warn_strict_prototypes
      && old_decl != error_mark_node
      && !prototype_p (TREE_TYPE (decl1))
      && C_DECL_ISNT_PROTOTYPE (old_decl))
    warning_at (loc, OPT_Wstrict_prototypes,
		"function declaration isn%'t a prototype");
  /* Optionally warn of any global def with no previous prototype.  */
  else if (warn_missing_prototypes
	   && old_decl != error_mark_node
	   && TREE_PUBLIC (decl1)
	   && !MAIN_NAME_P (DECL_NAME (decl1))
	   && C_DECL_ISNT_PROTOTYPE (old_decl)
	   && !DECL_DECLARED_INLINE_P (decl1))
    warning_at (loc, OPT_Wmissing_prototypes,
		"no previous prototype for %qD", decl1);
  /* Optionally warn of any def with no previous prototype
     if the function has already been used.  */
  else if (warn_missing_prototypes
	   && old_decl != NULL_TREE
	   && old_decl != error_mark_node
	   && TREE_USED (old_decl)
	   && !prototype_p (TREE_TYPE (old_decl)))
    warning_at (loc, OPT_Wmissing_prototypes,
		"%qD was used with no prototype before its definition", decl1);
  /* Optionally warn of any global def with no previous declaration.  */
  else if (warn_missing_declarations
	   && TREE_PUBLIC (decl1)
	   && old_decl == NULL_TREE
	   && !MAIN_NAME_P (DECL_NAME (decl1))
	   && !DECL_DECLARED_INLINE_P (decl1))
    warning_at (loc, OPT_Wmissing_declarations,
		"no previous declaration for %qD",
		decl1);
  /* Optionally warn of any def with no previous declaration
     if the function has already been used.  */
  else if (warn_missing_declarations
	   && old_decl != NULL_TREE
	   && old_decl != error_mark_node
	   && TREE_USED (old_decl)
	   && C_DECL_IMPLICIT (old_decl))
    warning_at (loc, OPT_Wmissing_declarations,
		"%qD was used with no declaration before its definition", decl1);
  /* This function exists in static storage.
     (This does not mean `static' in the C sense!) */
  TREE_STATIC (decl1) = 1;
  /* This is the earliest point at which we might know the assembler
     name of the function.  Thus, if it's set before this, die horribly.  */
  gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl1));
  /* If #pragma weak was used, mark the decl weak now.  */
  if (current_scope == file_scope)
    maybe_apply_pragma_weak (decl1);
  /* Warn for unlikely, improbable, or stupid declarations of `main'.  */
  if (warn_main && MAIN_NAME_P (DECL_NAME (decl1)))
    {
      if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1)))
	  != integer_type_node)
	pedwarn (loc, OPT_Wmain, "return type of %qD is not %<int%>", decl1);
      else if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (decl1))))
	pedwarn (loc, OPT_Wmain, "%<_Atomic%>-qualified return type of %qD",
		 decl1);
      check_main_parameter_types (decl1);
      if (!TREE_PUBLIC (decl1))
	pedwarn (loc, OPT_Wmain,
		 "%qD is normally a non-static function", decl1);
    }
  /* Record the decl so that the function name is defined.
     If we already have a decl for this name, and it is a FUNCTION_DECL,
     use the old decl.  */
  current_function_decl = pushdecl (decl1);
  /* Open the parameter scope and create the RESULT_DECL that will hold
     the function's return value.  */
  push_scope ();
  declare_parm_level ();
  restype = TREE_TYPE (TREE_TYPE (current_function_decl));
  resdecl = build_decl (loc, RESULT_DECL, NULL_TREE, restype);
  DECL_ARTIFICIAL (resdecl) = 1;
  DECL_IGNORED_P (resdecl) = 1;
  DECL_RESULT (current_function_decl) = resdecl;
  start_fname_decls ();
  return true;
}
/* Subroutine of store_parm_decls which handles new-style function
   definitions (prototype format). The parms already have decls, so we
   need only record them as in effect and complain if any redundant
   old-style parm decls were written.
   FNDECL is the function being defined; ARG_INFO carries its parsed
   parameter decls, ancillary decls, and tag declarations.  */
static void
store_parm_decls_newstyle (tree fndecl, const struct c_arg_info *arg_info)
{
  tree decl;
  c_arg_tag *tag;
  unsigned ix;
  /* Any bindings already in the parameter scope must be stray old-style
     declarations; diagnose them and start over with an empty scope.  */
  if (current_scope->bindings)
    {
      error_at (DECL_SOURCE_LOCATION (fndecl),
		"old-style parameter declarations in prototyped "
		"function definition");
      /* Get rid of the old-style declarations.  */
      pop_scope ();
      push_scope ();
    }
  /* Don't issue this warning for nested functions, and don't issue this
     warning if we got here because ARG_INFO_TYPES was error_mark_node
     (this happens when a function definition has just an ellipsis in
     its parameter list).  */
  else if (!in_system_header_at (input_location)
	   && !current_function_scope
	   && arg_info->types != error_mark_node)
    warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wtraditional,
		"traditional C rejects ISO C style function definitions");
  /* Now make all the parameter declarations visible in the function body.
     We can bypass most of the grunt work of pushdecl.  */
  for (decl = arg_info->parms; decl; decl = DECL_CHAIN (decl))
    {
      DECL_CONTEXT (decl) = current_function_decl;
      if (DECL_NAME (decl))
	{
	  bind (DECL_NAME (decl), decl, current_scope,
		/*invisible=*/false, /*nested=*/false,
		UNKNOWN_LOCATION);
	  /* Only warn about shadowing for parameters that are not yet
	     marked used (i.e. freshly created for this definition).  */
	  if (!TREE_USED (decl))
	    warn_if_shadowing (decl);
	}
      else
	error_at (DECL_SOURCE_LOCATION (decl), "parameter name omitted");
    }
  /* Record the parameter list in the function declaration.  */
  DECL_ARGUMENTS (fndecl) = arg_info->parms;
  /* Now make all the ancillary declarations visible, likewise.  */
  for (decl = arg_info->others; decl; decl = DECL_CHAIN (decl))
    {
      DECL_CONTEXT (decl) = current_function_decl;
      if (DECL_NAME (decl))
	bind (DECL_NAME (decl), decl, current_scope,
	      /*invisible=*/false,
	      /*nested=*/(TREE_CODE (decl) == FUNCTION_DECL),
	      UNKNOWN_LOCATION);
    }
  /* And all the tag declarations.  */
  FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
    if (tag->id)
      bind (tag->id, tag->type, current_scope,
	    /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
}
/* Subroutine of store_parm_decls which handles old-style function
   definitions (separate parameter list and declarations).
   FNDECL is the function being defined; ARG_INFO holds the identifier
   list from the parameter parentheses.  Matches names against the
   declarations parsed between ')' and '{', builds DECL_ARGUMENTS, and
   reconciles the result against any previously seen prototype.  */
static void
store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info)
{
  struct c_binding *b;
  tree parm, decl, last;
  tree parmids = arg_info->parms;
  hash_set<tree> seen_args;
  if (!in_system_header_at (input_location))
    warning_at (DECL_SOURCE_LOCATION (fndecl),
		OPT_Wold_style_definition, "old-style function definition");
  /* Match each formal parameter name with its declaration.  Save each
     decl in the appropriate TREE_PURPOSE slot of the parmids chain.  */
  for (parm = parmids; parm; parm = TREE_CHAIN (parm))
    {
      if (TREE_VALUE (parm) == NULL_TREE)
	{
	  error_at (DECL_SOURCE_LOCATION (fndecl),
		    "parameter name missing from parameter list");
	  TREE_PURPOSE (parm) = NULL_TREE;
	  continue;
	}
      b = I_SYMBOL_BINDING (TREE_VALUE (parm));
      if (b && B_IN_CURRENT_SCOPE (b))
	{
	  decl = b->decl;
	  /* Skip erroneous parameters.  */
	  if (decl == error_mark_node)
	    continue;
	  /* If we got something other than a PARM_DECL it is an error.  */
	  if (TREE_CODE (decl) != PARM_DECL)
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"%qD declared as a non-parameter", decl);
	      continue;
	    }
	  /* If the declaration is already marked, we have a duplicate
	     name.  Complain and ignore the duplicate.  */
	  else if (seen_args.contains (decl))
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"multiple parameters named %qD", decl);
	      TREE_PURPOSE (parm) = NULL_TREE;
	      continue;
	    }
	  /* If the declaration says "void", complain and turn it into
	     an int.  */
	  else if (VOID_TYPE_P (TREE_TYPE (decl)))
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"parameter %qD declared with void type", decl);
	      TREE_TYPE (decl) = integer_type_node;
	      DECL_ARG_TYPE (decl) = integer_type_node;
	      layout_decl (decl, 0);
	    }
	  warn_if_shadowing (decl);
	}
      /* If no declaration found, default to int.  */
      else
	{
	  /* FIXME diagnostics: This should be the location of the argument,
	     not the FNDECL.  E.g., for an old-style declaration
	     int f10(v) { blah; }
	     We should use the location of the V, not the F10.
	     Unfortunately, the V is an IDENTIFIER_NODE which has no
	     location.  In the future we need locations for c_arg_info
	     entries.
	     See gcc.dg/Wshadow-3.c for an example of this problem.  */
	  decl = build_decl (DECL_SOURCE_LOCATION (fndecl),
			     PARM_DECL, TREE_VALUE (parm), integer_type_node);
	  DECL_ARG_TYPE (decl) = TREE_TYPE (decl);
	  pushdecl (decl);
	  warn_if_shadowing (decl);
	  if (flag_isoc99)
	    pedwarn (DECL_SOURCE_LOCATION (decl),
		     OPT_Wimplicit_int, "type of %qD defaults to %<int%>",
		     decl);
	  else
	    warning_at (DECL_SOURCE_LOCATION (decl),
			OPT_Wmissing_parameter_type,
			"type of %qD defaults to %<int%>", decl);
	}
      TREE_PURPOSE (parm) = decl;
      seen_args.add (decl);
    }
  /* Now examine the parms chain for incomplete declarations
     and declarations with no corresponding names.  */
  for (b = current_scope->bindings; b; b = b->prev)
    {
      parm = b->decl;
      if (TREE_CODE (parm) != PARM_DECL)
	continue;
      if (TREE_TYPE (parm) != error_mark_node
	  && !COMPLETE_TYPE_P (TREE_TYPE (parm)))
	{
	  error_at (DECL_SOURCE_LOCATION (parm),
		    "parameter %qD has incomplete type", parm);
	  TREE_TYPE (parm) = error_mark_node;
	}
      if (!seen_args.contains (parm))
	{
	  error_at (DECL_SOURCE_LOCATION (parm),
		    "declaration for parameter %qD but no such parameter",
		    parm);
	  /* Pretend the parameter was not missing.
	     This gets us to a standard state and minimizes
	     further error messages.  */
	  parmids = chainon (parmids, tree_cons (parm, 0, 0));
	}
    }
  /* Chain the declarations together in the order of the list of
     names.  Store that chain in the function decl, replacing the
     list of names.  Update the current scope to match.  */
  DECL_ARGUMENTS (fndecl) = NULL_TREE;
  /* Find the first parameter that was actually matched to a decl.  */
  for (parm = parmids; parm; parm = TREE_CHAIN (parm))
    if (TREE_PURPOSE (parm))
      break;
  if (parm && TREE_PURPOSE (parm))
    {
      last = TREE_PURPOSE (parm);
      DECL_ARGUMENTS (fndecl) = last;
      for (parm = TREE_CHAIN (parm); parm; parm = TREE_CHAIN (parm))
	if (TREE_PURPOSE (parm))
	  {
	    DECL_CHAIN (last) = TREE_PURPOSE (parm);
	    last = TREE_PURPOSE (parm);
	  }
      DECL_CHAIN (last) = NULL_TREE;
    }
  /* If there was a previous prototype,
     set the DECL_ARG_TYPE of each argument according to
     the type previously specified, and report any mismatches.  */
  if (current_function_prototype_arg_types)
    {
      tree type;
      /* Walk decls and prototype arg types in lockstep; the loop
	 continues while either list still has entries (the prototype
	 list is terminated by void_type_node).  */
      for (parm = DECL_ARGUMENTS (fndecl),
	     type = current_function_prototype_arg_types;
	   parm || (type != NULL_TREE
		    && TREE_VALUE (type) != error_mark_node
		    && TYPE_MAIN_VARIANT (TREE_VALUE (type)) != void_type_node);
	   parm = DECL_CHAIN (parm), type = TREE_CHAIN (type))
	{
	  /* One list ran out before the other: argument count mismatch.  */
	  if (parm == NULL_TREE
	      || type == NULL_TREE
	      || (TREE_VALUE (type) != error_mark_node
		  && TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node))
	    {
	      if (current_function_prototype_built_in)
		warning_at (DECL_SOURCE_LOCATION (fndecl),
			    0, "number of arguments doesn%'t match "
			    "built-in prototype");
	      else
		{
		  /* FIXME diagnostics: This should be the location of
		     FNDECL, but there is bug when a prototype is
		     declared inside function context, but defined
		     outside of it (e.g., gcc.dg/pr15698-2.c).  In
		     which case FNDECL gets the location of the
		     prototype, not the definition.  */
		  error_at (input_location,
			    "number of arguments doesn%'t match prototype");
		  error_at (current_function_prototype_locus,
			    "prototype declaration");
		}
	      break;
	    }
	  /* Type for passing arg must be consistent with that
	     declared for the arg.  ISO C says we take the unqualified
	     type for parameters declared with qualified type.  */
	  if (TREE_TYPE (parm) != error_mark_node
	      && TREE_VALUE (type) != error_mark_node
	      && ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
		   != TYPE_ATOMIC (TREE_VALUE (type)))
		  || !comptypes (TYPE_MAIN_VARIANT (DECL_ARG_TYPE (parm)),
				 TYPE_MAIN_VARIANT (TREE_VALUE (type)))))
	    {
	      if ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
		   == TYPE_ATOMIC (TREE_VALUE (type)))
		  && (TYPE_MAIN_VARIANT (TREE_TYPE (parm))
		      == TYPE_MAIN_VARIANT (TREE_VALUE (type))))
		{
		  /* Adjust argument to match prototype.  E.g. a previous
		     `int foo(float);' prototype causes
		     `int foo(x) float x; {...}' to be treated like
		     `int foo(float x) {...}'.  This is particularly
		     useful for argument types like uid_t.  */
		  DECL_ARG_TYPE (parm) = TREE_TYPE (parm);
		  if (targetm.calls.promote_prototypes (TREE_TYPE (current_function_decl))
		      && INTEGRAL_TYPE_P (TREE_TYPE (parm))
		      && (TYPE_PRECISION (TREE_TYPE (parm))
			  < TYPE_PRECISION (integer_type_node)))
		    DECL_ARG_TYPE (parm)
		      = c_type_promotes_to (TREE_TYPE (parm));
		  /* ??? Is it possible to get here with a
		     built-in prototype or will it always have
		     been diagnosed as conflicting with an
		     old-style definition and discarded?  */
		  if (current_function_prototype_built_in)
		    warning_at (DECL_SOURCE_LOCATION (parm),
				OPT_Wpedantic, "promoted argument %qD "
				"doesn%'t match built-in prototype", parm);
		  else
		    {
		      pedwarn (DECL_SOURCE_LOCATION (parm),
			       OPT_Wpedantic, "promoted argument %qD "
			       "doesn%'t match prototype", parm);
		      pedwarn (current_function_prototype_locus, OPT_Wpedantic,
			       "prototype declaration");
		    }
		}
	      else
		{
		  if (current_function_prototype_built_in)
		    warning_at (DECL_SOURCE_LOCATION (parm),
				0, "argument %qD doesn%'t match "
				"built-in prototype", parm);
		  else
		    {
		      error_at (DECL_SOURCE_LOCATION (parm),
				"argument %qD doesn%'t match prototype", parm);
		      error_at (current_function_prototype_locus,
				"prototype declaration");
		    }
		}
	    }
	}
      TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = NULL_TREE;
    }
  /* Otherwise, create a prototype that would match.  */
  else
    {
      tree actual = NULL_TREE, last = NULL_TREE, type;
      for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
	{
	  type = tree_cons (NULL_TREE, DECL_ARG_TYPE (parm), NULL_TREE);
	  if (last)
	    TREE_CHAIN (last) = type;
	  else
	    actual = type;
	  last = type;
	}
      /* Terminate the synthesized argument-type list with void.  */
      type = tree_cons (NULL_TREE, void_type_node, NULL_TREE);
      if (last)
	TREE_CHAIN (last) = type;
      else
	actual = type;
      /* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES
	 of the type of this function, but we need to avoid having this
	 affect the types of other similarly-typed functions, so we must
	 first force the generation of an identical (but separate) type
	 node for the relevant function type.  The new node we create
	 will be a variant of the main variant of the original function
	 type.  */
      TREE_TYPE (fndecl) = build_variant_type_copy (TREE_TYPE (fndecl));
      TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual;
    }
}
/* Store the parameter declarations described by ARG_INFO into the
   current function declaration, by stashing them where
   store_parm_decls will pick them up and then invoking it.  */
void
store_parm_decls_from (struct c_arg_info *arg_info)
{
  /* store_parm_decls reads (and clears) current_function_arg_info.  */
  current_function_arg_info = arg_info;
  store_parm_decls ();
}
/* walk_tree callback: whenever a LABEL_EXPR's label still has no
   DECL_CONTEXT, set its context to the function decl passed in DATA
   and stop descending into that subtree.  */
static tree
set_labels_context_r (tree *tp, int *walk_subtrees, void *data)
{
  tree node = *tp;

  if (TREE_CODE (node) == LABEL_EXPR
      && DECL_CONTEXT (LABEL_EXPR_LABEL (node)) == NULL_TREE)
    {
      DECL_CONTEXT (LABEL_EXPR_LABEL (node)) = static_cast<tree> (data);
      *walk_subtrees = 0;
    }

  /* Returning NULL_TREE keeps the walk going.  */
  return NULL_TREE;
}
/* Store the parameter declarations into the current function declaration.
   This is called after parsing the parameter declarations, before
   digesting the body of the function.
   For an old-style definition, construct a prototype out of the old-style
   parameter declarations and inject it into the function's type.  */
void
store_parm_decls (void)
{
  tree fndecl = current_function_decl;
  bool proto;
  /* The argument information block for FNDECL.  Consumed here; clear
     the global so it is not reused by accident.  */
  struct c_arg_info *arg_info = current_function_arg_info;
  current_function_arg_info = 0;
  /* True if this definition is written with a prototype.  Note:
     despite C99 6.7.5.3p14, we can *not* treat an empty argument
     list in a function definition as equivalent to (void) -- an
     empty argument list specifies the function has no parameters,
     but only (void) sets up a prototype for future calls.  */
  proto = arg_info->types != 0;
  /* Dispatch to the handler for the definition style in use.  */
  if (proto)
    store_parm_decls_newstyle (fndecl, arg_info);
  else
    store_parm_decls_oldstyle (fndecl, arg_info);
  /* The next call to push_scope will be a function body.  */
  next_is_function_body = true;
  /* Write a record describing this function definition to the prototypes
     file (if requested).  */
  gen_aux_info_record (fndecl, 1, 0, proto);
  /* Initialize the RTL code for the function.  */
  allocate_struct_function (fndecl, false);
  if (warn_unused_local_typedefs)
    cfun->language = ggc_cleared_alloc<language_function> ();
  /* Begin the statement tree for this function.  */
  DECL_SAVED_TREE (fndecl) = push_stmt_list ();
  /* ??? Insert the contents of the pending sizes list into the function
     to be evaluated.  The only reason left to have this is
     void foo(int n, int array[n++])
     because we throw away the array type in favor of a pointer type, and
     thus won't naturally see the SAVE_EXPR containing the increment.  All
     other pending sizes would be handled by gimplify_parameters.  */
  if (arg_info->pending_sizes)
    {
      /* In very special circumstances, e.g. for code like
	   _Atomic int i = 5;
	   void f (int a[i += 2]) {}
	 we need to execute the atomic assignment on function entry.
	 But in this case, it is not just a straight store, it has the
	 op= form, which means that build_atomic_assign has generated
	 gotos, labels, etc.  Because at that time the function decl
	 for F has not been created yet, those labels do not have any
	 function context.  But we have the fndecl now, so update the
	 labels accordingly.  gimplify_expr would crash otherwise.  */
      walk_tree_without_duplicates (&arg_info->pending_sizes,
				    set_labels_context_r, fndecl);
      add_stmt (arg_info->pending_sizes);
    }
}
/* Temporarily push the PARM_DECLs in PARMS into a fresh scope as
   parameters of FNDECL.  Used for c_finish_omp_declare_simd for
   function prototypes.  No diagnostics should be done.  */
void
temp_store_parm_decls (tree fndecl, tree parms)
{
  push_scope ();
  for (tree parm = parms; parm != NULL_TREE; parm = DECL_CHAIN (parm))
    {
      DECL_CONTEXT (parm) = fndecl;
      /* Anonymous parameters get no binding.  */
      if (DECL_NAME (parm) == NULL_TREE)
	continue;
      bind (DECL_NAME (parm), parm, current_scope,
	    /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
    }
}
/* Undo what temp_store_parm_decls did.  */
void
temp_pop_parm_decls (void)
{
  /* Clear all bindings in this temporary scope, so that
     pop_scope doesn't create a BLOCK.  */
  struct c_binding *b = current_scope->bindings;
  current_scope->bindings = NULL;
  for (; b; b = free_binding_and_advance (b))
    {
      /* Only parameters (or error placeholders) can be here, since
	 temp_store_parm_decls bound nothing else.  */
      gcc_assert (TREE_CODE (b->decl) == PARM_DECL
		  || b->decl == error_mark_node);
      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
      /* Pop this binding off the identifier's binding stack, exposing
	 whatever it shadowed.  */
      I_SYMBOL_BINDING (b->id) = b->shadowed;
      /* NOTE(review): b->shadowed->u.type appears to stash the shadowed
	 decl's type; restore it here — confirm against bind().  */
      if (b->shadowed && b->shadowed->u.type)
	TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type;
    }
  pop_scope ();
}
/* Finish up a function declaration and compile that function
   all the way to assembler language output.  Then free the storage
   for the function definition.
   This is called after parsing the body of the function definition.  */
void
finish_function (void)
{
  tree fndecl = current_function_decl;

  if (c_dialect_objc ())
    objc_finish_function ();

  /* If the target promotes prototypes, record the promoted type of
     each small integral parameter in DECL_ARG_TYPE.  */
  if (TREE_CODE (fndecl) == FUNCTION_DECL
      && targetm.calls.promote_prototypes (TREE_TYPE (fndecl)))
    {
      tree args = DECL_ARGUMENTS (fndecl);
      for (; args; args = DECL_CHAIN (args))
	{
	  tree type = TREE_TYPE (args);
	  if (INTEGRAL_TYPE_P (type)
	      && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
	    DECL_ARG_TYPE (args) = c_type_promotes_to (type);
	}
    }

  /* Hook the outermost BLOCK of the body up to the function decl.  */
  if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node)
    BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;

  /* Must mark the RESULT_DECL as being in this function.  */
  if (DECL_RESULT (fndecl) && DECL_RESULT (fndecl) != error_mark_node)
    DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* For C99 hosted main returning int, add the implicit "return 0;"
     at the end of the body.  */
  if (MAIN_NAME_P (DECL_NAME (fndecl)) && flag_hosted
      && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl)))
	 == integer_type_node && flag_isoc99)
    {
      /* Hack.  We don't want the middle-end to warn that this return
	 is unreachable, so we mark its location as special.  Using
	 UNKNOWN_LOCATION has the problem that it gets clobbered in
	 annotate_one_with_locus.  A cleaner solution might be to
	 ensure ! should_carry_locus_p (stmt), but that needs a flag.
      */
      c_finish_return (BUILTINS_LOCATION, integer_zero_node, NULL_TREE);
    }

  /* Tie off the statement tree for this function.  */
  DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl));

  finish_fname_decls ();

  /* Complain if there's just no return statement.  */
  if (warn_return_type
      && TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE
      && !current_function_returns_value && !current_function_returns_null
      /* Don't complain if we are no-return.  */
      && !current_function_returns_abnormally
      /* Don't complain if we are declared noreturn.  */
      && !TREE_THIS_VOLATILE (fndecl)
      /* Don't warn for main().  */
      && !MAIN_NAME_P (DECL_NAME (fndecl))
      /* Or if they didn't actually specify a return type.  */
      && !C_FUNCTION_IMPLICIT_INT (fndecl)
      /* Normally, with -Wreturn-type, flow will complain, but we might
	 optimize out static functions.  */
      && !TREE_PUBLIC (fndecl))
    {
      warning (OPT_Wreturn_type,
	       "no return statement in function returning non-void");
      /* Suppress any later -Wreturn-type diagnostics for this decl.  */
      TREE_NO_WARNING (fndecl) = 1;
    }

  /* Complain about parameters that are only set, but never otherwise used.  */
  if (warn_unused_but_set_parameter)
    {
      tree decl;

      for (decl = DECL_ARGUMENTS (fndecl);
	   decl;
	   decl = DECL_CHAIN (decl))
	if (TREE_USED (decl)
	    && TREE_CODE (decl) == PARM_DECL
	    && !DECL_READ_P (decl)
	    && DECL_NAME (decl)
	    && !DECL_ARTIFICIAL (decl)
	    && !TREE_NO_WARNING (decl))
	  warning_at (DECL_SOURCE_LOCATION (decl),
		      OPT_Wunused_but_set_parameter,
		      "parameter %qD set but not used", decl);
    }

  /* Complain about locally defined typedefs that are not used in this
     function.  */
  maybe_warn_unused_local_typedefs ();

  /* Possibly warn about unused parameters.  */
  if (warn_unused_parameter)
    do_warn_unused_parameter (fndecl);

  /* Store the end of the function, so that we get good line number
     info for the epilogue.  */
  cfun->function_end_locus = input_location;

  /* Finalize the ELF visibility for the function.  */
  c_determine_visibility (fndecl);

  /* For GNU C extern inline functions disregard inline limits.  */
  if (DECL_EXTERNAL (fndecl)
      && DECL_DECLARED_INLINE_P (fndecl)
      && (flag_gnu89_inline
	  || lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (fndecl))))
    DECL_DISREGARD_INLINE_LIMITS (fndecl) = 1;

  /* Genericize before inlining.  Delay genericizing nested functions
     until their parent function is genericized.  Since finalizing
     requires GENERIC, delay that as well.  */
  if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node
      && !undef_nested_function)
    {
      if (!decl_function_context (fndecl))
	{
	  invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl);
	  c_genericize (fndecl);

	  /* ??? Objc emits functions after finalizing the compilation unit.
	     This should be cleaned up later and this conditional removed.  */
	  if (symtab->global_info_ready)
	    {
	      cgraph_node::add_new_function (fndecl, false);
	      return;
	    }
	  cgraph_node::finalize_function (fndecl, false);
	}
      else
	{
	  /* Register this function with cgraph just far enough to get it
	    added to our parent's nested function list.  Handy, since the
	    C front end doesn't have such a list.  */
	  (void) cgraph_node::get_create (fndecl);
	}
    }

  if (!decl_function_context (fndecl))
    undef_nested_function = false;

  /* Release the per-function language data unless c_pop_function_context
     will still need it (it frees or keeps it itself).  */
  if (cfun->language != NULL)
    {
      ggc_free (cfun->language);
      cfun->language = NULL;
    }

  /* We're leaving the context of this function, so zap cfun.
     It's still in DECL_STRUCT_FUNCTION, and we'll restore it in
     tree_rest_of_compilation.  */
  set_cfun (NULL);
  invoke_plugin_callbacks (PLUGIN_FINISH_PARSE_FUNCTION, current_function_decl);
  current_function_decl = NULL;
}
/* Check the declarations given in a for-loop for satisfying the C99
   constraints.  If exactly one such decl is found, return it.  LOC is
   the location of the opening parenthesis of the for loop.  The last
   parameter allows you to control the "for loop initial declarations
   are only allowed in C99 mode".  Normally, you should pass
   flag_isoc99 as that parameter.  But in some cases (Objective-C
   foreach loop, for example) we want to run the checks in this
   function even if not in C99 mode, so we allow the caller to turn
   off the error about not being in C99 mode.
*/
tree
check_for_loop_decls (location_t loc, bool turn_off_iso_c99_error)
{
  struct c_binding *b;
  tree one_decl = NULL_TREE;
  int n_decls = 0;

  if (!turn_off_iso_c99_error)
    {
      /* Emit the -std hint only once per compilation.  */
      static bool hint = true;
      /* If we get here, declarations have been used in a for loop without
	 the C99 for loop scope.  This doesn't make much sense, so don't
	 allow it.  */
      error_at (loc, "%<for%> loop initial declarations "
		"are only allowed in C99 or C11 mode");
      if (hint)
	{
	  inform (loc,
		  "use option -std=c99, -std=gnu99, -std=c11 or -std=gnu11 "
		  "to compile your code");
	  hint = false;
	}
      return NULL_TREE;
    }
  /* C99 subclause 6.8.5 paragraph 3:

       [#3]  The  declaration  part  of  a for statement shall only
       declare identifiers for objects having storage class auto or
       register.

     It isn't clear whether, in this sentence, "identifiers" binds to
     "shall only declare" or to "objects" - that is, whether all identifiers
     declared must be identifiers for objects, or whether the restriction
     only applies to those that are.  (A question on this in comp.std.c
     in November 2000 received no answer.)  We implement the strictest
     interpretation, to avoid creating an extension which later causes
     problems.  */

  for (b = current_scope->bindings; b; b = b->prev)
    {
      tree id = b->id;
      tree decl = b->decl;

      /* Skip anonymous bindings.  */
      if (!id)
	continue;

      switch (TREE_CODE (decl))
	{
	case VAR_DECL:
	  {
	    location_t decl_loc = DECL_SOURCE_LOCATION (decl);
	    if (TREE_STATIC (decl))
	      error_at (decl_loc,
			"declaration of static variable %qD in %<for%> loop "
			"initial declaration", decl);
	    else if (DECL_EXTERNAL (decl))
	      error_at (decl_loc,
			"declaration of %<extern%> variable %qD in %<for%> loop "
			"initial declaration", decl);
	  }
	  break;

	case RECORD_TYPE:
	  error_at (loc,
		    "%<struct %E%> declared in %<for%> loop initial "
		    "declaration", id);
	  break;
	case UNION_TYPE:
	  error_at (loc,
		    "%<union %E%> declared in %<for%> loop initial declaration",
		    id);
	  break;
	case ENUMERAL_TYPE:
	  error_at (loc, "%<enum %E%> declared in %<for%> loop "
		    "initial declaration", id);
	  break;
	default:
	  error_at (loc, "declaration of non-variable "
		    "%qD in %<for%> loop initial declaration", decl);
	}

      /* Count even the erroneous ones, so a lone bad decl is still
	 returned by the caller-visible contract below.  */
      n_decls++;
      one_decl = decl;
    }

  return n_decls == 1 ? one_decl : NULL_TREE;
}
/* Save and reinitialize the variables
   used during compilation of a C function.  */
void
c_push_function_context (void)
{
  struct language_function *p = cfun->language;
  /* cfun->language might have been already allocated by the use of
     -Wunused-local-typedefs.  In that case, just re-use it.  */
  if (p == NULL)
    cfun->language = p = ggc_cleared_alloc<language_function> ();

  /* Save the parser's per-function globals into P so they can be
     restored by c_pop_function_context.  */
  p->base.x_stmt_tree = c_stmt_tree;
  /* Copy the statement list so the nested function builds its own.  */
  c_stmt_tree.x_cur_stmt_list = vec_safe_copy (c_stmt_tree.x_cur_stmt_list);
  p->x_break_label = c_break_label;
  p->x_cont_label = c_cont_label;
  p->x_switch_stack = c_switch_stack;
  p->arg_info = current_function_arg_info;
  p->returns_value = current_function_returns_value;
  p->returns_null = current_function_returns_null;
  p->returns_abnormally = current_function_returns_abnormally;
  p->warn_about_return_type = warn_about_return_type;

  /* Also save the middle-end's per-function state.  */
  push_function_context ();
}
/* Restore the variables used during compilation of a C function.  */
void
c_pop_function_context (void)
{
  struct language_function *p;

  /* Restore the middle-end state first; this re-establishes cfun.  */
  pop_function_context ();
  p = cfun->language;

  /* When -Wunused-local-typedefs is in effect, cfun->languages is
     used to store data throughout the life time of the current cfun,
     So don't deallocate it.  */
  if (!warn_unused_local_typedefs)
    cfun->language = NULL;

  if (DECL_STRUCT_FUNCTION (current_function_decl) == 0
      && DECL_SAVED_TREE (current_function_decl) == NULL_TREE)
    {
      /* Stop pointing to the local nodes about to be freed.  */
      /* But DECL_INITIAL must remain nonzero so we know this
	 was an actual function definition.  */
      DECL_INITIAL (current_function_decl) = error_mark_node;
      DECL_ARGUMENTS (current_function_decl) = NULL_TREE;
    }

  /* Restore the parser's per-function globals saved by
     c_push_function_context.  */
  c_stmt_tree = p->base.x_stmt_tree;
  /* Drop P's reference to the statement list now that it is live in
     the global again.  */
  p->base.x_stmt_tree.x_cur_stmt_list = NULL;
  c_break_label = p->x_break_label;
  c_cont_label = p->x_cont_label;
  c_switch_stack = p->x_switch_stack;
  current_function_arg_info = p->arg_info;
  current_function_returns_value = p->returns_value;
  current_function_returns_null = p->returns_null;
  current_function_returns_abnormally = p->returns_abnormally;
  warn_about_return_type = p->warn_about_return_type;
}
/* The functions below are required for functionality of doing
function at once processing in the C front end. Currently these
functions are not called from anywhere in the C front end, but as
these changes continue, that will change. */
/* Returns the stmt_tree (if any) to which statements are currently
   being added.  If there is no active statement-tree, NULL is
   returned.  */
stmt_tree
current_stmt_tree (void)
{
  /* The C front end keeps a single global statement tree.  */
  return &c_stmt_tree;
}
/* Return the declaration bound to T at file or external scope, or
   NULL_TREE if T has no such global binding.  */
tree
identifier_global_value (tree t)
{
  /* Walk down the shadow stack of bindings for this identifier until
     a file-scope or external-scope binding is reached.  */
  struct c_binding *binding = I_SYMBOL_BINDING (t);
  while (binding != NULL)
    {
      if (B_IN_FILE_SCOPE (binding) || B_IN_EXTERNAL_SCOPE (binding))
	return binding->decl;
      binding = binding->shadowed;
    }
  return NULL_TREE;
}
/* In C, the only C-linkage public declaration is at file scope.  */
tree
c_linkage_bindings (tree name)
{
  /* Delegate to identifier_global_value, which searches the file and
     external scopes for NAME.  */
  return identifier_global_value (name);
}
/* Record a builtin type for C. If NAME is non-NULL, it is the name used;
otherwise the name is found in ridpointers from RID_INDEX. */
void
record_builtin_type (enum rid rid_index, const char *name, tree type)
{
tree id, decl;
if (name == 0)
id = ridpointers[(int) rid_index];
else
id = get_identifier (name);
decl = build_decl (UNKNOWN_LOCATION, TYPE_DECL, id, type);
pushdecl (decl);
if (debug_hooks->type_decl)
debug_hooks->type_decl (decl, false);
}
/* Build the void_list_node (void_type_node having been created).  */
tree
build_void_list_node (void)
{
  /* A one-element TREE_LIST whose value is the void type.  */
  return build_tree_list (NULL_TREE, void_type_node);
}
/* Allocate and fill in a c_parm from SPECS, ATTRS, DECLARATOR and LOC.  */
struct c_parm *
build_c_parm (struct c_declspecs *specs, tree attrs,
	      struct c_declarator *declarator,
	      location_t loc)
{
  struct c_parm *parm = XOBNEW (&parser_obstack, struct c_parm);
  parm->loc = loc;
  parm->declarator = declarator;
  parm->attrs = attrs;
  parm->specs = specs;
  return parm;
}
/* Wrap TARGET in a cdk_attrs declarator carrying the attribute list
   ATTRS.  */
struct c_declarator *
build_attrs_declarator (tree attrs, struct c_declarator *target)
{
  struct c_declarator *decl = XOBNEW (&parser_obstack, struct c_declarator);
  decl->u.attrs = attrs;
  decl->declarator = target;
  decl->kind = cdk_attrs;
  return decl;
}
/* Wrap TARGET in a cdk_function declarator whose arguments are
   described by ARGS.  */
struct c_declarator *
build_function_declarator (struct c_arg_info *args,
			   struct c_declarator *target)
{
  struct c_declarator *decl = XOBNEW (&parser_obstack, struct c_declarator);
  decl->u.arg_info = args;
  decl->declarator = target;
  decl->kind = cdk_function;
  return decl;
}
/* Make a cdk_id declarator for IDENT; IDENT may be NULL_TREE for an
   abstract declarator.  */
struct c_declarator *
build_id_declarator (tree ident)
{
  struct c_declarator *decl = XOBNEW (&parser_obstack, struct c_declarator);
  decl->kind = cdk_id;
  decl->declarator = 0;
  decl->u.id = ident;
  /* Start with the current input location; callers may overwrite this
     with a more precise one.  */
  decl->id_loc = input_location;
  return decl;
}
/* Return something to represent absolute declarators containing a *.
TARGET is the absolute declarator that the * contains.
TYPE_QUALS_ATTRS is a structure for type qualifiers and attributes
to apply to the pointer type. */
struct c_declarator *
make_pointer_declarator (struct c_declspecs *type_quals_attrs,
struct c_declarator *target)
{
tree attrs;
int quals = 0;
struct c_declarator *itarget = target;
struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
if (type_quals_attrs)
{
attrs = type_quals_attrs->attrs;
quals = quals_from_declspecs (type_quals_attrs);
if (attrs != NULL_TREE)
itarget = build_attrs_declarator (attrs, target);
}
ret->kind = cdk_pointer;
ret->declarator = itarget;
ret->u.pointer_quals = quals;
return ret;
}
/* Allocate an empty c_declspecs: everything zeroed, then the handful
   of fields whose "empty" value is not zero set explicitly.  */
struct c_declspecs *
build_null_declspecs (void)
{
  struct c_declspecs *specs = XOBNEW (&parser_obstack, struct c_declspecs);
  memset (specs, 0, sizeof *specs);
  specs->storage_class = csc_none;
  specs->typespec_word = cts_none;
  specs->typespec_kind = ctsk_none;
  specs->address_space = ADDR_SPACE_GENERIC;
  specs->expr_const_operands = true;
  specs->align_log = -1;
  return specs;
}
/* Record address space AS in the declaration specifiers SPECS,
   diagnosing a clash with any different address space already there.
   Returns SPECS.  */
struct c_declspecs *
declspecs_add_addrspace (source_location location,
			 struct c_declspecs *specs, addr_space_t as)
{
  specs->non_sc_seen_p = true;
  specs->declspecs_seen_p = true;

  /* Accept AS when no specific address space is set yet, or when it
     merely repeats the one already recorded.  */
  if (ADDR_SPACE_GENERIC_P (specs->address_space)
      || specs->address_space == as)
    {
      specs->address_space = as;
      specs->locations[cdw_address_space] = location;
    }
  else
    error ("incompatible address space qualifiers %qs and %qs",
	   c_addr_space_name (as),
	   c_addr_space_name (specs->address_space));

  return specs;
}
/* Add the type qualifier QUAL to the declaration specifiers SPECS,
   returning SPECS.  */
struct c_declspecs *
declspecs_add_qual (source_location loc,
		    struct c_declspecs *specs, tree qual)
{
  enum rid i;
  bool dupe = false;
  specs->non_sc_seen_p = true;
  specs->declspecs_seen_p = true;
  gcc_assert (TREE_CODE (qual) == IDENTIFIER_NODE
	      && C_IS_RESERVED_WORD (qual));
  i = C_RID_CODE (qual);
  /* Location of a previous occurrence of this qualifier, if any; read
     before the slot is overwritten so the duplicate warning below can
     inspect it.  */
  location_t prev_loc = UNKNOWN_LOCATION;
  switch (i)
    {
    case RID_CONST:
      dupe = specs->const_p;
      specs->const_p = true;
      prev_loc = specs->locations[cdw_const];
      specs->locations[cdw_const] = loc;
      break;
    case RID_VOLATILE:
      dupe = specs->volatile_p;
      specs->volatile_p = true;
      prev_loc = specs->locations[cdw_volatile];
      specs->locations[cdw_volatile] = loc;
      break;
    case RID_RESTRICT:
      dupe = specs->restrict_p;
      specs->restrict_p = true;
      prev_loc = specs->locations[cdw_restrict];
      specs->locations[cdw_restrict] = loc;
      break;
    case RID_ATOMIC:
      dupe = specs->atomic_p;
      specs->atomic_p = true;
      prev_loc = specs->locations[cdw_atomic];
      specs->locations[cdw_atomic] = loc;
      break;
    default:
      gcc_unreachable ();
    }
  if (dupe)
    {
      /* pedwarn_c90 fires for -pedantic pre-C99; otherwise fall back to
	 -Wduplicate-decl-specifier, suppressed when either occurrence
	 is a reserved location or comes from a macro expansion.  */
      bool warned = pedwarn_c90 (loc, OPT_Wpedantic,
				 "duplicate %qE declaration specifier", qual);
      if (!warned
	  && warn_duplicate_decl_specifier
	  && prev_loc >= RESERVED_LOCATION_COUNT
	  && !from_macro_expansion_at (prev_loc)
	  && !from_macro_expansion_at (loc))
	warning_at (loc, OPT_Wduplicate_decl_specifier,
		    "duplicate %qE declaration specifier", qual);
    }
  return specs;
}
/* Add the type specifier TYPE to the declaration specifiers SPECS,
returning SPECS. */
struct c_declspecs *
declspecs_add_type (location_t loc, struct c_declspecs *specs,
struct c_typespec spec)
{
tree type = spec.spec;
specs->non_sc_seen_p = true;
specs->declspecs_seen_p = true;
specs->typespec_kind = spec.kind;
if (TREE_DEPRECATED (type))
specs->deprecated_p = true;
/* Handle type specifier keywords. */
if (TREE_CODE (type) == IDENTIFIER_NODE
&& C_IS_RESERVED_WORD (type)
&& C_RID_CODE (type) != RID_CXX_COMPAT_WARN)
{
enum rid i = C_RID_CODE (type);
if (specs->type)
{
error_at (loc, "two or more data types in declaration specifiers");
return specs;
}
if ((int) i <= (int) RID_LAST_MODIFIER)
{
/* "long", "short", "signed", "unsigned", "_Complex" or "_Sat". */
bool dupe = false;
switch (i)
{
case RID_LONG:
if (specs->long_long_p)
{
error_at (loc, "%<long long long%> is too long for GCC");
break;
}
if (specs->long_p)
{
if (specs->typespec_word == cts_double)
{
error_at (loc,
("both %<long long%> and %<double%> in "
"declaration specifiers"));
break;
}
pedwarn_c90 (loc, OPT_Wlong_long,
"ISO C90 does not support %<long long%>");
specs->long_long_p = 1;
specs->locations[cdw_long_long] = loc;
break;
}
if (specs->short_p)
error_at (loc,
("both %<long%> and %<short%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<long%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<long%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_int_n)
error_at (loc,
("both %<long%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<long%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_char)
error_at (loc,
("both %<long%> and %<char%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<long%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<long%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<long%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<long%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<long%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->long_p = true;
specs->locations[cdw_long] = loc;
}
break;
case RID_SHORT:
dupe = specs->short_p;
if (specs->long_p)
error_at (loc,
("both %<long%> and %<short%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<short%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<short%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_int_n)
error_at (loc,
("both %<short%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<short%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_char)
error_at (loc,
("both %<short%> and %<char%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<short%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<short%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<short%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<short%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<short%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<short%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->short_p = true;
specs->locations[cdw_short] = loc;
}
break;
case RID_SIGNED:
dupe = specs->signed_p;
if (specs->unsigned_p)
error_at (loc,
("both %<signed%> and %<unsigned%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<signed%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<signed%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<signed%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<signed%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<signed%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<signed%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<signed%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<signed%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<signed%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->signed_p = true;
specs->locations[cdw_signed] = loc;
}
break;
case RID_UNSIGNED:
dupe = specs->unsigned_p;
if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<unsigned%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<unsigned%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<unsigned%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<unsigned%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<unsigned%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<unsigned%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<unsigned%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<unsigned%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<unsigned%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<unsigned%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->unsigned_p = true;
specs->locations[cdw_unsigned] = loc;
}
break;
case RID_COMPLEX:
dupe = specs->complex_p;
if (!in_system_header_at (loc))
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support complex types");
if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<complex%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<complex%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<complex%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<complex%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<complex%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<complex%> and %<_Decimal128%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_fract)
error_at (loc,
("both %<complex%> and %<_Fract%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_accum)
error_at (loc,
("both %<complex%> and %<_Accum%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<complex%> and %<_Sat%> in "
"declaration specifiers"));
else
{
specs->complex_p = true;
specs->locations[cdw_complex] = loc;
}
break;
case RID_SAT:
dupe = specs->saturating_p;
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support saturating types");
if (specs->typespec_word == cts_int_n)
{
error_at (loc,
("both %<_Sat%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
}
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<_Sat%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<_Sat%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<_Sat%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_char)
error_at (loc,
("both %<_Sat%> and %<char%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_int)
error_at (loc,
("both %<_Sat%> and %<int%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<_Sat%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<_Sat%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_floatn_nx)
error_at (loc,
("both %<_Sat%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<_Sat%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<_Sat%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<_Sat%> and %<_Decimal128%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<_Sat%> and %<complex%> in "
"declaration specifiers"));
else
{
specs->saturating_p = true;
specs->locations[cdw_saturating] = loc;
}
break;
default:
gcc_unreachable ();
}
if (dupe)
error_at (loc, "duplicate %qE", type);
return specs;
}
else
{
/* "void", "_Bool", "char", "int", "float", "double",
"_FloatN", "_FloatNx", "_Decimal32", "__intN",
"_Decimal64", "_Decimal128", "_Fract", "_Accum" or
"__auto_type". */
if (specs->typespec_word != cts_none)
{
error_at (loc,
"two or more data types in declaration specifiers");
return specs;
}
switch (i)
{
case RID_AUTO_TYPE:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<__auto_type%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_auto_type;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_INT_N_0:
case RID_INT_N_1:
case RID_INT_N_2:
case RID_INT_N_3:
specs->int_n_idx = i - RID_INT_N_0;
if (!in_system_header_at (input_location))
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support %<__int%d%> types",
int_n_data[specs->int_n_idx].bitsize);
if (specs->long_p)
error_at (loc,
("both %<__int%d%> and %<long%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->short_p)
error_at (loc,
("both %<__int%d%> and %<short%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (! int_n_enabled_p[specs->int_n_idx])
{
specs->typespec_word = cts_int_n;
error_at (loc,
"%<__int%d%> is not supported on this target",
int_n_data[specs->int_n_idx].bitsize);
}
else
{
specs->typespec_word = cts_int_n;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_VOID:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<void%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<void%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<void%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<void%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<void%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<void%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_void;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_BOOL:
if (!in_system_header_at (loc))
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support boolean types");
if (specs->long_p)
error_at (loc,
("both %<long%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<_Bool%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_bool;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_CHAR:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<char%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<char%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<char%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_char;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_INT:
if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<int%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_int;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_FLOAT:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<float%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<float%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<float%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<float%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<float%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_float;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_DOUBLE:
if (specs->long_long_p)
error_at (loc,
("both %<long long%> and %<double%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<double%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<double%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<double%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<double%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_double;
specs->locations[cdw_typespec] = loc;
}
return specs;
CASE_RID_FLOATN_NX:
specs->floatn_nx_idx = i - RID_FLOATN_NX_FIRST;
if (!in_system_header_at (input_location))
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support the %<_Float%d%s%> type",
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
if (specs->long_p)
error_at (loc,
("both %<long%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<_Float%d%s%> in "
"declaration specifiers"),
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
else if (FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx) == NULL_TREE)
{
specs->typespec_word = cts_floatn_nx;
error_at (loc,
"%<_Float%d%s%> is not supported on this target",
floatn_nx_types[specs->floatn_nx_idx].n,
(floatn_nx_types[specs->floatn_nx_idx].extended
? "x"
: ""));
}
else
{
specs->typespec_word = cts_floatn_nx;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_DFLOAT32:
case RID_DFLOAT64:
case RID_DFLOAT128:
{
const char *str;
if (i == RID_DFLOAT32)
str = "_Decimal32";
else if (i == RID_DFLOAT64)
str = "_Decimal64";
else
str = "_Decimal128";
if (specs->long_long_p)
error_at (loc,
("both %<long long%> and %qs in "
"declaration specifiers"),
str);
if (specs->long_p)
error_at (loc,
("both %<long%> and %qs in "
"declaration specifiers"),
str);
else if (specs->short_p)
error_at (loc,
("both %<short%> and %qs in "
"declaration specifiers"),
str);
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %qs in "
"declaration specifiers"),
str);
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %qs in "
"declaration specifiers"),
str);
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %qs in "
"declaration specifiers"),
str);
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %qs in "
"declaration specifiers"),
str);
else if (i == RID_DFLOAT32)
specs->typespec_word = cts_dfloat32;
else if (i == RID_DFLOAT64)
specs->typespec_word = cts_dfloat64;
else
specs->typespec_word = cts_dfloat128;
specs->locations[cdw_typespec] = loc;
}
if (!targetm.decimal_float_supported_p ())
error_at (loc,
("decimal floating point not supported "
"for this target"));
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support decimal floating point");
return specs;
case RID_FRACT:
case RID_ACCUM:
{
const char *str;
if (i == RID_FRACT)
str = "_Fract";
else
str = "_Accum";
if (specs->complex_p)
error_at (loc,
("both %<complex%> and %qs in "
"declaration specifiers"),
str);
else if (i == RID_FRACT)
specs->typespec_word = cts_fract;
else
specs->typespec_word = cts_accum;
specs->locations[cdw_typespec] = loc;
}
if (!targetm.fixed_point_supported_p ())
error_at (loc,
"fixed-point types not supported for this target");
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support fixed-point types");
return specs;
default:
/* ObjC reserved word "id", handled below. */
break;
}
}
}
/* Now we have a typedef (a TYPE_DECL node), an identifier (some
form of ObjC type, cases such as "int" and "long" being handled
above), a TYPE (struct, union, enum and typeof specifiers) or an
ERROR_MARK. In none of these cases may there have previously
been any type specifiers. */
if (specs->type || specs->typespec_word != cts_none
|| specs->long_p || specs->short_p || specs->signed_p
|| specs->unsigned_p || specs->complex_p)
error_at (loc, "two or more data types in declaration specifiers");
else if (TREE_CODE (type) == TYPE_DECL)
{
if (TREE_TYPE (type) == error_mark_node)
; /* Allow the type to default to int to avoid cascading errors. */
else
{
specs->type = TREE_TYPE (type);
specs->decl_attr = DECL_ATTRIBUTES (type);
specs->typedef_p = true;
specs->explicit_signed_p = C_TYPEDEF_EXPLICITLY_SIGNED (type);
specs->locations[cdw_typedef] = loc;
/* If this typedef name is defined in a struct, then a C++
lookup would return a different value. */
if (warn_cxx_compat
&& I_SYMBOL_BINDING (DECL_NAME (type))->in_struct)
warning_at (loc, OPT_Wc___compat,
"C++ lookup of %qD would return a field, not a type",
type);
/* If we are parsing a struct, record that a struct field
used a typedef. */
if (warn_cxx_compat && struct_parse_info != NULL)
struct_parse_info->typedefs_seen.safe_push (type);
}
}
else if (TREE_CODE (type) == IDENTIFIER_NODE)
{
tree t = lookup_name (type);
if (!t || TREE_CODE (t) != TYPE_DECL)
error_at (loc, "%qE fails to be a typedef or built in type", type);
else if (TREE_TYPE (t) == error_mark_node)
;
else
{
specs->type = TREE_TYPE (t);
specs->locations[cdw_typespec] = loc;
}
}
else
{
if (TREE_CODE (type) != ERROR_MARK && spec.kind == ctsk_typeof)
{
specs->typedef_p = true;
specs->locations[cdw_typedef] = loc;
if (spec.expr)
{
if (specs->expr)
specs->expr = build2 (COMPOUND_EXPR, TREE_TYPE (spec.expr),
specs->expr, spec.expr);
else
specs->expr = spec.expr;
specs->expr_const_operands &= spec.expr_const_operands;
}
}
specs->type = type;
}
return specs;
}
/* Add the storage class specifier or function specifier SCSPEC to the
   declaration specifiers SPECS, returning SPECS.  Diagnoses duplicate
   or conflicting storage classes and the interaction of thread-local
   specifiers with storage classes.  */
struct c_declspecs *
declspecs_add_scspec (source_location loc,
		      struct c_declspecs *specs,
		      tree scspec)
{
  enum rid i;
  enum c_storage_class n = csc_none;
  bool dupe = false;
  specs->declspecs_seen_p = true;
  gcc_assert (TREE_CODE (scspec) == IDENTIFIER_NODE
	      && C_IS_RESERVED_WORD (scspec));
  i = C_RID_CODE (scspec);
  /* Old-style C expects storage classes first; warn if other
     declaration specifiers have already been seen.  */
  if (specs->non_sc_seen_p)
    warning (OPT_Wold_style_declaration,
	     "%qE is not at beginning of declaration", scspec);
  switch (i)
    {
    case RID_INLINE:
      /* C99 permits duplicate inline.  Although of doubtful utility,
	 it seems simplest to permit it in gnu89 mode as well, as
	 there is also little utility in maintaining this as a
	 difference between gnu89 and C99 inline.  */
      dupe = false;
      specs->inline_p = true;
      specs->locations[cdw_inline] = loc;
      break;
    case RID_NORETURN:
      /* Duplicate _Noreturn is permitted.  */
      dupe = false;
      specs->noreturn_p = true;
      specs->locations[cdw_noreturn] = loc;
      break;
    case RID_THREAD:
      /* _Thread_local / __thread: only valid with extern/static (or
	 no storage class); reject auto, register and typedef.  */
      dupe = specs->thread_p;
      if (specs->storage_class == csc_auto)
	error ("%qE used with %<auto%>", scspec);
      else if (specs->storage_class == csc_register)
	error ("%qE used with %<register%>", scspec);
      else if (specs->storage_class == csc_typedef)
	error ("%qE used with %<typedef%>", scspec);
      else
	{
	  specs->thread_p = true;
	  specs->thread_gnu_p = (strcmp (IDENTIFIER_POINTER (scspec),
					 "__thread") == 0);
	  /* A diagnostic is not required for the use of this
	     identifier in the implementation namespace; only diagnose
	     it for the C11 spelling because of existing code using
	     the other spelling.  */
	  if (!specs->thread_gnu_p)
	    {
	      if (flag_isoc99)
		pedwarn_c99 (loc, OPT_Wpedantic,
			     "ISO C99 does not support %qE", scspec);
	      else
		pedwarn_c99 (loc, OPT_Wpedantic,
			     "ISO C90 does not support %qE", scspec);
	    }
	  specs->locations[cdw_thread] = loc;
	}
      break;
    case RID_AUTO:
      n = csc_auto;
      break;
    case RID_EXTERN:
      n = csc_extern;
      /* Diagnose "__thread extern".  */
      if (specs->thread_p && specs->thread_gnu_p)
	error ("%<__thread%> before %<extern%>");
      break;
    case RID_REGISTER:
      n = csc_register;
      break;
    case RID_STATIC:
      n = csc_static;
      /* Diagnose "__thread static".  */
      if (specs->thread_p && specs->thread_gnu_p)
	error ("%<__thread%> before %<static%>");
      break;
    case RID_TYPEDEF:
      n = csc_typedef;
      break;
    default:
      gcc_unreachable ();
    }
  /* Seeing the same storage class twice is a duplicate.  */
  if (n != csc_none && n == specs->storage_class)
    dupe = true;
  if (dupe)
    {
      if (i == RID_THREAD)
	error ("duplicate %<_Thread_local%> or %<__thread%>");
      else
	error ("duplicate %qE", scspec);
    }
  if (n != csc_none)
    {
      if (specs->storage_class != csc_none && n != specs->storage_class)
	{
	  error ("multiple storage classes in declaration specifiers");
	}
      else
	{
	  specs->storage_class = n;
	  specs->locations[cdw_storage_class] = loc;
	  /* A thread-local specifier seen earlier is only valid when
	     combined with extern or static; drop it after an error.  */
	  if (n != csc_extern && n != csc_static && specs->thread_p)
	    {
	      error ("%qs used with %qE",
		     specs->thread_gnu_p ? "__thread" : "_Thread_local",
		     scspec);
	      specs->thread_p = false;
	    }
	}
    }
  return specs;
}
/* Chain the attributes ATTRS onto the declaration specifiers SPECS,
   recording LOC as the attribute location, and return SPECS.  */
struct c_declspecs *
declspecs_add_attrs (source_location loc, struct c_declspecs *specs, tree attrs)
{
  specs->declspecs_seen_p = true;
  specs->locations[cdw_attributes] = loc;
  /* New attributes go in front of any previously accumulated ones.  */
  specs->attrs = chainon (attrs, specs->attrs);
  return specs;
}
/* Add an _Alignas specifier (expression ALIGN, or type whose
   alignment is ALIGN) to the declaration specifiers SPECS, returning
   SPECS.  Multiple _Alignas specifiers combine by keeping the
   strictest (largest) alignment.  */
struct c_declspecs *
declspecs_add_alignas (source_location loc,
		       struct c_declspecs *specs, tree align)
{
  specs->alignas_p = true;
  specs->locations[cdw_alignas] = loc;
  if (align != error_mark_node)
    {
      /* check_user_alignment diagnoses invalid alignments and returns
	 the base-2 logarithm of the requested alignment.  */
      int new_align_log = check_user_alignment (align, true);
      if (new_align_log > specs->align_log)
	specs->align_log = new_align_log;
    }
  return specs;
}
/* Combine "long", "short", "signed", "unsigned" and "_Complex" type
   specifiers with any other type specifier to determine the resulting
   type.  This is where ISO C checks on complex types are made, since
   "_Complex long" is a prefix of the valid ISO C type "_Complex long
   double".  */
struct c_declspecs *
finish_declspecs (struct c_declspecs *specs)
{
  /* If a type was specified as a whole, we have no modifiers and are
     done.  */
  if (specs->type != NULL_TREE)
    {
      gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p
		  && !specs->complex_p);
      /* Set a dummy type so later stages have something to work with
	 even after an erroneous declaration.  */
      if (TREE_CODE (specs->type) == ERROR_MARK)
	specs->type = integer_type_node;
      return specs;
    }
  /* If none of "void", "_Bool", "char", "int", "float" or "double"
     has been specified, treat it as "int" unless "_Complex" is
     present and there are no other specifiers.  If we just have
     "_Complex", it is equivalent to "_Complex double", but e.g.
     "_Complex short" is equivalent to "_Complex short int".  */
  if (specs->typespec_word == cts_none)
    {
      if (specs->saturating_p)
	{
	  /* "_Sat" alone is invalid; recover by pretending "_Fract"
	     was given.  */
	  error_at (specs->locations[cdw_saturating],
		    "%<_Sat%> is used without %<_Fract%> or %<_Accum%>");
	  if (!targetm.fixed_point_supported_p ())
	    error_at (specs->locations[cdw_saturating],
		      "fixed-point types not supported for this target");
	  specs->typespec_word = cts_fract;
	}
      else if (specs->long_p || specs->short_p
	       || specs->signed_p || specs->unsigned_p)
	{
	  specs->typespec_word = cts_int;
	}
      else if (specs->complex_p)
	{
	  specs->typespec_word = cts_double;
	  pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
		   "ISO C does not support plain %<complex%> meaning "
		   "%<double complex%>");
	}
      else
	{
	  specs->typespec_word = cts_int;
	  specs->default_int_p = true;
	  /* We don't diagnose this here because grokdeclarator will
	     give more specific diagnostics according to whether it is
	     a function definition.  */
	}
    }
  /* If "signed" was specified, record this to distinguish "int" and
     "signed int" in the case of a bit-field with
     -funsigned-bitfields.  */
  specs->explicit_signed_p = specs->signed_p;
  /* Now compute the actual type.  Each case asserts the modifier
     combinations that declspecs_add_type should already have
     rejected.  */
  switch (specs->typespec_word)
    {
    case cts_auto_type:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p
		  && !specs->complex_p);
      /* Type to be filled in later.  */
      break;
    case cts_void:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p
		  && !specs->complex_p);
      specs->type = void_type_node;
      break;
    case cts_bool:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p
		  && !specs->complex_p);
      specs->type = boolean_type_node;
      break;
    case cts_char:
      gcc_assert (!specs->long_p && !specs->short_p);
      gcc_assert (!(specs->signed_p && specs->unsigned_p));
      if (specs->signed_p)
	specs->type = signed_char_type_node;
      else if (specs->unsigned_p)
	specs->type = unsigned_char_type_node;
      else
	specs->type = char_type_node;
      if (specs->complex_p)
	{
	  pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
		   "ISO C does not support complex integer types");
	  specs->type = build_complex_type (specs->type);
	}
      break;
    case cts_int_n:
      /* __intN types; fall back to int if the target lacks them.  */
      gcc_assert (!specs->long_p && !specs->short_p && !specs->long_long_p);
      gcc_assert (!(specs->signed_p && specs->unsigned_p));
      if (! int_n_enabled_p[specs->int_n_idx])
	specs->type = integer_type_node;
      else
	specs->type = (specs->unsigned_p
		       ? int_n_trees[specs->int_n_idx].unsigned_type
		       : int_n_trees[specs->int_n_idx].signed_type);
      if (specs->complex_p)
	{
	  pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
		   "ISO C does not support complex integer types");
	  specs->type = build_complex_type (specs->type);
	}
      break;
    case cts_int:
      gcc_assert (!(specs->long_p && specs->short_p));
      gcc_assert (!(specs->signed_p && specs->unsigned_p));
      if (specs->long_long_p)
	specs->type = (specs->unsigned_p
		       ? long_long_unsigned_type_node
		       : long_long_integer_type_node);
      else if (specs->long_p)
	specs->type = (specs->unsigned_p
		       ? long_unsigned_type_node
		       : long_integer_type_node);
      else if (specs->short_p)
	specs->type = (specs->unsigned_p
		       ? short_unsigned_type_node
		       : short_integer_type_node);
      else
	specs->type = (specs->unsigned_p
		       ? unsigned_type_node
		       : integer_type_node);
      if (specs->complex_p)
	{
	  pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
		   "ISO C does not support complex integer types");
	  specs->type = build_complex_type (specs->type);
	}
      break;
    case cts_float:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p);
      specs->type = (specs->complex_p
		     ? complex_float_type_node
		     : float_type_node);
      break;
    case cts_double:
      gcc_assert (!specs->long_long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p);
      if (specs->long_p)
	{
	  specs->type = (specs->complex_p
			 ? complex_long_double_type_node
			 : long_double_type_node);
	}
      else
	{
	  specs->type = (specs->complex_p
			 ? complex_double_type_node
			 : double_type_node);
	}
      break;
    case cts_floatn_nx:
      /* _FloatN / _FloatNx; fall back to int when the target does not
	 provide the type (an error was given when it was parsed).  */
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p);
      if (FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx) == NULL_TREE)
	specs->type = integer_type_node;
      else if (specs->complex_p)
	specs->type = COMPLEX_FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx);
      else
	specs->type = FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx);
      break;
    case cts_dfloat32:
    case cts_dfloat64:
    case cts_dfloat128:
      gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p && !specs->complex_p);
      if (specs->typespec_word == cts_dfloat32)
	specs->type = dfloat32_type_node;
      else if (specs->typespec_word == cts_dfloat64)
	specs->type = dfloat64_type_node;
      else
	specs->type = dfloat128_type_node;
      break;
    case cts_fract:
      gcc_assert (!specs->complex_p);
      if (!targetm.fixed_point_supported_p ())
	specs->type = integer_type_node;
      else if (specs->saturating_p)
	{
	  if (specs->long_long_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_long_long_fract_type_node
			  : sat_long_long_fract_type_node;
	  else if (specs->long_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_long_fract_type_node
			  : sat_long_fract_type_node;
	  else if (specs->short_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_short_fract_type_node
			  : sat_short_fract_type_node;
	  else
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_fract_type_node
			  : sat_fract_type_node;
	}
      else
	{
	  if (specs->long_long_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_long_long_fract_type_node
			  : long_long_fract_type_node;
	  else if (specs->long_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_long_fract_type_node
			  : long_fract_type_node;
	  else if (specs->short_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_short_fract_type_node
			  : short_fract_type_node;
	  else
	    specs->type = specs->unsigned_p
			  ? unsigned_fract_type_node
			  : fract_type_node;
	}
      break;
    case cts_accum:
      gcc_assert (!specs->complex_p);
      if (!targetm.fixed_point_supported_p ())
	specs->type = integer_type_node;
      else if (specs->saturating_p)
	{
	  if (specs->long_long_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_long_long_accum_type_node
			  : sat_long_long_accum_type_node;
	  else if (specs->long_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_long_accum_type_node
			  : sat_long_accum_type_node;
	  else if (specs->short_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_short_accum_type_node
			  : sat_short_accum_type_node;
	  else
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_accum_type_node
			  : sat_accum_type_node;
	}
      else
	{
	  if (specs->long_long_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_long_long_accum_type_node
			  : long_long_accum_type_node;
	  else if (specs->long_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_long_accum_type_node
			  : long_accum_type_node;
	  else if (specs->short_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_short_accum_type_node
			  : short_accum_type_node;
	  else
	    specs->type = specs->unsigned_p
			  ? unsigned_accum_type_node
			  : accum_type_node;
	}
      break;
    default:
      gcc_unreachable ();
    }
  return specs;
}
/* Perform final processing on one file scope's declarations (or the
   external scope's declarations), GLOBALS.  */
static void
c_write_global_declarations_1 (tree globals)
{
  /* First pass, in source order: diagnose static functions that were
     used (in the C standard's sense) but never defined, and run the
     first wrap-up phase on every declaration.  */
  for (tree decl = globals; decl; decl = DECL_CHAIN (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL
	  && DECL_INITIAL (decl) == NULL_TREE
	  && DECL_EXTERNAL (decl)
	  && !TREE_PUBLIC (decl))
	{
	  if (C_DECL_USED (decl))
	    {
	      /* Set TREE_NO_WARNING so that check_global_declaration
		 doesn't repeat the check.  */
	      pedwarn (input_location, 0, "%q+F used but never defined", decl);
	      TREE_NO_WARNING (decl) = 1;
	    }
	  /* For -Wunused-function warn about unused static prototypes.  */
	  else if (warn_unused_function
		   && !DECL_ARTIFICIAL (decl)
		   && !TREE_NO_WARNING (decl))
	    {
	      warning (OPT_Wunused_function,
		       "%q+F declared %<static%> but never defined", decl);
	      TREE_NO_WARNING (decl) = 1;
	    }
	}
      wrapup_global_declaration_1 (decl);
    }

  /* Second pass: keep running the second wrap-up phase until it
     reaches a fixed point.  */
  bool changed;
  do
    {
      changed = false;
      for (tree decl = globals; decl; decl = DECL_CHAIN (decl))
	changed |= wrapup_global_declaration_2 (decl);
    }
  while (changed);
}
/* Callback to collect a source_ref from a DECL.  Built-in
   declarations are skipped since they have no user source file.  */
static void
collect_source_ref_cb (tree decl)
{
  if (DECL_IS_BUILTIN (decl))
    return;
  collect_source_ref (LOCATION_FILE (decl_sloc (decl, false)));
}
/* Preserve the external declarations scope across a garbage collect.
   Set by c_parse_final_cleanups when it pops the external scope, and
   consumed by collect_all_refs / for_each_global_decl.  */
static GTY(()) tree ext_block;
/* Collect all references relevant to SOURCE_FILE, from every
   translation unit as well as the saved external scope.  */
static void
collect_all_refs (const char *source_file)
{
  unsigned ix;
  tree tu;
  FOR_EACH_VEC_ELT (*all_translation_units, ix, tu)
    collect_ada_nodes (BLOCK_VARS (DECL_INITIAL (tu)), source_file);
  collect_ada_nodes (BLOCK_VARS (ext_block), source_file);
}
/* Iterate over all global declarations and call CALLBACK, covering
   each translation unit's scope and then the saved external scope.  */
static void
for_each_global_decl (void (*callback) (tree decl))
{
  unsigned ix;
  tree tu;
  FOR_EACH_VEC_ELT (*all_translation_units, ix, tu)
    {
      for (tree decl = BLOCK_VARS (DECL_INITIAL (tu)); decl;
	   decl = TREE_CHAIN (decl))
	callback (decl);
    }
  for (tree decl = BLOCK_VARS (ext_block); decl; decl = TREE_CHAIN (decl))
    callback (decl);
}
/* Perform any final parser cleanups and generate initial debugging
   information.  Closes the external scope, emits ObjC metadata, runs
   -fdump-ada-spec handling, and wraps up all global declarations.  */
void
c_parse_final_cleanups (void)
{
  tree t;
  unsigned i;
  /* We don't want to do this if generating a PCH.  */
  if (pch_file)
    return;
  timevar_stop (TV_PHASE_PARSING);
  timevar_start (TV_PHASE_DEFERRED);
  /* Do the Objective-C stuff.  This is where all the Objective-C
     module stuff gets generated (symtab, class/protocol/selector
     lists etc).  */
  if (c_dialect_objc ())
    objc_write_global_declarations ();
  /* Close the external scope.  The block is saved in ext_block (a GC
     root) so its declarations survive past this point.  */
  ext_block = pop_scope ();
  external_scope = 0;
  gcc_assert (!current_scope);
  /* Handle -fdump-ada-spec[-slim]. */
  if (flag_dump_ada_spec || flag_dump_ada_spec_slim)
    {
      /* Build a table of files to generate specs for.  */
      if (flag_dump_ada_spec_slim)
	collect_source_ref (main_input_filename);
      else
	for_each_global_decl (collect_source_ref_cb);
      dump_ada_specs (collect_all_refs, NULL);
    }
  /* Process all file scopes in this compilation, and the external_scope,
     through wrapup_global_declarations.  */
  FOR_EACH_VEC_ELT (*all_translation_units, i, t)
    c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t)));
  c_write_global_declarations_1 (BLOCK_VARS (ext_block));
  timevar_stop (TV_PHASE_DEFERRED);
  timevar_start (TV_PHASE_PARSING);
  /* Clear the GC root now that the external scope is fully processed.  */
  ext_block = NULL;
}
/* Register reserved keyword WORD as qualifier for address space AS.  */
void
c_register_addr_space (const char *word, addr_space_t as)
{
  /* Address space qualifiers are only supported
     in C with GNU extensions enabled.  */
  if (c_dialect_objc () || flag_no_asm)
    return;

  int rid = RID_FIRST_ADDR_SPACE + as;
  tree id = get_identifier (word);
  C_SET_RID_CODE (id, rid);
  C_IS_RESERVED_WORD (id) = 1;
  ridpointers[rid] = id;
}
/* Return identifier to look up for omp declare reduction.  The name
   is mangled with a space-containing prefix so it can never collide
   with an ordinary identifier.  */
tree
c_omp_reduction_id (enum tree_code reduction_code, tree reduction_id)
{
  /* Built-in reduction operators map to their source spellings.  */
  const char *op = NULL;
  switch (reduction_code)
    {
    case PLUS_EXPR: op = "+"; break;
    case MULT_EXPR: op = "*"; break;
    case MINUS_EXPR: op = "-"; break;
    case BIT_AND_EXPR: op = "&"; break;
    case BIT_XOR_EXPR: op = "^"; break;
    case BIT_IOR_EXPR: op = "|"; break;
    case TRUTH_ANDIF_EXPR: op = "&&"; break;
    case TRUTH_ORIF_EXPR: op = "||"; break;
    case MIN_EXPR: op = "min"; break;
    case MAX_EXPR: op = "max"; break;
    default:
      break;
    }
  if (op == NULL)
    {
      /* User-defined reduction: use the identifier's own spelling.  */
      if (TREE_CODE (reduction_id) != IDENTIFIER_NODE)
	return error_mark_node;
      op = IDENTIFIER_POINTER (reduction_id);
    }
  const char prefix[] = "omp declare reduction ";
  size_t prefix_len = sizeof (prefix) - 1;
  size_t op_len = strlen (op);
  /* Room for prefix, operator name and terminating NUL.  */
  char *mangled = XALLOCAVEC (char, prefix_len + op_len + 1);
  memcpy (mangled, prefix, prefix_len);
  memcpy (mangled + prefix_len, op, op_len + 1);
  return get_identifier (mangled);
}
/* Lookup REDUCTION_ID in the current scope, or create an artificial
   VAR_DECL, bind it into the current scope and return it.  */
tree
c_omp_reduction_decl (tree reduction_id)
{
  /* Reuse an existing binding in the current scope if present.  */
  struct c_binding *b = I_SYMBOL_BINDING (reduction_id);
  if (b && B_IN_CURRENT_SCOPE (b))
    return b->decl;

  /* Create a compiler-internal placeholder variable; its DECL_INITIAL
     later holds the list of (type, reduction) pairs.  */
  tree var = build_decl (BUILTINS_LOCATION, VAR_DECL,
			 reduction_id, integer_type_node);
  DECL_ARTIFICIAL (var) = 1;
  DECL_EXTERNAL (var) = 1;
  TREE_STATIC (var) = 1;
  TREE_PUBLIC (var) = 0;
  bind (reduction_id, var, current_scope, true, false, BUILTINS_LOCATION);
  return var;
}
/* Lookup REDUCTION_ID in the first scope where it has entry for TYPE,
   walking outward through shadowed bindings.  */
tree
c_omp_reduction_lookup (tree reduction_id, tree type)
{
  for (struct c_binding *b = I_SYMBOL_BINDING (reduction_id); b;
       b = b->shadowed)
    for (tree t = DECL_INITIAL (b->decl); t; t = TREE_CHAIN (t))
      if (comptypes (TREE_PURPOSE (t), type))
	return TREE_VALUE (t);
  return error_mark_node;
}
/* Helper function called via walk_tree, to diagnose invalid
   #pragma omp declare reduction combiners or initializers.  DATA
   points to the two variables that are allowed to appear.  */
tree
c_check_omp_declare_reduction_r (tree *tp, int *, void *data)
{
  tree *vars = (tree *) data;
  /* Artificial decls and the two permitted variables are fine; keep
     walking.  */
  if (!SSA_VAR_P (*tp)
      || DECL_ARTIFICIAL (*tp)
      || *tp == vars[0]
      || *tp == vars[1])
    return NULL_TREE;

  /* vars[0] is named "omp_out" in a combiner and "omp_priv" in an
     initializer; use that to pick the right diagnostic.  */
  location_t loc = DECL_SOURCE_LOCATION (vars[0]);
  bool in_combiner
    = strcmp (IDENTIFIER_POINTER (DECL_NAME (vars[0])), "omp_out") == 0;
  if (in_combiner)
    error_at (loc, "%<#pragma omp declare reduction%> combiner refers to "
	      "variable %qD which is not %<omp_out%> nor %<omp_in%>",
	      *tp);
  else
    error_at (loc, "%<#pragma omp declare reduction%> initializer refers "
	      "to variable %qD which is not %<omp_priv%> nor "
	      "%<omp_orig%>",
	      *tp);
  return *tp;
}
#include "gt-c-c-decl.h"
|
parallel_stable_sort.h | /*
Copyright (C) 2014 Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <omp.h>
#include "pss_common.h"
namespace pss {
namespace internal {
// Merge sequences [xs,xe) and [ys,ye) to output sequence [zs,zs+(xe-xs)+(ye-ys))
// Destroy input sequence iff destroy==true
template<typename RandomAccessIterator1, typename RandomAccessIterator2, typename RandomAccessIterator3, typename Compare>
#if __INTEL_COMPILER<=1500
// Work around bug where firstprivate applied to formal parameter does not work.
// NOTE(review): an undefined __INTEL_COMPILER evaluates as 0 here, so this
// workaround branch is also taken on non-Intel compilers; the extra local
// copies are harmless, but confirm that is intended.
void parallel_move_merge( RandomAccessIterator1 xs_, RandomAccessIterator1 xe, RandomAccessIterator2 ys_, RandomAccessIterator2 ye, RandomAccessIterator3 zs_, bool destroy, Compare comp ) {
    RandomAccessIterator1 xs = xs_;
    RandomAccessIterator2 ys = ys_;
    RandomAccessIterator3 zs = zs_;
#else
void parallel_move_merge( RandomAccessIterator1 xs, RandomAccessIterator1 xe, RandomAccessIterator2 ys, RandomAccessIterator2 ye, RandomAccessIterator3 zs, bool destroy, Compare comp ) {
#endif
    // Below this size a serial merge is cheaper than spawning tasks.
    const size_t MERGE_CUT_OFF = 2000;
    while( (xe-xs) + (ye-ys) > MERGE_CUT_OFF ) {
        // Split at the median of the longer sequence and binary-search
        // the corresponding split point in the shorter one, so the two
        // halves can be merged independently.
        RandomAccessIterator1 xm;
        RandomAccessIterator2 ym;
        if( xe-xs < ye-ys ) {
            ym = ys+(ye-ys)/2;
            xm = std::upper_bound(xs,xe,*ym,comp);
        } else {
            xm = xs+(xe-xs)/2;
            ym = std::lower_bound(ys,ye,*xm,comp);
        }
        // Spawn a task for the lower halves; this loop iteration then
        // continues with the upper halves (recursion turned into
        // iteration on one side).
#pragma omp task untied mergeable firstprivate(xs,xm,ys,ym,zs,destroy,comp)
        parallel_move_merge( xs, xm, ys, ym, zs, destroy, comp );
        zs += (xm-xs) + (ym-ys);
        xs = xm;
        ys = ym;
    }
    // Base case: merge the remaining small ranges serially, then
    // optionally destroy the moved-from inputs.
    serial_move_merge( xs, xe, ys, ye, zs, comp );
    if( destroy ) {
        serial_destroy( xs, xe );
        serial_destroy( ys, ye );
    }
    // Wait for the tasks spawned in the loop above before returning.
#pragma omp taskwait
}
// Sorts [xs,xe), where zs[0:xe-xs) is temporary buffer supplied by caller.
// Result is in [xs,xe) if inplace==true, otherwise in [zs,zs+(xe-xs))
// inplace==2 additionally requests destruction of the temporaries during
// the final merge (see the destroy argument of parallel_move_merge).
template<typename RandomAccessIterator1, typename RandomAccessIterator2, typename Compare>
void parallel_stable_sort_aux( RandomAccessIterator1 xs, RandomAccessIterator1 xe, RandomAccessIterator2 zs, int inplace, Compare comp ) {
    typedef typename std::iterator_traits<RandomAccessIterator2>::value_type T;
    // Below this size, sort serially instead of recursing.
    const size_t SORT_CUT_OFF = 500;
    if( xe-xs<=SORT_CUT_OFF ) {
        stable_sort_base_case(xs, xe, zs, inplace, comp);
    } else {
        RandomAccessIterator1 xm = xs + (xe-xs)/2;
        RandomAccessIterator2 zm = zs + (xm-xs);
        RandomAccessIterator2 ze = zs + (xe-xs);
        // Sort the two halves with the destination buffers swapped
        // (!inplace), so the merge below moves data back to the
        // requested destination.  First half runs as a task.
#pragma omp task
        parallel_stable_sort_aux( xs, xm, zs, !inplace, comp );
        parallel_stable_sort_aux( xm, xe, zm, !inplace, comp );
#pragma omp taskwait
        if( inplace )
            parallel_move_merge( zs, zm, zm, ze, xs, inplace==2, comp );
        else
            parallel_move_merge( xs, xm, xm, xe, zs, false, comp );
    }
}
} // namespace internal
// Public entry point: stable-sort [xs,xe) with comparator comp, using
// OpenMP tasks when a temporary buffer of sizeof(T)*(xe-xs) bytes can
// be allocated, and falling back to std::stable_sort otherwise.
template<typename RandomAccessIterator, typename Compare>
void parallel_stable_sort( RandomAccessIterator xs, RandomAccessIterator xe, Compare comp ) {
    typedef typename std::iterator_traits<RandomAccessIterator>::value_type T;
    if( internal::raw_buffer z = internal::raw_buffer( sizeof(T)*(xe-xs) ) )
        // If already inside a parallel region, spawn tasks directly;
        // otherwise create a parallel region whose master thread seeds
        // the task tree.  NOTE(review): omp_get_num_threads() returns 1
        // outside any parallel region, so the first branch only triggers
        // when the caller is already parallel — confirm that is the
        // intended distinction.
        if( omp_get_num_threads() > 1 )
            internal::parallel_stable_sort_aux( xs, xe, (T*)z.get(), 2, comp );
        else
            // The two pragmas below attach to the single following
            // statement, which is the "else" branch body.
#pragma omp parallel
#pragma omp master
            internal::parallel_stable_sort_aux( xs, xe, (T*)z.get(), 2, comp );
    else
        // Not enough memory available - fall back on serial sort
        std::stable_sort( xs, xe, comp );
}
} // namespace pss
|
aux_propagate_comp.h | #ifndef AUX_PROPAGATE_H
#define AUX_PROPAGATE_H
#include <cvpp/containers/matrix.h>
#include <cvpp/algorithms/icp/libicp.h>
#include "class_map.h"
#include "class_group.h"
using namespace cvpp;
/// Propagate objects from Next to Main.
/// @param prev    Map holding the previous-frame clusters.
/// @param next    Map holding the next-frame clusters.
/// @param main    Map updated in place with propagated motion and points.
/// @param thrctr  Minimum cluster size for a cluster to participate.
/// @param thrfit  Fit threshold forwarded to the ICP solver.
/// NOTE(review): assumes prev, next and main are index-aligned (cluster i
/// refers to the same object in all three) — confirm against callers.
void propagateObjects( Map& prev , Map& next , Map& main ,
                       const int& thrctr , const double& thrfit )
{
    SeqMatd M1p( main.size() ) , M1n( main.size() );
    auto Mobs = MatZEROSd( main.size() );
    /// Calculate movement: fit prev->next with ICP, storing R and t into
    /// the corresponding main cluster; Mobs flags clusters with a fit.
    #pragma omp parallel for
    forLOOPi( prev.size() )
    {
        if( prev[i].size() >= thrctr && next[i].size() >= thrctr &&
            0.8 * prev[i].size() < next[i].size() )
        {
            Matd Mp = prev[i].getM(); // Changed to P
            Matd Mn = next[i].getM(); // Changed to P
            LibICPd icp( Mn );
            icp.fit( Mp , main[i].R , main[i].t , thrfit );
            Mobs(i) = true;
        }
    }
    /// Move main groups by the estimated transform; keep both the
    /// pre-move (M1p) and post-move (M1n) point sets for later use.
    forLOOPi( main.size() )
    {
        M1p[i] = main[i].getM();
        M1n[i] = icp_calc( M1p[i] , main[i].R , main[i].t );
        main[i].updM( M1n[i] );
    }
    // Remove main points that lie close to a next point (deduplication
    // before merging next into main).  Indices are deleted in
    // descending order (SortRows + FlipRows) so earlier deletions do
    // not shift later ones.
    forLOOPi( main.size() )
    {
        if( main[i].size() >= thrctr && next[i].size() >= thrctr )
        {
            Matd Mm = main[i].getM();
            Matd Mn = next[i].getM();
            SSeqi idxs; SSeqd dsts; KDtreed tree( Mn );
            tree.knnSearch( Mm , 1 , idxs , dsts );
            Veci rems;
            forLOOPj( idxs.size() )
                if( dsts[j][0] < 0.5 * main.rad * main.rad ) rems.insert(j);
            rems.update(); rems.mat().SortRows().FlipRows();
            forLOOPj( rems.n() ) main[i].del( rems[j] );
        }
    }
    /// Copy next clusters to main; extra next clusters beyond
    /// main.size() are appended as new objects.
    forLOOPij( main.size() , next[i].size() )
        main[i].push_back( next[i][j] );
    forLOOPii( main.size() , next.size() )
        main.objs.push_back( next[i] );
    // Recalculate main movement: refine with ICP where a prev/next fit
    // was observed, otherwise fall back to the mean displacement, and
    // maintain a running velocity estimate v per cluster.
    #pragma omp parallel for
    forLOOPi( M1p.size() )
    {
        if( Mobs(i) )
        {
            Matd M2 = main[i].getM();
            if( M1p[i].r() >= thrctr && M2.r() >= thrctr )
            {
                LibICPd icp( M2 );
                icp.fit( M1p[i] , main[i].R , main[i].t , thrfit );
                Matd d = ( M2 - M1p[i] ).meanRows();
                double chg = ( main[i].v - d ).rsqsum();
                // A large jump relative to the stored velocity is
                // treated as an outlier: reset the transform.
                if( chg > 0.9 )
                {
                    main[i].t.setVal(0);
                    main[i].R.setIdentity();
                }
                else
                {
                    // Average new displacement into the velocity; very
                    // small displacements are clamped to zero.
                    if( main[i].v(0) == 0.0 ) main[i].v = d;
                    else main[i].v = ( main[i].v + d ) / 2.0;
                    if( d.rsqsum() < 0.1 ) main[i].v.setVal(0);
                    main[i].t = main[i].v; main[i].R.setIdentity();
                }
            }
        }
        else
        {
            Matd d = ( M1n[i] - M1p[i] ).meanRows();
            if( main[i].v(0) == 0.0 ) main[i].v = d;
            else main[i].v = ( main[i].v + d ) / 2.0;
            main[i].t = main[i].v; main[i].R.setIdentity();
        }
        // main[i].t(2) = 0.0; // No Z motion
    }
    // Debug output of the estimated translations.
    forLOOPi( main.size() ) main[i].t.print();
}
#endif
|
GB_unop__asin_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__asin_fp64_fp64
// op(A') function: GB_unop_tran__asin_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = asin (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = asin (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = asin (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASIN || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = asin (Ax): apply the unary operator over all anz entries, using
// nthreads threads.  Cx and Ax may be aliased.  Auto-generated file:
// keep edits in sync with the generator template.
GrB_Info GB_unop_apply__asin_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = asin (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = asin (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The actual transpose kernel lives in GB_unop_transpose.c, which is
// textually included and parameterized by the GB_* macros defined above.
GrB_Info GB_unop_tran__asin_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
dense.h | #ifndef __DENSE_CHOLESKY_H__
#define __DENSE_CHOLESKY_H__
#include "../matrix-formats/matrix-formats.h"
namespace __core__ {
namespace __linear_algebra__ {
namespace __cholesky__ {
// Serial Cholesky-style factorization (ver==0).  Overwrites R in place with
// an upper-triangular factor (each pivot row is scaled by sqrt(|pivot|);
// __abs__ guards against tiny negative pivots -- presumably the input is
// expected to be symmetric positive definite).  The strictly lower triangle
// is zeroed at the end.
template <int ver,typename T=void> typename std::enable_if<ver==0,void>::type cholesky(MatrixHandler<T,size_t>& R) {
    for(size_t k=0;k<R.n();++k) {
        // Eliminate column k from every row below the pivot row.
        for(size_t row=k+1;row<R.n();++row) {
            for(size_t col=row;col<R.n();++col) {
                R(row,col)-=R(k,col)*R(k,row)/R(k,k);
            }
        }
        // Scale the pivot row by the square root of the (absolute) pivot.
        T pivot_root=__sqrt__(__abs__(R(k,k)));
        for(size_t col=k;col<R.n();++col) {
            R(k,col)=R(k,col)/pivot_root;
        }
    }
    // Clear the strictly lower triangle so only the factor remains.
    for(size_t row=0;row<R.n();++row) {
        for(size_t col=0;col<R.n();++col) {
            if(row>col) {
                R(row,col)=0;
            }
        }
    }
}
// OpenMP variant of the Cholesky factorization (ver==1): parallelizes the
// row-elimination and the pivot-row scaling loops of each step k.
// NOTE(review): the thread count is hard-coded to 4 -- consider making it
// configurable or leaving it to the OpenMP runtime default.
template <int ver,typename T=void> typename std::enable_if<ver==1,void>::type cholesky(MatrixHandler<T,size_t>& R) {
for(size_t k=0;k<R.n();++k) {
// Rows below the pivot are updated independently of each other.
#pragma omp parallel for num_threads(4)
for(size_t j=k+1;j<R.n();++j)
for(size_t i=j;i<R.n();++i)
R(j,i)-=R(k,i)*R(k,j)/R(k,k);
// __abs__ guards against a tiny negative pivot; assumes SPD input -- TODO confirm.
T tmp=__sqrt__(__abs__(R(k,k)));
#pragma omp parallel for num_threads(4)
for(size_t j=k;j<R.n();++j)
R(k,j)=R(k,j)/tmp;
}
// Zero the strictly lower triangle so only the factor remains.
for(size_t i=0;i<R.n();++i)
for(size_t j=0;j<R.n();++j)
if(i>j)
R(i,j)=0;
}
//template <int ver,typename T=void> typename std::enable_if<ver==2,void>::type cholesky(MatrixHandler<T,size_t>& R) {
// size_t pitch=R.pitch();
// size_t n=R.rows();
// T data=R.__data__;
//#pragma omp parallel num_threads(4)
// {
//#pragma omp single
// {
// for(size_t k=0;k<n;++k) {
// for(size_t j=k+1;j<n;++j)
// for(size_t i=j;i<n;++i) {
//#pragma omp task depend(in:(*(((*T)(data+k*pitch))+i))) depend(in:(*(((*T)(data+k*pitch))+j))) depend(in:(*(((*T)(data+k*pitch))+k))) depend(out: R(j,i))
// R(j,i)-=R(k,i)*R(k,j)/R(k,k);
// }
// for(size_t j=k+1;j<n;++j)
//#pragma omp task depend(out:(*(((*T)(data+k*pitch))+j))) depend(in:(*(((*T)(data+k*pitch))+k)))
// R(k,j)=R(k,j)/__sqrt__(__abs__(R(k,k)));
//#pragma omp task depend(out:(*(((*T)(data+k*pitch))+k)))
// R(k,k)=R(k,k)/__sqrt__(__abs__(R(k,k)));
// }
// }
// }
// for(size_t i=0;i<n;++i)
// for(size_t j=0;j<n;++j)
// if(i>j)
// R(i,j)=0;
//}
}
}
}
#endif
|
alg-yescrypt-opt.c | /*-
* Copyright 2009 Colin Percival
* Copyright 2012-2018 Alexander Peslyak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
#include "crypt-port.h"
#if INCLUDE_yescrypt || INCLUDE_scrypt || INCLUDE_gost_yescrypt
#pragma GCC diagnostic ignored "-Wcast-align"
#ifdef __clang__
#pragma GCC diagnostic ignored "-Wtautological-constant-out-of-range-compare"
#endif
/*
* AVX and especially XOP speed up Salsa20 a lot, but this mostly matters for
* classic scrypt and for YESCRYPT_WORM (which use 8 rounds of Salsa20 per
* sub-block), and much less so for YESCRYPT_RW (which uses 2 rounds of Salsa20
* per block except during pwxform S-box initialization).
*/
#if 0
#ifdef __XOP__
#warning "Note: XOP is enabled. That's great."
#elif defined(__AVX__)
#warning \
"Note: AVX is enabled, which is great for classic scrypt and YESCRYPT_WORM, but is sometimes slightly slower than plain SSE2 for YESCRYPT_RW"
#elif defined(__SSE2__)
#warning \
"Note: AVX and XOP are not enabled, which is great for YESCRYPT_RW, but they would substantially improve performance at classic scrypt and YESCRYPT_WORM"
#elif defined(__x86_64__) || defined(__i386__)
#warning "SSE2 not enabled. Expect poor performance."
#else
#warning "Note: building generic code for non-x86. That's OK."
#endif
#endif
/*
* The SSE4 code version has fewer instructions than the generic SSE2 version,
* but all of the instructions are SIMD, thereby wasting the scalar execution
* units. Thus, the generic SSE2 version below actually runs faster on some
* CPUs due to its balanced mix of SIMD and scalar instructions.
*/
#undef USE_SSE4_FOR_32BIT
#ifdef __SSE2__
/*
* GCC before 4.9 would by default unnecessarily use store/load (without
* SSE4.1) or (V)PEXTR (with SSE4.1 or AVX) instead of simply (V)MOV.
* This was tracked as GCC bug 54349.
* "-mtune=corei7" works around this, but is only supported for GCC 4.6+.
* We use inline asm for pre-4.6 GCC, further down this file.
*/
#if __GNUC__ == 4 && __GNUC_MINOR__ >= 6 && __GNUC_MINOR__ < 9 && \
!defined(__clang__) && !defined(__ICC)
#pragma GCC target("tune=corei7")
#endif
#include <emmintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#elif defined(__SSE__)
#include <xmmintrin.h>
#endif
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#define insecure_memzero XCRYPT_SECURE_MEMSET
#include "alg-sha256.h"
#include "alg-yescrypt-sysendian.h"
#define YESCRYPT_INTERNAL
#include "alg-yescrypt-platform.c"
#include "alg-yescrypt.h"
#if __STDC_VERSION__ >= 199901L
/* Have restrict */
#elif defined(__GNUC__)
#define restrict __restrict
#else
#define restrict
#endif
#ifdef __GNUC__
#define unlikely(exp) __builtin_expect(exp, 0)
#else
#define unlikely(exp) (exp)
#endif
#ifdef __SSE__
#define PREFETCH(x, hint) _mm_prefetch((const char*)(x), (hint));
#else
#undef PREFETCH
#endif
/* One 64-byte Salsa20 block, addressable as sixteen 32-bit words, eight
 * 64-bit words, or (when SSE2 is available) four 128-bit SIMD lanes. */
typedef union
{
uint32_t w[16];
uint64_t d[8];
#ifdef __SSE2__
__m128i q[4];
#endif
} salsa20_blk_t;
/**
 * salsa20_simd_shuffle(Bin, Bout):
 * Repack the sixteen 32-bit words of Bin into the SIMD-friendly layout:
 * each 64-bit lane of Bout pairs one low word with one (shuffled) high word.
 * Exact inverse of salsa20_simd_unshuffle().
 */
static inline void salsa20_simd_shuffle(const salsa20_blk_t* Bin,
                                        salsa20_blk_t* Bout)
{
    const uint32_t* w = Bin->w;
    uint64_t* d = Bout->d;
    d[0] = w[0]  | ((uint64_t)w[5]  << 32);
    d[1] = w[10] | ((uint64_t)w[15] << 32);
    d[2] = w[4]  | ((uint64_t)w[9]  << 32);
    d[3] = w[14] | ((uint64_t)w[3]  << 32);
    d[4] = w[8]  | ((uint64_t)w[13] << 32);
    d[5] = w[2]  | ((uint64_t)w[7]  << 32);
    d[6] = w[12] | ((uint64_t)w[1]  << 32);
    d[7] = w[6]  | ((uint64_t)w[11] << 32);
}
/**
 * salsa20_simd_unshuffle(Bin, Bout):
 * Inverse of salsa20_simd_shuffle(): split each 64-bit lane of Bin back
 * into its two 32-bit words at their canonical Salsa20 positions.
 */
static inline void salsa20_simd_unshuffle(const salsa20_blk_t* Bin,
                                          salsa20_blk_t* Bout)
{
    const uint64_t* d = Bin->d;
    uint32_t* w = Bout->w;
    w[0]  = (uint32_t)d[0]; w[1]  = (uint32_t)(d[6] >> 32);
    w[2]  = (uint32_t)d[5]; w[3]  = (uint32_t)(d[3] >> 32);
    w[4]  = (uint32_t)d[2]; w[5]  = (uint32_t)(d[0] >> 32);
    w[6]  = (uint32_t)d[7]; w[7]  = (uint32_t)(d[5] >> 32);
    w[8]  = (uint32_t)d[4]; w[9]  = (uint32_t)(d[2] >> 32);
    w[10] = (uint32_t)d[1]; w[11] = (uint32_t)(d[7] >> 32);
    w[12] = (uint32_t)d[6]; w[13] = (uint32_t)(d[4] >> 32);
    w[14] = (uint32_t)d[3]; w[15] = (uint32_t)(d[1] >> 32);
}
#ifdef __SSE2__
#define DECL_X __m128i X0, X1, X2, X3;
#define DECL_Y __m128i Y0, Y1, Y2, Y3;
#define READ_X(in) \
X0 = (in).q[0]; \
X1 = (in).q[1]; \
X2 = (in).q[2]; \
X3 = (in).q[3];
#define WRITE_X(out) \
(out).q[0] = X0; \
(out).q[1] = X1; \
(out).q[2] = X2; \
(out).q[3] = X3;
#ifdef __XOP__
#define ARX(out, in1, in2, s) \
out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
#else
#define ARX(out, in1, in2, s) \
{ \
__m128i tmp = _mm_add_epi32(in1, in2); \
out = _mm_xor_si128(out, _mm_slli_epi32(tmp, s)); \
out = _mm_xor_si128(out, _mm_srli_epi32(tmp, 32 - s)); \
}
#endif
#define SALSA20_2ROUNDS \
/* Operate on "columns" */ \
ARX(X1, X0, X3, 7) \
ARX(X2, X1, X0, 9) \
ARX(X3, X2, X1, 13) \
ARX(X0, X3, X2, 18) \
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x93); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x39); \
/* Operate on "rows" */ \
ARX(X3, X0, X1, 7) \
ARX(X2, X3, X0, 9) \
ARX(X1, X2, X3, 13) \
ARX(X0, X1, X2, 18) \
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x39); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x93);
/**
* Apply the Salsa20 core to the block provided in (X0 ... X3).
*/
#define SALSA20_wrapper(out, rounds) \
{ \
__m128i Z0 = X0, Z1 = X1, Z2 = X2, Z3 = X3; \
rounds(out).q[0] = X0 = _mm_add_epi32(X0, Z0); \
(out).q[1] = X1 = _mm_add_epi32(X1, Z1); \
(out).q[2] = X2 = _mm_add_epi32(X2, Z2); \
(out).q[3] = X3 = _mm_add_epi32(X3, Z3); \
}
/**
* Apply the Salsa20/2 core to the block provided in X.
*/
#define SALSA20_2(out) SALSA20_wrapper(out, SALSA20_2ROUNDS)
#define SALSA20_8ROUNDS \
SALSA20_2ROUNDS SALSA20_2ROUNDS SALSA20_2ROUNDS SALSA20_2ROUNDS
#define XOR_X(in) \
X0 = _mm_xor_si128(X0, (in).q[0]); \
X1 = _mm_xor_si128(X1, (in).q[1]); \
X2 = _mm_xor_si128(X2, (in).q[2]); \
X3 = _mm_xor_si128(X3, (in).q[3]);
#define XOR_X_2(in1, in2) \
X0 = _mm_xor_si128((in1).q[0], (in2).q[0]); \
X1 = _mm_xor_si128((in1).q[1], (in2).q[1]); \
X2 = _mm_xor_si128((in1).q[2], (in2).q[2]); \
X3 = _mm_xor_si128((in1).q[3], (in2).q[3]);
#define XOR_X_WRITE_XOR_Y_2(out, in) \
(out).q[0] = Y0 = _mm_xor_si128((out).q[0], (in).q[0]); \
(out).q[1] = Y1 = _mm_xor_si128((out).q[1], (in).q[1]); \
(out).q[2] = Y2 = _mm_xor_si128((out).q[2], (in).q[2]); \
(out).q[3] = Y3 = _mm_xor_si128((out).q[3], (in).q[3]); \
X0 = _mm_xor_si128(X0, Y0); \
X1 = _mm_xor_si128(X1, Y1); \
X2 = _mm_xor_si128(X2, Y2); \
X3 = _mm_xor_si128(X3, Y3);
/**
* Apply the Salsa20/8 core to the block provided in X ^ in.
*/
#define SALSA20_8_XOR_MEM(in, out) \
XOR_X(in) \
SALSA20_wrapper(out, SALSA20_8ROUNDS)
#define INTEGERIFY _mm_cvtsi128_si32(X0)
#else /* !defined(__SSE2__) */
#define DECL_X salsa20_blk_t X;
#define DECL_Y salsa20_blk_t Y;
#define COPY(out, in) \
(out).d[0] = (in).d[0]; \
(out).d[1] = (in).d[1]; \
(out).d[2] = (in).d[2]; \
(out).d[3] = (in).d[3]; \
(out).d[4] = (in).d[4]; \
(out).d[5] = (in).d[5]; \
(out).d[6] = (in).d[6]; \
(out).d[7] = (in).d[7];
#define READ_X(in) COPY(X, in)
#define WRITE_X(out) COPY(out, X)
/**
 * salsa20(B):
 * Apply the Salsa20 core to the provided block: `doublerounds` double-rounds
 * followed by the feed-forward addition of the original input.  The result
 * is written to Bout and also back into B (both in SIMD-shuffled layout).
 * doublerounds == 1 gives Salsa20/2; doublerounds == 4 gives Salsa20/8.
 */
static inline void salsa20(salsa20_blk_t* restrict B,
salsa20_blk_t* restrict Bout, uint32_t doublerounds)
{
salsa20_blk_t X;
#define x X.w
/* Work on the canonical (unshuffled) word order. */
salsa20_simd_unshuffle(B, &X);
do
{
#define R(a, b) (((a) << (b)) | ((a) >> (32 - (b))))
/* Operate on columns */
x[4] ^= R(x[0] + x[12], 7);
x[8] ^= R(x[4] + x[0], 9);
x[12] ^= R(x[8] + x[4], 13);
x[0] ^= R(x[12] + x[8], 18);
x[9] ^= R(x[5] + x[1], 7);
x[13] ^= R(x[9] + x[5], 9);
x[1] ^= R(x[13] + x[9], 13);
x[5] ^= R(x[1] + x[13], 18);
x[14] ^= R(x[10] + x[6], 7);
x[2] ^= R(x[14] + x[10], 9);
x[6] ^= R(x[2] + x[14], 13);
x[10] ^= R(x[6] + x[2], 18);
x[3] ^= R(x[15] + x[11], 7);
x[7] ^= R(x[3] + x[15], 9);
x[11] ^= R(x[7] + x[3], 13);
x[15] ^= R(x[11] + x[7], 18);
/* Operate on rows */
x[1] ^= R(x[0] + x[3], 7);
x[2] ^= R(x[1] + x[0], 9);
x[3] ^= R(x[2] + x[1], 13);
x[0] ^= R(x[3] + x[2], 18);
x[6] ^= R(x[5] + x[4], 7);
x[7] ^= R(x[6] + x[5], 9);
x[4] ^= R(x[7] + x[6], 13);
x[5] ^= R(x[4] + x[7], 18);
x[11] ^= R(x[10] + x[9], 7);
x[8] ^= R(x[11] + x[10], 9);
x[9] ^= R(x[8] + x[11], 13);
x[10] ^= R(x[9] + x[8], 18);
x[12] ^= R(x[15] + x[14], 7);
x[13] ^= R(x[12] + x[15], 9);
x[14] ^= R(x[13] + x[12], 13);
x[15] ^= R(x[14] + x[13], 18);
#undef R
} while (--doublerounds);
#undef x
{
uint32_t i;
/* Back to shuffled layout, then feed-forward: Bout = B = X + B_in. */
salsa20_simd_shuffle(&X, Bout);
for (i = 0; i < 16; i += 4)
{
B->w[i] = Bout->w[i] += B->w[i];
B->w[i + 1] = Bout->w[i + 1] += B->w[i + 1];
B->w[i + 2] = Bout->w[i + 2] += B->w[i + 2];
B->w[i + 3] = Bout->w[i + 3] += B->w[i + 3];
}
}
#if 0
/* Too expensive */
insecure_memzero(&X, sizeof(X));
#endif
}
/**
* Apply the Salsa20/2 core to the block provided in X.
*/
#define SALSA20_2(out) salsa20(&X, &out, 1);
#define XOR(out, in1, in2) \
(out).d[0] = (in1).d[0] ^ (in2).d[0]; \
(out).d[1] = (in1).d[1] ^ (in2).d[1]; \
(out).d[2] = (in1).d[2] ^ (in2).d[2]; \
(out).d[3] = (in1).d[3] ^ (in2).d[3]; \
(out).d[4] = (in1).d[4] ^ (in2).d[4]; \
(out).d[5] = (in1).d[5] ^ (in2).d[5]; \
(out).d[6] = (in1).d[6] ^ (in2).d[6]; \
(out).d[7] = (in1).d[7] ^ (in2).d[7];
#define XOR_X(in) XOR(X, X, in)
#define XOR_X_2(in1, in2) XOR(X, in1, in2)
#define XOR_X_WRITE_XOR_Y_2(out, in) \
XOR(Y, out, in) \
COPY(out, Y) \
XOR(X, X, Y)
/**
* Apply the Salsa20/8 core to the block provided in X ^ in.
*/
#define SALSA20_8_XOR_MEM(in, out) \
XOR_X(in); \
salsa20(&X, &out, 4);
#define INTEGERIFY (uint32_t) X.d[0]
#endif
/**
 * blockmix_salsa8(Bin, Bout, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.
 * Even-indexed sub-blocks land in the first half of Bout and odd-indexed
 * ones in the second half (scrypt's BlockMix output shuffle).
 */
static void blockmix_salsa8(const salsa20_blk_t* restrict Bin,
salsa20_blk_t* restrict Bout, size_t r)
{
size_t i;
DECL_X
READ_X(Bin[r * 2 - 1]) /* X = last 64-byte sub-block of Bin */
for (i = 0; i < r; i++)
{
SALSA20_8_XOR_MEM(Bin[i * 2], Bout[i])
SALSA20_8_XOR_MEM(Bin[i * 2 + 1], Bout[r + i])
}
}
/**
 * blockmix_salsa8_xor(Bin1, Bin2, Bout, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin1 ^ Bin2) and return the low
 * 32 bits of the final sub-block (callers mask it to derive the next index).
 */
static uint32_t blockmix_salsa8_xor(const salsa20_blk_t* restrict Bin1,
const salsa20_blk_t* restrict Bin2,
salsa20_blk_t* restrict Bout, size_t r)
{
size_t i;
DECL_X
#ifdef PREFETCH
/* Warm the cache with all of Bin2 before mixing. */
PREFETCH(&Bin2[r * 2 - 1], _MM_HINT_T0)
for (i = 0; i < r - 1; i++)
{
PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
}
PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
#endif
XOR_X_2(Bin1[r * 2 - 1], Bin2[r * 2 - 1])
for (i = 0; i < r; i++)
{
XOR_X(Bin1[i * 2])
SALSA20_8_XOR_MEM(Bin2[i * 2], Bout[i])
XOR_X(Bin1[i * 2 + 1])
SALSA20_8_XOR_MEM(Bin2[i * 2 + 1], Bout[r + i])
}
return INTEGERIFY;
}
/* This is tunable */
#define Swidth 8
/* Not tunable in this implementation, hard-coded in a few places */
#define PWXsimple 2
#define PWXgather 4
/* Derived values. Not tunable except via Swidth above. */
#define PWXbytes (PWXgather * PWXsimple * 8)
#define Sbytes (3 * (1 << Swidth) * PWXsimple * 8)
#define Smask (((1 << Swidth) - 1) * PWXsimple * 8)
#define Smask2 (((uint64_t)Smask << 32) | Smask)
#define DECL_SMASK2REG /* empty */
#define FORCE_REGALLOC_3 /* empty */
#define MAYBE_MEMORY_BARRIER /* empty */
#ifdef __SSE2__
/*
* (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
* starting with Sandy Bridge. Additionally, PSHUFD uses separate source and
* destination registers, whereas the shifts would require an extra move
* instruction for our code when building without AVX. Unfortunately, PSHUFD
* is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
* and somewhat slower on some non-Intel CPUs (luckily not including AMD
* Bulldozer and Piledriver).
*/
#ifdef __AVX__
#define HI32(X) _mm_srli_si128((X), 4)
#elif 1 /* As an option, check for __SSE4_1__ here not to hurt Conroe */
#define HI32(X) _mm_shuffle_epi32((X), _MM_SHUFFLE(2, 3, 0, 1))
#else
#define HI32(X) _mm_srli_epi64((X), 32)
#endif
#if defined(__x86_64__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && \
!defined(__ICC)
#ifdef __AVX__
#define MOVQ "vmovq"
#else
/* "movq" would be more correct, but "movd" is supported by older binutils
* due to an error in AMD's spec for x86-64. */
#define MOVQ "movd"
#endif
#define EXTRACT64(X) \
({ \
uint64_t result; \
__asm__(MOVQ " %1, %0" : "=r"(result) : "x"(X)); \
result; \
})
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(USE_SSE4_FOR_32BIT) && defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
* intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64() */
#define EXTRACT64(X) \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif
#if defined(__x86_64__) && (defined(__AVX__) || !defined(__GNUC__))
/* 64-bit with AVX */
/* Force use of 64-bit AND instead of two 32-bit ANDs */
#undef DECL_SMASK2REG
#if defined(__GNUC__) && !defined(__ICC)
#define DECL_SMASK2REG uint64_t Smask2reg = Smask2;
/* Force use of lower-numbered registers to reduce number of prefixes, relying
* on out-of-order execution and register renaming. */
#define FORCE_REGALLOC_1 \
__asm__("" : "=a"(x), "+d"(Smask2reg), "+S"(S0), "+D"(S1));
#define FORCE_REGALLOC_2 __asm__("" : : "c"(lo));
#else
static volatile uint64_t Smask2var = Smask2;
#define DECL_SMASK2REG uint64_t Smask2reg = Smask2var;
#define FORCE_REGALLOC_1 /* empty */
#define FORCE_REGALLOC_2 /* empty */
#endif
#define PWXFORM_SIMD(X) \
{ \
uint64_t x; \
FORCE_REGALLOC_1 \
uint32_t lo = x = EXTRACT64(X) & Smask2reg; \
FORCE_REGALLOC_2 \
uint32_t hi = x >> 32; \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, *(__m128i*)(S0 + lo)); \
X = _mm_xor_si128(X, *(__m128i*)(S1 + hi)); \
}
#elif defined(__x86_64__)
/* 64-bit without AVX. This relies on out-of-order execution and register
* renaming. It may actually be fastest on CPUs with AVX(2) as well - e.g.,
* it runs great on Haswell. */
#if 0
#warning "Note: using x86-64 inline assembly for YESCRYPT_RW. That's great."
#endif
/* We need a compiler memory barrier between sub-blocks to ensure that none of
* the writes into what was S2 during processing of the previous sub-block are
* postponed until after a read from S0 or S1 in the inline asm code below. */
#undef MAYBE_MEMORY_BARRIER
#define MAYBE_MEMORY_BARRIER __asm__("" : : : "memory");
#ifdef __ILP32__ /* x32 */
#define REGISTER_PREFIX "e"
#else
#define REGISTER_PREFIX "r"
#endif
#define PWXFORM_SIMD(X) \
{ \
__m128i H; \
__asm__("movd %0, %%rax\n\t" \
"pshufd $0xb1, %0, %1\n\t" \
"andq %2, %%rax\n\t" \
"pmuludq %1, %0\n\t" \
"movl %%eax, %%ecx\n\t" \
"shrq $0x20, %%rax\n\t" \
"paddq (%3,%%" REGISTER_PREFIX "cx), %0\n\t" \
"pxor (%4,%%" REGISTER_PREFIX "ax), %0\n\t" \
: "+x"(X), "=x"(H) \
: "d"(Smask2), "S"(S0), "D"(S1) \
: "cc", "ax", "cx"); \
}
#elif defined(USE_SSE4_FOR_32BIT) && defined(__SSE4_1__)
/* 32-bit with SSE4.1 */
#define PWXFORM_SIMD(X) \
{ \
__m128i x = _mm_and_si128(X, _mm_set1_epi64x(Smask2)); \
__m128i s0 = *(__m128i*)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \
__m128i s1 = *(__m128i*)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, s0); \
X = _mm_xor_si128(X, s1); \
}
#else
/* 32-bit without SSE4.1 */
#define PWXFORM_SIMD(X) \
{ \
uint64_t x = EXTRACT64(X) & Smask2; \
__m128i s0 = *(__m128i*)(S0 + (uint32_t)x); \
__m128i s1 = *(__m128i*)(S1 + (x >> 32)); \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, s0); \
X = _mm_xor_si128(X, s1); \
}
#endif
#define PWXFORM_ROUND \
PWXFORM_SIMD(X0) \
PWXFORM_SIMD(X1) \
PWXFORM_SIMD(X2) \
PWXFORM_SIMD(X3)
#if defined(__x86_64__) && defined(__GNUC__) && !defined(__ICC)
#undef FORCE_REGALLOC_3
#define FORCE_REGALLOC_3 __asm__("" : : "b"(Sw));
#endif
#else /* !defined(__SSE2__) */
#define PWXFORM_SIMD(x0, x1) \
{ \
uint64_t x = x0 & Smask2; \
uint64_t* p0 = (uint64_t*)(S0 + (uint32_t)x); \
uint64_t* p1 = (uint64_t*)(S1 + (x >> 32)); \
x0 = ((x0 >> 32) * (uint32_t)x0 + p0[0]) ^ p1[0]; \
x1 = ((x1 >> 32) * (uint32_t)x1 + p0[1]) ^ p1[1]; \
}
#define PWXFORM_ROUND \
PWXFORM_SIMD(X.d[0], X.d[1]) \
PWXFORM_SIMD(X.d[2], X.d[3]) \
PWXFORM_SIMD(X.d[4], X.d[5]) \
PWXFORM_SIMD(X.d[6], X.d[7])
#endif
/*
* This offset helps address the 256-byte write block via the single-byte
* displacements encodable in x86(-64) instructions. It is needed because the
* displacements are signed. Without it, we'd get 4-byte displacements for
* half of the writes. Setting it to 0x80 instead of 0x7c would avoid needing
* a displacement for one of the writes, but then the LEA instruction would
* need a 4-byte displacement.
*/
#define PWXFORM_WRITE_OFFSET 0x7c
#define PWXFORM_WRITE \
WRITE_X(*(salsa20_blk_t*)(Sw - PWXFORM_WRITE_OFFSET)) \
Sw += 64;
#define PWXFORM \
{ \
uint8_t* Sw = S2 + w + PWXFORM_WRITE_OFFSET; \
FORCE_REGALLOC_3 \
MAYBE_MEMORY_BARRIER \
PWXFORM_ROUND \
PWXFORM_ROUND PWXFORM_WRITE PWXFORM_ROUND PWXFORM_WRITE PWXFORM_ROUND \
PWXFORM_WRITE PWXFORM_ROUND PWXFORM_WRITE PWXFORM_ROUND w = \
(w + 64 * 4) & Smask2; \
{ \
uint8_t* Stmp = S2; \
S2 = S1; \
S1 = S0; \
S0 = Stmp; \
} \
}
/*
 * Per-instance pwxform state: the three S-box regions (S0, S1, S2) and the
 * current write offset w within S2.  The PWXFORM macro rotates the three
 * pointers (S0 <- S2 <- S1 <- S0) after each invocation.
 */
typedef struct
{
uint8_t *S0, *S1, *S2;
size_t w;
} pwxform_ctx_t;
/* Sbytes of S-box storage plus a 64-byte-aligned pwxform_ctx_t appended. */
#define Salloc (Sbytes + ((sizeof(pwxform_ctx_t) + 63) & ~63U))
/**
 * blockmix(Bin, Bout, r, ctx):
 * Compute Bout = BlockMix_pwxform{salsa20/2, r, S}(Bin). The input Bin must
 * be 128r bytes in length; the output Bout must also be the same size.
 */
static void blockmix(const salsa20_blk_t* restrict Bin,
salsa20_blk_t* restrict Bout, size_t r,
pwxform_ctx_t* restrict ctx)
{
/* ctx MUST NOT be NULL */
assert(ctx != NULL);
/* Cache S-box state locally: PWXFORM rotates S0/S1/S2 and advances w. */
uint8_t *S0 = ctx->S0, *S1 = ctx->S1, *S2 = ctx->S2;
size_t w = ctx->w;
size_t i;
DECL_X
/* Convert count of 128-byte blocks to max index of 64-byte block */
r = r * 2 - 1;
READ_X(Bin[r])
DECL_SMASK2REG
i = 0;
do
{
XOR_X(Bin[i])
PWXFORM
if (unlikely(i >= r))
break;
WRITE_X(Bout[i])
i++;
} while (1);
/* Persist the rotated S-box state for the next call. */
ctx->S0 = S0;
ctx->S1 = S1;
ctx->S2 = S2;
ctx->w = w;
/* The final sub-block additionally goes through Salsa20/2. */
SALSA20_2(Bout[i])
}
/**
 * blockmix_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM, ctx):
 * Compute Bout = BlockMix_pwxform{salsa20/2, r, S}(Bin1 ^ Bin2) and return
 * the low 32 bits of the final sub-block.  Bin2_in_ROM only selects the
 * prefetch hint (non-temporal for ROM-resident data).
 */
static uint32_t blockmix_xor(const salsa20_blk_t* Bin1,
const salsa20_blk_t* restrict Bin2,
salsa20_blk_t* Bout, size_t r, int Bin2_in_ROM,
pwxform_ctx_t* restrict ctx)
{
/* ctx MUST NOT be NULL */
assert(ctx != NULL);
/* Cache S-box state locally: PWXFORM rotates S0/S1/S2 and advances w. */
uint8_t *S0 = ctx->S0, *S1 = ctx->S1, *S2 = ctx->S2;
size_t w = ctx->w;
size_t i;
DECL_X
/* Convert count of 128-byte blocks to max index of 64-byte block */
r = r * 2 - 1;
#ifdef PREFETCH
if (Bin2_in_ROM)
{
PREFETCH(&Bin2[r], _MM_HINT_NTA)
for (i = 0; i < r; i++)
{
PREFETCH(&Bin2[i], _MM_HINT_NTA)
}
}
else
{
PREFETCH(&Bin2[r], _MM_HINT_T0)
for (i = 0; i < r; i++)
{
PREFETCH(&Bin2[i], _MM_HINT_T0)
}
}
#else
(void)Bin2_in_ROM; /* unused */
#endif
XOR_X_2(Bin1[r], Bin2[r])
DECL_SMASK2REG
i = 0;
r--;
/* Two sub-blocks per iteration; break before the write of the last one. */
do
{
XOR_X(Bin1[i])
XOR_X(Bin2[i])
PWXFORM
WRITE_X(Bout[i])
XOR_X(Bin1[i + 1])
XOR_X(Bin2[i + 1])
PWXFORM
if (unlikely(i >= r))
break;
WRITE_X(Bout[i + 1])
i += 2;
} while (1);
i++;
/* Persist the rotated S-box state for the next call. */
ctx->S0 = S0;
ctx->S1 = S1;
ctx->S2 = S2;
ctx->w = w;
/* The final sub-block additionally goes through Salsa20/2. */
SALSA20_2(Bout[i])
return INTEGERIFY;
}
/**
 * blockmix_xor_save(Bin1out, Bin2, r, ctx):
 * Like blockmix_xor(), but also XORs the running state back into Bin2's
 * image inside Bin1out (the XOR_X_WRITE_XOR_Y_2 step), implementing the
 * YESCRYPT_RW "write back into V" behavior.  Returns the low 32 bits of
 * the final sub-block.
 */
static uint32_t blockmix_xor_save(salsa20_blk_t* restrict Bin1out,
salsa20_blk_t* restrict Bin2, size_t r,
pwxform_ctx_t* restrict ctx)
{
/* ctx MUST NOT be NULL */
assert(ctx != NULL);
/* Cache S-box state locally: PWXFORM rotates S0/S1/S2 and advances w. */
uint8_t *S0 = ctx->S0, *S1 = ctx->S1, *S2 = ctx->S2;
size_t w = ctx->w;
size_t i;
DECL_X
DECL_Y
/* Convert count of 128-byte blocks to max index of 64-byte block */
r = r * 2 - 1;
#ifdef PREFETCH
PREFETCH(&Bin2[r], _MM_HINT_T0)
for (i = 0; i < r; i++)
{
PREFETCH(&Bin2[i], _MM_HINT_T0)
}
#endif
XOR_X_2(Bin1out[r], Bin2[r])
DECL_SMASK2REG
i = 0;
r--;
/* Two sub-blocks per iteration; break before the write of the last one. */
do
{
XOR_X_WRITE_XOR_Y_2(Bin2[i], Bin1out[i])
PWXFORM
WRITE_X(Bin1out[i])
XOR_X_WRITE_XOR_Y_2(Bin2[i + 1], Bin1out[i + 1])
PWXFORM
if (unlikely(i >= r))
break;
WRITE_X(Bin1out[i + 1])
i += 2;
} while (1);
i++;
/* Persist the rotated S-box state for the next call. */
ctx->S0 = S0;
ctx->S1 = S1;
ctx->S2 = S2;
ctx->w = w;
/* The final sub-block additionally goes through Salsa20/2. */
SALSA20_2(Bin1out[i])
return INTEGERIFY;
}
/**
* integerify(B, r):
* Return the result of parsing B_{2r-1} as a little-endian integer.
*/
static inline uint32_t integerify(const salsa20_blk_t* B, size_t r)
{
    /*
     * The 64-bit lanes are kept in host byte order, which is why d[0] is
     * read instead of w[0] (w[0] would be wrong on big-endian).  The 32-bit
     * words are SIMD-shuffled, but only the least significant 32 bits are
     * needed here, so the shuffle does not matter.
     */
    const salsa20_blk_t* last = &B[2 * r - 1];
    return (uint32_t)last->d[0];
}
/**
* smix1(B, r, N, flags, V, NROM, VROM, XY, ctx):
* Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 128r+64 bytes in length. N must be even and at least 4.
* The array V must be aligned to a multiple of 64 bytes, and arrays B and XY
* to a multiple of at least 16 bytes.
*/
static void smix1(uint8_t* B, size_t r, uint32_t N, yescrypt_flags_t flags,
salsa20_blk_t* V, uint32_t NROM, const salsa20_blk_t* VROM,
salsa20_blk_t* XY, pwxform_ctx_t* ctx)
{
size_t s = 2 * r;
salsa20_blk_t *X = V, *Y = &V[s];
uint32_t i, j;
/* Decode B (little-endian) and shuffle it into V[0 .. 2r-1]. */
for (i = 0; i < 2 * r; i++)
{
const salsa20_blk_t* src = (salsa20_blk_t*)&B[i * 64];
salsa20_blk_t* tmp = Y;
salsa20_blk_t* dst = &X[i];
size_t k;
for (k = 0; k < 16; k++)
tmp->w[k] = le32dec(&src->w[k]);
salsa20_simd_shuffle(tmp, dst);
}
if (VROM)
{
/* Fill V sequentially while interleaving reads from the ROM and from
 * already-written parts of V; indices come from each blockmix result. */
uint32_t n;
const salsa20_blk_t* V_j;
V_j = &VROM[(NROM - 1) * s];
j = blockmix_xor(X, V_j, Y, r, 1, ctx) & (NROM - 1);
V_j = &VROM[j * s];
X = Y + s;
j = blockmix_xor(Y, V_j, X, r, 1, ctx);
for (n = 2; n < N; n <<= 1)
{
uint32_t m = (n < N / 2) ? n : (N - 1 - n);
for (i = 1; i < m; i += 2)
{
j &= n - 1;
j += i - 1;
V_j = &V[j * s];
Y = X + s;
j = blockmix_xor(X, V_j, Y, r, 0, ctx) & (NROM - 1);
V_j = &VROM[j * s];
X = Y + s;
j = blockmix_xor(Y, V_j, X, r, 1, ctx);
}
}
n >>= 1;
j &= n - 1;
j += N - 2 - n;
V_j = &V[j * s];
Y = X + s;
j = blockmix_xor(X, V_j, Y, r, 0, ctx) & (NROM - 1);
V_j = &VROM[j * s];
/* Final block goes to the caller-provided XY, not into V. */
blockmix_xor(Y, V_j, XY, r, 1, ctx);
}
else if (flags & YESCRYPT_RW)
{
/* YESCRYPT_RW: fill V sequentially, XORing in pseudorandomly chosen
 * earlier V entries as we go. */
uint32_t n;
salsa20_blk_t* V_j;
blockmix(X, Y, r, ctx);
X = Y + s;
blockmix(Y, X, r, ctx);
j = integerify(X, r);
for (n = 2; n < N; n <<= 1)
{
uint32_t m = (n < N / 2) ? n : (N - 1 - n);
for (i = 1; i < m; i += 2)
{
Y = X + s;
j &= n - 1;
j += i - 1;
V_j = &V[j * s];
j = blockmix_xor(X, V_j, Y, r, 0, ctx);
j &= n - 1;
j += i;
V_j = &V[j * s];
X = Y + s;
j = blockmix_xor(Y, V_j, X, r, 0, ctx);
}
}
n >>= 1;
j &= n - 1;
j += N - 2 - n;
V_j = &V[j * s];
Y = X + s;
j = blockmix_xor(X, V_j, Y, r, 0, ctx);
j &= n - 1;
j += N - 1 - n;
V_j = &V[j * s];
/* Final block goes to the caller-provided XY, not into V. */
blockmix_xor(Y, V_j, XY, r, 0, ctx);
}
else
{
/* Classic scrypt: fill V sequentially, two blocks per iteration. */
N -= 2;
do
{
blockmix_salsa8(X, Y, r);
X = Y + s;
blockmix_salsa8(Y, X, r);
Y = X + s;
} while ((N -= 2));
blockmix_salsa8(X, Y, r);
blockmix_salsa8(Y, XY, r);
}
/* Unshuffle and re-encode the final block from XY back into B. */
for (i = 0; i < 2 * r; i++)
{
const salsa20_blk_t* src = &XY[i];
salsa20_blk_t* tmp = &XY[s];
salsa20_blk_t* dst = (salsa20_blk_t*)&B[i * 64];
size_t k;
for (k = 0; k < 16; k++)
le32enc(&tmp->w[k], src->w[k]);
salsa20_simd_unshuffle(tmp, dst);
}
}
/**
* smix2(B, r, N, Nloop, flags, V, NROM, VROM, XY, ctx):
* Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 256r bytes in length. N must be a power of 2 and at
* least 2. Nloop must be even. The array V must be aligned to a multiple of
* 64 bytes, and arrays B and XY to a multiple of at least 16 bytes.
*/
static void smix2(uint8_t* B, size_t r, uint32_t N, uint64_t Nloop,
yescrypt_flags_t flags, salsa20_blk_t* V, uint32_t NROM,
const salsa20_blk_t* VROM, salsa20_blk_t* XY,
pwxform_ctx_t* ctx)
{
size_t s = 2 * r;
salsa20_blk_t *X = XY, *Y = &XY[s];
uint32_t i, j;
if (Nloop == 0)
return;
/* Decode B (little-endian) and shuffle it into X. */
for (i = 0; i < 2 * r; i++)
{
const salsa20_blk_t* src = (salsa20_blk_t*)&B[i * 64];
salsa20_blk_t* tmp = Y;
salsa20_blk_t* dst = &X[i];
size_t k;
for (k = 0; k < 16; k++)
tmp->w[k] = le32dec(&src->w[k]);
salsa20_simd_shuffle(tmp, dst);
}
j = integerify(X, r) & (N - 1);
/*
 * Normally, VROM implies YESCRYPT_RW, but we check for these separately
 * because our SMix resets YESCRYPT_RW for the smix2() calls operating on
 * the entire V when p > 1.
 */
if (VROM && (flags & YESCRYPT_RW))
{
/* Alternate RAM reads (with write-back) and ROM reads; two blockmix
 * calls per loop iteration, hence Nloop -= 2. */
do
{
salsa20_blk_t* V_j = &V[j * s];
const salsa20_blk_t* VROM_j;
j = blockmix_xor_save(X, V_j, r, ctx) & (NROM - 1);
VROM_j = &VROM[j * s];
j = blockmix_xor(X, VROM_j, X, r, 1, ctx) & (N - 1);
} while (Nloop -= 2);
}
else if (VROM)
{
/* Read-only RAM accesses alternated with ROM accesses. */
do
{
const salsa20_blk_t* V_j = &V[j * s];
j = blockmix_xor(X, V_j, X, r, 0, ctx) & (NROM - 1);
V_j = &VROM[j * s];
j = blockmix_xor(X, V_j, X, r, 1, ctx) & (N - 1);
} while (Nloop -= 2);
}
else if (flags & YESCRYPT_RW)
{
/* Pseudorandom reads that also write back into V. */
do
{
salsa20_blk_t* V_j = &V[j * s];
j = blockmix_xor_save(X, V_j, r, ctx) & (N - 1);
V_j = &V[j * s];
j = blockmix_xor_save(X, V_j, r, ctx) & (N - 1);
} while (Nloop -= 2);
}
else if (ctx)
{
/* pwxform mixing, but V is read-only (YESCRYPT_WORM-style). */
do
{
const salsa20_blk_t* V_j = &V[j * s];
j = blockmix_xor(X, V_j, X, r, 0, ctx) & (N - 1);
V_j = &V[j * s];
j = blockmix_xor(X, V_j, X, r, 0, ctx) & (N - 1);
} while (Nloop -= 2);
}
else
{
/* Classic scrypt second loop (Salsa20/8, no pwxform). */
do
{
const salsa20_blk_t* V_j = &V[j * s];
j = blockmix_salsa8_xor(X, V_j, Y, r) & (N - 1);
V_j = &V[j * s];
j = blockmix_salsa8_xor(Y, V_j, X, r) & (N - 1);
} while (Nloop -= 2);
}
/* Unshuffle and re-encode X back into B. */
for (i = 0; i < 2 * r; i++)
{
const salsa20_blk_t* src = &X[i];
salsa20_blk_t* tmp = Y;
salsa20_blk_t* dst = (salsa20_blk_t*)&B[i * 64];
size_t k;
for (k = 0; k < 16; k++)
le32enc(&tmp->w[k], src->w[k]);
salsa20_simd_unshuffle(tmp, dst);
}
}
/**
 * p2floor(x):
 * Largest power of 2 not greater than the argument (0 maps to 0).
 */
static uint64_t p2floor(uint64_t x)
{
    /* Repeatedly clear the lowest set bit; the last nonzero value seen is
     * the highest set bit standing alone. */
    uint64_t prev = x;
    while (x != 0)
    {
        prev = x;
        x &= x - 1; /* clear lowest set bit */
    }
    return prev;
}
/**
* smix(B, r, N, p, t, flags, V, NROM, VROM, XY, S, passwd):
* Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the
* temporary storage V must be 128rN bytes in length; the temporary storage
* XY must be 256r or 256rp bytes in length (the larger size is required with
* OpenMP-enabled builds). N must be a power of 2 and at least 4. The array V
* must be aligned to a multiple of 64 bytes, and arrays B and XY to a multiple
* of at least 16 bytes (aligning them to 64 bytes as well saves cache lines
* and helps avoid false sharing in OpenMP-enabled builds when p > 1, but it
* might also result in cache bank conflicts).
*/
static void smix(uint8_t* B, size_t r, uint32_t N, uint32_t p, uint32_t t,
yescrypt_flags_t flags, salsa20_blk_t* V, uint32_t NROM,
const salsa20_blk_t* VROM, salsa20_blk_t* XY, uint8_t* S,
uint8_t* passwd)
{
size_t s = 2 * r;
uint32_t Nchunk;
uint64_t Nloop_all, Nloop_rw;
uint32_t i;
/* Derive the per-lane chunk size and smix2 loop counts from t and flags. */
Nchunk = N / p;
Nloop_all = Nchunk;
if (flags & YESCRYPT_RW)
{
if (t <= 1)
{
if (t)
Nloop_all *= 2; /* 2/3 */
Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
}
else
{
Nloop_all *= t - 1;
}
}
else if (t)
{
if (t == 1)
Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
Nloop_all *= t;
}
Nloop_rw = 0;
if (flags & YESCRYPT_INIT_SHARED)
Nloop_rw = Nloop_all;
else if (flags & YESCRYPT_RW)
Nloop_rw = Nloop_all / p;
Nchunk &= ~(uint32_t)1; /* round down to even */
Nloop_all++;
Nloop_all &= ~(uint64_t)1; /* round up to even */
Nloop_rw++;
Nloop_rw &= ~(uint64_t)1; /* round up to even */
#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) \
shared(B, r, N, p, flags, V, NROM, VROM, XY, S, passwd, s, Nchunk, \
Nloop_all, Nloop_rw)
{
#pragma omp for
#endif
/* Phase 1: each lane i initializes its own V chunk (smix1) and performs
 * its private pseudorandom reads (smix2 with Nloop_rw). */
for (i = 0; i < p; i++)
{
uint32_t Vchunk = i * Nchunk;
uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
uint8_t* Bp = &B[128 * r * i];
salsa20_blk_t* Vp = &V[Vchunk * s];
#ifdef _OPENMP
salsa20_blk_t* XYp = &XY[i * (2 * s)];
#else
salsa20_blk_t* XYp = XY;
#endif
pwxform_ctx_t* ctx_i = NULL;
if (flags & YESCRYPT_RW)
{
/* Initialize this lane's pwxform S-boxes from Bp via a small
 * scrypt-like smix1, then set up the ctx over that storage. */
uint8_t* Si = S + i * Salloc;
smix1(Bp, 1, Sbytes / 128, 0 /* no flags */, (salsa20_blk_t*)Si,
0, NULL, XYp, NULL);
ctx_i = (pwxform_ctx_t*)(Si + Sbytes);
ctx_i->S2 = Si;
ctx_i->S1 = Si + Sbytes / 3;
ctx_i->S0 = Si + Sbytes / 3 * 2;
ctx_i->w = 0;
if (i == 0)
HMAC_SHA256_Buf(Bp + (128 * r - 64), 64, passwd, 32,
passwd);
}
smix1(Bp, r, Np, flags, Vp, NROM, VROM, XYp, ctx_i);
smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp, NROM, VROM, XYp,
ctx_i);
}
/* Phase 2: remaining reads range over all of V, with RW disabled so the
 * lanes do not write into each other's chunks. */
if (Nloop_all > Nloop_rw)
{
#ifdef _OPENMP
#pragma omp for
#endif
for (i = 0; i < p; i++)
{
uint8_t* Bp = &B[128 * r * i];
#ifdef _OPENMP
salsa20_blk_t* XYp = &XY[i * (2 * s)];
#else
salsa20_blk_t* XYp = XY;
#endif
pwxform_ctx_t* ctx_i = NULL;
if (flags & YESCRYPT_RW)
{
uint8_t* Si = S + i * Salloc;
ctx_i = (pwxform_ctx_t*)(Si + Sbytes);
}
smix2(Bp, r, N, Nloop_all - Nloop_rw, flags & ~YESCRYPT_RW, V,
NROM, VROM, XYp, ctx_i);
}
}
#ifdef _OPENMP
}
#endif
}
/**
 * yescrypt_kdf_body(shared, local, passwd, passwdlen, salt, saltlen,
 * flags, N, r, p, t, NROM, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen), or a revision of scrypt as requested by flags and shared, and
 * write the result into buf.
 *
 * shared and flags may request special modes as described in yescrypt.h.
 *
 * local is the thread-local data structure, allowing to preserve and reuse a
 * memory allocation across calls, thereby reducing its overhead.
 *
 * t controls computation time while not affecting peak memory usage.
 *
 * Return 0 on success; or -1 on error.  -2 and -3 are "expected failures"
 * returned for YESCRYPT_ALLOC_ONLY probing (see below).
 *
 * This optimized implementation currently limits N to the range from 4 to
 * 2^31, but other implementations might not.
 */
static int yescrypt_kdf_body(const yescrypt_shared_t* shared,
                             yescrypt_local_t* local, const uint8_t* passwd,
                             size_t passwdlen, const uint8_t* salt,
                             size_t saltlen, yescrypt_flags_t flags, uint64_t N,
                             uint32_t r, uint32_t p, uint32_t t, uint64_t NROM,
                             uint8_t* buf, size_t buflen)
{
	yescrypt_region_t tmp;
	const salsa20_blk_t* VROM;
	size_t B_size, V_size, XY_size, need;
	uint8_t *B, *S;
	salsa20_blk_t *V, *XY;
	uint8_t sha256[32];
	uint8_t dk[sizeof(sha256)], *dkp = buf;

	/* Sanity-check parameters.  Each mode permits only its own flag set. */
	switch (flags & YESCRYPT_MODE_MASK)
	{
	case 0: /* classic scrypt - can't have anything non-standard */
		if (flags || t || NROM)
			goto out_EINVAL;
		break;
	case YESCRYPT_WORM:
		if (flags != YESCRYPT_WORM || NROM)
			goto out_EINVAL;
		break;
	case YESCRYPT_RW:
		if (flags != (flags & YESCRYPT_KNOWN_FLAGS))
			goto out_EINVAL;
#if PWXsimple == 2 && PWXgather == 4 && Sbytes == 12288
		/* Only the reference pwxform flavor is compiled in. */
		if ((flags & YESCRYPT_RW_FLAVOR_MASK) ==
		    (YESCRYPT_ROUNDS_6 | YESCRYPT_GATHER_4 | YESCRYPT_SIMPLE_2 |
		     YESCRYPT_SBOX_12K))
			break;
#else
#error "Unsupported pwxform settings"
#endif
		/* FALLTHRU */
	default:
		goto out_EINVAL;
	}
#if SIZE_MAX > UINT32_MAX
	/* Output length limit imposed by 32-bit PBKDF2 block counter. */
	if (buflen > (((uint64_t)1 << 32) - 1) * 32)
		goto out_EINVAL;
#endif
	if ((uint64_t)r * (uint64_t)p >= 1 << 30)
		goto out_EINVAL;
	if (N > UINT32_MAX)
		goto out_EINVAL;
	/* N must be a power of 2 greater than 3 (so at least 4). */
	if ((N & (N - 1)) != 0 || N <= 3 || r < 1 || p < 1)
		goto out_EINVAL;
	if (r > SIZE_MAX / 256 / p || N > SIZE_MAX / 128 / r)
		goto out_EINVAL;
	if (flags & YESCRYPT_RW)
	{
		/* In RW mode V is split into p chunks of N/p blocks each. */
		if (N / p <= 3 || p > SIZE_MAX / Salloc)
			goto out_EINVAL;
	}
#ifdef _OPENMP
	else if (N > SIZE_MAX / 128 / (r * p))
	{
		goto out_EINVAL;
	}
#endif
	VROM = NULL;
	if (shared)
	{
		uint64_t expected_size = (size_t)128 * r * NROM;
		if ((NROM & (NROM - 1)) != 0 || NROM <= 1 || NROM > UINT32_MAX ||
		    shared->aligned_size < expected_size)
			goto out_EINVAL;
		if (!(flags & YESCRYPT_INIT_SHARED))
		{
			/* The 48-byte tag at the end of the ROM marks it as initialized. */
			uint64_t* tag =
			    (uint64_t*)((uint8_t*)shared->aligned + expected_size - 48);
			if (tag[0] != YESCRYPT_ROM_TAG1 || tag[1] != YESCRYPT_ROM_TAG2)
				goto out_EINVAL;
		}
		VROM = shared->aligned;
	}
	else
	{
		if (NROM)
			goto out_EINVAL;
	}

	/* Allocate memory.  "need" accumulates the total size; every addition
	 * is followed by a wraparound check. */
	V = NULL;
	V_size = (size_t)128 * r * N;
#ifdef _OPENMP
	if (!(flags & YESCRYPT_RW))
		V_size *= p;
#endif
	need = V_size;
	if (flags & YESCRYPT_INIT_SHARED)
	{
		/* ROM initialization: V lives in the caller-provided region. */
		if (local->aligned_size < need)
		{
			if (local->base || local->aligned || local->base_size ||
			    local->aligned_size)
				goto out_EINVAL;
			if (!alloc_region(local, need))
				return -1;
		}
		if (flags & YESCRYPT_ALLOC_ONLY)
			return -2; /* expected "failure" */
		V = (salsa20_blk_t*)local->aligned;
		need = 0;
	}
	B_size = (size_t)128 * r * p;
	need += B_size;
	if (need < B_size)
		goto out_EINVAL;
	XY_size = (size_t)256 * r;
#ifdef _OPENMP
	XY_size *= p;
#endif
	need += XY_size;
	if (need < XY_size)
		goto out_EINVAL;
	if (flags & YESCRYPT_RW)
	{
		/* Per-thread pwxform S-boxes. */
		size_t S_size = (size_t)Salloc * p;
		need += S_size;
		if (need < S_size)
			goto out_EINVAL;
	}
	if (flags & YESCRYPT_INIT_SHARED)
	{
		if (!alloc_region(&tmp, need))
			return -1;
		B = (uint8_t*)tmp.aligned;
		XY = (salsa20_blk_t*)((uint8_t*)B + B_size);
	}
	else
	{
		init_region(&tmp);
		if (local->aligned_size < need)
		{
			if (free_region(local))
				return -1;
			if (!alloc_region(local, need))
				return -1;
		}
		if (flags & YESCRYPT_ALLOC_ONLY)
			return -3; /* expected "failure" */
		/* Carve B, V, XY out of the single local allocation. */
		B = (uint8_t*)local->aligned;
		V = (salsa20_blk_t*)((uint8_t*)B + B_size);
		XY = (salsa20_blk_t*)((uint8_t*)V + V_size);
	}
	S = NULL;
	if (flags & YESCRYPT_RW)
		S = (uint8_t*)XY + XY_size;

	/* Any non-classic mode prehashes the password ("yescrypt" is the first
	 * 8 bytes of the personalization string, "yescrypt-prehash" all 16). */
	if (flags)
	{
		HMAC_SHA256_Buf("yescrypt-prehash", (flags & YESCRYPT_PREHASH) ? 16 : 8,
		                passwd, passwdlen, sha256);
		passwd = sha256;
		passwdlen = sizeof(sha256);
	}

	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
	PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);
	if (flags)
		memcpy(sha256, B, sizeof(sha256));

	if (p == 1 || (flags & YESCRYPT_RW))
	{
		smix(B, r, N, p, t, flags, V, NROM, VROM, XY, S, sha256);
	}
	else
	{
		/* Classic scrypt / WORM: the p lanes are fully independent. */
		uint32_t i;
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) \
    shared(B, r, N, p, t, flags, V, NROM, VROM, XY, S)
#endif
		for (i = 0; i < p; i++)
		{
#ifdef _OPENMP
			smix(&B[(size_t)128 * r * i], r, N, 1, t, flags,
			     &V[(size_t)2 * r * i * N], NROM, VROM, &XY[(size_t)4 * r * i],
			     NULL, NULL);
#else
			smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V, NROM, VROM, XY,
			     NULL, NULL);
#endif
		}
	}
	dkp = buf;
	/* If the requested output is shorter than 32 bytes, derive a full
	 * 32-byte dk anyway: the SCRAM-style finalization below needs it. */
	if (flags && buflen < sizeof(dk))
	{
		PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, dk, sizeof(dk));
		dkp = dk;
	}

	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
	PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);

	/*
	 * Except when computing classic scrypt, allow all computation so far
	 * to be performed on the client.  The final steps below match those of
	 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
	 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
	 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
	 */
	if (flags && !(flags & YESCRYPT_PREHASH))
	{
		/* Compute ClientKey */
		HMAC_SHA256_Buf(dkp, sizeof(dk), "Client Key", 10, sha256);
		/* Compute StoredKey */
		{
			size_t clen = buflen;
			if (clen > sizeof(dk))
				clen = sizeof(dk);
			SHA256_Buf(sha256, sizeof(sha256), dk);
			memcpy(buf, dk, clen);
		}
	}

	if (flags)
	{
		/* Scrub key material from the stack. */
		insecure_memzero(sha256, sizeof(sha256));
		insecure_memzero(dk, sizeof(dk));
	}

	if (free_region(&tmp))
	{
		insecure_memzero(buf, buflen); /* must preserve errno */
		return -1;
	}

	/* Success! */
	return 0;

out_EINVAL:
	errno = EINVAL;
	return -1;
}
/**
 * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen, params,
 * buf, buflen):
 * Compute scrypt or its revision as requested by the parameters.  The inputs
 * to this function are the same as those for yescrypt_kdf_body() above, with
 * the addition of g, which controls hash upgrades (0 for no upgrades so far).
 *
 * Return 0 on success; or -1 on error (with errno set).
 */
int yescrypt_kdf(const yescrypt_shared_t* shared, yescrypt_local_t* local,
                 const uint8_t* passwd, size_t passwdlen, const uint8_t* salt,
                 size_t saltlen, const yescrypt_params_t* params, uint8_t* buf,
                 size_t buflen)
{
	yescrypt_flags_t flags = params->flags;
	uint64_t N = params->N;
	uint32_t r = params->r;
	uint32_t p = params->p;
	uint32_t t = params->t;
	uint32_t g = params->g;
	uint64_t NROM = params->NROM;
	uint8_t dk[32];
	int retval;

	/* Support for hash upgrades has been temporarily removed */
	if (g)
	{
		errno = EINVAL;
		return -1;
	}

	/* For large enough RW computations, first run a quick low-memory
	 * "prehash" pass at N/64 so that a low-memory attacker cannot skip
	 * straight to the cheap final PBKDF2 steps. */
	if ((flags & (YESCRYPT_RW | YESCRYPT_INIT_SHARED)) == YESCRYPT_RW &&
	    p >= 1 && N / p >= 0x100 && N / p * r >= 0x20000)
	{
		/* Probe that the full-size allocation would succeed before
		 * spending time on the prehash (-3 is the expected result). */
		if (yescrypt_kdf_body(shared, local, passwd, passwdlen, salt, saltlen,
		                      flags | YESCRYPT_ALLOC_ONLY, N, r, p, t, NROM,
		                      buf, buflen) != -3)
		{
			errno = EINVAL;
			return -1;
		}
		if ((retval = yescrypt_kdf_body(shared, local, passwd, passwdlen, salt,
		                                saltlen, flags | YESCRYPT_PREHASH,
		                                N >> 6, r, p, 0, NROM, dk, sizeof(dk))))
			return retval;
		/* The prehash output becomes the password for the main pass. */
		passwd = dk;
		passwdlen = sizeof(dk);
	}

	retval = yescrypt_kdf_body(shared, local, passwd, passwdlen, salt, saltlen,
	                           flags, N, r, p, t, NROM, buf, buflen);
#ifndef SKIP_MEMZERO
	if (passwd == dk)
		insecure_memzero(dk, sizeof(dk));
#endif
	return retval;
}
/*
 * yescrypt_init_shared(shared, seed, seedlen, params):
 * Build (or rebuild) the shared ROM from a seed.  The ROM is filled in two
 * halves, each keyed by the output of the previous step, and finally stamped
 * with a 48-byte tag (two magic words plus a 32-byte digest of the content).
 * Return 0 on success, -1 on error.
 */
int yescrypt_init_shared(yescrypt_shared_t* shared, const uint8_t* seed,
                         size_t seedlen, const yescrypt_params_t* params)
{
	yescrypt_params_t subparams;
	yescrypt_shared_t half1, half2;
	uint8_t salt[32];
	uint64_t* tag;

	subparams = *params;
	subparams.flags |= YESCRYPT_INIT_SHARED;
	subparams.N = params->NROM; /* ROM size doubles as the cost parameter */
	subparams.NROM = 0;

	/* ROM construction requires RW mode and no regular N / upgrade count. */
	if (!(params->flags & YESCRYPT_RW) || params->N || params->g)
		return -1;
	if (params->flags & YESCRYPT_SHARED_PREALLOCATED)
	{
		if (!shared->aligned || !shared->aligned_size)
			return -1;

		/* Overwrite a possible old ROM tag before we overwrite the rest */
		tag =
		    (uint64_t*)((uint8_t*)shared->aligned + shared->aligned_size - 48);
		memset(tag, 0, 48);
	}
	else
	{
		init_region(shared);
		subparams.flags |= YESCRYPT_ALLOC_ONLY;
		/* -2 is the expected ALLOC_ONLY "failure" meaning the region was
		 * successfully sized and allocated. */
		if (yescrypt_kdf(NULL, shared, NULL, 0, NULL, 0, &subparams, NULL, 0) !=
		        -2 ||
		    !shared->aligned)
			return -1;
		subparams.flags -= YESCRYPT_ALLOC_ONLY;
	}

	subparams.N /= 2;

	/* Split the region in two; fill the first half from the seed, then the
	 * second half keyed on the first, then re-key the first on the second. */
	half1 = *shared;
	half1.aligned_size /= 2;
	half2 = half1;
	half2.aligned = (uint8_t*)half2.aligned + half1.aligned_size;

	if (yescrypt_kdf(NULL, &half1, seed, seedlen, (uint8_t*)"yescrypt-ROMhash",
	                 16, &subparams, salt, sizeof(salt)))
		goto fail;

	subparams.NROM = subparams.N;

	if (yescrypt_kdf(&half1, &half2, seed, seedlen, salt, sizeof(salt),
	                 &subparams, salt, sizeof(salt)))
		goto fail;
	if (yescrypt_kdf(&half2, &half1, seed, seedlen, salt, sizeof(salt),
	                 &subparams, salt, sizeof(salt)))
		goto fail;

	/* Stamp the trailing tag: magic plus the final 32-byte digest. */
	tag = (uint64_t*)((uint8_t*)shared->aligned + shared->aligned_size - 48);
	tag[0] = YESCRYPT_ROM_TAG1;
	tag[1] = YESCRYPT_ROM_TAG2;
	tag[2] = le64dec(salt);
	tag[3] = le64dec(salt + 8);
	tag[4] = le64dec(salt + 16);
	tag[5] = le64dec(salt + 24);

	insecure_memzero(salt, sizeof(salt));
	return 0;

fail:
	insecure_memzero(salt, sizeof(salt));
	if (!(params->flags & YESCRYPT_SHARED_PREALLOCATED))
		free_region(shared);
	return -1;
}
/*
 * yescrypt_digest_shared(shared):
 * Extract the 32-byte content digest from an initialized ROM's trailing tag.
 * Returns a pointer to static storage, or NULL if the region is too small or
 * does not carry a valid ROM tag.
 */
yescrypt_binary_t* yescrypt_digest_shared(yescrypt_shared_t* shared)
{
	static yescrypt_binary_t digest;
	const uint64_t* tag;
	int i;

	if (shared->aligned_size < 48)
		return NULL;

	tag = (const uint64_t*)((uint8_t*)shared->aligned +
	                        shared->aligned_size - 48);
	if (tag[0] != YESCRYPT_ROM_TAG1 || tag[1] != YESCRYPT_ROM_TAG2)
		return NULL;

	/* tag[2..5] hold the digest as four little-endian 64-bit words. */
	for (i = 0; i < 4; i++)
		le64enc(digest.uc + 8 * i, tag[2 + i]);

	return &digest;
}
/* Release a ROM region.  Returns 0 on success, -1 on error. */
int yescrypt_free_shared(yescrypt_shared_t* shared)
{
	return free_region(shared);
}
/* Initialize a thread-local region to the empty state.  Always succeeds. */
int yescrypt_init_local(yescrypt_local_t* local)
{
	init_region(local);
	return 0;
}
/* Release a thread-local region.  Returns 0 on success, -1 on error. */
int yescrypt_free_local(yescrypt_local_t* local)
{
	return free_region(local);
}
#endif /* INCLUDE_yescrypt || INCLUDE_scrypt || INCLUDE_gost_yescrypt */
|
hermm_c_dia_n_lo_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include <stdlib.h>
/*
 * Hermitian matrix * dense matrix product, DIA storage, lower triangle,
 * row-major dense operands: y := alpha * A * x + beta * y.
 * Only diagonals with distance d <= 0 are visited, consistent with
 * lower-triangular storage; each strictly-lower element also contributes
 * its conjugate-transposed mirror (via alpha_mul_2c).
 */
alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Complex *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Complex beta, ALPHA_Complex *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* Pass 1: scale the whole output by beta. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT r = 0; r < mat->rows; r++)
        for(ALPHA_INT c = 0; c < columns; c++)
            alpha_mul(y[index2(r,c,ldy)],y[index2(r,c,ldy)],beta);

    /* Pass 2: each thread owns a contiguous slice [bcl, bch) of the dense
     * columns, so the y updates below are race-free. */
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT bcl = cross_block_low(tid,num_threads,columns);
        ALPHA_INT bch = cross_block_high(tid,num_threads,columns);
        for(ALPHA_INT di = 0; di < mat->ndiag;++di){
            ALPHA_INT d = mat->distance[di];
            if(d < 0){
                /* Strictly-lower diagonal: apply both the stored element
                 * (val) and its conjugate mirror (val_c). */
                ALPHA_INT ars = alpha_max(0,-d);
                ALPHA_INT acs = alpha_max(0,d);
                ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs);
                for(ALPHA_INT i = 0; i < an; ++i){
                    ALPHA_INT ar = ars + i;
                    ALPHA_INT ac = acs + i;
                    ALPHA_Complex val,val_c;
                    alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha);
                    /* alpha_mul_2c multiplies by the conjugated element. */
                    alpha_mul_2c(val_c,mat->values[index2(di,ar,mat->lval)],alpha);
                    for(ALPHA_INT bc = bcl;bc < bch;++bc){
                        alpha_madde(y[index2(ar,bc,ldy)],val,x[index2(ac,bc,ldx)]);
                        alpha_madde(y[index2(ac,bc,ldy)],val_c,x[index2(ar,bc,ldx)]);
                    }
                }
            }
            if(d == 0){
                /* Main diagonal: applied once, no conjugate mirror.
                 * NOTE(review): assumes diagonal entries of the Hermitian
                 * matrix have zero imaginary part - confirm upstream. */
                for(ALPHA_INT r = 0; r < mat->rows; ++r){
                    /* NOTE(review): ALPHA_Number here vs ALPHA_Complex
                     * elsewhere - presumably aliases in complex builds;
                     * verify against alphasparse type definitions. */
                    ALPHA_Number val;
                    alpha_mul(val,mat->values[index2(di,r,mat->lval)],alpha);
                    for(ALPHA_INT bc = bcl;bc < bch;++bc){
                        alpha_madde(y[index2(r,bc,ldy)],val,x[index2(r,bc,ldx)]);
                    }
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
test3_1.c | /*
* test3_0.c and test3_1.c are not equivalent because
* they have different private list for the parallel construct.
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main (int argc, char *argv[]) {
    /* NOTE: tid is deliberately left SHARED across the parallel region
     * (only nthreads is private), so all threads race on it.  This is the
     * intentional difference from test3_0.c described in the file header -
     * do not "fix" the private list without defeating the example. */
    int nthreads, tid;

#pragma omp parallel private(nthreads)
    {
        tid = omp_get_thread_num();
        printf("Hello World from thread = %d\n", tid);
        /* Because of the race on tid, this block may run zero or several
         * times depending on thread scheduling. */
        if (tid == 0) {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
    }
    exit(0);
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute RESULT = X - Y for two `struct timeval` values.
 * Y is normalized in place (as in the classic GNU libc example) so that the
 * microsecond difference ends up non-negative.
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds from y when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec) {
        int carry_sec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += carry_sec;
        y->tv_usec -= 1000000 * carry_sec;
    }
    /* Push excess whole seconds of microseconds back into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int excess_sec = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= excess_sec;
        y->tv_usec += 1000000 * excess_sec;
    }

    /* After normalization tv_usec is certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 24;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
krb5_db_fmt_plug.c | /*
* KRB5 - Enctype 18 (aes256-cts-hmac-sha1-96) cracker patch for JtR
* Created on August of 2012 by Mougey Camille (CEA/DAM) & Lalet Pierre (CEA/DAM)
*
* This format is one of formats saved in KDC database and used during the authentication part.
*
* This software is Copyright (c) 2012, Mougey Camille (CEA/DAM), and Lalet
* Pierre (CEA/DAM) and it is hereby released to the general public under the
* following terms:
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Input Format :
* - user:$krb18$REALMname$hash
* - user:REALMname$hash
*
* Format rewritten Dec, 2014, without use of -lkrb5, by JimF. Now we use 'native' JtR
* pbkdf2-hmac-sha1() and simple call to 2 AES limb encrypt for entire process. Very
 * simple, and 10x faster, and no obscure -lkrb5 dependency.
*
* Added support for etype 17 and etype 2/3 in October, 2017 by Dhiru Kholia.
*
* Note: Both etype 2 and 3 share the same hashing scheme!
*/
#if AC_BUILT
#include "autoconfig.h"
#endif
#if FMT_EXTERNS_H
extern struct fmt_main fmt_krb5_18;
extern struct fmt_main fmt_krb5_17;
extern struct fmt_main fmt_krb5_3;
#elif FMT_REGISTERS_H
john_register_one(&fmt_krb5_18);
john_register_one(&fmt_krb5_17);
john_register_one(&fmt_krb5_3);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "simd-intrinsics.h"
#include "pbkdf2_hmac_sha1.h"
#include "aes.h"
#include "krb5_common.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 32
#endif
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "krb5-18"
#define FORMAT_LABEL_17 "krb5-17"
#define FORMAT_LABEL_3 "krb5-3"
#define FORMAT_NAME "Kerberos 5 DB etype 18"
#define FORMAT_NAME_17 "Kerberos 5 DB etype 17"
#define FORMAT_NAME_3 "Kerberos 5 DB etype 3"
#define FORMAT_TAG_18 "$krb18$"
#define FORMAT_TAG_17 "$krb17$"
#define FORMAT_TAG_3 "$krb3$"
#define TAG_LENGTH_18 (sizeof(FORMAT_TAG_18)-1)
#define TAG_LENGTH_17 (sizeof(FORMAT_TAG_17)-1)
#define TAG_LENGTH_3 (sizeof(FORMAT_TAG_3)-1)
#if SIMD_COEF_32
#define ALGORITHM_NAME "DES / PBKDF2-SHA1 " SHA1_ALGORITHM_NAME " AES"
#else
#define ALGORITHM_NAME "DES / PBKDF2-SHA1 32/" ARCH_BITS_STR " AES"
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH_18 64
#define CIPHERTEXT_LENGTH_17 32
#define CIPHERTEXT_LENGTH_3 16
#define BINARY_SIZE_18 32
#define BINARY_SIZE_17 16
#define BINARY_SIZE_3 8
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#define MAX_SALT_SIZE 128
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests kinit_tests_18[] = {
{"OLYMPE.OLtest$214bb89cf5b8330112d52189ab05d9d05b03b5a961fe6d06203335ad5f339b26", "password"},
{FORMAT_TAG_18 "OLYMPE.OLtest$214bb89cf5b8330112d52189ab05d9d05b03b5a961fe6d06203335ad5f339b26", "password"},
{NULL}
};
static struct fmt_tests kinit_tests_17[] = {
// bare hashes are not supported for etype 17
{FORMAT_TAG_17 "TEST.LOCALtest$6fb8b78e20ad3df6591cabb9cacf4594", "password"},
{FORMAT_TAG_17 "TEST.LOCALtest$b7dc1cf2b403cf5f27ea9b2ea526dc5a", "password@123"},
{NULL}
};
static struct fmt_tests kinit_tests_3[] = {
{FORMAT_TAG_3 "INTERNAL.CORP1user3$eafdc79b7620584a", "password"},
{FORMAT_TAG_3 "EXAMPLE.COMlulu$25bfb33132c11346", "password"},
{FORMAT_TAG_3 "EXAMPLE.COMluluaaaa$97076894ae025738", "password"},
{"$krb3$EXAMPLE.COMluluaaaa$79850e6e9e5e92d0", "password@123"},
// etype 2 hash
{"$krb3$EXAMPLE.COMluluaaaa$cbb5616879c26df8", "12345678"},
{NULL},
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[8];
static struct custom_salt {
uint32_t etype;
char saved_salt[MAX_SALT_SIZE+1]; // XXX is this enough?
} *cur_salt;
/* Allocate per-candidate key and result buffers, scaling the batch size by
 * the available OpenMP threads (times OMP_SCALE for the maximum). */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
}
/* Release the buffers allocated by init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/*
 * Shared syntax check: ciphertext+pos must contain "salt$hexhash" where the
 * hex digest length matches one of the three supported etypes.
 * pos is the length of an already-verified format tag (0 for bare hashes).
 * Returns 1 if valid, 0 otherwise.
 */
static int valid(char* ciphertext, int pos)
{
	char *p, *q;

	p = ciphertext + pos;
	p = strchr(p, '$'); /* single-char search; strstr was overkill */
	if (p == NULL)
		return 0;
	q = ciphertext;
	/* Salt length check (measured from the start of the string, i.e.
	 * including the tag, so it is slightly conservative). */
	if (p - q > MAX_SALT_SIZE)
		return 0;
	q = ++p;
	/* The remainder must be pure lowercase/uppercase hex... */
	while (atoi16l[ARCH_INDEX(*q)] != 0x7F) {
		q++;
	}
	/* ...and exactly as long as one of the supported digest sizes. */
	return !*q && (q - p == CIPHERTEXT_LENGTH_18 || q - p == CIPHERTEXT_LENGTH_17 || q - p == CIPHERTEXT_LENGTH_3);
}
/* etype 18 accepts both tagged and bare (untagged) hashes. */
static int valid_18(char* ciphertext, struct fmt_main *self)
{
	if (!strncmp(ciphertext, FORMAT_TAG_18, TAG_LENGTH_18))
		return valid(ciphertext, TAG_LENGTH_18);
	else
		return valid(ciphertext, 0);
}
/* etype 17 requires the $krb17$ tag (no bare hashes). */
static int valid_17(char* ciphertext, struct fmt_main *self)
{
	if (strncmp(ciphertext, FORMAT_TAG_17, TAG_LENGTH_17))
		return 0;
	return valid(ciphertext, TAG_LENGTH_17);
}
/* etype 2/3 requires the $krb3$ tag (no bare hashes). */
static int valid_3(char* ciphertext, struct fmt_main *self)
{
	if (strncmp(ciphertext, FORMAT_TAG_3, TAG_LENGTH_3))
		return 0;
	return valid(ciphertext, TAG_LENGTH_3);
}
// Canonicalize a hash: already-tagged hashes pass through unchanged; a bare
// hash (only supported for etype 18) gets the $krb18$ tag prepended.
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH_18 + CIPHERTEXT_LENGTH_18 + SALT_SIZE + 1];

	if (!strncmp(ciphertext, FORMAT_TAG_18, TAG_LENGTH_18) || !strncmp(ciphertext, FORMAT_TAG_17, TAG_LENGTH_17) || !strncmp(ciphertext, FORMAT_TAG_3, TAG_LENGTH_3))
		return ciphertext;

	memcpy(out, FORMAT_TAG_18, TAG_LENGTH_18);
	strnzcpyn(out + TAG_LENGTH_18, ciphertext, CIPHERTEXT_LENGTH_18 + SALT_SIZE + 1);
	return out;
}
/*
 * Parse the salt ("REALMprincipal" text before the '$') and the etype out of
 * a canonical ciphertext.  Returns a pointer to a static custom_salt.
 * The copy length is clamped to MAX_SALT_SIZE so an over-long salt can never
 * overflow saved_salt (valid() should already reject such input).
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p, *q;
	size_t len;

	memset(&cs, 0, SALT_SIZE);
	if (!strncmp(ciphertext, FORMAT_TAG_18, TAG_LENGTH_18)) {
		cs.etype = 18;
		p = ciphertext + TAG_LENGTH_18;
	} else if (!strncmp(ciphertext, FORMAT_TAG_17, TAG_LENGTH_17)) {
		cs.etype = 17;
		p = ciphertext + TAG_LENGTH_17;
	} else {
		cs.etype = 3;
		p = ciphertext + TAG_LENGTH_3;
	}
	q = strchr(p, '$'); /* guaranteed present after valid()/split() */
	len = (size_t)(q - p);
	if (len > MAX_SALT_SIZE)
		len = MAX_SALT_SIZE;
	memcpy(cs.saved_salt, p, len); /* cs was zeroed: NUL-terminated */
	return (void*)&cs;
}
/* Install the salt chosen by the cracker core for subsequent crypt_all(). */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
static void *get_binary(char *ciphertext)
{
static unsigned char *out;
char *p;
int i = 0;
unsigned int binary_size = 0;
p = ciphertext;
if (!strncmp(ciphertext, FORMAT_TAG_18, TAG_LENGTH_18)) {
binary_size = 32;
p = ciphertext + TAG_LENGTH_18;
} else if (!strncmp(ciphertext, FORMAT_TAG_17, TAG_LENGTH_17)) {
binary_size = 16;
p = ciphertext + TAG_LENGTH_17;
} else {
binary_size = 8;
p = ciphertext + TAG_LENGTH_3;
}
if (!out) out = mem_alloc_tiny(binary_size, MEM_ALIGN_WORD);
p = strstr(p, "$") + 1;
for (; i < binary_size; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
/*
 * Hash all queued candidate passwords against cur_salt.
 * etype 17/18: key = PBKDF2-HMAC-SHA1(password, salt, 4096) of 16/32 bytes,
 * then two AES-ECB limbs over the "kerberos" nfold constant (RFC 3962
 * key-derivation, DK with the "kerberos" constant).
 * etype 2/3: DES string-to-key.
 * Returns the number of candidates processed.
 */
static int crypt_all(int *pcount, struct db_salt *_salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	/* Without OpenMP and with MAX_KEYS_PER_CRYPT == 1 the loop header is
	 * compiled out and the braced body runs exactly once for index 0. */
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		/* i also serves as the SIMD-lane counter; unsigned char is wide
		 * enough since SSE_GROUP_SZ_SHA1 / MAX_KEYS_PER_CRYPT are small. */
		unsigned char key[32], i;
		AES_KEY aeskey;
		int key_size;

		if (cur_salt->etype == 18 || cur_salt->etype == 17) {
#ifdef SSE_GROUP_SZ_SHA1
			/* SIMD path: derive SSE_GROUP_SZ_SHA1 keys at once. */
			uint32_t Key[SSE_GROUP_SZ_SHA1][32/4];
			int lens[SSE_GROUP_SZ_SHA1];
			unsigned char *pin[SSE_GROUP_SZ_SHA1];
			union {
				uint32_t *pout[SSE_GROUP_SZ_SHA1];
				unsigned char *poutc;
			} x;
			for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
				lens[i] = strlen(saved_key[index+i]);
				pin[i] = (unsigned char*)saved_key[index+i];
				x.pout[i] = Key[i];
			}
			if (cur_salt->etype == 18) {
				key_size = 32;
			} else {
				key_size = 16;
			}
			pbkdf2_sha1_sse((const unsigned char **)pin, lens, (const unsigned char*)cur_salt->saved_salt, strlen(cur_salt->saved_salt), 4096, &(x.poutc), key_size, 0);
#else
			/* Scalar path: one key per iteration. */
			if (cur_salt->etype == 18) {
				key_size = 32;
			} else {
				key_size = 16;
			}
			pbkdf2_sha1((const unsigned char*)saved_key[index], strlen(saved_key[index]), (const unsigned char*)cur_salt->saved_salt, strlen(cur_salt->saved_salt), 4096, key, key_size, 0);
#endif
			i = 0;
#ifdef SSE_GROUP_SZ_SHA1
			for (; i < SSE_GROUP_SZ_SHA1; ++i) {
				memcpy(key, Key[i], key_size);
#endif
			AES_set_encrypt_key(key, key_size * 8, &aeskey);
			AES_encrypt((unsigned char*)"kerberos{\x9b[+\x93\x13+\x93", (unsigned char*)(crypt_out[index+i]), &aeskey); // the weird constant string comes from "nfold" function
			AES_encrypt((unsigned char*)(crypt_out[index+i]), (unsigned char*)&crypt_out[index+i][4], &aeskey);
#ifdef SSE_GROUP_SZ_SHA1
			}
#endif
		} else if (cur_salt->etype == 3) {
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				des_string_to_key_shishi(saved_key[index+i], strlen(saved_key[index+i]), cur_salt->saved_salt, strlen(cur_salt->saved_salt), (unsigned char*)(crypt_out[index+i]));
			}
		}
	}
	return count;
}
/* Quick scan: does ANY computed hash match the first 32 bits of binary? */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	/* With neither OpenMP nor batching, only index 0 can hold a result. */
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (crypt_out[index][0] == *(uint32_t*)binary)
			return 1;
	return 0;
}
/* Full comparison for one candidate; BINARY_SIZE_3 (8 bytes) is the common
 * prefix length shared by all three etypes' binaries. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE_3);
}
/* No further verification needed beyond cmp_one(). */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate password (NUL-terminated, truncated to the maximum). */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
/* Return the stored candidate password for reporting. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* JtR format descriptor: Kerberos 5 DB etype 18 (aes256-cts-hmac-sha1-96). */
struct fmt_main fmt_krb5_18 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE_18,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG_18 },
		kinit_tests_18
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_18,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact,
	}
};
/* JtR format descriptor: Kerberos 5 DB etype 17 (aes128-cts-hmac-sha1-96). */
struct fmt_main fmt_krb5_17 = {
	{
		FORMAT_LABEL_17,
		FORMAT_NAME_17,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE_17,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG_17 },
		kinit_tests_17
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_17,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact,
	}
};
/* JtR format descriptor: Kerberos 5 DB etype 2/3 (DES string-to-key). */
struct fmt_main fmt_krb5_3 = {
	{
		FORMAT_LABEL_3,
		FORMAT_NAME_3,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE_3,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG_3 },
		kinit_tests_3
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_3,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact,
	}
};
#endif /* plugin stanza */
|
GB_unaryop__lnot_bool_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_bool_fp64
// op(A') function: GB_tran__lnot_bool_fp64
// C type: bool
// A type: double
// cast: bool cij = (bool) aij
// unaryop: cij = !aij
#define GB_ATYPE \
double
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = op (cast (Ax)): apply cij = !(bool) aij elementwise over anz entries,
 * statically parallelized over nthreads.  Returns GrB_NO_VALUE when this
 * operator/type combination is compiled out via GB_DISABLE. */
GrB_Info GB_unop__lnot_bool_fp64
(
    bool *restrict Cx,          // output array, cast+negated copy of Ax
    const double *restrict Ax,  // input array
    int64_t anz,                // number of entries
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   // Cx [p] = !(bool) Ax [p]
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose, typecast double->bool, and apply the
 * logical-not operator.  The actual loop lives in the shared template
 * GB_unaryop_transpose.c, specialized here via the GB_* macros above. */
GrB_Info GB_tran__lnot_bool_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // precomputed row counts (phase 1)
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
BFSFriends.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.4 -------------------------------------------------*/
/* date: 1/17/2014 ---------------------------------------------*/
/* authors: Aydin Buluc (abuluc@lbl.gov), Adam Lugowski --------*/
/****************************************************************/
/*
Copyright (c) 2010-2014, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _BFS_FRIENDS_H_
#define _BFS_FRIENDS_H_
#include "mpi.h"
#include <iostream>
#include "SpParMat.h"
#include "SpParHelper.h"
#include "MPIType.h"
#include "Friends.h"
#include "OptBuf.h"
#include "ParFriends.h"
#include "BitMap.h"
#include "BitMapCarousel.h"
#include "BitMapFringe.h"
namespace combblas {
template <class IT, class NT, class DER>
class SpParMat;
/*************************************************************************************************/
/*********************** FRIEND FUNCTIONS FOR BFS ONLY (NO SEMIRINGS) RUNS **********************/
/***************************** BOTH PARALLEL AND SEQUENTIAL FUNCTIONS ****************************/
/*************************************************************************************************/
/**
 * Multithreaded SpMV with a sparse input vector and pre-allocated output buffers.
 * The outgoing all-to-all buffers (sendindbuf/sendnumbuf) are assembled here.
 *
 * @param[in]  A          boolean adjacency matrix, expected to be pre-split for threading
 * @param[in]  indx,numx  indices and values of the sparse input vector (nnzx entries)
 * @param[out] sendindbuf receiver-local indices of the output, grouped per recipient
 * @param[out] sendnumbuf values of the output, grouped per recipient
 * @param[out] cnts       per-recipient entry counts (accumulated, caller zero-initializes)
 * @param[in]  dspls      per-recipient displacements into the send buffers (with slack)
 * @param[in]  p_c        number of recipient processors along the processor column
 */
template <typename IT, typename VT>
void dcsc_gespmv_threaded_setbuffers (const SpDCCols<IT, bool> & A, const int32_t * indx, const VT * numx, int32_t nnzx,
                                      int32_t * sendindbuf, VT * sendnumbuf, int * cnts, int * dspls, int p_c)
{
    Select2ndSRing<bool, VT, VT> BFSsring;
    if(A.getnnz() > 0 && nnzx > 0)
    {
        int splits = A.getnsplit();
        if(splits > 0)
        {
            std::vector< std::vector<int32_t> > indy(splits);
            std::vector< std::vector< VT > > numy(splits);
            int32_t nlocrows = static_cast<int32_t>(A.getnrow());
            int32_t perpiece = nlocrows / splits;
            // Phase 1: each thread multiplies its row-slab independently into (indy[i], numy[i])
            #ifdef _OPENMP
            #pragma omp parallel for
            #endif
            for(int i=0; i<splits; ++i)
            {
                if(i != splits-1)
                    SpMXSpV_ForThreading<BFSsring>(*(A.GetDCSC(i)), perpiece, indx, numx, nnzx, indy[i], numy[i], i*perpiece);
                else
                    SpMXSpV_ForThreading<BFSsring>(*(A.GetDCSC(i)), nlocrows - perpiece*i, indx, numx, nnzx, indy[i], numy[i], i*perpiece);
            }
            int32_t perproc = nlocrows / p_c;
            int32_t last_rec = p_c-1;
            // keep recipients of last entries in each split (-1 for an empty split)
            // so that we can delete indy[] and numy[] contents as soon as they are processed
            std::vector<int32_t> end_recs(splits);
            for(int i=0; i<splits; ++i)
            {
                if(indy[i].empty())
                    end_recs[i] = -1;
                else
                    end_recs[i] = std::min(indy[i].back() / perproc, last_rec);
            }
            // Phase 2: count, per split and per recipient, how many entries will be sent
            int ** loc_rec_cnts = new int *[splits];
            #ifdef _OPENMP
            #pragma omp parallel for
            #endif
            for(int i=0; i<splits; ++i)
            {
                loc_rec_cnts[i] = new int[p_c](); // thread-local recipient data
                if(!indy[i].empty()) // guarantee that .begin() and .end() are not null
                {
                    int32_t cur_rec = std::min( indy[i].front() / perproc, last_rec);
                    int32_t lastdata = (cur_rec+1) * perproc; // one past last entry that goes to this current recipient
                    for(typename std::vector<int32_t>::iterator it = indy[i].begin(); it != indy[i].end(); ++it)
                    {
                        if( ( (*it) >= lastdata ) && cur_rec != last_rec)
                        {
                            cur_rec = std::min( (*it) / perproc, last_rec);
                            lastdata = (cur_rec+1) * perproc;
                        }
                        ++loc_rec_cnts[i][cur_rec];
                    }
                }
            }
            // Phase 3: scatter each split's results into the shared send buffers,
            // offsetting by what earlier splits already placed for the same recipient
            #ifdef _OPENMP
            #pragma omp parallel for
            #endif
            for(int i=0; i<splits; ++i)
            {
                if(!indy[i].empty()) // guarantee that .begin() and .end() are not null
                {
                    // FACT: Data is sorted, so if the recipient of begin is the same as the owner of end,
                    // then the whole data is sent to the same processor
                    int32_t beg_rec = std::min( indy[i].front() / perproc, last_rec);
                    int32_t alreadysent = 0; // already sent per recipient
                    for(int before = i-1; before >= 0; before--)
                        alreadysent += loc_rec_cnts[before][beg_rec];
                    if(beg_rec == end_recs[i]) // fast case: whole split goes to one recipient
                    {
                        // NOTE: std::bind2nd was deprecated in C++11 and removed in C++17;
                        // an equivalent lambda is used instead to convert to receiver-local indices.
                        const int32_t offset = perproc * beg_rec;
                        std::transform(indy[i].begin(), indy[i].end(), indy[i].begin(),
                                       [=](int32_t ind) { return ind - offset; });
                        std::copy(indy[i].begin(), indy[i].end(), sendindbuf + dspls[beg_rec] + alreadysent);
                        std::copy(numy[i].begin(), numy[i].end(), sendnumbuf + dspls[beg_rec] + alreadysent);
                    }
                    else // slow case: split straddles recipient boundaries
                    {
                        int32_t cur_rec = beg_rec;
                        int32_t lastdata = (cur_rec+1) * perproc; // one past last entry that goes to this current recipient
                        for(typename std::vector<int32_t>::iterator it = indy[i].begin(); it != indy[i].end(); ++it)
                        {
                            if( ( (*it) >= lastdata ) && cur_rec != last_rec )
                            {
                                cur_rec = std::min( (*it) / perproc, last_rec);
                                lastdata = (cur_rec+1) * perproc;
                                // if this split switches to a new recipient after sending some data
                                // then it's sure that no data has been sent to that recipient yet
                                alreadysent = 0;
                            }
                            sendindbuf[ dspls[cur_rec] + alreadysent ] = (*it) - perproc*cur_rec; // convert to receiver's local index
                            sendnumbuf[ dspls[cur_rec] + (alreadysent++) ] = *(numy[i].begin() + (it-indy[i].begin()));
                        }
                    }
                }
            }
            // Deallocate per-split recipient counts serially once all threads complete
            for(int i=0; i< splits; ++i)
            {
                for(int j=0; j< p_c; ++j)
                    cnts[j] += loc_rec_cnts[i][j];
                delete [] loc_rec_cnts[i];
            }
            delete [] loc_rec_cnts;
        }
        else
        {
            std::cout << "Something is wrong, splits should be nonzero for multithreaded execution" << std::endl;
        }
    }
}
/**
 * Step 3 of the sparse SpMV algorithm, specialized for BFS (no general semiring).
 * Performs the local multiply into the pre-allocated optimization buffers.
 *
 * @param[in]     A        boolean local matrix
 * @param[in]     rowneighs number of processors in the processor row
 * @param[in,out] optbuf   scratch space for the all-to-all (fold) communication
 * @param[in,out] indacc, numacc  indices/values of the input vector; freed before return
 * @param[out]    sendcnt  per-recipient send counts, filled by the local multiply
 * @param[in]     accnz    number of nonzeros in (indacc, numacc)
 */
template<typename VT, typename IT, typename UDER>
void LocalSpMV(const SpParMat<IT,bool,UDER> & A, int rowneighs, OptBuf<int32_t, VT > & optbuf, int32_t * & indacc, VT * & numacc, int * sendcnt, int accnz)
{
#ifdef TIMING
    double spmv_begin = MPI_Wtime();
#endif
    if(optbuf.totmax <= 0)
    {
        // this BFS-specialized path requires the graph500 optimization buffers
        SpParHelper::Print("BFS only (no semiring) function only work with optimization buffers\n");
    }
    else
    {
        if(A.spSeq->getnsplit() > 0)
        {
            // multithreaded path: optbuf.{inds/nums/dspls} and sendcnt are
            // pre-allocated and only filled by the threaded kernel
            generic_gespmv_threaded_setbuffers< Select2ndSRing<bool, VT, VT> > (*(A.spSeq), indacc, numacc, (int32_t) accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs);
        }
        else if(A.getlocalnnz() > 0 && accnz > 0)
        {
            // single-threaded path, bypassing dcsc_gespmv
            // ABAB: optbuf.isthere is ignored here
            // \TODO: Remove .isthere from optbuf definition
            SpMXSpV< Select2ndSRing<bool, VT, VT> >(*((A.spSeq)->GetInternal()), (int32_t) A.getlocalrows(), indacc, numacc,
                                                   accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs);
        }
        DeleteAll(indacc,numacc);  // input vector no longer needed
    }
#ifdef TIMING
    double spmv_end = MPI_Wtime();
    cblas_localspmvtime += (spmv_end - spmv_begin);
#endif
}
/// Merge the per-neighbor received (index, value) chunks into the output sparse
/// vector y. Each chunk i lives at recvindbuf/recvnumbuf[rdispls[i] .. rdispls[i]+recvcnt[i])
/// and is assumed sorted by index. Duplicate indices are resolved by keeping the
/// contribution from the lowest-ranked neighbor (BFS semantics: any parent wins,
/// ties broken by rank). All four buffers and the two count arrays are deallocated
/// here; y.ind/y.num are rebuilt from scratch.
template <typename IU, typename VT>
void MergeContributions(FullyDistSpVec<IU,VT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, VT * & recvnumbuf, int rowneighs)
{
#ifdef TIMING
    double t0=MPI_Wtime();
#endif
    // free memory of y, in case it was aliased (swap with empties releases capacity)
    std::vector<IU>().swap(y.ind);
    std::vector<VT>().swap(y.num);
#ifndef HEAPMERGE
    // Alternative 1: mark-and-sort. Collect first-seen entries, then sort them.
    IU ysize = y.MyLocLength(); // my local length is only O(n/p)
    bool * isthere = new bool[ysize];
    std::vector< std::pair<IU,VT> > ts_pairs;
    std::fill_n(isthere, ysize, false);
    // We don't need to keep a "merger" because minimum will always come from the processor
    // with the smallest rank; so a linear sweep over the received buffer is enough
    for(int i=0; i<rowneighs; ++i)
    {
        for(int j=0; j< recvcnt[i]; ++j)
        {
            int32_t index = recvindbuf[rdispls[i] + j];
            if(!isthere[index])
                ts_pairs.push_back(std::make_pair(index, recvnumbuf[rdispls[i] + j]));
        }
    }
    DeleteAll(recvcnt, rdispls);
    DeleteAll(isthere, recvindbuf, recvnumbuf);
    __gnu_parallel::sort(ts_pairs.begin(), ts_pairs.end());
    int nnzy = ts_pairs.size();
    y.ind.resize(nnzy);
    y.num.resize(nnzy);
    for(int i=0; i< nnzy; ++i)
    {
        y.ind[i] = ts_pairs[i].first;
        y.num[i] = ts_pairs[i].second;
    }
#else
    // Alternative 2: Heap-merge. A k-way merge over the rowneighs sorted chunks;
    // the heap holds at most one (index, chunk_id) pair per non-exhausted chunk.
    int32_t hsize = 0;          // number of chunks still represented in the heap
    int32_t inf = std::numeric_limits<int32_t>::min();
    int32_t sup = std::numeric_limits<int32_t>::max();
    KNHeap< int32_t, int32_t > sHeap(sup, inf);
    int * processed = new int[rowneighs]();   // entries consumed per chunk
    for(int32_t i=0; i<rowneighs; ++i)
    {
        if(recvcnt[i] > 0)
        {
            // key, proc_id
            sHeap.insert(recvindbuf[rdispls[i]], i);
            ++hsize;
        }
    }
    int32_t key, locv;
    // first pop is handled separately so that the dedup test below
    // (y.ind.back() != key) can safely assume y.ind is non-empty
    if(hsize > 0)
    {
        sHeap.deleteMin(&key, &locv);
        y.ind.push_back( static_cast<IU>(key));
        y.num.push_back(recvnumbuf[rdispls[locv]]); // nothing is processed yet
        if( (++(processed[locv])) < recvcnt[locv] )
            sHeap.insert(recvindbuf[rdispls[locv]+processed[locv]], locv);
        else
            --hsize;
    }
    // ofstream oput;
    // y.commGrid->OpenDebugFile("Merge", oput);
    // oput << "From displacements: "; copy(rdispls, rdispls+rowneighs, ostream_iterator<int>(oput, " ")); oput << endl;
    // oput << "From counts: "; copy(recvcnt, recvcnt+rowneighs, ostream_iterator<int>(oput, " ")); oput << endl;
    while(hsize > 0)
    {
        sHeap.deleteMin(&key, &locv);
        IU deref = rdispls[locv] + processed[locv];
        if(y.ind.back() != static_cast<IU>(key)) // y.ind is surely not empty
        {
            y.ind.push_back(static_cast<IU>(key));
            y.num.push_back(recvnumbuf[deref]);
        }
        if( (++(processed[locv])) < recvcnt[locv] )
            sHeap.insert(recvindbuf[rdispls[locv]+processed[locv]], locv);
        else
            --hsize;
    }
    DeleteAll(recvcnt, rdispls,processed);
    DeleteAll(recvindbuf, recvnumbuf);
#endif
#ifdef TIMING
    double t1=MPI_Wtime();
    cblas_mergeconttime += (t1-t0);
#endif
}
/**
 * This is essentially a SpMV for BFS because it lacks the semiring.
 * It naturally just selects columns of A (adjacencies of frontier) and
 * merges with the minimum entry succeeding. SpParMat has to be boolean;
 * input and output vectors are of type VT but their indices are IT.
 *
 * Pipeline: transpose x along the diagonal, all-gather along the processor
 * column, multiply locally, all-to-all along the processor row, then merge
 * the received contributions into y.
 */
template <typename VT, typename IT, typename UDER>
FullyDistSpVec<IT,VT> SpMV (const SpParMat<IT,bool,UDER> & A, const FullyDistSpVec<IT,VT> & x, OptBuf<int32_t, VT > & optbuf)
{
    CheckSpMVCompliance(A,x);
    optbuf.MarkEmpty();
    MPI_Comm World = x.commGrid->GetWorld();
    MPI_Comm ColWorld = x.commGrid->GetColWorld();
    MPI_Comm RowWorld = x.commGrid->GetRowWorld();
    int accnz;
    int32_t trxlocnz;
    IT lenuntil;
    int32_t *trxinds, *indacc;
    VT *trxnums, *numacc;
#ifdef TIMING
    double t0=MPI_Wtime();
#endif
    TransposeVector(World, x, trxlocnz, lenuntil, trxinds, trxnums, true); // trxinds (and potentially trxnums) is allocated
#ifdef TIMING
    double t1=MPI_Wtime();
    cblas_transvectime += (t1-t0);
#endif
    AllGatherVector(ColWorld, trxlocnz, lenuntil, trxinds, trxnums, indacc, numacc, accnz, true); // trxinds (and potentially trxnums) is deallocated, indacc/numacc allocated
    FullyDistSpVec<IT, VT> y ( x.commGrid, A.getnrow()); // identity doesn't matter for sparse vectors
    int rowneighs; MPI_Comm_size(RowWorld,&rowneighs);
    int * sendcnt = new int[rowneighs]();
    LocalSpMV(A, rowneighs, optbuf, indacc, numacc, sendcnt, accnz); // indacc/numacc deallocated
    int * rdispls = new int[rowneighs];
    int * recvcnt = new int[rowneighs];
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts
    // receive displacements are exact whereas send displacements have slack
    rdispls[0] = 0;
    for(int i=0; i<rowneighs-1; ++i)
    {
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }
    int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0);
    int32_t * recvindbuf = new int32_t[totrecv];
    VT * recvnumbuf = new VT[totrecv];
#ifdef TIMING
    double t2=MPI_Wtime();
#endif
    if(optbuf.totmax > 0 ) // graph500 optimization enabled
    {
        MPI_Alltoallv(optbuf.inds, sendcnt, optbuf.dspls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
        MPI_Alltoallv(optbuf.nums, sendcnt, optbuf.dspls, MPIType<VT>(), recvnumbuf, recvcnt, rdispls, MPIType<VT>(), RowWorld);
    }
    else
    {
        SpParHelper::Print("BFS only (no semiring) function only work with optimization buffers\n");
    }
    // BUG FIX: sendcnt was previously deleted only in the optbuf.totmax > 0 branch,
    // leaking it whenever the optimization buffers were absent
    delete [] sendcnt;
#ifdef TIMING
    double t3=MPI_Wtime();
    cblas_alltoalltime += (t3-t2);
#endif
    // MergeContributions deallocates recvcnt, rdispls, recvindbuf and recvnumbuf
    MergeContributions(y,recvcnt, rdispls, recvindbuf, recvnumbuf, rowneighs);
    return y;
}
/// Precompute, for each sub-step of the bottom-up BFS carousel, the column iterator
/// at which that sub-step's work begins in the local matrix A.
/// Relies on A.seq()'s columns being traversed in increasing colid() order: the single
/// iterator `colit` only ever advances as the loop walks the sub-ranges in order.
/// Returns a heap-allocated array owned by the caller (release with delete[]);
/// its length is numcols*cblas_splits+1 when THREADED, numcols+1 otherwise,
/// with the last slot holding the end iterator as a sentinel.
template <typename VT, typename IT, typename UDER>
SpDCCols<int,bool>::SpColIter* CalcSubStarts(SpParMat<IT,bool,UDER> & A, FullyDistSpVec<IT,VT> & x, BitMapCarousel<IT,VT> &done) {
    std::shared_ptr<CommGrid> cg = A.getcommgrid();
    IT rowuntil = x.LengthUntil();
    MPI_Comm RowWorld = cg->GetRowWorld();
    // every processor in the row uses the row head's offset so the sub-ranges agree
    MPI_Bcast(&rowuntil, 1, MPIType<IT>(), 0, RowWorld);
    int numcols = cg->GetGridCols();
    SpDCCols<int,bool>::SpColIter colit = A.seq().begcol();
#ifdef THREADED
    // one start per (column sub-step, thread) pair, plus the end sentinel
    SpDCCols<int,bool>::SpColIter* starts = new SpDCCols<int,bool>::SpColIter[numcols*cblas_splits+1];
    for(int c=0; c<numcols; c++) {
        IT curr_sub_start = done.GetGlobalStartOfLocal(c) - rowuntil;
        IT next_sub_start = done.GetGlobalEndOfLocal(c) - rowuntil;
        IT sub_range = next_sub_start - curr_sub_start;
        // split the sub-range evenly across threads, rounding up
        IT per_thread = (sub_range + cblas_splits - 1) / cblas_splits;
        IT curr_thread_start = curr_sub_start;
        for (int t=0; t<cblas_splits; t++) {
            // advance to the first column at or past this thread's start row
            while(colit.colid() < curr_thread_start) {
                ++colit;
            }
            starts[c*cblas_splits + t] = colit;
            curr_thread_start = std::min(curr_thread_start + per_thread, next_sub_start);
        }
    }
    starts[numcols*cblas_splits] = A.seq().endcol();
#else
    // one start per column sub-step, plus the end sentinel
    SpDCCols<int,bool>::SpColIter* starts = new SpDCCols<int,bool>::SpColIter[numcols+1];
    for(int c=0; c<numcols; c++) {
        IT next_start = done.GetGlobalStartOfLocal(c) - rowuntil;
        while(colit.colid() < next_start) {
            ++colit;
        }
        starts[c] = colit;
    }
    starts[numcols] = A.seq().endcol();
#endif
    return starts;
}
/// Exchange the parent updates discovered this sub-step with the row neighbors:
/// send our batch to `dest`, receive the batch destined for us from `source`,
/// and apply the received (local_index, parent) pairs to the parents vector.
/// NOTE(review): the pairs are shipped as 2*N words of MPIType<IT>(), which
/// assumes std::pair<IT,IT> is laid out as two ITs with no padding — holds for
/// identical member types on common ABIs, but worth confirming.
template <typename VT, typename IT>
void UpdateParents(MPI_Comm & RowWorld, std::pair<IT,IT> *updates, int num_updates, FullyDistVec<IT,VT> &parents, int source, int dest, BitMapFringe<int64_t,int64_t> &bm_fringe) {
    MPI_Status status;
    int outgoing_words = num_updates << 1;  // each update is one pair => two IT words
    int incoming_words;
    // first agree on sizes, then ship the payload
    MPI_Sendrecv(&outgoing_words, 1, MPI_INT, dest, PUPSIZE,
                 &incoming_words, 1, MPI_INT, source, PUPSIZE, RowWorld, &status);
    int num_incoming = incoming_words >> 1;
    std::pair<IT,IT>* incoming = new std::pair<IT,IT>[num_incoming];
    MPI_Sendrecv(updates, outgoing_words, MPIType<IT>(), dest, PUPDATA,
                 incoming, incoming_words, MPIType<IT>(), source, PUPDATA, RowWorld, &status);
#ifdef THREADED
#pragma omp parallel for
#endif
    for (int i = 0; i < num_incoming; i++) {
        parents.SetLocalElement(incoming[i].first, incoming[i].second);
    }
    bm_fringe.IncrementNumSet(num_incoming);
    delete[] incoming;
}
/// One full rotation of the bottom-up BFS (direction-optimizing BFS, bottom-up phase).
/// Each of the numcols sub-steps scans the sub-range of local columns owned by the
/// current carousel slice: for every not-yet-done row, the first adjacent frontier
/// vertex becomes its parent, the row is marked done, and the (row, parent) pair is
/// queued. After each sub-step the done-bitmap carousel rotates along the processor
/// row and the queued updates are exchanged with the slice's owner.
template <typename VT, typename IT, typename UDER>
void BottomUpStep(SpParMat<IT,bool,UDER> & A, FullyDistSpVec<IT,VT> & x, BitMapFringe<int64_t,int64_t> &bm_fringe, FullyDistVec<IT,VT> & parents, BitMapCarousel<IT,VT> &done, SpDCCols<int,bool>::SpColIter* starts)
{
    std::shared_ptr<CommGrid> cg = A.getcommgrid();
    MPI_Comm World = cg->GetWorld();
    MPI_Comm ColWorld = cg->GetColWorld();
    MPI_Comm RowWorld = cg->GetRowWorld();
    MPI_Status status;
    // get row and column offsets; the column offset comes from the transposed
    // (diagonal-complement) rank, then both are shared from the head of each line
    IT rowuntil = x.LengthUntil(), my_coluntil = x.LengthUntil(), coluntil;
    int diagneigh = cg->GetComplementRank();
    MPI_Sendrecv(&my_coluntil, 1, MPIType<IT>(), diagneigh, TROST, &coluntil, 1, MPIType<IT>(), diagneigh, TROST, World, &status);
    MPI_Bcast(&coluntil, 1, MPIType<IT>(), 0, ColWorld);
    MPI_Bcast(&rowuntil, 1, MPIType<IT>(), 0, RowWorld);
    BitMap* frontier = bm_fringe.TransposeGather();
    done.SaveOld();
#ifdef THREADED
    // per-thread staging buffers, flushed into the shared parent_updates array
    // in buff_size batches to limit contention on the shared counter
    const int buff_size = 8192;
    std::pair<IT,IT>* local_update_heads[cblas_splits];
    for (int t=0; t<cblas_splits; t++)
        local_update_heads[t] = new std::pair<IT,IT>[buff_size];
#endif
    // do bottom up work
    int numcols = cg->GetGridCols();
    int mycol = cg->GetRankInProcRow();
    std::pair<IT,IT>* parent_updates = new std::pair<IT,IT>[done.SizeOfChunk()<<1]; // over-allocated
    for (int sub_step=0; sub_step<numcols; sub_step++) {
        int num_updates = 0;
        IT sub_start = done.GetGlobalStartOfLocal();
        int dest_slice = (mycol + sub_step) % numcols;
        int source_slice = (mycol - sub_step + numcols) % numcols;
#ifdef BOTTOMUPTIME
        double t1 = MPI_Wtime();
#endif
#ifdef THREADED
#pragma omp parallel
        {
            int id = omp_get_thread_num();
            int num_locals=0;
            SpDCCols<int,bool>::SpColIter::NzIter nzit, nzit_end;
            SpDCCols<int,bool>::SpColIter colit, colit_end;
            std::pair<IT,IT>* local_updates = local_update_heads[id];
            // vector<pair<IT,IT> > local_updates;
            // iterate this thread's precomputed [start, end) column range
            colit_end = starts[dest_slice*cblas_splits + id + 1];
            for(colit = starts[dest_slice*cblas_splits + id]; colit != colit_end; ++colit) {
                int32_t local_row_ind = colit.colid();
                IT row = local_row_ind + rowuntil;
                if (!done.GetBit(row)) {
                    nzit_end = A.seq().endnz(colit);
                    for(nzit = A.seq().begnz(colit); nzit != nzit_end; ++nzit) {
                        int32_t local_col_ind = nzit.rowid();
                        IT col = local_col_ind + coluntil;
                        if (frontier->get_bit(local_col_ind)) {
                            // local_updates.push_back(make_pair(row-sub_start, col));
                            // flush the full staging buffer; the atomic fetch-add
                            // reserves a contiguous region of parent_updates
                            if (num_locals == buff_size) {
                                int copy_start = __sync_fetch_and_add(&num_updates, buff_size);
                                std::copy(local_updates, local_updates + buff_size, parent_updates + copy_start);
                                num_locals = 0;
                            }
                            local_updates[num_locals++] = std::make_pair(row-sub_start, col);
                            done.SetBit(row);
                            break;  // first frontier neighbor wins as parent
                        }
                    }
                }
            }
            // flush whatever remains in this thread's staging buffer
            int copy_start = __sync_fetch_and_add(&num_updates, num_locals);
            std::copy(local_updates, local_updates + num_locals, parent_updates + copy_start);
        }
#else
        SpDCCols<int,bool>::SpColIter::NzIter nzit, nzit_end;
        SpDCCols<int,bool>::SpColIter colit, colit_end;
        colit_end = starts[dest_slice+1];
        for(colit = starts[dest_slice]; colit != colit_end; ++colit)
        {
            int32_t local_row_ind = colit.colid();
            IT row = local_row_ind + rowuntil;
            if (!done.GetBit(row))
            {
                nzit_end = A.seq().endnz(colit);
                for(nzit = A.seq().begnz(colit); nzit != nzit_end; ++nzit)
                {
                    int32_t local_col_ind = nzit.rowid();
                    IT col = local_col_ind + coluntil;
                    if (frontier->get_bit(local_col_ind))
                    {
                        parent_updates[num_updates++] = std::make_pair(row-sub_start, col);
                        done.SetBit(row);
                        break;  // first frontier neighbor wins as parent
                    }
                } // end_for
            } // end_if
        } // end_for
#endif
#ifdef BOTTOMUPTIME
        double t2 = MPI_Wtime();
        bu_local += (t2-t1);
        t1 = MPI_Wtime();
#endif
        done.RotateAlongRow();
#ifdef BOTTOMUPTIME
        t2 = MPI_Wtime();
        bu_rotate += (t2-t1);
        t1 = MPI_Wtime();
#endif
        UpdateParents(RowWorld, parent_updates, num_updates, parents, source_slice, dest_slice, bm_fringe);
#ifdef BOTTOMUPTIME
        t2 = MPI_Wtime();
        bu_update += (t2-t1);
#endif
    }
    bm_fringe.LoadFromNext();
    done.UpdateFringe(bm_fringe);
#ifdef THREADED
    for (int t=0; t<cblas_splits; t++)
        delete[] local_update_heads[t];
#endif
    delete[] parent_updates;
}
}
#endif
|
GB_unop__sin_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sin_fp32_fp32)
// op(A') function: GB (_unop_tran__sin_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = sinf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sinf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = sinf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SIN || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply cij = sinf (aij) elementwise, in parallel.
// Cx and Ax may alias because each entry is read once and written once.
GrB_Info GB (_unop_apply__sin_fp32_fp32)
(
    float *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only entries whose bitmap bit is set are touched
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = sinf (Ax [p]) ;
        }
    }
    else
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = sinf (Ax [p]) ;
        }
        #endif
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply cij = sinf (aij).
// The entire implementation is textually included from the shared template
// GB_unop_transpose.c, which expands the GB_* macros defined above.
GrB_Info GB (_unop_tran__sin_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,     // one workspace per thread group
    const int64_t *restrict A_slice,   // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator/type combination compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
stat_ops.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "stat_ops.h"
#include "utility.h"
#include "constant.h"
// calculate norm
/* Squared L2 norm of a state vector: sum_i |state[i]|^2.
 * Parallelized with an OpenMP sum reduction when available. */
double state_norm_squared(const CTYPE *state, ITYPE dim) {
    ITYPE index;
    double norm = 0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:norm)
#endif
    for (index = 0; index < dim; ++index){
        /* |z|^2 as a plain product; avoids the much slower generic pow(x, 2) */
        double a = cabs(state[index]);
        norm += a * a;
    }
    return norm;
}
// calculate inner product of two state vector
/* Inner product <bra|ket> = sum_i conj(bra[i]) * ket[i].
 * MSVC cannot reduce over a complex type in OpenMP, so that build reduces the
 * real and imaginary parts separately and recombines them at the end. */
CTYPE state_inner_product(const CTYPE *state_bra, const CTYPE *state_ket, ITYPE dim) {
#ifndef _MSC_VER
    CTYPE value = 0;
    ITYPE index;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:value)
#endif
    for(index = 0; index < dim; ++index){
        value += conj(state_bra[index]) * state_ket[index];
    }
    return value;
#else
    double real_sum = 0.;
    double imag_sum = 0.;
    ITYPE index;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:real_sum,imag_sum)
#endif
    for (index = 0; index < dim; ++index) {
        /* BUG FIX: value was previously declared uninitialized and then
         * accumulated with +=, reading an indeterminate value (UB). */
        CTYPE value = conj(state_bra[index]) * state_ket[index];
        real_sum += creal(value);
        imag_sum += cimag(value);
    }
    return real_sum + 1.i * imag_sum;
#endif
}
|
raytrace_pixel.c | /*
* Academic License - for use in teaching, academic research, and meeting
* course requirements at degree granting institutions only. Not for
* government, commercial, or other organizational use.
* File: raytrace_pixel.c
*
* MATLAB Coder version : 5.1
* C/C++ source code generated on : 19-Apr-2021 21:23:36
*/
/* Include Files */
#include "raytrace_pixel.h"
#include "cosd.h"
#include "cotd.h"
#include "mexable_los2.h"
#include "mod.h"
#include "my_aer2geodetic.h"
#include "rt_nonfinite.h"
#include "shadowing_latlon_loop_emxutil.h"
#include "shadowing_latlon_loop_types.h"
#include "sind.h"
#include "tand.h"
#include "rt_nonfinite.h"
#include <math.h>
/* Function Definitions */
/*
* Unpack raytracing params
* Arguments : double lat
* double b_long
* double c_raytracing_params_t_calculati
* double d_raytracing_params_t_calculati
* double raytracing_params_t_steps
* const struct2_T *c_raytracing_params_public_rast
* double c_raytracing_params_extended_sa
* const struct3_T raytracing_params_ref_sphere
* const emxArray_real_T *c_raytracing_params_extended_el
* double raytracing_params_max_distance
* double c_local_raytracing_params_heigh
* double local_raytracing_params_aspect
* double local_raytracing_params_slope
* const emxArray_real_T *c_local_raytracing_params_h_arr
* const emxArray_real_T *c_local_raytracing_params_decl_
* emxArray_real_T *theta_arr
* Return Type : void
*/
void raytrace_pixel(double lat, double b_long, double
c_raytracing_params_t_calculati, double
d_raytracing_params_t_calculati, double
raytracing_params_t_steps, const struct2_T
*c_raytracing_params_public_rast, double
c_raytracing_params_extended_sa, const struct3_T
raytracing_params_ref_sphere, const emxArray_real_T
*c_raytracing_params_extended_el, double
raytracing_params_max_distance, double
c_local_raytracing_params_heigh, double
local_raytracing_params_aspect, double
local_raytracing_params_slope, const emxArray_real_T
*c_local_raytracing_params_h_arr, const emxArray_real_T
*c_local_raytracing_params_decl_, emxArray_real_T *theta_arr)
{
emxArray_boolean_T *sun_vis;
emxArray_int32_T *r;
emxArray_int8_T *sigma_ns;
emxArray_real32_T *sigma_ew;
emxArray_real32_T *sigma_w;
emxArray_real_T *az_sun;
emxArray_real_T *elev_sun;
emxArray_real_T *gamma_so;
emxArray_real_T *h;
emxArray_real_T *long_sun;
emxArray_real_T *t_arr;
emxArray_real_T *theta;
emxArray_real_T *theta_matrix;
emxArray_real_T *theta_z;
emxArray_real_T *z1;
double apnd;
double cdiff;
double ndbl;
double r_moon;
double slant_range;
double t_arr_start;
float f;
int i;
int i1;
int i2;
int idx;
int k;
int loop_ub;
int n;
int nm1d2;
int t_calculation_step_idx;
emxInit_real_T(&theta_matrix, 2);
r_moon = raytracing_params_ref_sphere.Radius;
/* unpack local params */
i = theta_matrix->size[0] * theta_matrix->size[1];
theta_matrix->size[0] = (int)d_raytracing_params_t_calculati;
theta_matrix->size[1] = (int)c_raytracing_params_t_calculati;
emxEnsureCapacity_real_T(theta_matrix, i);
loop_ub = (int)d_raytracing_params_t_calculati * (int)
c_raytracing_params_t_calculati;
for (i = 0; i < loop_ub; i++) {
theta_matrix->data[i] = 0.0;
}
loop_ub = (int)d_raytracing_params_t_calculati - 1;
#pragma omp parallel \
num_threads(omp_get_max_threads()) \
private(r,z1,theta,t_arr,sigma_ns,sigma_w,theta_z,sigma_ew,az_sun,elev_sun,h,long_sun,gamma_so,sun_vis,slant_range,t_arr_start,k,nm1d2,n,f,ndbl,apnd,cdiff)
{
emxInit_int32_T(&r, 2);
emxInit_real_T(&z1, 2);
emxInit_real_T(&theta, 2);
emxInit_real_T(&t_arr, 2);
emxInit_int8_T(&sigma_ns, 2);
emxInit_real32_T(&sigma_w, 2);
emxInit_real_T(&theta_z, 2);
emxInit_real32_T(&sigma_ew, 2);
emxInit_real_T(&az_sun, 2);
emxInit_real_T(&elev_sun, 2);
emxInit_real_T(&h, 2);
emxInit_real_T(&long_sun, 2);
emxInit_real_T(&gamma_so, 2);
emxInit_boolean_T(&sun_vis, 2);
#pragma omp for nowait
for (t_calculation_step_idx = 0; t_calculation_step_idx <= loop_ub;
t_calculation_step_idx++) {
/* Time loop */
k = theta->size[0] * theta->size[1];
theta->size[0] = 1;
theta->size[1] = (int)c_raytracing_params_t_calculati;
emxEnsureCapacity_real_T(theta, k);
nm1d2 = (int)c_raytracing_params_t_calculati;
for (k = 0; k < nm1d2; k++) {
theta->data[k] = rtNaN;
}
t_arr_start = (((double)t_calculation_step_idx + 1.0) - 1.0) *
c_raytracing_params_t_calculati + 1.0;
slant_range = (t_arr_start + c_raytracing_params_t_calculati) - 1.0;
if (slant_range > raytracing_params_t_steps) {
slant_range = raytracing_params_t_steps;
}
if (rtIsNaN(t_arr_start) || rtIsNaN(slant_range)) {
k = t_arr->size[0] * t_arr->size[1];
t_arr->size[0] = 1;
t_arr->size[1] = 1;
emxEnsureCapacity_real_T(t_arr, k);
t_arr->data[0] = rtNaN;
} else if (slant_range < t_arr_start) {
t_arr->size[0] = 1;
t_arr->size[1] = 0;
} else if ((rtIsInf(t_arr_start) || rtIsInf(slant_range)) && (t_arr_start ==
slant_range)) {
k = t_arr->size[0] * t_arr->size[1];
t_arr->size[0] = 1;
t_arr->size[1] = 1;
emxEnsureCapacity_real_T(t_arr, k);
t_arr->data[0] = rtNaN;
} else if (t_arr_start == t_arr_start) {
k = t_arr->size[0] * t_arr->size[1];
t_arr->size[0] = 1;
nm1d2 = (int)floor(slant_range - t_arr_start);
t_arr->size[1] = nm1d2 + 1;
emxEnsureCapacity_real_T(t_arr, k);
for (k = 0; k <= nm1d2; k++) {
t_arr->data[k] = t_arr_start + (double)k;
}
} else {
ndbl = floor((slant_range - t_arr_start) + 0.5);
apnd = t_arr_start + ndbl;
cdiff = apnd - slant_range;
if (fabs(cdiff) < 4.4408920985006262E-16 * fmax(fabs(t_arr_start), fabs
(slant_range))) {
ndbl++;
apnd = slant_range;
} else if (cdiff > 0.0) {
apnd = t_arr_start + (ndbl - 1.0);
} else {
ndbl++;
}
if (ndbl >= 0.0) {
n = (int)ndbl;
} else {
n = 0;
}
k = t_arr->size[0] * t_arr->size[1];
t_arr->size[0] = 1;
t_arr->size[1] = n;
emxEnsureCapacity_real_T(t_arr, k);
if (n > 0) {
t_arr->data[0] = t_arr_start;
if (n > 1) {
t_arr->data[n - 1] = apnd;
nm1d2 = (n - 1) / 2;
for (k = 0; k <= nm1d2 - 2; k++) {
t_arr->data[k + 1] = t_arr_start + ((double)k + 1.0);
t_arr->data[(n - k) - 2] = apnd - ((double)k + 1.0);
}
if (nm1d2 << 1 == n - 1) {
t_arr->data[nm1d2] = (t_arr_start + apnd) / 2.0;
} else {
t_arr->data[nm1d2] = t_arr_start + (double)nm1d2;
t_arr->data[nm1d2 + 1] = apnd - (double)nm1d2;
}
}
}
}
/* array of time values to calculate for at this iteration */
if (t_arr->size[1] != 0) {
k = elev_sun->size[0] * elev_sun->size[1];
elev_sun->size[0] = 1;
elev_sun->size[1] = t_arr->size[1];
emxEnsureCapacity_real_T(elev_sun, k);
nm1d2 = t_arr->size[0] * t_arr->size[1];
for (k = 0; k < nm1d2; k++) {
elev_sun->data[k] = c_local_raytracing_params_decl_->data[(int)
t_arr->data[k] - 1];
}
k = long_sun->size[0] * long_sun->size[1];
long_sun->size[0] = 1;
long_sun->size[1] = t_arr->size[1];
emxEnsureCapacity_real_T(long_sun, k);
nm1d2 = t_arr->size[0] * t_arr->size[1];
for (k = 0; k < nm1d2; k++) {
long_sun->data[k] = c_local_raytracing_params_h_arr->data[(int)
t_arr->data[k] - 1] + 180.0;
}
k = h->size[0] * h->size[1];
h->size[0] = 1;
h->size[1] = long_sun->size[1];
emxEnsureCapacity_real_T(h, k);
nm1d2 = long_sun->size[1];
for (k = 0; k < nm1d2; k++) {
h->data[k] = b_mod(long_sun->data[k], 360.0);
}
k = h->size[0] * h->size[1];
n = h->size[0] * h->size[1];
h->size[0] = 1;
emxEnsureCapacity_real_T(h, n);
nm1d2 = k - 1;
for (k = 0; k <= nm1d2; k++) {
h->data[k] -= 180.0;
}
k = sigma_ew->size[0] * sigma_ew->size[1];
sigma_ew->size[0] = 1;
sigma_ew->size[1] = t_arr->size[1];
emxEnsureCapacity_real32_T(sigma_ew, k);
nm1d2 = t_arr->size[1];
for (k = 0; k < nm1d2; k++) {
sigma_ew->data[k] = 1.0F;
}
nm1d2 = h->size[1];
k = long_sun->size[0] * long_sun->size[1];
long_sun->size[0] = 1;
long_sun->size[1] = h->size[1];
emxEnsureCapacity_real_T(long_sun, k);
for (k = 0; k < nm1d2; k++) {
long_sun->data[k] = fabs(h->data[k]);
}
slant_range = lat;
b_cotd(&slant_range);
k = gamma_so->size[0] * gamma_so->size[1];
gamma_so->size[0] = 1;
gamma_so->size[1] = elev_sun->size[1];
emxEnsureCapacity_real_T(gamma_so, k);
nm1d2 = elev_sun->size[0] * elev_sun->size[1];
for (k = 0; k < nm1d2; k++) {
gamma_so->data[k] = elev_sun->data[k];
}
b_tand(gamma_so);
k = gamma_so->size[0] * gamma_so->size[1];
n = gamma_so->size[0] * gamma_so->size[1];
gamma_so->size[0] = 1;
emxEnsureCapacity_real_T(gamma_so, n);
nm1d2 = k - 1;
for (k = 0; k <= nm1d2; k++) {
gamma_so->data[k] *= slant_range;
}
k = z1->size[0] * z1->size[1];
z1->size[0] = 1;
n = gamma_so->size[1];
z1->size[1] = gamma_so->size[1];
emxEnsureCapacity_real_T(z1, k);
for (k = 0; k < n; k++) {
z1->data[k] = fmax(gamma_so->data[k], -1.0);
}
k = gamma_so->size[0] * gamma_so->size[1];
gamma_so->size[0] = 1;
gamma_so->size[1] = z1->size[1];
emxEnsureCapacity_real_T(gamma_so, k);
nm1d2 = z1->size[1];
for (k = 0; k < nm1d2; k++) {
gamma_so->data[k] = fmin(z1->data[k], 1.0);
}
nm1d2 = gamma_so->size[1];
for (k = 0; k < nm1d2; k++) {
gamma_so->data[k] = 57.295779513082323 * acos(gamma_so->data[k]);
}
n = long_sun->size[1];
for (k = 0; k < n; k++) {
if (long_sun->data[k] > gamma_so->data[k]) {
sigma_ew->data[k] = -1.0F;
}
}
k = sigma_ns->size[0] * sigma_ns->size[1];
sigma_ns->size[0] = 1;
sigma_ns->size[1] = t_arr->size[1];
emxEnsureCapacity_int8_T(sigma_ns, k);
nm1d2 = t_arr->size[1];
for (k = 0; k < nm1d2; k++) {
sigma_ns->data[k] = 1;
}
k = sun_vis->size[0] * sun_vis->size[1];
sun_vis->size[0] = 1;
sun_vis->size[1] = elev_sun->size[1];
emxEnsureCapacity_boolean_T(sun_vis, k);
nm1d2 = elev_sun->size[0] * elev_sun->size[1];
for (k = 0; k < nm1d2; k++) {
sun_vis->data[k] = (lat * (lat - elev_sun->data[k]) < 0.0);
}
n = sun_vis->size[1] - 1;
nm1d2 = 0;
for (k = 0; k <= n; k++) {
if (sun_vis->data[k]) {
nm1d2++;
}
}
k = r->size[0] * r->size[1];
r->size[0] = 1;
r->size[1] = nm1d2;
emxEnsureCapacity_int32_T(r, k);
nm1d2 = 0;
for (k = 0; k <= n; k++) {
if (sun_vis->data[k]) {
r->data[nm1d2] = k + 1;
nm1d2++;
}
}
nm1d2 = r->size[0] * r->size[1];
for (k = 0; k < nm1d2; k++) {
sigma_ns->data[r->data[k] - 1] = -1;
}
k = sigma_w->size[0] * sigma_w->size[1];
sigma_w->size[0] = 1;
sigma_w->size[1] = t_arr->size[1];
emxEnsureCapacity_real32_T(sigma_w, k);
nm1d2 = t_arr->size[1];
for (k = 0; k < nm1d2; k++) {
sigma_w->data[k] = 1.0F;
}
n = h->size[1];
for (k = 0; k < n; k++) {
if (h->data[k] < 0.0) {
sigma_w->data[k] = -1.0F;
}
}
k = long_sun->size[0] * long_sun->size[1];
long_sun->size[0] = 1;
long_sun->size[1] = elev_sun->size[1];
emxEnsureCapacity_real_T(long_sun, k);
nm1d2 = elev_sun->size[0] * elev_sun->size[1];
for (k = 0; k < nm1d2; k++) {
long_sun->data[k] = elev_sun->data[k];
}
c_cosd(long_sun);
k = theta_z->size[0] * theta_z->size[1];
theta_z->size[0] = 1;
theta_z->size[1] = elev_sun->size[1];
emxEnsureCapacity_real_T(theta_z, k);
nm1d2 = elev_sun->size[0] * elev_sun->size[1];
for (k = 0; k < nm1d2; k++) {
theta_z->data[k] = elev_sun->data[k];
}
c_sind(theta_z);
slant_range = lat;
b_sind(&slant_range);
t_arr_start = lat;
b_cosd(&t_arr_start);
k = gamma_so->size[0] * gamma_so->size[1];
gamma_so->size[0] = 1;
gamma_so->size[1] = h->size[1];
emxEnsureCapacity_real_T(gamma_so, k);
nm1d2 = h->size[0] * h->size[1];
for (k = 0; k < nm1d2; k++) {
gamma_so->data[k] = h->data[k];
}
c_cosd(gamma_so);
k = theta_z->size[0] * theta_z->size[1];
n = theta_z->size[0] * theta_z->size[1];
theta_z->size[0] = 1;
emxEnsureCapacity_real_T(theta_z, n);
nm1d2 = k - 1;
for (k = 0; k <= nm1d2; k++) {
theta_z->data[k] = theta_z->data[k] * slant_range + long_sun->data[k] *
t_arr_start * gamma_so->data[k];
}
nm1d2 = theta_z->size[1];
for (k = 0; k < nm1d2; k++) {
theta_z->data[k] = acos(theta_z->data[k]);
}
/* rounding error can cause gamma_so to be complex, fix with max(-1,min(1, RES )) */
k = gamma_so->size[0] * gamma_so->size[1];
gamma_so->size[0] = 1;
gamma_so->size[1] = theta_z->size[1];
emxEnsureCapacity_real_T(gamma_so, k);
nm1d2 = theta_z->size[0] * theta_z->size[1];
for (k = 0; k < nm1d2; k++) {
gamma_so->data[k] = theta_z->data[k];
}
nm1d2 = theta_z->size[1];
for (k = 0; k < nm1d2; k++) {
gamma_so->data[k] = sin(gamma_so->data[k]);
}
c_sind(h);
k = h->size[0] * h->size[1];
n = h->size[0] * h->size[1];
h->size[0] = 1;
emxEnsureCapacity_real_T(h, n);
nm1d2 = k - 1;
for (k = 0; k <= nm1d2; k++) {
h->data[k] = h->data[k] * long_sun->data[k] / gamma_so->data[k];
}
k = long_sun->size[0] * long_sun->size[1];
long_sun->size[0] = 1;
long_sun->size[1] = h->size[1];
emxEnsureCapacity_real_T(long_sun, k);
nm1d2 = h->size[1];
for (k = 0; k < nm1d2; k++) {
long_sun->data[k] = fmax(-1.0, h->data[k]);
}
k = gamma_so->size[0] * gamma_so->size[1];
gamma_so->size[0] = 1;
gamma_so->size[1] = long_sun->size[1];
emxEnsureCapacity_real_T(gamma_so, k);
nm1d2 = long_sun->size[1];
for (k = 0; k < nm1d2; k++) {
gamma_so->data[k] = fmin(1.0, long_sun->data[k]);
}
nm1d2 = gamma_so->size[1];
for (k = 0; k < nm1d2; k++) {
gamma_so->data[k] = 57.295779513082323 * asin(gamma_so->data[k]);
}
n = theta_z->size[1];
for (k = 0; k < n; k++) {
if (theta_z->data[k] == 0.0) {
gamma_so->data[k] = 0.0;
}
}
/* Avoid division by 0 error */
k = sigma_ew->size[0] * sigma_ew->size[1];
n = sigma_ew->size[0] * sigma_ew->size[1];
sigma_ew->size[0] = 1;
emxEnsureCapacity_real32_T(sigma_ew, n);
nm1d2 = k - 1;
for (k = 0; k <= nm1d2; k++) {
sigma_ew->data[k] *= (float)sigma_ns->data[k];
}
k = sigma_ew->size[0] * sigma_ew->size[1];
n = sigma_ew->size[0] * sigma_ew->size[1];
sigma_ew->size[0] = 1;
emxEnsureCapacity_real32_T(sigma_ew, n);
nm1d2 = k - 1;
for (k = 0; k <= nm1d2; k++) {
f = sigma_ew->data[k];
f = f * (float)gamma_so->data[k] + (1.0F - f) / 2.0F * sigma_w->data[k]
* 180.0F;
sigma_ew->data[k] = f;
}
k = long_sun->size[0] * long_sun->size[1];
long_sun->size[0] = 1;
long_sun->size[1] = theta_z->size[1];
emxEnsureCapacity_real_T(long_sun, k);
nm1d2 = theta_z->size[0] * theta_z->size[1];
for (k = 0; k < nm1d2; k++) {
long_sun->data[k] = theta_z->data[k];
}
nm1d2 = theta_z->size[1];
for (k = 0; k < nm1d2; k++) {
long_sun->data[k] = sin(long_sun->data[k]);
}
k = gamma_so->size[0] * gamma_so->size[1];
gamma_so->size[0] = 1;
gamma_so->size[1] = theta_z->size[1];
emxEnsureCapacity_real_T(gamma_so, k);
nm1d2 = theta_z->size[0] * theta_z->size[1];
for (k = 0; k < nm1d2; k++) {
gamma_so->data[k] = theta_z->data[k];
}
nm1d2 = theta_z->size[1];
for (k = 0; k < nm1d2; k++) {
gamma_so->data[k] = cos(gamma_so->data[k]);
}
slant_range = local_raytracing_params_slope;
b_cosd(&slant_range);
t_arr_start = local_raytracing_params_slope;
b_sind(&t_arr_start);
k = sigma_w->size[0] * sigma_w->size[1];
sigma_w->size[0] = 1;
sigma_w->size[1] = sigma_ew->size[1];
emxEnsureCapacity_real32_T(sigma_w, k);
nm1d2 = sigma_ew->size[0] * sigma_ew->size[1];
for (k = 0; k < nm1d2; k++) {
sigma_w->data[k] = sigma_ew->data[k] - (float)
local_raytracing_params_aspect;
}
d_cosd(sigma_w);
k = sigma_w->size[0] * sigma_w->size[1];
sigma_w->size[0] = 1;
sigma_w->size[1] = gamma_so->size[1];
emxEnsureCapacity_real32_T(sigma_w, k);
nm1d2 = gamma_so->size[0] * gamma_so->size[1] - 1;
for (k = 0; k <= nm1d2; k++) {
sigma_w->data[k] = (float)(gamma_so->data[k] * slant_range) + (float)
(long_sun->data[k] * t_arr_start) * sigma_w->data[k];
}
nm1d2 = sigma_w->size[1];
for (k = 0; k < nm1d2; k++) {
sigma_w->data[k] = acosf(sigma_w->data[k]);
}
nm1d2 = sigma_w->size[1];
for (k = 0; k < nm1d2; k++) {
theta->data[k] = sigma_w->data[k];
}
n = theta_z->size[1];
for (k = 0; k < n; k++) {
if (theta_z->data[k] > 1.5707963267948966) {
theta->data[k] = rtNaN;
}
}
n = theta->size[1];
for (k = 0; k < n; k++) {
if (theta->data[k] > 1.5707963267948966) {
theta->data[k] = rtNaN;
}
}
k = az_sun->size[0] * az_sun->size[1];
az_sun->size[0] = 1;
az_sun->size[1] = t_arr->size[1];
emxEnsureCapacity_real_T(az_sun, k);
nm1d2 = t_arr->size[1];
for (k = 0; k < nm1d2; k++) {
az_sun->data[k] = rtNaN;
}
n = t_arr->size[1];
for (k = 0; k < n; k++) {
if (!(lat < elev_sun->data[k])) {
az_sun->data[k] = sigma_ew->data[k] + 180.0F;
}
}
n = t_arr->size[1];
for (k = 0; k < n; k++) {
if (lat < elev_sun->data[k]) {
az_sun->data[k] = -sigma_ew->data[k];
}
}
k = elev_sun->size[0] * elev_sun->size[1];
elev_sun->size[0] = 1;
elev_sun->size[1] = theta_z->size[1];
emxEnsureCapacity_real_T(elev_sun, k);
nm1d2 = theta_z->size[0] * theta_z->size[1];
for (k = 0; k < nm1d2; k++) {
elev_sun->data[k] = 90.0 - 57.295779513082323 * theta_z->data[k];
}
slant_range = 1.1 * raytracing_params_max_distance;
/* ensure outside of grid */
/* no longer necessary to switch to double with rewritten builtins */
my_aer2geodetic(az_sun, elev_sun, slant_range, lat, b_long,
c_local_raytracing_params_heigh,
raytracing_params_ref_sphere.MeanRadius,
raytracing_params_ref_sphere.Flattening, h, long_sun,
gamma_so);
k = elev_sun->size[0] * elev_sun->size[1];
elev_sun->size[0] = 1;
elev_sun->size[1] = h->size[1];
emxEnsureCapacity_real_T(elev_sun, k);
nm1d2 = h->size[1];
for (k = 0; k < nm1d2; k++) {
elev_sun->data[k] = lat;
}
k = az_sun->size[0] * az_sun->size[1];
az_sun->size[0] = 1;
az_sun->size[1] = h->size[1];
emxEnsureCapacity_real_T(az_sun, k);
nm1d2 = h->size[1];
for (k = 0; k < nm1d2; k++) {
az_sun->data[k] = b_long;
}
mexable_los2(c_raytracing_params_extended_el, elev_sun, az_sun, h,
long_sun, gamma_so, r_moon, c_raytracing_params_extended_sa,
c_raytracing_params_public_rast, sun_vis);
n = theta_z->size[1];
for (k = 0; k < n; k++) {
if (theta_z->data[k] == 0.0) {
sun_vis->data[k] = true;
}
}
n = sun_vis->size[1];
for (k = 0; k < n; k++) {
if (!sun_vis->data[k]) {
theta->data[k] = rtNaN;
}
}
if (theta->size[1] < c_raytracing_params_t_calculati) {
if ((double)theta->size[1] + 1.0 > c_raytracing_params_t_calculati) {
k = 0;
n = 0;
} else {
k = theta->size[1];
n = (int)c_raytracing_params_t_calculati;
}
nm1d2 = n - k;
for (n = 0; n < nm1d2; n++) {
theta->data[k + n] = rtNaN;
}
}
nm1d2 = theta->size[1];
for (k = 0; k < nm1d2; k++) {
theta_matrix->data[t_calculation_step_idx + theta_matrix->size[0] * k]
= theta->data[k];
}
}
}
emxFree_boolean_T(&sun_vis);
emxFree_real_T(&gamma_so);
emxFree_real_T(&long_sun);
emxFree_real_T(&h);
emxFree_real_T(&elev_sun);
emxFree_real_T(&az_sun);
emxFree_real32_T(&sigma_ew);
emxFree_real_T(&theta_z);
emxFree_real32_T(&sigma_w);
emxFree_int8_T(&sigma_ns);
emxFree_real_T(&t_arr);
emxFree_real_T(&theta);
emxFree_real_T(&z1);
emxFree_int32_T(&r);
}
i = theta_arr->size[0] * theta_arr->size[1];
theta_arr->size[0] = 1;
loop_ub = (int)(c_raytracing_params_t_calculati *
d_raytracing_params_t_calculati);
theta_arr->size[1] = loop_ub;
emxEnsureCapacity_real_T(theta_arr, i);
for (i = 0; i < loop_ub; i++) {
theta_arr->data[i] = rtNaN;
}
i = (int)d_raytracing_params_t_calculati;
for (idx = 0; idx < i; idx++) {
/* get values in correct order for time series */
r_moon = c_raytracing_params_t_calculati * (((double)idx + 1.0) - 1.0) + 1.0;
if (r_moon > c_raytracing_params_t_calculati * ((double)idx + 1.0)) {
i1 = 1;
} else {
i1 = (int)r_moon;
}
loop_ub = theta_matrix->size[1];
for (i2 = 0; i2 < loop_ub; i2++) {
theta_arr->data[(i1 + i2) - 1] = theta_matrix->data[idx +
theta_matrix->size[0] * i2];
}
}
emxFree_real_T(&theta_matrix);
i = theta_arr->size[0] * theta_arr->size[1];
if (1.0 > raytracing_params_t_steps) {
theta_arr->size[1] = 0;
} else {
theta_arr->size[1] = (int)raytracing_params_t_steps;
}
emxEnsureCapacity_real_T(theta_arr, i);
}
/*
* File trailer for raytrace_pixel.c
*
* [EOF]
*/
|
GB_unop__identity_int64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int64_uint64
// op(A') function: GB_unop_tran__identity_int64_uint64
// C type: int64_t
// A type: uint64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): cast each uint64_t entry of A to int64_t (the
// IDENTITY operator itself is a no-op; only the typecast does work).
GrB_Info GB_unop_apply__identity_int64_uint64
(
    int64_t *Cx,                    // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap; NULL otherwise
    int64_t anz,                    // number of entries in Ax
    int nthreads                    // number of OpenMP threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a single parallel memcpy suffices
        // (disabled for this int64<-uint64 kernel; the macro is 0)
        GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
#else
        // cast every entry: Cx [p] = (int64_t) Ax [p]
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
#endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only positions with Ab [p] set need to be written
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while casting uint64_t -> int64_t.
// The actual kernel body is the shared template GB_unop_transpose.c,
// which uses the GB_* macros defined at the top of this file.
GrB_Info GB_unop_tran__identity_int64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // passed through to GB_unop_transpose.c
    const int64_t *GB_RESTRICT A_slice, // passed through to GB_unop_transpose.c
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // template include: expands to the full transpose loop in-place
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
test-zrocks.c | /* xZTL: Zone Translation Layer User-space Library
*
* Copyright 2019 Samsung Electronics
*
* Written by Ivan L. Picoli <i.picoli@samsung.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <omp.h>
#include <stdint.h>
#include <stdlib.h>
#include <xztl.h>
#include <libzrocks.h>
#include "CUnit/Basic.h"
/* Number of Objects */
#define TEST_N_BUFFERS 2
/* Number of random objects to read */
#define TEST_RANDOM_ID 2
/* Object Size */
#define TEST_BUFFER_SZ (1024 * 1024 * 16) /* 16 MB */
static uint8_t *wbuf[TEST_N_BUFFERS];
static uint8_t *rbuf[TEST_N_BUFFERS];
static const char **devname;
/* Record a CUnit failure when a call returned a NULL pointer, and log
 * the name of the call that produced it. */
static void cunit_zrocks_assert_ptr(char *fn, void *ptr) {
    CU_ASSERT((uint64_t)ptr != 0);

    if (ptr == NULL) {
        printf("\n %s: ptr %p\n", fn, ptr);
    }
}
/* Record a CUnit failure for a non-zero status code and log which call
 * returned it (status printed in hex). */
static void cunit_zrocks_assert_int(char *fn, uint64_t status) {
    CU_ASSERT(status == 0);

    if (status != 0) {
        printf("\n %s: %lx\n", fn, status);
    }
}
/* CUnit suite setup hook: no global state to prepare, always succeeds. */
static int cunit_zrocks_init(void) {
    return 0; /* success */
}
/* CUnit suite teardown hook: nothing to release, always succeeds. */
static int cunit_zrocks_exit(void) {
    return 0; /* success */
}
/* Open the zrocks library on the device path supplied on the command
 * line (held in the file-level 'devname'). */
static void test_zrocks_init(void) {
    int ret = zrocks_init(*devname);

    cunit_zrocks_assert_int("zrocks_init", ret);
}
/* Shut down the zrocks library (paired with test_zrocks_init; runs as
 * the final test in the suite). */
static void test_zrocks_exit(void) {
    zrocks_exit();
}
/* Fill object 'id''s write buffer with a deterministic pattern of
 * 16-byte runs whose fill byte increments per run.  The first run is
 * 0x2 because the value is bumped before the first memset. */
static void test_zrocks_fill_buffer(uint32_t id) {
    uint8_t fill = 0x1;
    uint32_t pos;

    for (pos = 0; pos < TEST_BUFFER_SZ; pos += 16) {
        fill += 0x1;
        memset(&wbuf[id][pos], fill, 16);
    }
}
/* Compare the first 'size' bytes of object 'id''s write and read
 * buffers; returns 0 on match (memcmp semantics).
 * NOTE(review): 'off' is only used by the commented-out debug dump
 * below — the comparison itself always starts at offset 0. */
static int test_zrocks_check_buffer(uint32_t id, uint32_t off, uint32_t size) {
    /*printf (" \nMem check:\n");
    for (int i = off; i < off + size; i++) {
        if (i % 16 == 0 && i)
            printf("\n %d-%d ", i - (i%16), (i - (i%16)) + 16);
        printf (" %x/%x", wbuf[id][i], rbuf[id][i]);
    }
    printf("\n");
    */
    return memcmp(wbuf[id], rbuf[id], size);
}
/* Create TEST_N_BUFFERS objects in parallel: allocate a DMA buffer per
 * object, fill it with the test pattern, and store it as object id+1
 * at level 0.  The write buffers stay allocated for later read checks. */
static void test_zrocks_new(void) {
    int status[TEST_N_BUFFERS];
    uint64_t id;
    uint32_t count = TEST_N_BUFFERS;
    uint32_t bytes = TEST_BUFFER_SZ;
    uint8_t level = 0;

#pragma omp parallel for
    for (id = 0; id < count; id++) {
        /* Allocate DMA memory */
        wbuf[id] = xztl_media_dma_alloc(bytes);
        cunit_zrocks_assert_ptr("xztl_media_dma_alloc", wbuf[id]);
        if (wbuf[id] == NULL)
            continue;

        test_zrocks_fill_buffer(id);

        status[id] = zrocks_new(id + 1, wbuf[id], bytes, level);
        cunit_zrocks_assert_int("zrocks_new", status[id]);
    }
}
/* Read every object back in 64 KB chunks into a fresh DMA buffer and
 * verify the contents match what test_zrocks_new() wrote; each read
 * buffer is freed once checked. */
static void test_zrocks_read(void) {
    int status[TEST_N_BUFFERS];
    uint64_t id;
    uint32_t offset;
    size_t chunk = 1024 * 64; /* 64 KB per read */
    size_t total = TEST_BUFFER_SZ;

    for (id = 0; id < TEST_N_BUFFERS; id++) {
        /* Allocate DMA memory */
        rbuf[id] = xztl_media_dma_alloc(total);
        cunit_zrocks_assert_ptr("xztl_media_dma_alloc", rbuf[id]);
        if (rbuf[id] == NULL)
            continue;

        memset(rbuf[id], 0x0, total);

        for (offset = 0; offset < total; offset += chunk) {
            status[id] =
                zrocks_read_obj(id + 1, offset, rbuf[id] + offset, chunk);
            cunit_zrocks_assert_int("zrocks_read_obj", status[id]);
            if (status[id])
                printf("Read error: ID %lu, offset %d, status: %x\n", id + 1,
                       offset, status[id]);
        }

        status[id] = test_zrocks_check_buffer(id, 0, TEST_BUFFER_SZ);
        cunit_zrocks_assert_int("zrocks_read_obj:check", status[id]);
        if (status[id])
            printf("Corruption: ID %lu, corrupted: %d bytes\n", id + 1,
                   status[id]);

        xztl_media_dma_free(rbuf[id]);
    }
}
/* Read four small, deliberately unaligned extents from object
 * TEST_RANDOM_ID and compare each against the matching slice of the
 * original write buffer (object id maps to wbuf[id - 1]).  Also frees
 * every write buffer at the end, so this must run after the other
 * read tests. */
static void test_zrocks_random_read(void) {
    uint64_t id;
    /* Odd offsets/sizes exercise unaligned reads. */
    uint64_t random_off[4] = {63, 24567, 175678, 267192};
    size_t random_sz[4] = {532, 53, 2695, 1561};
    // uint64_t random_off[1] = {24567};
    // size_t random_sz[1] = {53};
    int readi, ret;
    uint8_t *buf, *woff;

    id = TEST_RANDOM_ID;

    /* 512 KB scratch buffer: large enough for any extent above. */
    buf = xztl_media_dma_alloc(1024 * 512);
    cunit_zrocks_assert_ptr("xztl_media_dma_alloc", buf);
    if (!buf)
        return;

    for (readi = 0; readi < 4; readi++) {
        memset(buf, 0x0, random_sz[readi]);

        ret = zrocks_read_obj(id, random_off[readi], buf, random_sz[readi]);
        cunit_zrocks_assert_int("zrocks_read_obj", ret);

        /* Expected bytes live at the same offset in the write buffer. */
        woff = &wbuf[id - 1][random_off[readi]];

        /* Uncomment for a detailed read check (per-byte print)
        printf (" \nMem check:\n");
        for (int i = 0; i < random_sz[readi] + 4096; i++) {
            if (i % 16 == 0)
                printf("\n %lu-%lu ",
                        (i+random_off[readi]) - ((i+random_off[readi]) % 16)
                        + random_off[readi] % 16,
                        ((i+random_off[readi]) - ((i+random_off[readi]) %
        16)) + 16 + random_off[readi] % 16); printf (" %x/%x", woff[i],
        buf[i]);
        }
        printf("\n");
        */

        cunit_zrocks_assert_int("zrocks_read_obj:check",
                                memcmp(woff, buf, random_sz[readi]));
    }

    xztl_media_dma_free(buf);
    for (int i = 0; i < TEST_N_BUFFERS; i++) xztl_media_dma_free(wbuf[i]);
}
/* Entry point: registers the zrocks CUnit suite and runs it.
 * Usage: test-zrocks <device-uri>, e.g. liou:/dev/nvme0n2.
 * Returns -1 on bad usage, a CUnit error code on registry failure, or
 * the number of failed tests (0 on full success). */
int main(int argc, const char **argv) {
    int failed;

    if (argc < 2) {
        printf("Please provide the device path. e.g. liou:/dev/nvme0n2\n");
        return -1;
    }

    devname = &argv[1];
    printf("Device: %s\n", *devname);

    CU_pSuite pSuite = NULL;

    if (CUE_SUCCESS != CU_initialize_registry())
        return CU_get_error();

    pSuite = CU_add_suite("Suite_zrocks", cunit_zrocks_init, cunit_zrocks_exit);
    if (pSuite == NULL) {
        CU_cleanup_registry();
        return CU_get_error();
    }

    /* Test order matters: init first, then write, the reads (random
     * read frees the write buffers), and finally exit. */
    if ((CU_add_test(pSuite, "Initialize ZRocks", test_zrocks_init) == NULL) ||
        (CU_add_test(pSuite, "ZRocks New", test_zrocks_new) == NULL) ||
        (CU_add_test(pSuite, "ZRocks Read", test_zrocks_read) == NULL) ||
        (CU_add_test(pSuite, "ZRocks Random Read", test_zrocks_random_read) ==
         NULL) ||
        (CU_add_test(pSuite, "Close ZRocks", test_zrocks_exit) == NULL)) {
        CU_cleanup_registry();
        return CU_get_error();
    }

    CU_basic_set_mode(CU_BRM_VERBOSE);
    CU_basic_run_tests();
    failed = CU_get_number_of_tests_failed();
    CU_cleanup_registry();

    return failed;
}
|
elemwise_binary_scalar_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_scalar_op.h
* \brief Function definition of elementwise binary scalar operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#include <mxnet/operator_util.h>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "elemwise_unary_op.h"
namespace mxnet {
namespace op {
/*! \brief Operator parameter for tensor-vs-scalar ops: the scalar operand
 *  plus a flag recording whether it was supplied as an integer (used by
 *  NumpyBinaryScalarType to choose the output dtype). */
struct NumpyBinaryScalarParam : public dmlc::Parameter<NumpyBinaryScalarParam> {
  double scalar;  // scalar operand (held as double even when is_int is true)
  bool is_int;    // true if the scalar was given as an integer
  DMLC_DECLARE_PARAMETER(NumpyBinaryScalarParam) {
    DMLC_DECLARE_FIELD(scalar)
    .set_default(1)
    .describe("Scalar input value");
    DMLC_DECLARE_FIELD(is_int)
    .set_default(true)
    .describe("Indicate whether scalar input is int type");
  }
  // Serialize both fields into the attribute dict.  max_digits10 ensures the
  // double round-trips exactly through its decimal string form.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream scalar_s, is_int_s;
    scalar_s << std::setprecision(std::numeric_limits<double>::max_digits10) << scalar;
    is_int_s << is_int;
    (*dict)["scalar"] = scalar_s.str();
    (*dict)["is_int"] = is_int_s.str();
  }
};
/*! \brief Type inference for tensor-op-scalar, following numpy promotion:
 *  int tensor + float scalar -> float64 output; bool tensor -> int64 (int
 *  scalar) or float64 (float scalar); otherwise output dtype == input dtype
 *  (and may flow backwards into the input).  Returns true once the output
 *  dtype is known. */
inline bool NumpyBinaryScalarType(const nnvm::NodeAttrs& attrs,
                                  std::vector<int>* in_attrs,
                                  std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
  bool scalar_is_int = param.is_int;
  if (common::is_int(in_attrs->at(0)) && !scalar_is_int) {
    // int tensor combined with a float scalar promotes to float64
    TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat64);
  } else if (in_attrs->at(0) == mshadow::kBool) {
    // bool tensor promotes depending on the scalar's kind
    TYPE_ASSIGN_CHECK(*out_attrs, 0, scalar_is_int ? mshadow::kInt64 : mshadow::kFloat64);
  } else {
    // same dtype both ways: also lets a known output dtype fix the input
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  }
  return out_attrs->at(0) != -1;
}
class BinaryScalarOp : public UnaryOp {
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
const double alpha = param.scalar;
CHECK_EQ(output.shape(), input.shape());
const int64_t row_count = output.shape()[0];
const int64_t items_per_row = output.shape().Size() / row_count;
const DType result_for_zero = OP::Map(DType(0), DType(alpha));
mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream);
mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
if (sparse_row_count != row_count) {
mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data(
rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
int64_t input_iter = 0;
int64_t output_row = 0;
IType next_input_row = 0;
while (output_row < row_count) {
next_input_row = input_iter < sparse_row_count ? int64_t(row_indexes[input_iter])
: row_count;
// Split up into blocks of contiguous data and do those together
// Do contiguous dense blocks
const int64_t dense_block_count = next_input_row - output_row;
if (dense_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch(
stream,
items_per_row * dense_block_count,
output_data.dptr_ + items_per_row * output_row,
result_for_zero);
});
output_row += dense_block_count;
continue;
}
// Do contiguous sparse blocks
int64_t next_non_contiguous_sparse = input_iter;
while (next_non_contiguous_sparse < sparse_row_count - 1) {
if (row_indexes[next_non_contiguous_sparse + 1]
!= row_indexes[next_non_contiguous_sparse] + 1) {
break;
}
++next_non_contiguous_sparse;
}
const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
if (sparse_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream,
items_per_row * sparse_block_count,
&output_data.dptr_[items_per_row * output_row],
&input_data.dptr_[items_per_row * input_iter],
DType(alpha));
});
output_row += sparse_block_count;
input_iter += sparse_block_count;
continue;
}
}
} else {
// All rows exist (eventually we don't have to do complex
// things to call GPU kernels because we don't need to access row indices)
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream,
items_per_row * row_count,
output_data.dptr_,
input_data.dptr_,
DType(alpha));
});
}
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
CHECK_EQ(output.shape(), input.shape());
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
const double alpha = param.scalar;
const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
const TBlob column_indexes = input.aux_data(csr::kIdx);
const size_t item_count = column_indexes.Size();
// Pre-fill dense with 0-input/output value
FillDense<DType>(stream, output.shape().Size(), dense_fill_val,
req, output.data().dptr<DType>());
mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
if (item_count) {
const DType *in = input.data().dptr<DType>();
const IType *column_indexes_ptr = column_indexes.dptr<IType>();
const auto row_count = static_cast<size_t>(input.shape()[0]);
const TBlob row_starts = input.aux_data(csr::kIndPtr);
const CType *row_starts_ptr = row_starts.dptr<CType>();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(row_count); ++i) {
const bool last_row = i == static_cast<int>(row_count) - 1;
// Split up into blocks of contiguous data and do those together
const size_t row_item_start_iter = row_starts_ptr[i];
const size_t input_items_this_row = !last_row
? static_cast<size_t>(row_starts_ptr[i + 1])
- row_item_start_iter
: item_count - row_item_start_iter;
if (input_items_this_row) {
const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
const DType *row_data_start = in + row_item_start_iter;
DType *output_this_row = out[i].dptr_;
// More overhead to use OMP for small loops, so don't
if (input_items_this_row > 1000) {
#pragma omp parallel for
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
} else {
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
}
}
}
}
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
template<typename xpu, typename OP, typename DType, typename IType>
static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray output) {
mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
CHECK_EQ(output.storage_type(), kDefaultStorage);
switch (input.storage_type()) {
case kRowSparseStorage: {
ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
break;
}
case kCSRStorage: {
MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
});
break;
}
default:
CHECK(false) << "Unsupported sparse storage type";
break;
}
}
public:
template<typename OP>
static void Compute_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
mshadow::Stream<cpu>* s,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
TBlob temp_tblob;
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
bool scalar_is_int = param.is_int;
const double alpha = param.scalar;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
if ((common::is_int(inputs[0].type_flag_) && !scalar_is_int) ||
(inputs[0].type_flag_ == kBool)) {
Tensor<cpu, 1, DType> temp_tensor =
ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(inputs[0].Size()), s);
temp_tblob = TBlob(temp_tensor);
CastCompute<cpu>(attrs, ctx, {inputs[0]}, {kWriteTo}, {temp_tblob});
} else {
temp_tblob = inputs[0];
}
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<DType>(), temp_tblob.dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
Compute_<OP>(attrs, ctx, s, inputs, req, outputs);
}
template<typename xpu, typename OP>
static void ComputeInt(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
const double alpha = param.scalar;
MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
bool scalar_is_int = param.is_int;
const double alpha = param.scalar;
TBlob temp_tblob;
if (common::is_int(inputs[0].type_flag_) && !scalar_is_int) {
Tensor<xpu, 1, double> temp_tensor =
ctx.requested[0].get_space_typed<xpu, 1, double>(Shape1(inputs[0].Size()), s);
temp_tblob = TBlob(temp_tensor);
CastCompute<xpu>(attrs, ctx, {inputs[0]}, {kWriteTo}, {temp_tblob});
} else {
temp_tblob = inputs[0];
}
MSHADOW_TYPE_SWITCH_WITH_BOOL(temp_tblob.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<bool>(), temp_tblob.dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (req[0] == kNullOp) {
return;
}
if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
(in_stype == kCSRStorage && out_stype == kCSRStorage)) {
// csr -> csr, or rsp -> rsp
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
} else if (out_stype == kDefaultStorage &&
(in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
});
});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename OP>
static void LogicComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (req[0] == kNullOp) {
return;
}
if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
(in_stype == kCSRStorage && out_stype == kCSRStorage)) {
// csr -> csr, or rsp -> rsp
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
// CPU implementation of the binary-scalar backward pass.
// Launches an element-wise kernel that combines inputs[0] and inputs[1]
// through backward_grad_tuned<OP> together with the scalar parameter
// (param.scalar), writing into outputs[0] under write-request Req.
// NOTE(review): by MXNet convention inputs[0] is the output gradient and
// inputs[1] the forward input — confirm against the gradient registration.
template<typename OP>
static void Backward_(const nnvm::NodeAttrs &attrs,
mshadow::Stream<cpu>* s,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
// The scalar operand is stored in the parsed op attributes
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
const double alpha = param.scalar;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req<
mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, cpu>::
Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>(),
DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void Backward(const nnvm::NodeAttrs &attrs,
                     const OpContext &ctx,
                     const std::vector<TBlob> &inputs,
                     const std::vector<OpReqType> &req,
                     const std::vector<TBlob> &outputs) {
  // Thin wrapper: fetch the device stream from the op context and delegate
  // to the stream-taking implementation above.
  Backward_<OP>(attrs, ctx.get_stream<xpu>(), inputs, req, outputs);
}
};
// Registers operator "name" as a binary-scalar op: one tensor input, one
// output, with the scalar operand carried in NumpyBinaryScalarParam.
// Output shape/type follow the input element-wise, in-place on input 0 is
// allowed, and temporary workspace is requested (used e.g. for type
// conversion of the input).  Comments cannot go inside the macro body —
// a '//' would swallow the line-continuation backslash.
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr_parser(ParamParser<NumpyBinaryScalarParam>) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
.set_attr<nnvm::FInferType>("FInferType", NumpyBinaryScalarType) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}}; \
}) \
.set_attr<FResourceRequest>("FResourceRequest", \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; \
}) \
.add_argument("data", "NDArray-or-Symbol", "source input") \
.add_arguments(NumpyBinaryScalarParam::__FIELDS__())
#if MXNET_USE_CUDA
// Functor for the runtime-compiled (RTC) GPU forward path of a
// binary-scalar op.  OP names the device-side operation to compile at
// runtime.  The two call operators cover the dense (TBlob) and sparse
// (NDArray) input paths; their definitions live in a CUDA translation
// unit, not in this header.
struct BinaryScalarRTCCompute {
std::string OP;
// Dense path
void operator()(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs);
// Sparse (NDArray) path
void operator()(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs);
};
// Functor for the runtime-compiled (RTC) GPU backward path of a
// binary-scalar op; OP names the device-side gradient operation.
// Definition lives in a CUDA translation unit, not in this header.
struct BinaryScalarRTCBackward {
std::string OP;
void operator()(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs);
};
#endif
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
|
H2Pack_SPDHSS_H2.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#ifdef __linux__
#include <malloc.h>
#endif
#include "H2Pack_config.h"
#include "H2Pack_typedef.h"
#include "H2Pack_aux_structs.h"
#include "H2Pack_utils.h"
#include "H2Pack_SPDHSS_H2.h"
#include "utils.h"
// Build explicit U matrices from nested U matrices
// Input parameter:
// h2pack : H2Pack data structure after H2P_build()
// Output parameter:
// exU : Size h2pack->n_node, explicit U matrices for each node
void H2P_build_explicit_U(H2Pack_p h2pack, H2P_dense_mat_p **exU_)
{
int n_node = h2pack->n_node;
int n_leaf_node = h2pack->n_leaf_node;
int n_thread = h2pack->n_thread;
int max_level = h2pack->max_level;
int max_child = h2pack->max_child;
int *n_child = h2pack->n_child;
int *children = h2pack->children;
int *level_nodes = h2pack->level_nodes;
int *level_n_node = h2pack->level_n_node;
int *mat_cluster = h2pack->mat_cluster;
H2P_dense_mat_p *U = h2pack->U;
H2P_thread_buf_p *thread_buf = h2pack->tb;
// exU[i] == NULL marks "not built yet"; leftovers get 0 x 0 placeholders below
H2P_dense_mat_p *exU = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node);
ASSERT_PRINTF(exU != NULL, "Failed to allocate %d explicit U matrices\n", n_node);
memset(exU, 0, sizeof(H2P_dense_mat_p) * n_node);
// Sweep levels bottom-up so each child's exU is ready before its parent needs it
for (int i = max_level; i >= 1; i--)
{
int *level_i_nodes = level_nodes + i * n_leaf_node;
int level_i_n_node = level_n_node[i];
int n_thread_i = MIN(level_i_n_node, n_thread);
#pragma omp parallel num_threads(n_thread_i)
{
int tid = omp_get_thread_num();
H2P_int_vec_p idx = thread_buf[tid]->idx0;
H2P_int_vec_set_capacity(idx, max_child);
#pragma omp for schedule(dynamic) nowait
for (int j = 0; j < level_i_n_node; j++)
{
int node = level_i_nodes[j];
int n_child_node = n_child[node];
H2P_dense_mat_p U_node = U[node];
// mat_cluster stores the inclusive [start, end] row range owned by this node
int s_row = mat_cluster[2 * node];
int n_row = mat_cluster[2 * node + 1] - s_row + 1;
if (U_node->nrow == 0) continue;
if (n_child_node == 0)
{
// Leaf node: the explicit U is just a copy of its own U
H2P_dense_mat_init(&exU[node], U_node->nrow, U_node->ncol);
ASSERT_PRINTF(n_row == exU[node]->nrow, "Node %d exU got %d rows, expected %d rows\n", node, exU[node]->nrow, n_row);
copy_matrix_block(sizeof(DTYPE), U_node->nrow, U_node->ncol, U_node->data, U_node->ld, exU[node]->data, exU[node]->ld);
} else {
// Non-leaf: exU[node] = blkdiag(exU{children}) * U[node], computed
// child by child — each child k multiplies its exU with the matching
// row block of U[node] and writes the matching row block of exU[node]
int *node_children = children + node * max_child;
int bd_U_nrow = 0, bd_U_ncol = 0;
for (int k = 0; k < n_child_node; k++)
{
int child_k = node_children[k];
bd_U_nrow += exU[child_k]->nrow;
bd_U_ncol += exU[child_k]->ncol;
}
ASSERT_PRINTF(n_row == bd_U_nrow, "Node %d exU got %d rows, expected %d rows\n", node, bd_U_nrow, n_row);
ASSERT_PRINTF(U_node->nrow == bd_U_ncol, "Node %d U has %d rows, but diagblk(exU) has %d columns\n", node, U_node->nrow, bd_U_ncol);
H2P_dense_mat_init(&exU[node], bd_U_nrow, U_node->ncol);
// Reuse the counters as running row/column offsets for the block products
bd_U_nrow = 0;
bd_U_ncol = 0;
for (int k = 0; k < n_child_node; k++)
{
int child_k = node_children[k];
H2P_dense_mat_p exU_k = exU[child_k];
DTYPE *exU_node_k_row = exU[node]->data + bd_U_nrow * exU[node]->ld;
DTYPE *U_node_k_col = U_node->data + bd_U_ncol * U_node->ld;
// exU[node](rows of child k, :) = exU_k * U[node](rows bd_U_ncol .. bd_U_ncol+exU_k->ncol-1, :)
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, exU_k->nrow, U_node->ncol, exU_k->ncol,
1.0, exU_k->data, exU_k->ld, U_node_k_col, U_node->ld, 0.0, exU_node_k_row, exU[node]->ld
);
bd_U_nrow += exU_k->nrow;
bd_U_ncol += exU_k->ncol;
}
} // End of "if (n_child_node == 0)"
} // End of j loop
} // End of "#pragma omp parallel"
} // End of i loop
// Nodes skipped above (empty U) get valid but empty (0 x 0) matrices so
// callers can dereference exU[i] unconditionally
for (int i = 0; i < n_node; i++)
{
if (exU[i] != NULL) continue;
H2P_dense_mat_init(&exU[i], 8, 1);
exU[i]->nrow = 0;
exU[i]->ncol = 0;
exU[i]->ld = 0;
}
*exU_ = exU;
}
// Compute the level of two node's lowest common ancestor
// Input parameters:
// parent : Size n_node, parent of each node
// node_level : Size n_node, level of each node
// n_level : Total number of levels (max_level+1 since root is level 0)
// node{0, 1} : Target node pair
// work : Work buffer, size >= 2 * n_level
int H2P_tree_common_ancestor_level(
    const int *parent, const int *node_level, const int n_level,
    const int node0, const int node1, int *work
)
{
    // Record each node's root-to-node ancestor chain, indexed by level:
    // chain0[l] is node0's ancestor at level l (so chain0[lvl0] == node0).
    int *chain0 = work;
    int *chain1 = work + n_level;
    memset(chain0, 0, sizeof(int) * n_level);
    memset(chain1, 0, sizeof(int) * n_level);
    const int lvl0 = node_level[node0];
    const int lvl1 = node_level[node1];
    int walk = node0;
    for (int l = lvl0; l >= 0; l--)
    {
        chain0[l] = walk;
        walk = parent[walk];
    }
    walk = node1;
    for (int l = lvl1; l >= 0; l--)
    {
        chain1[l] = walk;
        walk = parent[walk];
    }
    // Walk down from the root; the first level where the chains diverge is
    // one below the lowest common ancestor.
    const int last_lvl = (lvl0 < lvl1) ? lvl0 : lvl1;
    int ca_level = 0;
    for (int l = 0; l <= last_lvl; l++)
    {
        if (chain0[l] != chain1[l])
        {
            ca_level = l - 1;
            break;
        }
    }
    return ca_level;
}
// Accumulate partial H2 matvec results for H2P_SPDHSS_H2_build()
// Input parameters:
// h2mat : Source H2 matrix structure
// n_vec : Use n_vec Gaussian random vectors
// Output parameter:
// *Yk_ : Matrix, size h2mat->n_node * h2mat->max_level, each non-empty element is
// a matrix of n_vec columns
void H2P_SPDHSS_H2_acc_matvec(H2Pack_p h2mat, const int n_vec, H2P_dense_mat_p **Yk_)
{
int n_node = h2mat->n_node;
int n_leaf_node = h2mat->n_leaf_node;
int n_thread = h2mat->n_thread;
int max_level = h2mat->max_level;
int max_child = h2mat->max_child;
int min_adm_level = h2mat->min_adm_level;
int *parent = h2mat->parent;
int *n_child = h2mat->n_child;
int *children = h2mat->children;
int *level_nodes = h2mat->level_nodes;
int *level_n_node = h2mat->level_n_node;
int *leaf_nodes = h2mat->height_nodes;
int *node_level = h2mat->node_level;
int *mat_cluster = h2mat->mat_cluster;
int *B_p2i_rowptr = h2mat->B_p2i_rowptr;
int *B_p2i_colidx = h2mat->B_p2i_colidx;
int *D_p2i_rowptr = h2mat->D_p2i_rowptr;
int *D_p2i_colidx = h2mat->D_p2i_colidx;
H2P_dense_mat_p *U = h2mat->U;
H2P_thread_buf_p *thread_buf = h2mat->tb;
// 1. Build explicit U matrix for each node
H2P_dense_mat_p *exU;
H2P_build_explicit_U(h2mat, &exU);
// 2. Prepare the Gaussian random matrix vec and Yk_mat
// Yk_mat is kms rows by (n_vec * max_level) columns; the i-th block of
// n_vec columns stores the matvec results for nodes at the i-th level
// (the root level, level 0, needs no block)
const int kms = h2mat->krnl_mat_size;
const int Yk_mat_ld = n_vec * max_level;
size_t vec_msize = sizeof(DTYPE) * (size_t) kms * (size_t) n_vec;
DTYPE *vec = (DTYPE*) malloc(vec_msize);
DTYPE *Yk_mat = (DTYPE*) malloc(vec_msize * max_level); // Note: we have max_level+1 levels in total
ASSERT_PRINTF(vec != NULL && Yk_mat != NULL, "Failed to allocate space for accu_matvec\n");
// Each thread fills its own contiguous row slab of vec with N(0, 1)
// samples and zeroes the matching slab of Yk_mat
#pragma omp parallel num_threads(n_thread)
{
int tid = omp_get_thread_num();
int s_row, n_row;
calc_block_spos_len(kms, n_thread, tid, &s_row, &n_row);
H2P_gen_normal_distribution(0.0, 1.0, (size_t) n_row * (size_t) n_vec, vec + (size_t) s_row * (size_t) n_vec);
size_t Yk_mat_offset = (size_t) s_row * (size_t) n_vec * (size_t) max_level;
size_t Yk_mat_nelem = (size_t) n_row * (size_t) n_vec * (size_t) max_level;
memset(Yk_mat + Yk_mat_offset, 0, sizeof(DTYPE) * Yk_mat_nelem);
}
// 3. H2 matvec upward sweep: y0[node] = U[node]^T * (restriction of vec),
// built bottom-up so children's y0 are ready before the parent uses them
H2P_dense_mat_p *y0 = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node);
ASSERT_PRINTF(y0 != NULL, "Failed to allocate %d working matrices\n", n_node);
for (int i = 0; i < n_node; i++) y0[i] = NULL;
for (int i = max_level; i >= min_adm_level; i--)
{
int *level_i_nodes = level_nodes + i * n_leaf_node;
int level_i_n_node = level_n_node[i];
int n_thread_i = MIN(level_i_n_node, n_thread);
#pragma omp parallel num_threads(n_thread_i)
{
#pragma omp for schedule(dynamic)
for (int j = 0; j < level_i_n_node; j++)
{
int node = level_i_nodes[j];
int n_child_node = n_child[node];
H2P_dense_mat_p U_node = U[node];
H2P_dense_mat_init(&y0[node], U_node->ncol, n_vec);
if (n_child_node == 0)
{
// Leaf node, directly multiply x_j with U_j^T
int s_row = mat_cluster[2 * node];
int e_row = mat_cluster[2 * node + 1];
int nrow = e_row - s_row + 1;
DTYPE *vec_blk = vec + (size_t)s_row * (size_t)n_vec;
CBLAS_GEMM(
CblasRowMajor, CblasTrans, CblasNoTrans, U_node->ncol, n_vec, nrow,
1.0, U_node->data, U_node->ld, vec_blk, n_vec, 0.0, y0[node]->data, y0[node]->ld
);
} else {
// Non-leaf node, concatenate y0 in the children nodes and multiply it with U_j^T
// Multiply each child's y0 with the matching row block of U{node};
// beta = 0 on the first child initializes y0[node], beta = 1 accumulates
int *node_children = children + node * max_child;
int y0_tmp_nrow = 0;
DTYPE beta = 0.0;
for (int k = 0; k < n_child_node; k++)
{
int child_k = node_children[k];
H2P_dense_mat_p y0_k = y0[child_k];
DTYPE *U_node_k_row = U_node->data + y0_tmp_nrow * U_node->ld;
CBLAS_GEMM(
CblasRowMajor, CblasTrans, CblasNoTrans, U_node->ncol, n_vec, y0_k->nrow,
1.0, U_node_k_row, U_node->ld, y0_k->data, y0_k->ld, beta, y0[node]->data, y0[node]->ld
);
beta = 1.0;
y0_tmp_nrow += y0[child_k]->nrow;
} // End of k loop
} // End of "if (n_child_node == 0)"
} // End of j loop
} // End of "#pragma omp parallel"
} // End of i loop
// 4. For each pair of siblings (i, j), compute
// Yk{i} += Aij * vec_j
// Yk{j} += Aij' * vec_i
// Yk{i/j} are stored in the corresponding columns of the actual Yk
#pragma omp parallel num_threads(n_thread)
{
int tid = omp_get_thread_num();
H2P_int_vec_p work = thread_buf[tid]->idx0;
// Dij and Bij alias the same buffer (mat0); they are never live at once
H2P_dense_mat_p Dij = thread_buf[tid]->mat0;
H2P_dense_mat_p Bij = thread_buf[tid]->mat0;
H2P_dense_mat_p tmpM = thread_buf[tid]->mat1;
H2P_int_vec_set_capacity(work, 2 * max_level + 4);
// 4.1 Inadmissible pairs
#pragma omp for schedule(dynamic)
for (int node0 = 0; node0 < n_node; node0++)
{
int s_row0 = mat_cluster[2 * node0];
int e_row0 = mat_cluster[2 * node0 + 1];
int n_row0 = e_row0 - s_row0 + 1;
for (int i = D_p2i_rowptr[node0]; i < D_p2i_rowptr[node0 + 1]; i++)
{
int node1 = D_p2i_colidx[i];
if (node0 == node1) continue;
// Column block of Yk_mat is chosen by the level below the pair's
// lowest common ancestor
int ca_level = H2P_tree_common_ancestor_level(parent, node_level, max_level+1, node0, node1, work->data) + 1;
int s_col = n_vec * (ca_level - 1);
int s_row1 = mat_cluster[2 * node1];
int e_row1 = mat_cluster[2 * node1 + 1];
int n_row1 = e_row1 - s_row1 + 1;
size_t Yk_mat_offset = (size_t) s_row0 * (size_t) Yk_mat_ld + (size_t) s_col;
DTYPE *Yk_mat_blk0 = Yk_mat + Yk_mat_offset;
DTYPE *vec_blk1 = vec + (size_t)s_row1 * (size_t)n_vec;
// A negative ld from H2P_get_Dij_block signals that the stored block
// is the transpose of D{node0, node1}
int Dij_nrow, Dij_ncol, Dij_ld, Dij_trans;
H2P_get_Dij_block(h2mat, node0, node1, Dij);
DTYPE *Dij_data = Dij->data;
if (Dij->ld > 0)
{
Dij_nrow = Dij->nrow;
Dij_ncol = Dij->ncol;
Dij_ld = Dij->ld;
Dij_trans = 0;
} else {
Dij_nrow = Dij->ncol;
Dij_ncol = Dij->nrow;
Dij_ld = -Dij->ld;
Dij_trans = 1;
}
// We only handle:
// Yk_mat(idx1, col_idx) = Yk_mat(idx1, col_idx) + D{D_idx} * vec(idx2, :);
// it's symmetric operation:
// Yk_mat(idx2, col_idx) = Yk_mat(idx2, col_idx) + D{D_idx}' * vec(idx1, :);
// is handled by double counting node inadmissible pairs
ASSERT_PRINTF(
Dij_nrow == n_row0 && Dij_ncol == n_row1,
"D{%d, %d} has size %d * %d, expected %d * %d\n",
node0, node1, Dij_nrow, Dij_ncol, n_row0, n_row1
);
CBLAS_TRANSPOSE Dij_trans_ = (Dij_trans == 0) ? CblasNoTrans : CblasTrans;
CBLAS_GEMM(
CblasRowMajor, Dij_trans_, CblasNoTrans, n_row0, n_vec, n_row1,
1.0, Dij_data, Dij_ld, vec_blk1, n_vec, 1.0, Yk_mat_blk0, Yk_mat_ld
);
} // End of i loop
} // End of node0 loop
// 4.2 Admissible pairs
#pragma omp barrier
for (int i = max_level; i >= min_adm_level; i--)
{
int *level_i_nodes = level_nodes + i * n_leaf_node;
int level_i_n_node = level_n_node[i];
#pragma omp barrier
#pragma omp for schedule(dynamic)
for (int j = 0; j < level_i_n_node; j++)
{
int node0 = level_i_nodes[j];
int s_row0 = mat_cluster[2 * node0];
int e_row0 = mat_cluster[2 * node0 + 1];
int n_row0 = e_row0 - s_row0 + 1;
int level0 = node_level[node0];
H2P_dense_mat_p exU_0 = exU[node0];
for (int k = B_p2i_rowptr[node0]; k < B_p2i_rowptr[node0 + 1]; k++)
{
int node1 = B_p2i_colidx[k];
if (node0 == node1) continue;
int ca_level = H2P_tree_common_ancestor_level(parent, node_level, max_level+1, node0, node1, work->data) + 1;
int s_col = n_vec * (ca_level - 1);
int s_row1 = mat_cluster[2 * node1];
int e_row1 = mat_cluster[2 * node1 + 1];
int n_row1 = e_row1 - s_row1 + 1;
int level1 = node_level[node1];
// As with Dij above, a negative ld means the stored B block is transposed
int Bij_nrow, Bij_ncol, Bij_ld, Bij_trans;
H2P_get_Bij_block(h2mat, node0, node1, Bij);
DTYPE *Bij_data = Bij->data;
if (Bij->ld > 0)
{
Bij_nrow = Bij->nrow;
Bij_ncol = Bij->ncol;
Bij_ld = Bij->ld;
Bij_trans = 0;
} else {
Bij_nrow = Bij->ncol;
Bij_ncol = Bij->nrow;
Bij_ld = -Bij->ld;
Bij_trans = 1;
}
H2P_dense_mat_p y0_1 = y0[node1];
size_t Yk_mat_offset = (size_t) s_row0 * (size_t) Yk_mat_ld + (size_t) s_col;
DTYPE *Yk_mat_blk0 = Yk_mat + Yk_mat_offset;
DTYPE *vec_blk1 = vec + (size_t)s_row1 * (size_t) n_vec;
// We only handle the update on Yk_mat_blk0, the symmetric operation for
// updating Yk_mat_blk1 is handled by double counting the admissible pairs
// A. Two nodes are of the same level, compress on both side
if (level0 == level1)
{
// Yk_mat(idx1, col_idx) = Yk_mat(idx1, col_idx) + exU{c1} * (Bij * y0{c2});
ASSERT_PRINTF(
exU_0->ncol == Bij_nrow && Bij_ncol == y0_1->nrow,
"Pair (%d, %d) GEMM size mismatch: [%d, %d] * [%d, %d] * [%d, %d]\n",
node0, node1, exU_0->nrow, exU_0->ncol, Bij_nrow, Bij_ncol, y0_1->nrow, y0_1->ncol
);
ASSERT_PRINTF(
n_row0 == exU_0->nrow && n_vec == y0_1->ncol,
"Pair (%d, %d) matrix addition size mismatch: expected [%d, %d], got [%d, %d]\n",
node0, node1, n_row0, n_vec, exU_0->nrow, y0_1->ncol
);
H2P_dense_mat_resize(tmpM, Bij_nrow, y0_1->ncol);
CBLAS_TRANSPOSE Bij_trans_ = (Bij_trans == 0) ? CblasNoTrans : CblasTrans;
CBLAS_GEMM(
CblasRowMajor, Bij_trans_, CblasNoTrans, Bij_nrow, y0_1->ncol, Bij_ncol,
1.0, Bij_data, Bij_ld, y0_1->data, y0_1->ld, 0.0, tmpM->data, tmpM->ld
);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, exU_0->nrow, tmpM->ncol, exU_0->ncol,
1.0, exU_0->data, exU_0->ld, tmpM->data, tmpM->ld, 1.0, Yk_mat_blk0, Yk_mat_ld
);
} // End of "if (level0 == level1)"
// B. node1 is a leaf node and its level is larger than node0,
// only compress on node0's side
if (level0 > level1)
{
// Yk_mat(idx1, col_idx) = Yk_mat(idx1, col_idx) + exU{c1} * (Bij * vec(idx2, :));
ASSERT_PRINTF(
exU_0->ncol == Bij_nrow && Bij_ncol == n_row1,
"Pair (%d, %d) GEMM size mismatch: [%d, %d] * [%d, %d] * [%d, %d]\n",
node0, node1, exU_0->nrow, exU_0->ncol, Bij_nrow, Bij_ncol, n_row1, n_vec
);
ASSERT_PRINTF(
n_row0 == exU_0->nrow,
"Pair (%d, %d) matrix addition size mismatch: expected [%d, %d], got [%d, %d]\n",
node0, node1, n_row0, n_vec, exU_0->nrow, n_vec
);
H2P_dense_mat_resize(tmpM, Bij_nrow, n_vec);
CBLAS_TRANSPOSE Bij_trans_ = (Bij_trans == 0) ? CblasNoTrans : CblasTrans;
CBLAS_GEMM(
CblasRowMajor, Bij_trans_, CblasNoTrans, Bij_nrow, n_vec, Bij_ncol,
1.0, Bij_data, Bij_ld, vec_blk1, n_vec, 0.0, tmpM->data, tmpM->ld
);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, exU_0->nrow, tmpM->ncol, exU_0->ncol,
1.0, exU_0->data, exU_0->ld, tmpM->data, tmpM->ld, 1.0, Yk_mat_blk0, Yk_mat_ld
);
} // End of "if (level0 > level1)"
// C. node0 is a leaf node and its level is larger than node1,
// only compress on node1's side
if (level0 < level1)
{
// Yk_mat(idx1, col_idx) = Yk_mat(idx1, col_idx) + Bij * y0{c2};
ASSERT_PRINTF(
n_row0 == Bij_nrow && Bij_ncol == y0_1->nrow && y0_1->ncol == n_vec,
"Pair (%d, %d) GEMM & matrix addition size mismatch: [%d, %d] + [%d, %d] * [%d, %d]\n",
node0, node1, n_row0, n_vec, Bij_nrow, Bij_ncol, y0_1->nrow, y0_1->ncol
);
CBLAS_TRANSPOSE Bij_trans_ = (Bij_trans == 0) ? CblasNoTrans : CblasTrans;
CBLAS_GEMM(
CblasRowMajor, Bij_trans_, CblasNoTrans, Bij_nrow, y0_1->ncol, Bij_ncol,
1.0, Bij_data, Bij_ld, y0_1->data, y0_1->ld, 1.0, Yk_mat_blk0, Yk_mat_ld
);
} // End of "if (level0 < level1)"
} // End of k loop (admissible pairs)
} // End of j loop (nodes in this level)
} // End of i loop (level)
} // End of "#pragma omp parallel"
// 5. Accumulate the results in Yk_mat to leaf nodes: running prefix-sum
// across level column blocks, so each level's block ends up containing
// the contributions of all coarser (smaller-index) levels as well
#pragma omp parallel num_threads(n_thread)
{
int tid = omp_get_thread_num();
int s_row, n_row;
calc_block_spos_len(kms, n_thread, tid, &s_row, &n_row);
for (int level = 1; level < max_level; level++)
{
int s_col = n_vec * (level - 1);
for (int i = s_row; i < s_row + n_row; i++)
{
size_t Yk_mat_offset = (size_t) i * (size_t) Yk_mat_ld;
DTYPE *Yk_mat_i = Yk_mat + Yk_mat_offset;
#pragma omp simd
for (int j = s_col; j < s_col + n_vec; j++)
Yk_mat_i[j + n_vec] += Yk_mat_i[j];
}
}
} // End of "#pragma omp parallel"
// 6. Repack Yk_mat into Yk: for each leaf node, Yk[node * max_level + l]
// holds the n_row x n_vec block for the (level-1-l)-th column block
H2P_dense_mat_p *Yk = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node * max_level);
ASSERT_PRINTF(Yk != NULL, "Failed to allocate %d * %d Yk matrices\n", n_node, max_level);
for (int i = 0; i < n_node * max_level; i++) Yk[i] = NULL;
#pragma omp parallel num_threads(n_thread)
{
#pragma omp for schedule(dynamic)
for (int i = 0; i < n_leaf_node; i++)
{
int node = leaf_nodes[i];
int level = node_level[node];
int s_row = mat_cluster[2 * node];
int e_row = mat_cluster[2 * node + 1];
int n_row = e_row - s_row + 1;
for (int j = level - 1; j >= 0; j--)
{
int s_col = j * n_vec;
size_t Yk_mat_offset = (size_t) s_row * (size_t) Yk_mat_ld + (size_t) s_col;
DTYPE *Yk_mat_blk = Yk_mat + Yk_mat_offset;
int Yk_idx = node * max_level + (level - 1 - j);
H2P_dense_mat_init(&Yk[Yk_idx], n_row, n_vec);
ASSERT_PRINTF(Yk[Yk_idx] != NULL, "Failed to allocate %d * %d Yk[idx] matrices\n", n_row, n_vec);
H2P_dense_mat_p Yk_ij = Yk[Yk_idx];
copy_matrix_block(sizeof(DTYPE), n_row, n_vec, Yk_mat_blk, Yk_mat_ld, Yk_ij->data, Yk_ij->ld);
}
} // End of i loop
} // End of "#pragma omp parallel"
// Unfilled entries (non-leaf nodes / unused levels) get valid empty matrices
for (int i = 0; i < n_node * max_level; i++)
{
if (Yk[i] != NULL) continue;
H2P_dense_mat_init(&Yk[i], 8, 8);
Yk[i]->nrow = 0;
Yk[i]->ncol = 0;
Yk[i]->ld = 0;
}
// 7. Free intermediate arrays
for (int i = 0; i < n_node; i++)
{
H2P_dense_mat_destroy(&exU[i]);
H2P_dense_mat_destroy(&y0[i]);
}
free(exU);
free(y0);
free(vec);
free(Yk_mat);
*Yk_ = Yk;
}
// Gather matrices in HSS_B into a large matrix tmpB s.t. the i-th row j-th column
// block of tmpB is HSS_B[HSS_B_pair2idx(blk0[i], blk1[j])]
// Input parameters:
// n_blk{0, 1} : Number of row & column blocks
// blk{0, 1} : Node indices of row & column blocks
// n_node : Number of nodes
// HSS_B_p2i_{*} : CSR matrix array triple, convert (i, j) pair to an index for HSS_B
// HSS_B : Source HSS_B matrices
// Output parameter:
// tmpB : Result matrix
void H2P_SPDHSS_H2_gather_HSS_B(
    const int n_blk0, const int n_blk1, const int *blk0, const int *blk1, const int n_node,
    const int *HSS_B_p2i_rowptr, const int *HSS_B_p2i_colidx, const int *HSS_B_p2i_val,
    H2P_dense_mat_p *HSS_B, H2P_dense_mat_p tmpB
)
{
    // Reference block (0, 0) fixes the expected row count of block row 0
    // and column count of block column 0; CSR lookups return 1-based
    // indices (0 == missing), hence the decrement after each lookup.
    int idx00 = H2P_get_int_CSR_elem(HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, blk0[0], blk1[0]);
    ASSERT_PRINTF(idx00 > 0, "SPDHSS_B{%d, %d} does not exist!\n", blk0[0], blk1[0]);
    idx00--;
    const int ref_nrow = HSS_B[idx00]->nrow;
    const int ref_ncol = HSS_B[idx00]->ncol;
    // Total rows: sum of block heights down column 0, checking that every
    // block in that column has a consistent width
    int total_nrow = 0;
    for (int i = 0; i < n_blk0; i++)
    {
        int idx_i0 = H2P_get_int_CSR_elem(HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, blk0[i], blk1[0]);
        ASSERT_PRINTF(idx_i0 > 0, "SPDHSS_B{%d, %d} does not exist!\n", blk0[i], blk1[0]);
        idx_i0--;
        if (HSS_B[idx_i0]->ncol != ref_ncol)
        {
            ERROR_PRINTF(
                "SPDHSS_B{%d, %d} ncol = %d, expected %d\n",
                blk0[i], blk1[0], HSS_B[idx_i0]->ncol, ref_ncol
            );
            return;
        }
        total_nrow += HSS_B[idx_i0]->nrow;
    } // End of i loop
    // Total columns: sum of block widths across row 0, checking that every
    // block in that row has a consistent height
    int total_ncol = 0;
    for (int j = 0; j < n_blk1; j++)
    {
        int idx_0j = H2P_get_int_CSR_elem(HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, blk0[0], blk1[j]);
        ASSERT_PRINTF(idx_0j > 0, "SPDHSS_B{%d, %d} does not exist!\n", blk0[0], blk1[j]);
        idx_0j--;
        if (HSS_B[idx_0j]->nrow != ref_nrow)
        {
            ERROR_PRINTF(
                "SPDHSS_B{%d, %d} nrow = %d, expected %d\n",
                blk0[0], blk1[j], HSS_B[idx_0j]->nrow, ref_nrow
            );
            return;
        }
        total_ncol += HSS_B[idx_0j]->ncol;
    } // End of j loop
    // Copy each block into its slot of tmpB, tracking running offsets
    H2P_dense_mat_resize(tmpB, total_nrow, total_ncol);
    int row_offset = 0;
    for (int i = 0; i < n_blk0; i++)
    {
        int col_offset = 0, blk_row_h = 0;
        for (int j = 0; j < n_blk1; j++)
        {
            int idx_ij = H2P_get_int_CSR_elem(HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, blk0[i], blk1[j]);
            ASSERT_PRINTF(idx_ij > 0, "SPDHSS_B{%d, %d} does not exist!\n", blk0[i], blk1[j]);
            idx_ij--;
            H2P_dense_mat_p src_ij = HSS_B[idx_ij];
            DTYPE *dst_ij = tmpB->data + row_offset * tmpB->ld + col_offset;
            blk_row_h = src_ij->nrow;
            copy_matrix_block(sizeof(DTYPE), src_ij->nrow, src_ij->ncol, src_ij->data, src_ij->ld, dst_ij, tmpB->ld);
            col_offset += src_ij->ncol;
        } // End of j loop
        row_offset += blk_row_h;
    } // End of i loop
}
// Remove unused HSS_B matrices
// Input parameters:
// n_blk{0, 1} : Number of row & column blocks
// blk{0, 1} : Node indices of row & column blocks
// n_node : Number of nodes
// HSS_B_p2i_{*} : CSR matrix array triple, convert (i, j) pair to an index for HSS_B
// HSS_B : Source HSS_B matrices
// Output parameter:
// HSS_B_p2i_{*} : Updated CSR matrix array triple
// HSS_B : Updated HSS_B matrices (some unused Bij matrices will be deleted)
void H2P_SPDHSS_H2_clean_HSS_B(
    const int n_blk0, const int n_blk1, const int *blk0, const int *blk1, const int n_node,
    const int *HSS_B_p2i_rowptr, const int *HSS_B_p2i_colidx, int *HSS_B_p2i_val, H2P_dense_mat_p *HSS_B
)
{
    // Destroy every (blk0[i], blk1[j]) block and mark its CSR slot as empty.
    // The lookup returns a 1-based index (0 == missing), so decrement
    // before indexing HSS_B.
    for (int i = 0; i < n_blk0; i++)
    {
        const int node0 = blk0[i];
        for (int j = 0; j < n_blk1; j++)
        {
            const int node1 = blk1[j];
            int idx = H2P_get_int_CSR_elem(HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, node0, node1);
            ASSERT_PRINTF(idx > 0, "SPDHSS_B{%d, %d} does not exist!\n", node0, node1);
            H2P_dense_mat_destroy(&HSS_B[idx - 1]);
            H2P_set_int_CSR_elem(HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, node0, node1, 0);
        }
    }
}
// Calculate a new HSS Bij matrix for pair (node0, node1)
// Input parameters:
// h2mat : Source H2 matrix structure
// node{0, 1} : Node pair
// S, V, W, Minv : Arrays, size h2mat->n_node, intermediate matrices used in H2P_SPDHSS_H2_build()
// HSS_B_p2i_{*} : CSR matrix array triple, convert (i, j) pair to an index for HSS_B
// HSS_B : New HSS Bij matrices
// Output parameters:
// HSS_B_p2i_{*} : Updated CSR matrix array triple
// HSS_B : Updated HSS_B matrices (some unused Bij matrices will be deleted)
void H2P_SPDHSS_H2_calc_HSS_Bij(
H2Pack_p h2mat, const int node0, const int node1, const int tid,
H2P_dense_mat_p *S, H2P_dense_mat_p *V, H2P_dense_mat_p *W, H2P_dense_mat_p *Minv,
const int *HSS_B_p2i_rowptr, const int *HSS_B_p2i_colidx, int *HSS_B_p2i_val, H2P_dense_mat_p *HSS_B
)
{
int pt_dim = h2mat->pt_dim;
int max_child = h2mat->max_child;
int n_node = h2mat->n_node;
int *node_level = h2mat->node_level;
int *n_child = h2mat->n_child;
int *children = h2mat->children;
DTYPE *enbox = h2mat->enbox;
int level0 = node_level[node0];
int level1 = node_level[node1];
int n_child0 = n_child[node0];
int n_child1 = n_child[node1];
int *child0 = children + node0 * max_child;
int *child1 = children + node1 * max_child;
DTYPE *enbox0 = enbox + node0 * 2 * pt_dim;
DTYPE *enbox1 = enbox + node1 * 2 * pt_dim;
int is_adm = H2P_check_box_admissible(enbox0, enbox1, pt_dim, ALPHA_H2);
H2P_thread_buf_p thread_buf = h2mat->tb[tid];
int HSS_B_idx = H2P_get_int_CSR_elem(HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, node0, node1);
ASSERT_PRINTF(HSS_B_idx > 0, "SPDHSS_B{%d, %d} does not exist!\n", node0, node1);
HSS_B_idx--;
H2P_dense_mat_init(&HSS_B[HSS_B_idx], 0, 0);
H2P_dense_mat_p HSS_Bij = HSS_B[HSS_B_idx];
if (level0 == level1)
{
// 1.1: node0 and node1 are admissible
if (is_adm)
{
H2P_dense_mat_p H2_Bij = thread_buf->mat0;
H2P_dense_mat_p tmpM = thread_buf->mat1;
H2_Bij->nrow = 0;
H2P_get_Bij_block(h2mat, node0, node1, H2_Bij);
if (H2_Bij->nrow == 0)
{
ERROR_PRINTF("Bug in case 1.1, node pair (%d, %d)\n", node0, node1);
H2P_dense_mat_reset(H2_Bij);
H2P_dense_mat_reset(tmpM);
return;
}
H2P_dense_mat_p W0 = W[node0];
H2P_dense_mat_p W1 = W[node1];
// Bij = W{node1} * H2_B{H2_B_idx} * W{node2}';
H2P_dense_mat_resize(tmpM, H2_Bij->nrow, W1->nrow);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasTrans, H2_Bij->nrow, W1->nrow, H2_Bij->ncol,
1.0, H2_Bij->data, H2_Bij->ld, W1->data, W1->ld, 0.0, tmpM->data, tmpM->ld
);
H2P_dense_mat_resize(HSS_Bij, W0->nrow, tmpM->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, W0->nrow, tmpM->ncol, W0->ncol,
1.0, W0->data, W0->ld, tmpM->data, tmpM->ld, 0.0, HSS_Bij->data, HSS_Bij->ld
);
return;
} // End of "if (is_adm)"
// Otherwise: node0 and node1 are inadmissible
// 1.2: Both nodes are leaf nodes
if (n_child0 == 0 && n_child1 == 0)
{
H2P_dense_mat_p H2_Dij = thread_buf->mat0;
H2P_dense_mat_p tmpM = thread_buf->mat1;
H2_Dij->nrow = 0;
H2P_get_Dij_block(h2mat, node0, node1, H2_Dij);
if (H2_Dij->nrow == 0)
{
ERROR_PRINTF("Bug in case 1.2, node pair (%d, %d)\n", node0, node1);
H2P_dense_mat_reset(H2_Dij);
H2P_dense_mat_reset(tmpM);
return;
}
H2P_dense_mat_p S0 = S[node0];
H2P_dense_mat_p S1 = S[node1];
H2P_dense_mat_p V0 = V[node0];
H2P_dense_mat_p V1 = V[node1];
// tmpM = V{node1}' * linsolve(S{node1}, H2_D{H2_D_idx}, struct('LT', true));
CBLAS_TRSM(
CblasRowMajor, CblasLeft, CblasLower, CblasNoTrans, CblasNonUnit,
H2_Dij->nrow, H2_Dij->ncol, 1.0, S0->data, S0->ld, H2_Dij->data, H2_Dij->ld
);
H2P_dense_mat_resize(tmpM, V0->ncol, H2_Dij->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasTrans, CblasNoTrans, V0->ncol, H2_Dij->ncol, V0->nrow,
1.0, V0->data, V0->ld, H2_Dij->data, H2_Dij->ld, 0.0, tmpM->data, tmpM->ld
);
// Bij = linsolve(S{node2}, tmpM', struct('LT', true))' * V{node2};
// S{node2} * X = tmpM', we need Bij = X' * V{node2}
// Solve X' * S{node2}' = tmpM to obtain X' directly
CBLAS_TRSM(
CblasRowMajor, CblasRight, CblasLower, CblasTrans, CblasNonUnit,
tmpM->nrow, tmpM->ncol, 1.0, S1->data, S1->ld, tmpM->data, tmpM->ld
);
H2P_dense_mat_resize(HSS_Bij, tmpM->nrow, V1->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, tmpM->nrow, V1->ncol, tmpM->ncol,
1.0, tmpM->data, tmpM->ld, V1->data, V1->ld, 0.0, HSS_Bij->data, HSS_Bij->ld
);
return;
} // End of "if (n_child0 == 0 && n_child1 == 0)"
// 1.3: Both nodes are non-leaf nodes
if (n_child0 > 0 && n_child1 > 0)
{
H2P_dense_mat_p tmpB = thread_buf->mat0;
H2P_dense_mat_p tmpM0 = thread_buf->mat1;
H2P_dense_mat_p tmpM1 = thread_buf->mat2;
tmpB->nrow = 0;
H2P_SPDHSS_H2_gather_HSS_B(
n_child0, n_child1, child0, child1, n_node,
HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, HSS_B, tmpB
);
if (tmpB->nrow == 0)
{
ERROR_PRINTF("Bug in case 1.3, node pair (%d, %d)\n", node0, node1);
H2P_dense_mat_reset(tmpB);
H2P_dense_mat_reset(tmpM0);
H2P_dense_mat_reset(tmpM1);
return;
}
H2P_dense_mat_p V0 = V[node0];
H2P_dense_mat_p V1 = V[node1];
H2P_dense_mat_p Minv0 = Minv[node0];
H2P_dense_mat_p Minv1 = Minv[node1];
// Bij = V{node1}' * Minv{node1} * tmpB * Minv{node2} * V{node2};
H2P_dense_mat_resize(tmpM0, Minv1->nrow, V1->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, Minv1->nrow, V1->ncol, Minv1->ncol,
1.0, Minv1->data, Minv1->ld, V1->data, V1->ld, 0.0, tmpM0->data, tmpM0->ld
);
H2P_dense_mat_resize(tmpM1, tmpB->nrow, tmpM0->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, tmpB->nrow, tmpM0->ncol, tmpB->ncol,
1.0, tmpB->data, tmpB->ld, tmpM0->data, tmpM0->ld, 0.0, tmpM1->data, tmpM1->ld
);
H2P_dense_mat_resize(tmpM0, Minv0->nrow, tmpM1->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, Minv0->nrow, tmpM1->ncol, Minv0->ncol,
1.0, Minv0->data, Minv0->ld, tmpM1->data, tmpM1->ld, 0.0, tmpM0->data, tmpM0->ld
);
H2P_dense_mat_resize(HSS_Bij, V0->ncol, tmpM0->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasTrans, CblasNoTrans, V0->ncol, tmpM0->ncol, V0->nrow,
1.0, V0->data, V0->ld, tmpM0->data, tmpM0->ld, 0.0, HSS_Bij->data, HSS_Bij->ld
);
H2P_SPDHSS_H2_clean_HSS_B(
n_child0, n_child1, child0, child1, n_node,
HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, HSS_B
);
return;
} // End of "if (n_child0 > 0 && n_child1 > 0)"
// 1.4: node0 is non-leaf, node1 is leaf
if (n_child0 > 0 && n_child1 == 0)
{
H2P_dense_mat_p tmpB = thread_buf->mat0;
H2P_dense_mat_p tmpM0 = thread_buf->mat1;
H2P_dense_mat_p tmpM1 = thread_buf->mat2;
tmpB->nrow = 0;
H2P_SPDHSS_H2_gather_HSS_B(
n_child0, 1, child0, &node1, n_node,
HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, HSS_B, tmpB
);
if (tmpB->nrow == 0)
{
ERROR_PRINTF("Bug in case 1.4, node pair (%d, %d)\n", node0, node1);
H2P_dense_mat_reset(tmpB);
H2P_dense_mat_reset(tmpM0);
H2P_dense_mat_reset(tmpM1);
return;
}
H2P_dense_mat_p V0 = V[node0];
H2P_dense_mat_p V1 = V[node1];
H2P_dense_mat_p S1 = S[node1];
H2P_dense_mat_p Minv0 = Minv[node0];
// tmpM1 = V{node1}' * Minv{node1} * tmpB;
H2P_dense_mat_resize(tmpM0, Minv0->nrow, tmpB->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, Minv0->nrow, tmpB->ncol, Minv0->ncol,
1.0, Minv0->data, Minv0->ld, tmpB->data, tmpB->ld, 0.0, tmpM0->data, tmpM0->ld
);
H2P_dense_mat_resize(tmpM1, V0->ncol, tmpM0->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasTrans, CblasNoTrans, V0->ncol, tmpM0->ncol, V0->nrow,
1.0, V0->data, V0->ld, tmpM0->data, tmpM0->ld, 0.0, tmpM1->data, tmpM1->ld
);
// Bij = linsolve(S{node2}, tmpM1', struct('LT', true))' * V{node2};
// S{node2} * X = tmpM1', we need Bij = X' * V{node2}
// Solve X' * S{node2}' = tmpM1 to obtain X' directly
CBLAS_TRSM(
CblasRowMajor, CblasRight, CblasLower, CblasTrans, CblasNonUnit,
tmpM1->nrow, tmpM1->ncol, 1.0, S1->data, S1->ld, tmpM1->data, tmpM1->ld
);
H2P_dense_mat_resize(HSS_Bij, tmpM1->nrow, V1->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, tmpM1->nrow, V1->ncol, tmpM1->ncol,
1.0, tmpM1->data, tmpM1->ld, V1->data, V1->ld, 0.0, HSS_Bij->data, HSS_Bij->ld
);
H2P_SPDHSS_H2_clean_HSS_B(
n_child0, 1, child0, &node1, n_node,
HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, HSS_B
);
return;
} // End of "if (n_child0 > 0 && n_child1 == 0)"
// 1.5: node0 is leaf, node1 is non-leaf
if (n_child0 == 0 && n_child1 > 0)
{
H2P_dense_mat_p tmpB = thread_buf->mat0;
H2P_dense_mat_p tmpM0 = thread_buf->mat1;
H2P_dense_mat_p tmpM1 = thread_buf->mat2;
tmpB->nrow = 0;
H2P_SPDHSS_H2_gather_HSS_B(
1, n_child1, &node0, child1, n_node,
HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, HSS_B, tmpB
);
if (tmpB->nrow == 0)
{
ERROR_PRINTF("Bug in case 1.5, node pair (%d, %d)\n", node0, node1);
H2P_dense_mat_reset(tmpB);
H2P_dense_mat_reset(tmpM0);
H2P_dense_mat_reset(tmpM1);
return;
}
H2P_dense_mat_p V0 = V[node0];
H2P_dense_mat_p V1 = V[node1];
H2P_dense_mat_p S0 = S[node0];
H2P_dense_mat_p Minv1 = Minv[node1];
// tmpM1 = tmpB * Minv{node2} * V{node2};
H2P_dense_mat_resize(tmpM0, Minv1->nrow, V1->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, Minv1->nrow, V1->ncol, Minv1->ncol,
1.0, Minv1->data, Minv1->ld, V1->data, V1->ld, 0.0, tmpM0->data, tmpM0->ld
);
H2P_dense_mat_resize(tmpM1, tmpB->nrow, tmpM0->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, tmpB->nrow, tmpM0->ncol, tmpB->ncol,
1.0, tmpB->data, tmpB->ld, tmpM0->data, tmpM0->ld, 0.0, tmpM1->data, tmpM1->ld
);
// Bij = V{node1}' * linsolve(S{node1}, tmpM1, struct('LT', true));
CBLAS_TRSM(
CblasRowMajor, CblasLeft, CblasLower, CblasNoTrans, CblasNonUnit,
tmpM1->nrow, tmpM1->ncol, 1.0, S0->data, S0->ld, tmpM1->data, tmpM1->ld
);
H2P_dense_mat_resize(HSS_Bij, V0->ncol, tmpM1->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasTrans, CblasNoTrans, V0->ncol, tmpM1->ncol, V0->nrow,
1.0, V0->data, V0->ld, tmpM1->data, tmpM1->ld, 0.0, HSS_Bij->data, HSS_Bij->ld
);
H2P_SPDHSS_H2_clean_HSS_B(
1, n_child1, &node0, child1, n_node,
HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, HSS_B
);
return;
} // End of "if (n_child0 == 0 && n_child1 > 0)"
} // End of "if (level0 == level1)"
if (level0 > level1)
{
// Note: node1 must be a leaf node
if (n_child1 > 0)
{
ERROR_PRINTF("Bug in case 2, node pair (%d, %d)\n", node0, node1);
return;
} // End of "if (n_child1 > 0)"
// 2.1: node0 and node1 are admissible
if (is_adm)
{
H2P_dense_mat_p H2_Bij = thread_buf->mat0;
H2_Bij->nrow = 0;
H2P_get_Bij_block(h2mat, node0, node1, H2_Bij);
if (H2_Bij->nrow == 0)
{
ERROR_PRINTF("Bug in case 2.1, node pair (%d, %d)\n", node0, node1);
H2P_dense_mat_reset(H2_Bij);
return;
}
H2P_dense_mat_p W0 = W[node0];
// Bij = W{node1} * H2_B{H2_B_idx};
H2P_dense_mat_resize(HSS_Bij, W0->nrow, H2_Bij->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, W0->nrow, H2_Bij->ncol, W0->ncol,
1.0, W0->data, W0->ld, H2_Bij->data, H2_Bij->ld, 0.0, HSS_Bij->data, HSS_Bij->ld
);
return;
} // End of "if (is_adm)"
// Otherwise: node0 and node1 are inadmissible
// 2.2: node0 is a leaf node
if (n_child0 == 0)
{
H2P_dense_mat_p H2_Dij = thread_buf->mat0;
H2_Dij->nrow = 0;
H2P_get_Dij_block(h2mat, node0, node1, H2_Dij);
if (H2_Dij->nrow == 0)
{
ERROR_PRINTF("Bug in case 2.2, node pair (%d, %d)\n", node0, node1);
H2P_dense_mat_reset(H2_Dij);
return;
}
H2P_dense_mat_p V0 = V[node0];
H2P_dense_mat_p S0 = S[node0];
// Bij = V{node1}' * linsolve(S{node1}, H2_Dij, struct('LT', true));
CBLAS_TRSM(
CblasRowMajor, CblasLeft, CblasLower, CblasNoTrans, CblasNonUnit,
H2_Dij->nrow, H2_Dij->ncol, 1.0, S0->data, S0->ld, H2_Dij->data, H2_Dij->ld
);
H2P_dense_mat_resize(HSS_Bij, V0->ncol, H2_Dij->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasTrans, CblasNoTrans, V0->ncol, H2_Dij->ncol, V0->nrow,
1.0, V0->data, V0->ld, H2_Dij->data, H2_Dij->ld, 0.0, HSS_Bij->data, HSS_Bij->ld
);
return;
} // End of "if (n_child0 == 0)"
// 2.3: node0 is a non-leaf node
if (n_child0 > 0)
{
H2P_dense_mat_p tmpB = thread_buf->mat0;
H2P_dense_mat_p tmpM = thread_buf->mat1;
tmpB->nrow = 0;
H2P_SPDHSS_H2_gather_HSS_B(
n_child0, 1, child0, &node1, n_node,
HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, HSS_B, tmpB
);
if (tmpB->nrow == 0)
{
ERROR_PRINTF("Bug in case 2.3, node pair (%d, %d)\n", node0, node1);
H2P_dense_mat_reset(tmpB);
H2P_dense_mat_reset(tmpM);
return;
}
H2P_dense_mat_p V0 = V[node0];
H2P_dense_mat_p Minv0 = Minv[node0];
// Bij = V{node1}' * Minv{node1} * tmpB;
H2P_dense_mat_resize(tmpM, Minv0->nrow, tmpB->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, Minv0->nrow, tmpB->ncol, Minv0->ncol,
1.0, Minv0->data, Minv0->ld, tmpB->data, tmpB->ld, 0.0, tmpM->data, tmpM->ld
);
H2P_dense_mat_resize(HSS_Bij, V0->ncol, tmpM->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasTrans, CblasNoTrans, V0->ncol, tmpM->ncol, V0->nrow,
1.0, V0->data, V0->ld, tmpM->data, tmpM->ld, 0.0, HSS_Bij->data, HSS_Bij->ld
);
H2P_SPDHSS_H2_clean_HSS_B(
n_child0, 1, child0, &node1, n_node,
HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, HSS_B
);
return;
} // End of "if (n_child0 > 0)"
} // End of "if (level0 > level1)"
if (level0 < level1)
{
// Note: node0 must be a leaf node
if (n_child0 > 0)
{
ERROR_PRINTF("Bug in case 3, node pair (%d, %d)\n", node0, node1);
return;
}
// 3.1: node0 and node1 are admissible
if (is_adm)
{
H2P_dense_mat_p H2_Bij = thread_buf->mat0;
H2P_dense_mat_resize(H2_Bij, 128, 128);
H2_Bij->nrow = 0;
H2P_get_Bij_block(h2mat, node0, node1, H2_Bij);
if (H2_Bij->nrow == 0)
{
ERROR_PRINTF("Bug in case 3.1, node pair (%d, %d)\n", node0, node1);
H2P_dense_mat_reset(H2_Bij);
return;
}
H2P_dense_mat_p W1 = W[node1];
// Bij = H2_B{H2_B_idx} * W{node2}';
H2P_dense_mat_resize(HSS_Bij, H2_Bij->nrow, W1->nrow);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasTrans, H2_Bij->nrow, W1->nrow, H2_Bij->ncol,
1.0, H2_Bij->data, H2_Bij->ld, W1->data, W1->ld, 0.0, HSS_Bij->data, HSS_Bij->ld
);
return;
} // End of "if (is_adm)"
// Otherwise: node0 and node1 are inadmissible
// 3.2: node1 is a leaf node
if (n_child1 == 0)
{
H2P_dense_mat_p H2_Dij = thread_buf->mat0;
H2_Dij->nrow = 0;
H2P_get_Dij_block(h2mat, node0, node1, H2_Dij);
if (H2_Dij->nrow == 0)
{
ERROR_PRINTF("Bug in case 3.2, node pair (%d, %d)\n", node0, node1);
H2P_dense_mat_reset(H2_Dij);
return;
}
H2P_dense_mat_p V1 = V[node1];
H2P_dense_mat_p S1 = S[node1];
// Bij = linsolve(S{node2}, H2_Dij', struct('LT', true))' * V{node2};
// S{node2} * X = H2_Dij', we need Bij = X' * V{node2}
// Solve X' * S{node2}' = H2_Dij to obtain X' directly
CBLAS_TRSM(
CblasRowMajor, CblasRight, CblasLower, CblasTrans, CblasNonUnit,
H2_Dij->nrow, H2_Dij->ncol, 1.0, S1->data, S1->ld, H2_Dij->data, H2_Dij->ld
);
H2P_dense_mat_resize(HSS_Bij, H2_Dij->nrow, V1->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, H2_Dij->nrow, V1->ncol, H2_Dij->ncol,
1.0, H2_Dij->data, H2_Dij->ld, V1->data, V1->ld, 0.0, HSS_Bij->data, HSS_Bij->ld
);
return;
} // End of "if (n_child1 == 0)"
// 3.3: node1 is a non-leaf node
if (n_child1 > 0)
{
H2P_dense_mat_p tmpB = thread_buf->mat0;
H2P_dense_mat_p tmpM = thread_buf->mat1;
tmpB->nrow = 0;
H2P_SPDHSS_H2_gather_HSS_B(
1, n_child1, &node0, child1, n_node,
HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, HSS_B, tmpB
);
if (tmpB->nrow == 0)
{
ERROR_PRINTF("Bug in case 3.3, node pair (%d, %d)\n", node0, node1);
H2P_dense_mat_reset(tmpB);
H2P_dense_mat_reset(tmpM);
return;
}
H2P_dense_mat_p V1 = V[node1];
H2P_dense_mat_p Minv1 = Minv[node1];
// Bij = tmpB * Minv{node2} * V{node2};
H2P_dense_mat_resize(tmpM, Minv1->nrow, V1->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, Minv1->nrow, V1->ncol, Minv1->ncol,
1.0, Minv1->data, Minv1->ld, V1->data, V1->ld, 0.0, tmpM->data, tmpM->ld
);
H2P_dense_mat_resize(HSS_Bij, tmpB->nrow, tmpM->ncol);
CBLAS_GEMM(
CblasRowMajor, CblasNoTrans, CblasNoTrans, tmpB->nrow, tmpM->ncol, tmpB->ncol,
1.0, tmpB->data, tmpB->ld, tmpM->data, tmpM->ld, 0.0, HSS_Bij->data, HSS_Bij->ld
);
H2P_SPDHSS_H2_clean_HSS_B(
1, n_child1, &node0, child1, n_node,
HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, HSS_B
);
return;
} // End of "if (n_child1 > 0)"
} // End of "if (level0 < level1)"
}
// Construct the list of blocks (i, j) at each level satisfying
// (1) (i, j) are inadmissible pairs;
// (2) (i, j) are admissible but their parents are inadmissible.
// If (i, j) are at different levels, block (i, j) is contained and
// processed in the lower level (closer to leaf level) of the two.
// Input parameter:
// h2mat : Source H2 matrix
// Output parameters:
// *level_HSS_Bij_pairs_ : Array, size h2mat->max_level+1, new HSS Bij pairs on each level
// *n_HSS_Bij_pair_ : Total number of new HSS Bij pairs
void H2P_SPDHSS_H2_get_level_HSS_Bij_pairs(H2Pack_p h2mat, H2P_int_vec_p **level_HSS_Bij_pairs_, int *n_HSS_Bij_pair_)
{
    int n_node = h2mat->n_node;
    int max_level = h2mat->max_level;
    int H2_n_r_adm_pairs = h2mat->n_r_adm_pair;
    int H2_n_r_inadm_pairs = h2mat->n_r_inadm_pair;
    int *parent = h2mat->parent;
    int *node_level = h2mat->node_level;
    int *H2_r_adm_pairs = h2mat->r_adm_pairs;
    int *H2_r_inadm_pairs = h2mat->r_inadm_pairs;
    int n_level = max_level + 1; // This is the total number of levels
    // One H2P_int_vec per level; each stores its (i, j) pairs flattened as
    // consecutive {node0, node1} integer pairs
    H2P_int_vec_p *level_HSS_Bij_pairs = (H2P_int_vec_p*) malloc(sizeof(H2P_int_vec_p) * n_level);
    int *inadm_max_level = (int*) malloc(sizeof(int) * H2_n_r_inadm_pairs);
    int *adm_max_level = (int*) malloc(sizeof(int) * H2_n_r_adm_pairs);
    ASSERT_PRINTF(
        level_HSS_Bij_pairs != NULL && inadm_max_level != NULL && adm_max_level != NULL,
        "Failed to allocate arrays for storing new SPDHSS Bij pairs\n"
    );
    // For each reduced (in)admissible pair, record the deeper (larger) of the two
    // node levels: each pair is processed on that level
    // inadm_max_lvl = max(node_lvl(H2_r_near_pair), [], 2);
    // adm_max_lvl = max(node_lvl(H2_r_far_pair), [], 2);
    for (int i = 0; i < H2_n_r_inadm_pairs; i++)
    {
        int node0 = H2_r_inadm_pairs[2 * i];
        int node1 = H2_r_inadm_pairs[2 * i + 1];
        inadm_max_level[i] = MAX(node_level[node0], node_level[node1]);
    }
    for (int i = 0; i < H2_n_r_adm_pairs; i++)
    {
        int node0 = H2_r_adm_pairs[2 * i];
        int node1 = H2_r_adm_pairs[2 * i + 1];
        adm_max_level[i] = MAX(node_level[node0], node_level[node1]);
    }
    // Leaf level (max_level): collect all pairs whose deeper node sits on max_level
    // inadm_pairs = H2_r_near_pair(inadm_max_lvl == max_level, :);
    // adm_pairs = H2_r_far_pair(adm_max_lvl == max_level, :);
    int n_HSS_Bij_pair = 0;
    H2P_int_vec_init(&level_HSS_Bij_pairs[max_level], 1024);
    H2P_int_vec_p level_pairs = level_HSS_Bij_pairs[max_level];
    for (int i = 0; i < H2_n_r_inadm_pairs; i++)
    {
        if (inadm_max_level[i] != max_level) continue;
        H2P_int_vec_push_back(level_pairs, H2_r_inadm_pairs[2 * i]);
        H2P_int_vec_push_back(level_pairs, H2_r_inadm_pairs[2 * i + 1]);
    }
    for (int i = 0; i < H2_n_r_adm_pairs; i++)
    {
        if (adm_max_level[i] != max_level) continue;
        H2P_int_vec_push_back(level_pairs, H2_r_adm_pairs[2 * i]);
        H2P_int_vec_push_back(level_pairs, H2_r_adm_pairs[2 * i + 1]);
    }
    n_HSS_Bij_pair += level_pairs->length / 2;  // Each pair occupies two ints
    // Sweep from the level above the leaves up to level 1 (level 0 is the root
    // and never receives pairs). Pairs on level i+1 are propagated to level i by
    // replacing any node on level i+1 with its parent, then deduplicated.
    H2P_int_vec_p prev_pairs, prev_pairs1, work_buf;
    H2P_int_vec_init(&prev_pairs, 1024);
    H2P_int_vec_init(&prev_pairs1, 1024);
    H2P_int_vec_init(&work_buf, 1024);
    for (int i = max_level - 1; i >= 1; i--)
    {
        // Work on a copy of level (i+1)'s pair list so the stored list is untouched
        H2P_int_vec_p prev_pairs0 = level_HSS_Bij_pairs[i + 1];
        H2P_int_vec_set_capacity(prev_pairs, prev_pairs0->length);
        memcpy(prev_pairs->data, prev_pairs0->data, sizeof(int) * prev_pairs0->length);
        prev_pairs->length = prev_pairs0->length;
        // Handling partial admissible pairs
        // Lift each node that lives on level i+1 to its parent on level i
        for (int k = 0; k < prev_pairs->length; k++)
        {
            if (node_level[prev_pairs->data[k]] == i + 1)
                prev_pairs->data[k] = parent[prev_pairs->data[k]];
        }
        // Drop degenerate (i, i) pairs and deduplicate the rest:
        // prev_pairs = prev_pairs(prev_pairs(:, 1) ~= prev_pairs(:, 2), :);
        // prev_pairs = unique(prev_pairs, 'rows');
        // work_buf is split into two halves: key[] holds a unique linear index
        // node0 * n_node + node1 for each surviving pair, val[] its original index
        H2P_int_vec_set_capacity(work_buf, prev_pairs->length);
        int n_prev_pair = prev_pairs->length / 2;
        int *key = work_buf->data;
        int *val = work_buf->data + n_prev_pair;
        int *pp_data = prev_pairs->data;
        int valid_cnt = 0;
        for (int k = 0; k < n_prev_pair; k++)
        {
            int node0 = pp_data[2 * k];
            int node1 = pp_data[2 * k + 1];
            if (node0 == node1) continue;  // Both children collapsed onto the same parent
            key[valid_cnt] = node0 * n_node + node1;
            val[valid_cnt] = k;
            valid_cnt++;
        }
        // Sort by key so duplicate pairs become adjacent, then keep the first of each run
        H2P_qsort_int_key_val(key, val, 0, valid_cnt - 1);
        H2P_int_vec_set_capacity(prev_pairs1, valid_cnt * 2);
        int *pp1_data = prev_pairs1->data;
        int cnt = 0, curr_key = -19241112;  // Sentinel: keys are >= 0, so no valid key matches
        for (int k = 0; k < valid_cnt; k++)
        {
            if (curr_key != key[k])
            {
                curr_key = key[k];
                int pair_k = val[k];
                int node0 = pp_data[2 * pair_k];
                int node1 = pp_data[2 * pair_k + 1];
                pp1_data[2 * cnt] = node0;
                pp1_data[2 * cnt + 1] = node1;
                cnt++;
            }
        } // End of k loop
        prev_pairs1->length = 2 * cnt;
        // Level i's list = propagated pairs + this level's own (in)admissible pairs:
        // level_HSS_Bij_pairs{i} = [prev_pairs; inadm_blks; adm_blks];
        H2P_int_vec_init(&level_HSS_Bij_pairs[i], 1024);
        level_pairs = level_HSS_Bij_pairs[i];
        for (int k = 0; k < prev_pairs1->length; k++)
            H2P_int_vec_push_back(level_pairs, prev_pairs1->data[k]);
        for (int k = 0; k < H2_n_r_inadm_pairs; k++)
        {
            if (inadm_max_level[k] != i) continue;
            H2P_int_vec_push_back(level_pairs, H2_r_inadm_pairs[2 * k]);
            H2P_int_vec_push_back(level_pairs, H2_r_inadm_pairs[2 * k + 1]);
        }
        for (int k = 0; k < H2_n_r_adm_pairs; k++)
        {
            if (adm_max_level[k] != i) continue;
            H2P_int_vec_push_back(level_pairs, H2_r_adm_pairs[2 * k]);
            H2P_int_vec_push_back(level_pairs, H2_r_adm_pairs[2 * k + 1]);
        }
        n_HSS_Bij_pair += level_pairs->length / 2;
    } // End of i loop
    level_HSS_Bij_pairs[0] = NULL;  // Root level: no Bij pairs
    H2P_int_vec_destroy(&prev_pairs);
    H2P_int_vec_destroy(&prev_pairs1);
    H2P_int_vec_destroy(&work_buf);
    free(inadm_max_level);
    free(adm_max_level);
    *level_HSS_Bij_pairs_ = level_HSS_Bij_pairs;
    *n_HSS_Bij_pair_ = n_HSS_Bij_pair;
}
// Wrap up the new HSS matrix with calculated HSS_{U, B, D} and existing hierarchical tree information
// Input parameters:
// h2mat : Source H2 matrix
// HSS_{U, B, D} : New U/B/D matrices calculated in H2P_SPDHSS_H2_build()
// HSS_B_p2i_{*} : CSR matrix array triple, convert (i, j) pair to an index for HSS_B
// HSS_D_pair2idx : Size h2mat->n_node, convert (i, i) pair to an index for HSS_D
// Output parameter:
// *hssmat_ : New HSS matrix
void H2P_SPDHSS_H2_wrap_new_HSS(
    H2Pack_p h2mat, H2P_dense_mat_p *HSS_U, H2P_dense_mat_p *HSS_B, H2P_dense_mat_p *HSS_D,
    const int *HSS_B_p2i_rowptr, const int *HSS_B_p2i_colidx, const int *HSS_B_p2i_val,
    const int *HSS_D_pair2idx, H2Pack_p *hssmat_
)
{
    H2Pack_p hssmat;
    H2P_init(&hssmat, h2mat->pt_dim, h2mat->krnl_dim, h2mat->QR_stop_type, &h2mat->QR_stop_tol);
    int pt_dim = h2mat->pt_dim;
    int n_point = h2mat->n_point;
    int n_node = h2mat->n_node;
    int max_child = h2mat->max_child;
    int max_level = h2mat->max_level;
    // 1. Copy point coordinates
    hssmat->n_point = n_point;
    hssmat->max_leaf_points = h2mat->max_leaf_points;
    hssmat->max_leaf_size = h2mat->max_leaf_size;
    size_t coord_msize = sizeof(DTYPE) * n_point * pt_dim;
    hssmat->coord_idx = (int*) malloc(sizeof(int) * n_point);
    hssmat->coord = (DTYPE*) malloc(coord_msize);
    memcpy(hssmat->coord_idx, h2mat->coord_idx, sizeof(int) * n_point);
    memcpy(hssmat->coord, h2mat->coord, coord_msize);
    // 2. Copy hierarchical partition tree information (the HSS matrix shares the
    //    same partition tree as the source H2 matrix)
    hssmat->n_node = n_node;
    hssmat->root_idx = n_node - 1;
    hssmat->n_leaf_node = h2mat->n_leaf_node;
    hssmat->max_level = h2mat->max_level;
    hssmat->krnl_mat_size = h2mat->krnl_mat_size;
    size_t int_n_node_msize = sizeof(int) * n_node;
    size_t int_n_level_msize = sizeof(int) * (max_level + 1);
    size_t enbox_msize = sizeof(DTYPE) * n_node * 2 * pt_dim;
    hssmat->parent = malloc(int_n_node_msize);
    hssmat->children = malloc(int_n_node_msize * max_child);
    hssmat->pt_cluster = malloc(int_n_node_msize * 2);
    hssmat->mat_cluster = malloc(int_n_node_msize * 2);
    hssmat->n_child = malloc(int_n_node_msize);
    hssmat->node_level = malloc(int_n_node_msize);
    hssmat->node_height = malloc(int_n_node_msize);
    hssmat->level_n_node = malloc(int_n_level_msize);
    hssmat->level_nodes = malloc(int_n_level_msize * h2mat->n_leaf_node);
    hssmat->height_n_node = malloc(int_n_level_msize);
    hssmat->height_nodes = malloc(int_n_level_msize * h2mat->n_leaf_node);
    hssmat->enbox = malloc(enbox_msize);
    ASSERT_PRINTF(hssmat->parent != NULL, "Failed to allocate arrays for storing hierarchical partioning tree in SPDHSS\n");
    ASSERT_PRINTF(hssmat->children != NULL, "Failed to allocate arrays for storing hierarchical partioning tree in SPDHSS\n");
    ASSERT_PRINTF(hssmat->pt_cluster != NULL, "Failed to allocate arrays for storing hierarchical partioning tree in SPDHSS\n");
    ASSERT_PRINTF(hssmat->mat_cluster != NULL, "Failed to allocate arrays for storing hierarchical partioning tree in SPDHSS\n");
    ASSERT_PRINTF(hssmat->n_child != NULL, "Failed to allocate arrays for storing hierarchical partioning tree in SPDHSS\n");
    ASSERT_PRINTF(hssmat->node_level != NULL, "Failed to allocate arrays for storing hierarchical partioning tree in SPDHSS\n");
    ASSERT_PRINTF(hssmat->node_height != NULL, "Failed to allocate arrays for storing hierarchical partioning tree in SPDHSS\n");
    ASSERT_PRINTF(hssmat->level_n_node != NULL, "Failed to allocate arrays for storing hierarchical partioning tree in SPDHSS\n");
    ASSERT_PRINTF(hssmat->level_nodes != NULL, "Failed to allocate arrays for storing hierarchical partioning tree in SPDHSS\n");
    ASSERT_PRINTF(hssmat->height_n_node != NULL, "Failed to allocate arrays for storing hierarchical partioning tree in SPDHSS\n");
    ASSERT_PRINTF(hssmat->height_nodes != NULL, "Failed to allocate arrays for storing hierarchical partioning tree in SPDHSS\n");
    ASSERT_PRINTF(hssmat->enbox != NULL, "Failed to allocate arrays for storing hierarchical partioning tree in SPDHSS\n");
    memcpy(hssmat->parent       , h2mat->parent       , int_n_node_msize);
    memcpy(hssmat->children     , h2mat->children     , int_n_node_msize * max_child);
    memcpy(hssmat->pt_cluster   , h2mat->pt_cluster   , int_n_node_msize * 2);
    memcpy(hssmat->mat_cluster  , h2mat->mat_cluster  , int_n_node_msize * 2);
    memcpy(hssmat->n_child      , h2mat->n_child      , int_n_node_msize);
    memcpy(hssmat->node_level   , h2mat->node_level   , int_n_node_msize);
    memcpy(hssmat->node_height  , h2mat->node_height  , int_n_node_msize);
    memcpy(hssmat->level_n_node , h2mat->level_n_node , int_n_level_msize);
    memcpy(hssmat->level_nodes  , h2mat->level_nodes  , int_n_level_msize * h2mat->n_leaf_node);
    memcpy(hssmat->height_n_node, h2mat->height_n_node, int_n_level_msize);
    memcpy(hssmat->height_nodes , h2mat->height_nodes , int_n_level_msize * h2mat->n_leaf_node);
    memcpy(hssmat->enbox        , h2mat->enbox        , enbox_msize);
    // 3. Copy H2 & HSS reduced (in)admissible pairs
    hssmat->min_adm_level = h2mat->min_adm_level;
    hssmat->HSS_min_adm_level = h2mat->HSS_min_adm_level;
    hssmat->n_r_inadm_pair = h2mat->n_r_inadm_pair;
    hssmat->n_r_adm_pair = h2mat->n_r_adm_pair;
    hssmat->HSS_n_r_inadm_pair = h2mat->HSS_n_r_inadm_pair;
    hssmat->HSS_n_r_adm_pair = h2mat->HSS_n_r_adm_pair;
    size_t r_inadm_pairs_msize = sizeof(int) * h2mat->n_r_inadm_pair * 2;
    size_t r_adm_pairs_msize = sizeof(int) * h2mat->n_r_adm_pair * 2;
    size_t HSS_r_inadm_pairs_msize = sizeof(int) * h2mat->HSS_n_r_inadm_pair * 2;
    size_t HSS_r_adm_pairs_msize = sizeof(int) * h2mat->HSS_n_r_adm_pair * 2;
    hssmat->r_inadm_pairs = (int*) malloc(r_inadm_pairs_msize);
    hssmat->r_adm_pairs = (int*) malloc(r_adm_pairs_msize);
    hssmat->HSS_r_inadm_pairs = (int*) malloc(HSS_r_inadm_pairs_msize);
    hssmat->HSS_r_adm_pairs = (int*) malloc(HSS_r_adm_pairs_msize);
    hssmat->node_inadm_lists = (int*) malloc(int_n_node_msize * h2mat->max_neighbor);
    hssmat->node_n_r_inadm = (int*) malloc(int_n_node_msize);
    hssmat->node_n_r_adm = (int*) malloc(int_n_node_msize);
    ASSERT_PRINTF(hssmat->r_inadm_pairs != NULL, "Failed to allocate arrays for storing (in)admissible pairs in SPDHSS\n");
    ASSERT_PRINTF(hssmat->r_adm_pairs != NULL, "Failed to allocate arrays for storing (in)admissible pairs in SPDHSS\n");
    ASSERT_PRINTF(hssmat->HSS_r_inadm_pairs != NULL, "Failed to allocate arrays for storing (in)admissible pairs in SPDHSS\n");
    ASSERT_PRINTF(hssmat->HSS_r_adm_pairs != NULL, "Failed to allocate arrays for storing (in)admissible pairs in SPDHSS\n");
    ASSERT_PRINTF(hssmat->node_inadm_lists != NULL, "Failed to allocate arrays for storing (in)admissible pairs in SPDHSS\n");
    ASSERT_PRINTF(hssmat->node_n_r_inadm != NULL, "Failed to allocate arrays for storing (in)admissible pairs in SPDHSS\n");
    ASSERT_PRINTF(hssmat->node_n_r_adm != NULL, "Failed to allocate arrays for storing (in)admissible pairs in SPDHSS\n");
    memcpy(hssmat->r_inadm_pairs    , h2mat->r_inadm_pairs    , r_inadm_pairs_msize);
    memcpy(hssmat->r_adm_pairs      , h2mat->r_adm_pairs      , r_adm_pairs_msize);
    memcpy(hssmat->HSS_r_inadm_pairs, h2mat->HSS_r_inadm_pairs, HSS_r_inadm_pairs_msize);
    memcpy(hssmat->HSS_r_adm_pairs  , h2mat->HSS_r_adm_pairs  , HSS_r_adm_pairs_msize);
    memcpy(hssmat->node_inadm_lists , h2mat->node_inadm_lists , int_n_node_msize * h2mat->max_neighbor);
    memcpy(hssmat->node_n_r_inadm   , h2mat->node_n_r_inadm   , int_n_node_msize);
    memcpy(hssmat->node_n_r_adm     , h2mat->node_n_r_adm     , int_n_node_msize);
    // 4. Initialize thread-local buffer
    hssmat->tb = (H2P_thread_buf_p*) malloc(sizeof(H2P_thread_buf_p) * hssmat->n_thread);
    ASSERT_PRINTF(hssmat->tb != NULL, "Failed to allocate %d thread buffers in SPDHSS\n", hssmat->n_thread);
    for (int i = 0; i < hssmat->n_thread; i++)
        H2P_thread_buf_init(&hssmat->tb[i], hssmat->krnl_mat_size);
    // 5. Set up kernel pointers and U/B/D info
    hssmat->BD_JIT = 0;
    hssmat->is_HSS = 1;
    hssmat->krnl_param = h2mat->krnl_param;
    hssmat->krnl_eval = h2mat->krnl_eval;
    hssmat->krnl_bimv = h2mat->krnl_bimv;
    hssmat->krnl_bimv_flops = h2mat->krnl_bimv_flops;
    int n_thread = hssmat->n_thread;
    int n_leaf_node = hssmat->n_leaf_node;
    int *leaf_nodes = hssmat->height_nodes;  // First n_leaf_node entries of height_nodes are height-0 (leaf) nodes
    size_t *mat_size = hssmat->mat_size;
    int BD_ntask_thread = (hssmat->BD_JIT == 1) ? BD_NTASK_THREAD : 1;
    // 5.1 Take ownership of the U matrices directly (no copy); replace missing
    //     entries with 0 x 0 placeholders so every node has a valid U matrix
    hssmat->n_UJ = h2mat->n_UJ;
    hssmat->U = HSS_U;
    for (int i = 0; i < n_node; i++)
    {
        if (HSS_U[i] != NULL)
        {
            mat_size[U_SIZE_IDX] += HSS_U[i]->nrow * HSS_U[i]->ncol;
            mat_size[MV_FWD_SIZE_IDX] += HSS_U[i]->nrow * HSS_U[i]->ncol;
            mat_size[MV_FWD_SIZE_IDX] += HSS_U[i]->nrow + HSS_U[i]->ncol;
            mat_size[MV_BWD_SIZE_IDX] += HSS_U[i]->nrow * HSS_U[i]->ncol;
            mat_size[MV_BWD_SIZE_IDX] += HSS_U[i]->nrow + HSS_U[i]->ncol;
        } else {
            H2P_dense_mat_init(&HSS_U[i], 0, 0);
            HSS_U[i]->nrow = 0;
            HSS_U[i]->ncol = 0;
            HSS_U[i]->ld = 0;
        }
    }
    // 5.2 Copy B matrices into one contiguous array, indexed via a (i, j) -> idx CSR map
    int HSS_n_r_adm_pair = hssmat->HSS_n_r_adm_pair;
    int *HSS_r_adm_pairs = hssmat->HSS_r_adm_pairs;
    size_t int_r_adm_pairs_msize = sizeof(int) * HSS_n_r_adm_pair;
    int *B_pair_i = (int*) malloc(int_r_adm_pairs_msize * 2);
    int *B_pair_j = (int*) malloc(int_r_adm_pairs_msize * 2);
    int *B_pair_v = (int*) malloc(int_r_adm_pairs_msize * 2);
    int *B_nrow = (int*) malloc(int_r_adm_pairs_msize);
    int *B_ncol = (int*) malloc(int_r_adm_pairs_msize);
    size_t *B_ptr = (size_t*) malloc(sizeof(size_t) * (HSS_n_r_adm_pair + 1));
    ASSERT_PRINTF(
        B_nrow != NULL && B_ncol != NULL && B_ptr != NULL,
        "Failed to allocate %d SPDHSS B matrices information array\n", HSS_n_r_adm_pair
    );
    ASSERT_PRINTF(
        B_pair_i != NULL && B_pair_j != NULL && B_pair_v != NULL,
        "Failed to allocate working buffer for SPDHSS B matrices indexing\n"
    );
    hssmat->n_B = HSS_n_r_adm_pair;
    hssmat->B_nrow = B_nrow;
    hssmat->B_ncol = B_ncol;
    hssmat->B_ptr = B_ptr;
    int B_pair_cnt = 0;
    size_t B_total_size = 0;
    B_ptr[0] = 0;
    for (int i = 0; i < HSS_n_r_adm_pair; i++)
    {
        int node0 = HSS_r_adm_pairs[2 * i];
        int node1 = HSS_r_adm_pairs[2 * i + 1];
        // CSR map stores 1-based indices (0 == not present), hence > 0 and the decrement
        int HSS_B_idx = H2P_get_int_CSR_elem(HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, node0, node1);
        ASSERT_PRINTF(HSS_B_idx > 0, "SPDHSS_B{%d, %d} does not exist!\n", node0, node1);
        HSS_B_idx--;
        H2P_dense_mat_p HSS_Bi = HSS_B[HSS_B_idx];
        B_nrow[i] = HSS_Bi->nrow;
        B_ncol[i] = HSS_Bi->ncol;
        size_t Bi_size = (size_t) B_nrow[i] * (size_t) B_ncol[i];
        B_total_size += Bi_size;
        B_ptr[i + 1] = Bi_size;  // Per-block size for now, converted to offsets below
        B_pair_i[B_pair_cnt] = node0;
        B_pair_j[B_pair_cnt] = node1;
        B_pair_v[B_pair_cnt] = i + 1;  // 1-based so 0 can mean "no such pair"
        B_pair_cnt++;
        mat_size[MV_MID_SIZE_IDX] += B_nrow[i] * B_ncol[i];
        mat_size[MV_MID_SIZE_IDX] += 2 * (B_nrow[i] + B_ncol[i]);
    }
    H2P_int_vec_p B_blk = hssmat->B_blk;
    H2P_partition_workload(HSS_n_r_adm_pair, B_ptr + 1, B_total_size, n_thread * BD_ntask_thread, B_blk);
    // Prefix-sum the per-block sizes into offsets into B_data
    for (int i = 1; i <= HSS_n_r_adm_pair; i++) B_ptr[i] += B_ptr[i - 1];
    mat_size[B_SIZE_IDX] = B_total_size;
    hssmat->B_p2i_rowptr = (int*) malloc(sizeof(int) * (n_node + 1));
    hssmat->B_p2i_colidx = (int*) malloc(int_r_adm_pairs_msize * 2);
    hssmat->B_p2i_val = (int*) malloc(int_r_adm_pairs_msize * 2);
    ASSERT_PRINTF(hssmat->B_p2i_rowptr != NULL, "Failed to allocate arrays for SPDHSS B matrices indexing\n");
    ASSERT_PRINTF(hssmat->B_p2i_colidx != NULL, "Failed to allocate arrays for SPDHSS B matrices indexing\n");
    ASSERT_PRINTF(hssmat->B_p2i_val != NULL, "Failed to allocate arrays for SPDHSS B matrices indexing\n");
    H2P_int_COO_to_CSR(
        n_node, B_pair_cnt, B_pair_i, B_pair_j, B_pair_v,
        hssmat->B_p2i_rowptr, hssmat->B_p2i_colidx, hssmat->B_p2i_val
    );
    // COO working buffers are no longer needed after the CSR conversion
    free(B_pair_i);
    free(B_pair_j);
    free(B_pair_v);
    hssmat->B_data = (DTYPE*) malloc_aligned(sizeof(DTYPE) * B_total_size, 64);
    ASSERT_PRINTF(hssmat->B_data != NULL, "Failed to allocate space for storing all %zu SPDHSS B matrices elements\n", B_total_size);
    DTYPE *B_data = hssmat->B_data;
    const int n_B_blk = B_blk->length - 1;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        //#pragma omp for schedule(dynamic) nowait
        //for (int i_blk = 0; i_blk < n_B_blk; i_blk++)
        int i_blk = tid; // Use first-touch policy for better NUMA memory access performance
        {
            // Check bounds before indexing B_blk->data: with more threads than
            // blocks, B_blk->data[i_blk] would be an out-of-bounds read
            int B_blk_s = 0, B_blk_e = 0;
            if (i_blk < n_B_blk)
            {
                B_blk_s = B_blk->data[i_blk];
                B_blk_e = B_blk->data[i_blk + 1];
            }
            for (int i = B_blk_s; i < B_blk_e; i++)
            {
                int node0 = HSS_r_adm_pairs[2 * i];
                int node1 = HSS_r_adm_pairs[2 * i + 1];
                int HSS_B_idx = H2P_get_int_CSR_elem(HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, node0, node1);
                ASSERT_PRINTF(HSS_B_idx > 0, "SPDHSS_B{%d, %d} does not exist!\n", node0, node1);
                HSS_B_idx--;
                H2P_dense_mat_p HSS_Bi = HSS_B[HSS_B_idx];
                int Bi_nrow = HSS_Bi->nrow;
                int Bi_ncol = HSS_Bi->ncol;
                DTYPE *Bi = B_data + B_ptr[i];
                copy_matrix_block(sizeof(DTYPE), Bi_nrow, Bi_ncol, HSS_Bi->data, HSS_Bi->ld, Bi, Bi_ncol);
            }
        } // End of i_blk loop
    } // End of "#pragma omp parallel"
    // 5.3 Copy D matrices (one diagonal block per leaf node) the same way
    size_t int_n_leaf_node_msize = sizeof(int) * n_leaf_node;
    int *D_pair_i = (int*) malloc(int_n_leaf_node_msize * 2);
    int *D_pair_j = (int*) malloc(int_n_leaf_node_msize * 2);
    int *D_pair_v = (int*) malloc(int_n_leaf_node_msize * 2);
    int *D_nrow = (int*) malloc(int_n_leaf_node_msize);
    int *D_ncol = (int*) malloc(int_n_leaf_node_msize);
    size_t *D_ptr = (size_t*) malloc(sizeof(size_t) * (n_leaf_node + 1));
    ASSERT_PRINTF(
        D_nrow != NULL && D_ncol != NULL && D_ptr != NULL,
        "Failed to allocate %d SPDHSS D matrices information array\n", n_leaf_node
    );
    ASSERT_PRINTF(
        D_pair_i != NULL && D_pair_j != NULL && D_pair_v != NULL,
        "Failed to allocate working buffer for SPDHSS D matrices indexing\n"
    );
    hssmat->n_D = n_leaf_node;
    hssmat->D_nrow = D_nrow;
    hssmat->D_ncol = D_ncol;
    hssmat->D_ptr = D_ptr;
    int D_pair_cnt = 0;
    size_t D_total_size = 0;
    D_ptr[0] = 0;
    for (int i = 0; i < n_leaf_node; i++)
    {
        int node = leaf_nodes[i];
        int HSS_D_idx = HSS_D_pair2idx[node];  // 0-based index, valid for every leaf node
        ASSERT_PRINTF(HSS_D_idx >= 0, "SPDHSS_D{%d, %d} does not exist!\n", node, node);
        H2P_dense_mat_p HSS_Di = HSS_D[HSS_D_idx];
        D_nrow[i] = HSS_Di->nrow;
        D_ncol[i] = HSS_Di->ncol;
        size_t Di_size = (size_t) D_nrow[i] * (size_t) D_ncol[i];
        D_total_size += Di_size;
        D_ptr[i + 1] = Di_size;  // Per-block size for now, converted to offsets below
        D_pair_i[D_pair_cnt] = node;
        D_pair_j[D_pair_cnt] = node;
        D_pair_v[D_pair_cnt] = i + 1;  // 1-based so 0 can mean "no such pair"
        D_pair_cnt++;
        mat_size[MV_DEN_SIZE_IDX] += D_nrow[i] * D_ncol[i];
        mat_size[MV_DEN_SIZE_IDX] += 2 * (D_nrow[i] + D_ncol[i]);
    }
    H2P_int_vec_p D_blk0 = hssmat->D_blk0;
    H2P_int_vec_p D_blk1 = hssmat->D_blk1;
    H2P_partition_workload(n_leaf_node, D_ptr + 1, D_total_size, n_thread * BD_ntask_thread, D_blk0);
    for (int i = 1; i <= n_leaf_node; i++) D_ptr[i] += D_ptr[i - 1];
    // HSS has no off-diagonal inadmissible D blocks; D_blk1 is an empty partition
    D_blk1->length = 1;
    D_blk1->data[0] = 0;
    mat_size[D_SIZE_IDX] = D_total_size;
    hssmat->D_p2i_rowptr = (int*) malloc(sizeof(int) * (n_node + 1));
    hssmat->D_p2i_colidx = (int*) malloc(int_n_leaf_node_msize * 2);
    hssmat->D_p2i_val = (int*) malloc(int_n_leaf_node_msize * 2);
    ASSERT_PRINTF(hssmat->D_p2i_rowptr != NULL, "Failed to allocate arrays for SPDHSS D matrices indexing\n");
    ASSERT_PRINTF(hssmat->D_p2i_colidx != NULL, "Failed to allocate arrays for SPDHSS D matrices indexing\n");
    ASSERT_PRINTF(hssmat->D_p2i_val != NULL, "Failed to allocate arrays for SPDHSS D matrices indexing\n");
    H2P_int_COO_to_CSR(
        n_node, D_pair_cnt, D_pair_i, D_pair_j, D_pair_v,
        hssmat->D_p2i_rowptr, hssmat->D_p2i_colidx, hssmat->D_p2i_val
    );
    // COO working buffers are no longer needed after the CSR conversion
    free(D_pair_i);
    free(D_pair_j);
    free(D_pair_v);
    hssmat->D_data = (DTYPE*) malloc_aligned(sizeof(DTYPE) * D_total_size, 64);
    ASSERT_PRINTF(hssmat->D_data != NULL, "Failed to allocate space for storing all %zu SPDHSS D matrices elements\n", D_total_size);
    DTYPE *D_data = hssmat->D_data;
    const int n_D0_blk = D_blk0->length - 1;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        //#pragma omp for schedule(dynamic) nowait
        //for (int i_blk0 = 0; i_blk0 < n_D0_blk; i_blk0++)
        int i_blk0 = tid; // Use first-touch policy for better NUMA memory access performance
        {
            // Check bounds before indexing D_blk0->data: with more threads than
            // blocks, D_blk0->data[i_blk0] would be an out-of-bounds read
            int D_blk0_s = 0, D_blk0_e = 0;
            if (i_blk0 < n_D0_blk)
            {
                D_blk0_s = D_blk0->data[i_blk0];
                D_blk0_e = D_blk0->data[i_blk0 + 1];
            }
            for (int i = D_blk0_s; i < D_blk0_e; i++)
            {
                int node = leaf_nodes[i];
                int HSS_D_idx = HSS_D_pair2idx[node];
                H2P_dense_mat_p HSS_Di = HSS_D[HSS_D_idx];
                int Di_nrow = HSS_Di->nrow;
                int Di_ncol = HSS_Di->ncol;
                DTYPE *Di = D_data + D_ptr[i];
                copy_matrix_block(sizeof(DTYPE), Di_nrow, Di_ncol, HSS_Di->data, HSS_Di->ld, Di, Di_ncol);
            }
        } // End of i_blk0 loop
    } // End of "#pragma omp parallel"
    // 6. Copy environment variables & permutation indices
    hssmat->mm_max_n_vec = h2mat->mm_max_n_vec;
    hssmat->print_timers = h2mat->print_timers;
    hssmat->print_dbginfo = h2mat->print_dbginfo;
    size_t pmt_idx_msize = sizeof(int) * h2mat->krnl_mat_size;
    int *fwd_pmt_idx = (int*) malloc(pmt_idx_msize);
    int *bwd_pmt_idx = (int*) malloc(pmt_idx_msize);
    memcpy(fwd_pmt_idx, h2mat->fwd_pmt_idx, pmt_idx_msize);
    memcpy(bwd_pmt_idx, h2mat->bwd_pmt_idx, pmt_idx_msize);
    hssmat->fwd_pmt_idx = fwd_pmt_idx;
    hssmat->bwd_pmt_idx = bwd_pmt_idx;
    size_t krnl_mat_msize = sizeof(DTYPE) * hssmat->krnl_mat_size;
    hssmat->xT = (DTYPE*) malloc(krnl_mat_msize);
    hssmat->yT = (DTYPE*) malloc(krnl_mat_msize);
    hssmat->pmt_x = (DTYPE*) malloc(krnl_mat_msize * hssmat->mm_max_n_vec);
    hssmat->pmt_y = (DTYPE*) malloc(krnl_mat_msize * hssmat->mm_max_n_vec);
    ASSERT_PRINTF(
        hssmat->xT != NULL && hssmat->yT != NULL && hssmat->pmt_x != NULL && hssmat->pmt_y != NULL,
        "Failed to allocate working arrays of size %d for matvec & matmul\n", 2 * hssmat->krnl_mat_size * (hssmat->mm_max_n_vec+1)
    );
    *hssmat_ = hssmat;
}
// Build an SPD HSS matrix A_{HSS} from an H2 matrix s.t. A_{HSS} ~= A_{H2}
// Input parameters:
//   max_rank : Maximum rank of the new HSS U/B blocks
//   reltol   : Relative tolerance for rank truncation in the pivoted QR steps
//   shift    : Diagonal shift, the target matrix is A_{H2} + shift * I
//   h2mat    : Constructed H2 matrix; must not already be HSS and not RPY Ewald
// Output parameter:
//   *hssmat_ : The newly constructed SPD HSS matrix
void H2P_SPDHSS_H2_build(
    const int max_rank, const DTYPE reltol, const DTYPE shift, 
    H2Pack_p h2mat, H2Pack_p *hssmat_
)
{
    if (h2mat == NULL || h2mat->U == NULL || h2mat->is_HSS)
    {
        // Fixed message: the condition also rejects matrices that ARE already HSS
        ERROR_PRINTF("Input h2mat is not constructed or is already configured as HSS\n");
        return;
    }
    if (h2mat->is_RPY_Ewald)
    {
        ERROR_PRINTF("Cannot construct SPDHSS for RPY Ewald kernel\n");
        return;
    }
    #ifdef __linux__
    // Any H2P_dense_mat_t->data allocation > 1KB will use mmap instead of sbrk and can be released later
    mallopt(M_MMAP_THRESHOLD, 1024);
    #endif
    int n_node          = h2mat->n_node;
    int n_thread        = h2mat->n_thread;
    int n_leaf_node     = h2mat->n_leaf_node;
    int max_child       = h2mat->max_child;
    int max_level       = h2mat->max_level;
    int *children       = h2mat->children;
    int *n_child        = h2mat->n_child;
    int *level_n_node   = h2mat->level_n_node;
    int *level_nodes    = h2mat->level_nodes;
    int *node_level     = h2mat->node_level;
    int *leaf_nodes     = h2mat->height_nodes;
    H2P_dense_mat_p  *H2_U       = h2mat->U;
    H2P_thread_buf_p *thread_buf = h2mat->tb;
    int n_level = max_level + 1;    // This is the total number of levels
    double st, et, build_U_t = 0.0, build_B_t = 0.0, build_D_t = 0.0;
    // 1. Accumulate off-diagonal block row H2 matvec results
    st = get_wtime_sec();
    int n_vec = max_rank + 10;
    H2P_dense_mat_p *Yk;
    H2P_SPDHSS_H2_acc_matvec(h2mat, n_vec, &Yk);
    et = get_wtime_sec();
    build_U_t += et - st;
    // printf("SPDHSS build: accumulative matvec finished %f.\n", et - st);
    fflush(stdout);
    // 2. Get the new HSS Bij pairs on each level
    st = get_wtime_sec();
    H2P_int_vec_p *level_HSS_Bij_pairs;
    int n_HSS_Bij_pair;
    H2P_SPDHSS_H2_get_level_HSS_Bij_pairs(h2mat, &level_HSS_Bij_pairs, &n_HSS_Bij_pair);
    et = get_wtime_sec();
    build_B_t += et - st;
    // 3. Prepare auxiliary matrices
    H2P_dense_mat_p *S     = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node);
    H2P_dense_mat_p *V     = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node);
    H2P_dense_mat_p *W     = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node);
    H2P_dense_mat_p *Minv  = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node);
    H2P_dense_mat_p *HSS_U = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node);
    H2P_dense_mat_p *HSS_B = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_HSS_Bij_pair);
    H2P_dense_mat_p *HSS_D = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_leaf_node);
    ASSERT_PRINTF(
        S != NULL && V != NULL && W != NULL && Minv != NULL, 
        "Failed to allocate %d working arrays for SPDHSS construction\n", 4 * n_node
    );
    ASSERT_PRINTF(HSS_U != NULL, "Failed to allocate %d SPDHSS U matrices\n", n_node);
    ASSERT_PRINTF(HSS_B != NULL, "Failed to allocate %d SPDHSS B matrices\n", n_HSS_Bij_pair);
    ASSERT_PRINTF(HSS_D != NULL, "Failed to allocate %d SPDHSS D matrices\n", n_leaf_node);
    for (int i = 0; i < n_node; i++)
    {
        S[i]     = NULL;
        V[i]     = NULL;
        W[i]     = NULL;
        Minv[i]  = NULL;
        HSS_U[i] = NULL;
    }
    for (int i = 0; i < n_HSS_Bij_pair; i++) HSS_B[i] = NULL;
    for (int i = 0; i < n_leaf_node; i++) HSS_D[i] = NULL;
    // 4. Construct all HSS_{B,D}_pair2idx in advance. Some HSS_B_pair2idx(i, j) 
    //    will be set as 0 but no new (i, j) pair will be added later
    int *HSS_D_pair2idx = (int*) malloc(sizeof(int) * n_node);
    memset(HSS_D_pair2idx, 0, sizeof(int) * n_node);
    for (int i = 0; i < n_leaf_node; i++)
    {
        int node = leaf_nodes[i];
        HSS_D_pair2idx[node] = i;
    }
    st = get_wtime_sec();
    int HSS_B_n_pair = 0;
    for (int i = max_level; i >= 1; i--)
    {
        H2P_int_vec_p level_i_HSS_Bij_pairs = level_HSS_Bij_pairs[i];
        HSS_B_n_pair += level_i_HSS_Bij_pairs->length / 2;
    }
    int *HSS_B_pair_i     = (int*) malloc(sizeof(int) * HSS_B_n_pair);
    int *HSS_B_pair_j     = (int*) malloc(sizeof(int) * HSS_B_n_pair);
    int *HSS_B_pair_v     = (int*) malloc(sizeof(int) * HSS_B_n_pair);
    int *HSS_B_p2i_rowptr = (int*) malloc(sizeof(int) * (n_node + 1));
    int *HSS_B_p2i_colidx = (int*) malloc(sizeof(int) * HSS_B_n_pair);
    int *HSS_B_p2i_val    = (int*) malloc(sizeof(int) * HSS_B_n_pair);
    // Explicit NULL comparisons: the original relied on pointer truthiness for
    // the *_v arrays, which was inconsistent with the rest of the checks
    ASSERT_PRINTF(
        HSS_B_pair_i != NULL && HSS_B_pair_j != NULL && HSS_B_pair_v != NULL,
        "Failed to allocate work arrays for indexing SPDHSS Bij pairs\n"
    );
    ASSERT_PRINTF(
        HSS_B_p2i_rowptr != NULL && HSS_B_p2i_colidx != NULL && HSS_B_p2i_val != NULL,
        "Failed to allocate arrays for indexing SPDHSS Bij pairs\n"
    );
    int HSS_B_idx = 0;
    for (int i = max_level; i >= 1; i--)
    {
        H2P_int_vec_p level_i_HSS_Bij_pairs = level_HSS_Bij_pairs[i];
        for (int j = 0; j < level_i_HSS_Bij_pairs->length / 2; j++)
        {
            int node0 = level_i_HSS_Bij_pairs->data[2 * j];
            int node1 = level_i_HSS_Bij_pairs->data[2 * j + 1];
            HSS_B_pair_i[HSS_B_idx] = node0;
            HSS_B_pair_j[HSS_B_idx] = node1;
            // Store idx+1 so that a CSR lookup returning 0 means "pair absent"
            HSS_B_pair_v[HSS_B_idx] = HSS_B_idx + 1;
            HSS_B_idx++;
        }
    }
    H2P_int_COO_to_CSR(
        n_node, HSS_B_idx, HSS_B_pair_i, HSS_B_pair_j, HSS_B_pair_v, 
        HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val
    );
    free(HSS_B_pair_i);
    free(HSS_B_pair_j);
    free(HSS_B_pair_v);
    et = get_wtime_sec();
    build_B_t += et - st;
    // 5. Loop over all leaf nodes to construct new D matrices
    st = get_wtime_sec();
    H2P_int_vec_p D_blk0 = h2mat->D_blk0;
    const int n_D0_blk = D_blk0->length - 1;
    #pragma omp parallel num_threads(n_thread)
    {
        #pragma omp for schedule(dynamic)
        for (int i_blk0 = 0; i_blk0 < n_D0_blk; i_blk0++)
        {
            // The loop bound guarantees i_blk0 < n_D0_blk, so both block
            // boundaries are always valid (a dead in-loop guard was removed)
            int D_blk0_s = D_blk0->data[i_blk0];
            int D_blk0_e = D_blk0->data[i_blk0 + 1];
            for (int i = D_blk0_s; i < D_blk0_e; i++)
            {
                // HSS_D{HSS_D_idx} = H2_D{H2_D_idx} + shift * eye(size(H2_D{H2_D_idx}));
                int node = leaf_nodes[i];
                int HSS_D_idx = HSS_D_pair2idx[node];
                H2P_dense_mat_init(&HSS_D[HSS_D_idx], 8, 8);
                H2P_dense_mat_p HSS_Dij = HSS_D[HSS_D_idx];
                H2P_get_Dij_block(h2mat, node, node, HSS_Dij);
                for (int k = 0; k < HSS_Dij->nrow; k++)
                {
                    int idx_kk = k * (HSS_Dij->nrow + 1);
                    HSS_Dij->data[idx_kk] += shift;
                }
            }  // End of i loop
        }  // End of i_blk0 loop
    }  // End of pragma omp parallel
    et = get_wtime_sec();
    build_D_t += et - st;
    // 6. Level by level hierarchical construction for U and B matrices
    int is_SPD = 1;
    for (int i = max_level; i >= 1; i--)
    {
        int *level_i_nodes = level_nodes + i * n_leaf_node;
        int level_i_n_node = level_n_node[i];
        int n_thread_i = MIN(level_i_n_node, n_thread);
        if (!is_SPD) continue;
        int level_i_HSS_Bij_n_pair = level_HSS_Bij_pairs[i]->length / 2;
        int *level_i_HSS_Bij_pairs = level_HSS_Bij_pairs[i]->data;
        st = get_wtime_sec();
        // Build new U matrices
        #pragma omp parallel num_threads(n_thread_i)
        {
            int tid = omp_get_thread_num();
            H2P_int_vec_p   idx0 = thread_buf[tid]->idx0;
            H2P_dense_mat_p mat0 = thread_buf[tid]->mat0;
            H2P_dense_mat_p mat1 = thread_buf[tid]->mat1;
            H2P_dense_mat_p mat2 = thread_buf[tid]->mat2;
            #pragma omp for schedule(dynamic)
            for (int j = 0; j < level_i_n_node; j++)
            {
                if (!is_SPD) continue;
                int node = level_i_nodes[j];
                int n_child_node = n_child[node];
                int *node_children = children + node * max_child;
                int info;
                if (n_child_node == 0)
                {
                    // HSS_D_idx = HSS_D_pair2idx(node, node);
                    int HSS_D_idx = HSS_D_pair2idx[node];
                    H2P_dense_mat_p HSS_Dij = HSS_D[HSS_D_idx];
                    // [S{node}, chol_flag] = chol(HSS_D{HSS_D_idx}, 'lower');
                    H2P_dense_mat_init(&S[node], HSS_Dij->nrow, HSS_Dij->ncol);
                    copy_matrix_block(sizeof(DTYPE), HSS_Dij->nrow, HSS_Dij->ncol, HSS_Dij->data, HSS_Dij->ld, S[node]->data, S[node]->ld);
                    info = LAPACK_POTRF(LAPACK_ROW_MAJOR, 'L', S[node]->nrow, S[node]->data, S[node]->ld);
                    // Check the factorization BEFORE touching the factor;
                    // the original zeroed the upper triangle first
                    if (info != 0)
                    {
                        ERROR_PRINTF("Node %d potrf() returned %d, target matrix with shifting %.2lf is not SPD\n", node, info, shift);
                        is_SPD = 0;
                        continue;
                    }
                    // POTRF leaves the strict upper triangle untouched; zero it
                    // so S{node} is a proper lower-triangular factor
                    for (int k = 0; k < S[node]->nrow; k++)
                    {
                        DTYPE *S_kk1 = S[node]->data + k * S[node]->nrow + (k + 1);
                        int n_zero_row = S[node]->nrow - (k + 1);
                        memset(S_kk1, 0, sizeof(DTYPE) * n_zero_row);
                    }
                    // tmpY = linsolve(S{node}, Yk{node}{1}, struct('LT', true));
                    H2P_dense_mat_p *node_Yk = Yk + node * max_level;
                    H2P_dense_mat_p tmpY = mat0;
                    // One extra row doubles as the tau workspace for GEQPF/ORGQR
                    H2P_dense_mat_resize(tmpY, node_Yk[0]->nrow + 1, node_Yk[0]->ncol);
                    DTYPE *tau = tmpY->data + node_Yk[0]->nrow * node_Yk[0]->ncol;
                    tmpY->nrow--;
                    copy_matrix_block(sizeof(DTYPE), tmpY->nrow, tmpY->ncol, node_Yk[0]->data, node_Yk[0]->ld, tmpY->data, tmpY->ld);
                    ASSERT_PRINTF(
                        tmpY->nrow == S[node]->nrow, 
                        "Node %d: tmpY->nrow (%d) mismatch S->nrow (%d)\n", 
                        node, tmpY->nrow, S[node]->nrow
                    );
                    CBLAS_TRSM(
                        CblasRowMajor, CblasLeft, CblasLower, CblasNoTrans, CblasNonUnit, 
                        tmpY->nrow, tmpY->ncol, 1.0, S[node]->data, S[node]->ld, tmpY->data, tmpY->ld
                    );
                    // V_ncol = min([size(tmpY), max_rank]);
                    // [tmpQ, ~, ~] = qr(tmpY, 0);
                    // V{node} = tmpQ(:, 1 : V_ncol);
                    int tmpQ_ncol = MIN(tmpY->nrow, tmpY->ncol);
                    int V_ncol = MIN(tmpQ_ncol, max_rank);
                    H2P_dense_mat_p tmpQ = tmpY;
                    H2P_int_vec_p jpvt = idx0;
                    H2P_int_vec_set_capacity(jpvt, tmpQ->ncol);
                    memset(jpvt->data, 0, sizeof(int) * tmpQ->ncol);
                    LAPACK_GEQPF(LAPACK_ROW_MAJOR, tmpQ->nrow, tmpQ->ncol, tmpQ->data, tmpQ->ld, jpvt->data, tau);
                    // Truncate the rank where the R diagonal drops below reltol
                    int V_ncol1 = -1;
                    DTYPE stop_diag = DABS(tmpQ->data[0]) * reltol;
                    for (int k = 0; k < V_ncol; k++)
                    {
                        if (DABS(tmpQ->data[k * tmpQ->ld + k]) < stop_diag)
                        {
                            V_ncol1 = k - 1;
                            break;
                        }
                    }
                    if (V_ncol1 > 0) V_ncol = V_ncol1;
                    LAPACK_ORGQR(LAPACK_ROW_MAJOR, tmpQ->nrow, tmpQ_ncol, tmpQ_ncol, tmpQ->data, tmpQ->ld, tau);
                    H2P_dense_mat_init(&V[node], tmpQ->nrow, V_ncol);
                    copy_matrix_block(sizeof(DTYPE), tmpQ->nrow, V_ncol, tmpQ->data, tmpQ->ld, V[node]->data, V[node]->ld);
                    // HSS_U{node} = S{node} * V{node};
                    H2P_dense_mat_init(&HSS_U[node], S[node]->nrow, V[node]->ncol);
                    CBLAS_GEMM(
                        CblasRowMajor, CblasNoTrans, CblasNoTrans, S[node]->nrow, V[node]->ncol, S[node]->ncol,
                        1.0, S[node]->data, S[node]->ld, V[node]->data, V[node]->ld, 0.0, HSS_U[node]->data, HSS_U[node]->ld
                    );
                    // Yk{node}(1) = [];
                    // for k = 1 : length(Yk{node})
                    //     Yk{node}{k} = V{node}' * linsolve(S{node}, Yk{node}{k}, struct('LT', true));
                    // end
                    int last_k = 0;
                    for (int k = 1; k < max_level; k++)
                    {
                        if (node_Yk[k]->ld == 0) break;  // Empty Yk{node}{k}
                        H2P_dense_mat_p node_Yk_k0 = node_Yk[k - 1];
                        H2P_dense_mat_p node_Yk_k  = node_Yk[k];
                        CBLAS_TRSM(
                            CblasRowMajor, CblasLeft, CblasLower, CblasNoTrans, CblasNonUnit, 
                            node_Yk_k->nrow, node_Yk_k->ncol, 1.0, S[node]->data, S[node]->ld, node_Yk_k->data, node_Yk_k->ld
                        );
                        H2P_dense_mat_resize(node_Yk_k0, V[node]->ncol, node_Yk_k->ncol);
                        CBLAS_GEMM(
                            CblasRowMajor, CblasTrans, CblasNoTrans, V[node]->ncol, node_Yk_k->ncol, V[node]->nrow, 
                            1.0, V[node]->data, V[node]->ld, node_Yk_k->data, node_Yk_k->ld, 0.0, node_Yk_k0->data, node_Yk_k0->ld
                        );
                        last_k = k;
                    }  // End of k loop
                    H2P_dense_mat_destroy(&node_Yk[last_k]);
                    // if (~isempty(H2_U{node}))
                    //     W{node} = V{node}' * linsolve(S{node}, H2_U{node}, struct('LT', true));
                    // end
                    H2P_dense_mat_p H2_U_node = H2_U[node];
                    if (H2_U_node->ld > 0)
                    {
                        H2P_dense_mat_p tmpM = tmpQ;
                        H2P_dense_mat_resize(tmpM, H2_U_node->nrow, H2_U_node->ncol);
                        copy_matrix_block(sizeof(DTYPE), H2_U_node->nrow, H2_U_node->ncol, H2_U_node->data, H2_U_node->ld, tmpM->data, tmpM->ld);
                        ASSERT_PRINTF(
                            tmpM->nrow == S[node]->nrow, 
                            "Node %d: H2_U->nrow (%d) mismatch S->nrow (%d)\n", 
                            node, tmpM->nrow, S[node]->nrow
                        );
                        CBLAS_TRSM(
                            CblasRowMajor, CblasLeft, CblasLower, CblasNoTrans, CblasNonUnit, 
                            tmpM->nrow, tmpM->ncol, 1.0, S[node]->data, S[node]->ld, tmpM->data, tmpM->ld
                        );
                        H2P_dense_mat_init(&W[node], V[node]->ncol, tmpM->ncol);
                        CBLAS_GEMM(
                            CblasRowMajor, CblasTrans, CblasNoTrans, V[node]->ncol, tmpM->ncol, V[node]->nrow,
                            1.0, V[node]->data, V[node]->ld, tmpM->data, tmpM->ld, 0.0, W[node]->data, W[node]->ld
                        );
                    }  // End of "if (H2_U_node->ld > 0)"
                } else {  // Else of "if (n_child_node == 0)"
                    // (1) Construct the intermediate blocks defined by its children nodes
                    H2P_int_vec_set_capacity(idx0, n_child_node + 1);
                    int *offset = idx0->data;
                    offset[0] = 0;
                    for (int k = 0; k < n_child_node; k++)
                    {
                        int child_k = node_children[k];
                        offset[k + 1] = offset[k] + HSS_U[child_k]->ncol;
                    }
                    int tmpB_nrow = offset[n_child_node];
                    H2P_dense_mat_p tmpB = mat0;
                    // One extra row doubles as the eigenvalue workspace below
                    H2P_dense_mat_resize(tmpB, tmpB_nrow + 1, tmpB_nrow);
                    tmpB->nrow--;
                    for (int k = 0; k < n_child_node; k++)
                    {
                        int child_k = node_children[k];
                        // idx_k = offset(k) : offset(k+1)-1;
                        int idx_k_s   = offset[k];
                        int idx_k_len = offset[k + 1] - idx_k_s;
                        for (int l = k + 1; l < n_child_node; l++)
                        {
                            int child_l = node_children[l];
                            // idx_l = offset(l) : offset(l+1)-1;
                            int idx_l_s = offset[l];
                            // B_idx = B_pair2idx(child_k, child_l);
                            int HSS_B_idx = H2P_get_int_CSR_elem(HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, child_k, child_l);
                            ASSERT_PRINTF(HSS_B_idx != 0, "SPDHSS_B{%d, %d} does not exist!\n", child_k, child_l);
                            HSS_B_idx--;
                            H2P_dense_mat_p HSS_B_kl = HSS_B[HSS_B_idx];
                            // tmpB(idx_k, idx_l) = HSS_B{B_idx};
                            DTYPE *tmpB_kl = tmpB->data + idx_k_s * tmpB->ld + idx_l_s;
                            copy_matrix_block(sizeof(DTYPE), HSS_B_kl->nrow, HSS_B_kl->ncol, HSS_B_kl->data, HSS_B_kl->ld, tmpB_kl, tmpB->ld);
                            // tmpB(idx_l, idx_k) = HSS_B{B_idx}';
                            // LAPACK_SYEVD only need uppertriangle, no need to fill the lower triangle part
                            //DTYPE *tmpB_lk = tmpB->data + idx_l_s * tmpB->ld + idx_k_s;
                            //H2P_transpose_dmat(1, HSS_B_kl->nrow, HSS_B_kl->ncol, HSS_B_kl->data, HSS_B_kl->ld, tmpB_lk, tmpB->ld);
                        }
                        // Set the diagonal block to zero
                        for (int l = idx_k_s; l < idx_k_s + idx_k_len; l++)
                        {
                            DTYPE *tmpB_l_ks = tmpB->data + l * tmpB->ld + idx_k_s;
                            memset(tmpB_l_ks, 0, sizeof(DTYPE) * idx_k_len);
                        }
                    }  // End of k loop
                    // (2) Decompose the diagonal matrix
                    H2P_dense_mat_p tmpQ = tmpB;
                    DTYPE *tmpE_diag = tmpB->data + tmpQ->nrow * tmpQ->nrow;
                    // [tmpQ, tmpE] = eig(tmpB);
                    // tmpE_diag = diag(tmpE);
                    info = LAPACK_SYEVD(LAPACK_ROW_MAJOR, 'V', 'U', tmpQ->nrow, tmpQ->data, tmpQ->ld, tmpE_diag);
                    if (info != 0)
                    {
                        ERROR_PRINTF("Node %d intermediate diagonal matrix cannot be diagonalized\n", node);
                        is_SPD = 0;
                        continue;
                    }
                    // An eigenvalue <= -1 would make (1 + eig)^0.5 undefined below
                    DTYPE min_diag = 19241112.0;
                    for (int k = 0; k < tmpQ->nrow; k++) min_diag = MIN(min_diag, tmpE_diag[k]);
                    if (min_diag <= -1.0)
                    {
                        ERROR_PRINTF("Node %d intermediate diagonal matrix has eigenvalue %e < -1\n", node, min_diag);
                        ERROR_PRINTF("Source H2 matrix with shifting %.3lf is not SPD\n", shift);
                        is_SPD = 0;
                        continue;
                    }
                    H2P_dense_mat_p tmpM  = mat1;  // tmpM need to be reused later!
                    H2P_dense_mat_p tmpQ1 = mat2;
                    // tmpM = tmpQ * diag((1 + tmpE_diag).^0.5) * tmpQ';
                    #pragma omp simd
                    for (int k = 0; k < tmpQ->nrow; k++)
                        tmpE_diag[k] = DSQRT(1.0 + tmpE_diag[k]);
                    H2P_dense_mat_resize(tmpQ1, tmpQ->nrow, tmpQ->ncol);
                    for (int k = 0; k < tmpQ->nrow; k++)
                    {
                        DTYPE *tmpQ_k  = tmpQ->data  + k * tmpQ->ncol;
                        DTYPE *tmpQ1_k = tmpQ1->data + k * tmpQ->ncol;
                        #pragma omp simd
                        for (int l = 0; l < tmpQ->ncol; l++)
                            tmpQ1_k[l] = tmpQ_k[l] * tmpE_diag[l];
                    }
                    H2P_dense_mat_resize(tmpM, tmpQ->nrow, tmpQ->nrow);
                    CBLAS_GEMM(
                        CblasRowMajor, CblasNoTrans, CblasTrans, tmpQ->nrow, tmpQ->nrow, tmpQ->nrow, 
                        1.0, tmpQ1->data, tmpQ1->ld, tmpQ->data, tmpQ->ld, 0.0, tmpM->data, tmpM->ld
                    );
                    // Minv{node} = tmpQ * diag((1 + tmpE_diag).^-0.5) * tmpQ';
                    #pragma omp simd
                    for (int k = 0; k < tmpQ->nrow; k++)
                        tmpE_diag[k] = 1.0 / tmpE_diag[k];
                    H2P_dense_mat_resize(tmpQ1, tmpQ->nrow, tmpQ->ncol);
                    for (int k = 0; k < tmpQ->nrow; k++)
                    {
                        DTYPE *tmpQ_k  = tmpQ->data  + k * tmpQ->ncol;
                        DTYPE *tmpQ1_k = tmpQ1->data + k * tmpQ->ncol;
                        #pragma omp simd
                        for (int l = 0; l < tmpQ->ncol; l++)
                            tmpQ1_k[l] = tmpQ_k[l] * tmpE_diag[l];
                    }
                    H2P_dense_mat_init(&Minv[node], tmpQ->nrow, tmpQ->nrow);
                    CBLAS_GEMM(
                        CblasRowMajor, CblasNoTrans, CblasTrans, tmpQ->nrow, tmpQ->nrow, tmpQ->nrow, 
                        1.0, tmpQ1->data, tmpQ1->ld, tmpQ->data, tmpQ->ld, 0.0, Minv[node]->data, Minv[node]->ld
                    );
                    // Now mat0 and mat2 can be reused
                    // (3) Construct basis matrix
                    H2P_dense_mat_p *node_Yk = Yk + node * max_level;
                    H2P_int_vec_p tmpYk_idx = idx0;
                    H2P_int_vec_set_capacity(tmpYk_idx, n_child_node);
                    tmpYk_idx->length = n_child_node;
                    for (int l = 0; l < node_level[node]; l++)
                    {
                        for (int k = 0; k < n_child_node; k++)
                        {
                            int child_k = node_children[k];
                            tmpYk_idx->data[k] = child_k * max_level + l;
                        }
                        H2P_dense_mat_p node_Yk_l = node_Yk[l];
                        H2P_dense_mat_vertcat(Yk, tmpYk_idx, node_Yk_l);
                        for (int k = 0; k < n_child_node; k++)
                        {
                            int child_k = node_children[k];
                            int Yk_idx = child_k * max_level + l;
                            H2P_dense_mat_destroy(&Yk[Yk_idx]);
                        }
                    }
                    // tmpY = Minv{node} * Yk{node}{1};
                    H2P_dense_mat_p tmpY = mat0;
                    H2P_dense_mat_resize(tmpY, Minv[node]->nrow + 1, node_Yk[0]->ncol);
                    DTYPE *tau = tmpY->data + Minv[node]->nrow * node_Yk[0]->ncol;
                    tmpY->nrow--;
                    ASSERT_PRINTF(
                        Minv[node]->ncol == node_Yk[0]->nrow, 
                        "Node %d: Minv->ncol (%d) mismatch node_Yk[0]->nrow (%d)\n", 
                        node, Minv[node]->ncol, node_Yk[0]->nrow
                    );
                    CBLAS_GEMM(
                        CblasRowMajor, CblasNoTrans, CblasNoTrans, Minv[node]->nrow, tmpY->ncol, Minv[node]->ncol, 
                        1.0, Minv[node]->data, Minv[node]->ld, node_Yk[0]->data, node_Yk[0]->ld, 0.0, tmpY->data, tmpY->ld
                    );
                    // tmpQ_ncol = min([size(tmpY), max_rank]);
                    // [tmpQ, ~, ~] = qr(tmpY, 0);
                    // V{node} = tmpQ(:, 1 : tmpQ_ncol);
                    int tmpQ_ncol = MIN(tmpY->nrow, tmpY->ncol);
                    int V_ncol = MIN(tmpQ_ncol, max_rank);
                    tmpQ = tmpY;
                    H2P_int_vec_p jpvt = idx0;
                    H2P_int_vec_set_capacity(jpvt, tmpQ->ncol);
                    memset(jpvt->data, 0, sizeof(int) * tmpQ->ncol);
                    LAPACK_GEQPF(LAPACK_ROW_MAJOR, tmpQ->nrow, tmpQ->ncol, tmpQ->data, tmpQ->ld, jpvt->data, tau);
                    // Truncate the rank where the R diagonal drops below reltol
                    int V_ncol1 = -1;
                    DTYPE stop_diag = DABS(tmpQ->data[0]) * reltol;
                    for (int k = 0; k < V_ncol; k++)
                    {
                        if (DABS(tmpQ->data[k * tmpQ->ld + k]) < stop_diag)
                        {
                            V_ncol1 = k - 1;
                            break;
                        }
                    }
                    if (V_ncol1 > 0) V_ncol = V_ncol1;
                    LAPACK_ORGQR(LAPACK_ROW_MAJOR, tmpQ->nrow, tmpQ_ncol, tmpQ_ncol, tmpQ->data, tmpQ->ld, tau);
                    H2P_dense_mat_init(&V[node], tmpQ->nrow, V_ncol);
                    copy_matrix_block(sizeof(DTYPE), tmpQ->nrow, V_ncol, tmpQ->data, tmpQ->ld, V[node]->data, V[node]->ld);
                    // HSS_U{node} = tmpM * V{node};
                    H2P_dense_mat_init(&HSS_U[node], tmpM->nrow, V[node]->ncol);
                    CBLAS_GEMM(
                        CblasRowMajor, CblasNoTrans, CblasNoTrans, tmpM->nrow, V[node]->ncol, tmpM->ncol, 
                        1.0, tmpM->data, tmpM->ld, V[node]->data, V[node]->ld, 0.0, HSS_U[node]->data, HSS_U[node]->ld
                    );
                    // Now mat1 can be reused
                    // Yk{node}(1) = [];
                    // for k = 1 : length(Yk{node})
                    //     Yk{node}{k} = V{node}' * Minv{node} * Yk{node}{k};
                    // end
                    int last_k = 0;
                    for (int k = 1; k < max_level; k++)
                    {
                        if (node_Yk[k]->ld == 0) break;  // Empty Yk{node}{k}
                        H2P_dense_mat_p node_Yk_k0 = node_Yk[k - 1];
                        H2P_dense_mat_p node_Yk_k  = node_Yk[k];
                        H2P_dense_mat_resize(tmpM, Minv[node]->nrow, node_Yk_k->ncol);
                        ASSERT_PRINTF(
                            Minv[node]->ncol == node_Yk_k->nrow, 
                            "Node %d: Minv->ncol (%d) mismatch node_Yk[%d]->nrow (%d)", 
                            node, Minv[node]->ncol, k, node_Yk_k->nrow
                        );
                        CBLAS_GEMM(
                            CblasRowMajor, CblasNoTrans, CblasNoTrans, Minv[node]->nrow, node_Yk_k->ncol, Minv[node]->ncol, 
                            1.0, Minv[node]->data, Minv[node]->ld, node_Yk_k->data, node_Yk_k->ld, 0.0, tmpM->data, tmpM->ld
                        );
                        H2P_dense_mat_resize(node_Yk_k0, V[node]->ncol, tmpM->ncol);
                        ASSERT_PRINTF(
                            V[node]->nrow == Minv[node]->nrow, 
                            "Node %d: V->nrow (%d) mismatch tmpM->ncol (%d)\n", 
                            node, V[node]->nrow, Minv[node]->nrow
                        );
                        CBLAS_GEMM(
                            CblasRowMajor, CblasTrans, CblasNoTrans, V[node]->ncol, tmpM->ncol, V[node]->nrow, 
                            1.0, V[node]->data, V[node]->ld, tmpM->data, tmpM->ld, 0.0, node_Yk_k0->data, node_Yk_k0->ld
                        );
                        last_k = k;
                    }  // End of k loop
                    H2P_dense_mat_destroy(&node_Yk[last_k]);
                    // if (~isempty(H2_U{node}))
                    //     child_node = children(node, 1 : n_child_node);
                    //     tmpW = blkdiag(W{child_node});
                    //     W{node} = V{node}' * (Minv{node} * (tmpW * H2_U{node}));
                    // end
                    H2P_dense_mat_p H2_U_node = H2_U[node];
                    if (H2_U_node->ld > 0)
                    {
                        H2P_dense_mat_p tmpM0 = mat1;
                        H2P_dense_mat_p tmpM1 = mat2;
                        // Don't use blkdiag, directly multiple each child node's W with H2_U{node}
                        int tmpW_nrow = 0, tmpW_ncol = 0;
                        for (int k = 0; k < n_child_node; k++)
                        {
                            int child_k = node_children[k];
                            tmpW_nrow += W[child_k]->nrow;
                            tmpW_ncol += W[child_k]->ncol;
                        }
                        ASSERT_PRINTF(
                            tmpW_ncol == H2_U_node->nrow, 
                            "Node %d: tmpW->ncol (%d) mismatch H2_U->nrow (%d)\n", 
                            node, tmpW_ncol, H2_U_node->nrow
                        );
                        H2P_dense_mat_resize(tmpM0, tmpW_nrow, H2_U_node->ncol);
                        tmpW_nrow = 0;
                        tmpW_ncol = 0;
                        for (int k = 0; k < n_child_node; k++)
                        {
                            int child_k = node_children[k];
                            H2P_dense_mat_p W_k = W[child_k];
                            DTYPE *tmpM0_k_row = tmpM0->data + tmpW_nrow * tmpM0->ld;
                            DTYPE *H2_U_k_col  = H2_U_node->data + tmpW_ncol * H2_U_node->ld;
                            CBLAS_GEMM(
                                CblasRowMajor, CblasNoTrans, CblasNoTrans, W_k->nrow, H2_U_node->ncol, W_k->ncol, 
                                1.0, W_k->data, W_k->ld, H2_U_k_col, H2_U_node->ld, 0.0, tmpM0_k_row, tmpM0->ld
                            );
                            tmpW_nrow += W_k->nrow;
                            tmpW_ncol += W_k->ncol;
                            H2P_dense_mat_destroy(&W[child_k]);
                        }
                        // The rest GEMM
                        H2P_dense_mat_resize(tmpM1, Minv[node]->nrow, tmpM0->ncol);
                        ASSERT_PRINTF(
                            Minv[node]->ncol == tmpM0->nrow, 
                            "Node %d: Minv->ncol (%d) mismatch tmpM0->nrow (%d)\n", 
                            node, Minv[node]->ncol, tmpM0->nrow
                        );
                        CBLAS_GEMM(
                            CblasRowMajor, CblasNoTrans, CblasNoTrans, Minv[node]->nrow, tmpM0->ncol, Minv[node]->ncol, 
                            1.0, Minv[node]->data, Minv[node]->ld, tmpM0->data, tmpM0->ld, 0.0, tmpM1->data, tmpM1->ld
                        );
                        H2P_dense_mat_init(&W[node], V[node]->ncol, tmpM1->ncol);
                        ASSERT_PRINTF(
                            V[node]->nrow == tmpM1->nrow, 
                            "Node %d: V->nrow (%d) mismatch tmpM1->nrow (%d)\n", 
                            node, V[node]->nrow, tmpM1->nrow
                        );
                        CBLAS_GEMM(
                            CblasRowMajor, CblasTrans, CblasNoTrans, V[node]->ncol, tmpM1->ncol, V[node]->nrow, 
                            1.0, V[node]->data, V[node]->ld, tmpM1->data, tmpM1->ld, 0.0, W[node]->data, W[node]->ld
                        );
                    }  // End of "if (H2_U_node->ld > 0)"
                }  // End of "if (n_child_node == 0)"
            }  // End of j loop
        }  // End of "#pragma omp parallel"
        et = get_wtime_sec();
        build_U_t += et - st;
        // printf("SPDHSS build at %d level: U build %f.\n", i, et - st);
        fflush(stdout);
        st = get_wtime_sec();
        // Build new B matrices
        #pragma omp parallel num_threads(n_thread_i)
        {
            int tid = omp_get_thread_num();
            #pragma omp for schedule(dynamic)
            for (int j = 0; j < level_i_HSS_Bij_n_pair; j++)
            {
                int node0 = level_i_HSS_Bij_pairs[2 * j];
                int node1 = level_i_HSS_Bij_pairs[2 * j + 1];
                H2P_SPDHSS_H2_calc_HSS_Bij(
                    h2mat, node0, node1, tid, S, V, W, Minv, 
                    HSS_B_p2i_rowptr, HSS_B_p2i_colidx, HSS_B_p2i_val, HSS_B
                );
            }  // End of j loop
        }  // End of "#pragma omp parallel"
        et = get_wtime_sec();
        build_B_t += et - st;
        // printf("SPDHSS build at %d level: B build %f.\n", i, et - st);
        fflush(stdout);
    }  // End of i loop
    // 7. Wrap the new SPD HSS matrix
    H2P_SPDHSS_H2_wrap_new_HSS(
        h2mat, HSS_U, HSS_B, HSS_D, HSS_B_p2i_rowptr, HSS_B_p2i_colidx, 
        HSS_B_p2i_val, HSS_D_pair2idx, hssmat_
    );
    (*hssmat_)->timers[U_BUILD_TIMER_IDX] = build_U_t;
    (*hssmat_)->timers[B_BUILD_TIMER_IDX] = build_B_t;
    (*hssmat_)->timers[D_BUILD_TIMER_IDX] = build_D_t;
    (*hssmat_)->is_HSS_SPD = is_SPD;
    #ifdef __linux__
    // Restore default value
    mallopt(M_MMAP_THRESHOLD, 128 * 1024);
    #endif
    // 8. Delete intermediate arrays and matrices
    for (int i = 0; i < n_level; i++)
        H2P_int_vec_destroy(&level_HSS_Bij_pairs[i]);
    for (int i = 0; i < n_node; i++)
    {
        H2P_dense_mat_destroy(&S[i]);
        H2P_dense_mat_destroy(&V[i]);
        H2P_dense_mat_destroy(&W[i]);
        H2P_dense_mat_destroy(&Minv[i]);
    }
    for (int i = 0; i < n_HSS_Bij_pair; i++)
        H2P_dense_mat_destroy(&HSS_B[i]);
    for (int i = 0; i < n_leaf_node; i++)
        H2P_dense_mat_destroy(&HSS_D[i]);
    for (int i = 0; i < n_node * max_level; i++)
        H2P_dense_mat_destroy(&Yk[i]);
    // Fix: the Yk pointer array itself was leaked (only its elements were destroyed)
    free(Yk);
    free(level_HSS_Bij_pairs);
    free(S);
    free(V);
    free(W);
    free(Minv);
    free(HSS_B);
    free(HSS_D);
    free(HSS_D_pair2idx);
    free(HSS_B_p2i_rowptr);
    free(HSS_B_p2i_colidx);
    free(HSS_B_p2i_val);
}
|
helloworld.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main (int argc, char *argv[])
{
    // Fork a team of threads; variables declared inside the region are
    // automatically private to each thread, replacing the private() clause.
    #pragma omp parallel
    {
        // Obtain thread number
        int my_id = omp_get_thread_num();
        printf("'Hello, World!' from thread %d\n", my_id);
        // Thread no. 0 does additional work
        if (my_id == 0)
        {
            int team_size = omp_get_num_threads();
            printf("Number of threads = %d\n", team_size);
        }
    } // End of parallel region
    return 0;
}
|
c_patchmatch_64.c | #include "stdlib.h" /* rand, malloc */
#include "string.h" /* memcpy */
#include "stdio.h" /* printf */
#include "time.h" /* time */
#include "math.h" /* fabs */
#include "omp.h"
#include "c_patchmatch.h"
#define print(a, args...) printf("%s(%s:%d) " a, __func__,__FILE__, __LINE__, ##args)
#define println(a, args...) print(a "\n", ##args)
// Numerical recipes, Section 7.1
#define RAND(x) (4294957665 * ((x) & 4294967295) + ((x) >> 32))
typedef double float_type;
/*
Input params
============
source : source image
source_ind : linear indices in the source image
target : target image
target_ind : linear indices in the target image
neighbors : indices of nearest neighbors
on input contain initial guess
distances : distances to nearest neighbors
source_size : length of the source_ind array
target_size : length of the target_ind array
*/
/*
PRE
===
1) 2d arrays are in C order
*/
int im_x, im_y, im_ch, patch_size;
int *patch_h;
float_type *source, *target, *pweight, *lambda;
/* L2 weighted patch distance */
// inline float_type L2d2(int ps, int pt)
// {
// // #pragma omp parallel
// // {
// // fprintf(stderr,"%d\n", omp_get_thread_num());
// float_type dist = 0.0;
// for (int ch = 0; ch < im_ch; ch++){
// int offset_s = ch*im_x*im_y + ps;
// int offset_t = ch*im_x*im_y + pt;
// float_type *weight = pweight;
// int *h = patch_h;
// for (int i = 0; i < patch_size; i++, weight++, h++ ){
// int s_id = offset_s + (*h);
// int t_id = offset_t + (*h);
// dist += lambda[t_id] * (source[s_id] - target[t_id]) * (source[s_id] - target[t_id]) * (*weight);
// }
// }
// return dist;
// // }
// }
inline float_type L2d2(int ps, int pt)
{
float_type *weight = pweight;
float_type dist = 0.0;
for (int i = 0; i < patch_size; i++, weight++ ){
for (int ch = 0; ch < im_ch; ch++){
int s_id = (ps + patch_h[i]) * im_ch + ch;
int t_id = (pt + patch_h[i]) * im_ch + ch;
dist += lambda[t_id] * (source[s_id] - target[t_id]) * (source[s_id] - target[t_id]) * (*weight);
}
}
return dist;
}
/* PatchMatch-style iterative refinement of an approximate nearest-neighbor
   field: for each masked target pixel, find a source pixel whose surrounding
   patch minimizes the weighted L2 distance L2d2(). Alternates forward and
   reverse scanline passes of propagation + random search until max_iterations
   or convergence. `neighbors` holds the initial guess on input and the
   refined field on output; `distances` is updated in place.
   NOTE(review): writes the per-call parameters into file-scope globals used
   by L2d2(), so concurrent pm_64 calls are not safe. */
void pm_64( float_type* source_im, char* source_mask, int* source_ind, int source_y, int source_x, int source_ch,
  float_type* target_im, char* target_mask, int* target_ind, int target_y, int target_x, int target_ch, int target_ind_size,
  int *neighbors, float_type *distances, int* patch_ind, int patch_ind_size, float_type* weight, float_type* lambdas, int max_rand_shots, int max_iterations, int max_window_size, float_type TOL )
{
  /* Global vars */
  // Publish per-call state to the globals consumed by L2d2()
  im_y = target_y;
  im_x = target_x;
  im_ch = target_ch;
  source = source_im;
  target = target_im;
  pweight = weight;
  lambda = lambdas;
  patch_h = patch_ind;
  patch_size = patch_ind_size;
  int target_mask_size = target_x * target_y;
  int source_mask_size = source_x * source_y;
  /* Search window size */
  // -1 means "unbounded": use the larger source dimension
  int window_size = (max_window_size != -1) ? max_window_size : ( (source_x>source_y) ? source_x-1 : source_y-1 );
  /* Distances for the given initial guess */
  for ( int i = 0; i < target_ind_size; i++ )
    distances[target_ind[i]] = L2d2(neighbors[target_ind[i]], target_ind[i]);
  int no_improve_iters = 0;
  float_type max_dist = 0.0, max_dist_old;
  /* Improve NNF */
  for (int iter = 0; iter < max_iterations; iter++){
    int ind_begin, ind_end, shift;
    /* Even iterations, scanline */
    if ( iter % 2 == 0 ){
      ind_begin = 0;
      ind_end = target_ind_size;
      shift = -1;
    /* Odd iterations, reverse scanline */
    }else{
      ind_begin = target_ind_size - 1;
      ind_end = -1;
      shift = 1;
    }
    /* loop through the target image */
    // NOTE(review): neighbors/distances of other pixels are read here while
    // sibling threads write them — a deliberate, PatchMatch-tolerated race;
    // stale reads only delay propagation, they do not corrupt the result.
    // #pragma omp for schedule(static) ordered
    #pragma omp parallel for schedule(static) default(shared)
    for (int count = 0; count<target_ind_size; count++){
      // srand(time(NULL)+omp_get_thread_num());
      int targ_ind = target_ind[ind_begin-count*shift];
      float_type dist = distances[targ_ind];
      int nn = neighbors[targ_ind];
      /* Propagation step */
      /* Left\Right neighbor */
      // Adopt the already-scanned neighbor's match, shifted back one pixel
      int shifted_ind = targ_ind + shift;
      if ( shifted_ind>=0 && shifted_ind<target_mask_size && target_mask[shifted_ind]>0 ){
        int candidate_nn = neighbors[shifted_ind] - shift;
        if ( candidate_nn>=0 && candidate_nn<source_mask_size && source_mask[candidate_nn]>0 ){
          float_type candidate_dist = L2d2(candidate_nn, targ_ind);
          if ( candidate_dist < dist ){
            dist = candidate_dist;
            nn = candidate_nn;
          }
        }
      }
      /* Above\Below neighbor */
      shifted_ind = targ_ind + shift * im_x;
      if ( shifted_ind>=0 && shifted_ind<target_mask_size && target_mask[shifted_ind]>0 ){
        int candidate_nn = neighbors[shifted_ind] - shift * im_x;
        if ( candidate_nn>=0 && candidate_nn<source_mask_size && source_mask[candidate_nn]>0 ){
          float_type candidate_dist = L2d2(candidate_nn, targ_ind);
          if ( candidate_dist < dist ){
            dist = candidate_dist;
            nn = candidate_nn;
          }
        }
      }
      /* Random search step */
      // Per-thread 64-bit multiply-with-carry stream (RAND macro), seeded so
      // threads diverge; avoids the shared-state rand()/srand() pair
      unsigned long long int seed1 = time(NULL) + omp_get_thread_num();
      unsigned long long int seed2;
      int nn_x = nn % source_x;
      int nn_y = nn / source_x;
      int rand_shots = max_rand_shots;
      // Halve the search window (and shot budget) each round, as in PatchMatch
      for (int w_size = window_size; w_size >= 1; w_size /= 2, rand_shots /= 2){
        rand_shots = rand_shots<3 ? 3 : rand_shots;
        /* truncate window to account for the source image size */
        int x_min = ((nn_x-w_size) > 0) ? nn_x - w_size : 0;
        int y_min = ((nn_y-w_size) > 0) ? nn_y - w_size : 0;
        int x_max = ((nn_x+w_size) < source_x) ? nn_x + w_size : source_x - 1;
        int y_max = ((nn_y+w_size) < source_y) ? nn_y + w_size : source_y - 1;
        /* sample max_rand_shots pixels from the window around current nn */
        int random_no_improve = 0;
        for (int k = 0; k < rand_shots; k++){
          // int candidate_nn = source_x * ( y_min + rand() % (y_max-y_min) ) + x_min + rand() % (x_max-x_min);
          seed2 = RAND(seed1); seed1 = RAND(seed2);
          // note that only lowest 32 bits are taken
          // NOTE(review): if x_max==x_min or y_max==y_min (1-pixel-wide
          // source, or nn on a degenerate border window) this is a modulo by
          // zero — TODO confirm callers guarantee source_x, source_y >= 2
          int candidate_nn = source_x * ( y_min + (seed1 & 0xFFFFFFFF) % (y_max-y_min) ) + x_min + (seed2 & 0xFFFFFFFF) % (x_max-x_min);
          if ( source_mask[candidate_nn] > 0 ){
            float_type candidate_dist = L2d2(candidate_nn, targ_ind);
            if ( candidate_dist < dist ){
              dist = candidate_dist;
              nn = candidate_nn;
            }
            else{
              random_no_improve += 1;
            }
          }
          // Give up early on a window that keeps failing to improve
          if (random_no_improve>10) break;
        }
      }
      // Commit only if this pass actually improved the match
      if ( dist < distances[targ_ind] ){
        distances[targ_ind] = dist;
        neighbors[targ_ind] = nn;
      }
    }
    /* Max distance */
    // Serial reduction over the field; used as the convergence measure
    max_dist_old = max_dist;
    max_dist = distances[target_ind[ind_begin]];
    for (int ind = ind_begin-shift; ind != ind_end; ind -= shift)
      max_dist = fmax(max_dist,distances[target_ind[ind]]);
    no_improve_iters = (fabs(max_dist-max_dist_old)<TOL) ? no_improve_iters+1 : 0;
    // /* Max distance */
    // max_dist_old = max_dist;
    // max_dist = distances[target_ind[ind_begin]];
    // if ( iter % 2 == 0 ){
    //   #pragma omp parallel for schedule(static) reduction(max:max_dist) default(shared)
    //   for (int ind = ind_begin+1; ind<ind_end; ind++)
    //     max_dist = fmax(max_dist,distances[target_ind[ind]]);
    // }else{
    //   #pragma omp parallel for schedule(static) reduction(max:max_dist) default(shared)
    //   for (int ind = ind_begin-1; ind>ind_end; ind--)
    //     max_dist = fmax(max_dist,distances[target_ind[ind]]);
    // }
    // no_improve_iters = (fabs(max_dist-max_dist_old)<TOL) ? no_improve_iters+1 : 0;
    /* Early break if desired tolerance is achieved or there is no improvement */
    if ( no_improve_iters>6 || max_dist < TOL ) break;
  }
} |
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
  /// Discriminator for the dynamic type of a Stmt, generated from
  /// StmtNodes.inc. first/last*Constant pairs delimit each class hierarchy
  /// range so classof() can be a simple range check.
  enum StmtClass {
    NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
  first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
  first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
  };

  // Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  /// Vanilla 'new' is forbidden: Stmts must be allocated through the
  /// ASTContext-taking operator new declared below, or via placement new.
  void *operator new(size_t bytes) noexcept {
    llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
  }

  /// Vanilla 'delete' is likewise forbidden.
  void operator delete(void *data) noexcept {
    llvm_unreachable("Stmts cannot be released with regular 'delete'.");
  }

  //===--- Statement bitfields classes ---===//
  //
  // Each concrete node class stores its per-node flags (and often a
  // SourceLocation) in one member of the anonymous union at the bottom of
  // Stmt. Every bitfield class begins with an unnamed field of
  // NumStmtBits / NumExprBits / NumCallExprBits / NumOverloadExprBits bits
  // so that its own fields do not overlap the bits owned by its base.

  class StmtBitfields {
    friend class Stmt;

    /// The statement class.
    unsigned sClass : 8;
  };

  /// Number of bits consumed by StmtBitfields; derived bitfield classes
  /// must skip this many bits first.
  enum { NumStmtBits = 8 };

  class NullStmtBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class NullStmt;

    unsigned : NumStmtBits;

    /// True if the null statement was preceded by an empty macro, e.g:
    /// @code
    ///   #define CALL(x)
    ///   CALL(0);
    /// @endcode
    unsigned HasLeadingEmptyMacro : 1;

    /// The location of the semi-colon.
    SourceLocation SemiLoc;
  };

  class CompoundStmtBitfields {
    friend class ASTStmtReader;
    friend class CompoundStmt;

    unsigned : NumStmtBits;

    /// The number of statements in the compound statement.
    unsigned NumStmts : 32 - NumStmtBits;

    /// The location of the opening "{".
    SourceLocation LBraceLoc;
  };

  class LabelStmtBitfields {
    friend class LabelStmt;

    unsigned : NumStmtBits;

    /// The source location of the label identifier.
    SourceLocation IdentLoc;
  };

  class AttributedStmtBitfields {
    friend class ASTStmtReader;
    friend class AttributedStmt;

    unsigned : NumStmtBits;

    /// Number of attributes.
    unsigned NumAttrs : 32 - NumStmtBits;

    /// The location of the attribute.
    SourceLocation AttrLoc;
  };

  class IfStmtBitfields {
    friend class ASTStmtReader;
    friend class IfStmt;

    unsigned : NumStmtBits;

    /// True if this if statement is a constexpr if.
    unsigned IsConstexpr : 1;

    /// True if this if statement has storage for an else statement.
    unsigned HasElse : 1;

    /// True if this if statement has storage for a variable declaration.
    unsigned HasVar : 1;

    /// True if this if statement has storage for an init statement.
    unsigned HasInit : 1;

    /// The location of the "if".
    SourceLocation IfLoc;
  };

  class SwitchStmtBitfields {
    friend class SwitchStmt;

    unsigned : NumStmtBits;

    /// True if the SwitchStmt has storage for an init statement.
    unsigned HasInit : 1;

    /// True if the SwitchStmt has storage for a condition variable.
    unsigned HasVar : 1;

    /// If the SwitchStmt is a switch on an enum value, records whether all
    /// the enum values were covered by CaseStmts.  The coverage information
    /// value is meant to be a hint for possible clients.
    unsigned AllEnumCasesCovered : 1;

    /// The location of the "switch".
    SourceLocation SwitchLoc;
  };

  class WhileStmtBitfields {
    friend class ASTStmtReader;
    friend class WhileStmt;

    unsigned : NumStmtBits;

    /// True if the WhileStmt has storage for a condition variable.
    unsigned HasVar : 1;

    /// The location of the "while".
    SourceLocation WhileLoc;
  };

  class DoStmtBitfields {
    friend class DoStmt;

    unsigned : NumStmtBits;

    /// The location of the "do".
    SourceLocation DoLoc;
  };

  class ForStmtBitfields {
    friend class ForStmt;

    unsigned : NumStmtBits;

    /// The location of the "for".
    SourceLocation ForLoc;
  };

  class GotoStmtBitfields {
    friend class GotoStmt;
    friend class IndirectGotoStmt;

    unsigned : NumStmtBits;

    /// The location of the "goto".
    SourceLocation GotoLoc;
  };

  class ContinueStmtBitfields {
    friend class ContinueStmt;

    unsigned : NumStmtBits;

    /// The location of the "continue".
    SourceLocation ContinueLoc;
  };

  class BreakStmtBitfields {
    friend class BreakStmt;

    unsigned : NumStmtBits;

    /// The location of the "break".
    SourceLocation BreakLoc;
  };

  class ReturnStmtBitfields {
    friend class ReturnStmt;

    unsigned : NumStmtBits;

    /// True if this ReturnStmt has storage for an NRVO candidate.
    unsigned HasNRVOCandidate : 1;

    /// The location of the "return".
    SourceLocation RetLoc;
  };

  class SwitchCaseBitfields {
    friend class SwitchCase;
    friend class CaseStmt;

    unsigned : NumStmtBits;

    /// Used by CaseStmt to store whether it is a case statement
    /// of the form case LHS ... RHS (a GNU extension).
    unsigned CaseStmtIsGNURange : 1;

    /// The location of the "case" or "default" keyword.
    SourceLocation KeywordLoc;
  };

  //===--- Expression bitfields classes ---===//

  class ExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class AtomicExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class CallExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class CXXNewExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class DeclRefExpr; // computeDependence
    friend class DependentScopeDeclRefExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class Expr;
    friend class InitListExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class OpaqueValueExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class ShuffleVectorExpr; // ctor

    unsigned : NumStmtBits;

    /// The value category of the expression (see Expr).
    unsigned ValueKind : 2;

    /// The object kind of the expression (see Expr).
    unsigned ObjectKind : 3;

    // Dependence flags consulted during template instantiation; see the
    // corresponding accessors on Expr.
    unsigned TypeDependent : 1;
    unsigned ValueDependent : 1;
    unsigned InstantiationDependent : 1;
    unsigned ContainsUnexpandedParameterPack : 1;
  };

  /// Bits consumed by ExprBitfields: NumStmtBits plus the nine bits of the
  /// fields above (2 + 3 + 1 + 1 + 1 + 1).
  enum { NumExprBits = NumStmtBits + 9 };

  class PredefinedExprBitfields {
    friend class ASTStmtReader;
    friend class PredefinedExpr;

    unsigned : NumExprBits;

    /// The kind of this PredefinedExpr. One of the enumeration values
    /// in PredefinedExpr::IdentKind.
    unsigned Kind : 4;

    /// True if this PredefinedExpr has a trailing "StringLiteral *"
    /// for the predefined identifier.
    unsigned HasFunctionName : 1;

    /// The location of this PredefinedExpr.
    SourceLocation Loc;
  };

  class DeclRefExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class DeclRefExpr;

    unsigned : NumExprBits;

    // Flags recording which optional data accompanies this DeclRefExpr;
    // see the corresponding accessors on DeclRefExpr.
    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;

    /// The location of the declaration name itself.
    SourceLocation Loc;
  };

  /// Enumeration of the supported floating-point semantics, stored in
  /// FloatingLiteralBitfields::Semantics and used when constructing the
  /// corresponding llvm::APFloat.
  enum APFloatSemantics {
    IEEEhalf,
    IEEEsingle,
    IEEEdouble,
    x87DoubleExtended,
    IEEEquad,
    PPCDoubleDouble
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;

    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction

    /// Whether the literal is exactly representable in the chosen
    /// semantics (see FloatingLiteral).
    unsigned IsExact : 1;
  };

  class StringLiteralBitfields {
    friend class ASTStmtReader;
    friend class StringLiteral;

    unsigned : NumExprBits;

    /// The kind of this string literal.
    /// One of the enumeration values of StringLiteral::StringKind.
    unsigned Kind : 3;

    /// The width of a single character in bytes. Only values of 1, 2,
    /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
    /// the target + string kind to the appropriate CharByteWidth.
    unsigned CharByteWidth : 3;

    unsigned IsPascal : 1;

    /// The number of concatenated token this string is made of.
    /// This is the number of trailing SourceLocation.
    unsigned NumConcatenated;
  };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;

    unsigned : NumExprBits;

    /// The kind of this character literal (see CharacterLiteral).
    unsigned Kind : 3;
  };

  class UnaryOperatorBitfields {
    friend class UnaryOperator;

    unsigned : NumExprBits;

    /// The opcode of this unary operator.
    unsigned Opc : 5;
    unsigned CanOverflow : 1;

    /// The source location of the operator.
    SourceLocation Loc;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;

    unsigned : NumExprBits;

    unsigned Kind : 3;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class ArraySubscriptExprBitfields {
    friend class ArraySubscriptExpr;

    unsigned : NumExprBits;

    /// The location of the "]".
    SourceLocation RBracketLoc;
  };

  class CallExprBitfields {
    friend class CallExpr;

    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;

    /// True if the callee of the call expression was found using ADL.
    unsigned UsesADL : 1;

    /// Padding used to align OffsetToTrailingObjects to a byte multiple.
    unsigned : 24 - 2 - NumExprBits;

    /// The offset in bytes from the this pointer to the start of the
    /// trailing objects belonging to CallExpr. Intentionally byte sized
    /// for faster access.
    unsigned OffsetToTrailingObjects : 8;
  };

  /// Bits consumed by CallExprBitfields (the padding above rounds the
  /// layout up to a full word).
  enum { NumCallExprBits = 32 };

  class MemberExprBitfields {
    friend class MemberExpr;

    unsigned : NumExprBits;

    /// IsArrow - True if this is "X->F", false if this is "X.F".
    unsigned IsArrow : 1;

    /// True if this member expression used a nested-name-specifier to
    /// refer to the member, e.g., "x->Base::f", or found its member via
    /// a using declaration.  When true, a MemberExprNameQualifier
    /// structure is allocated immediately after the MemberExpr.
    unsigned HasQualifierOrFoundDecl : 1;

    /// True if this member expression specified a template keyword
    /// and/or a template argument list explicitly, e.g., x->f<int>,
    /// x->template f, x->template f<int>.
    /// When true, an ASTTemplateKWAndArgsInfo structure and its
    /// TemplateArguments (if any) are present.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// True if this member expression refers to a method that
    /// was resolved from an overloaded set having size greater than 1.
    unsigned HadMultipleCandidates : 1;

    /// This is the location of the -> or . in the expression.
    SourceLocation OperatorLoc;
  };

  class CastExprBitfields {
    friend class CastExpr;
    friend class ImplicitCastExpr;

    unsigned : NumExprBits;

    /// The kind of cast (see CastExpr).
    unsigned Kind : 6;
    unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.

    /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
    /// here. ([implimits] Direct and indirect base classes [16384]).
    unsigned BasePathSize;
  };

  class BinaryOperatorBitfields {
    friend class BinaryOperator;

    unsigned : NumExprBits;

    /// The opcode of this binary operator.
    unsigned Opc : 6;

    /// This is only meaningful for operations on floating point
    /// types and 0 otherwise.
    unsigned FPFeatures : 3;

    /// The source location of the operator.
    SourceLocation OpLoc;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class ParenListExprBitfields {
    friend class ASTStmtReader;
    friend class ParenListExpr;

    unsigned : NumExprBits;

    /// The number of expressions in the paren list.
    unsigned NumExprs;
  };

  class GenericSelectionExprBitfields {
    friend class ASTStmtReader;
    friend class GenericSelectionExpr;

    unsigned : NumExprBits;

    /// The location of the "_Generic".
    SourceLocation GenericLoc;
  };

  class PseudoObjectExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class PseudoObjectExpr;

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  //===--- C++ Expression bitfields classes ---===//

  class CXXOperatorCallExprBitfields {
    friend class ASTStmtReader;
    friend class CXXOperatorCallExpr;

    unsigned : NumCallExprBits;

    /// The kind of this overloaded operator. One of the enumerator
    /// value of OverloadedOperatorKind.
    unsigned OperatorKind : 6;

    // Only meaningful for floating point types.
    unsigned FPFeatures : 3;
  };

  class CXXBoolLiteralExprBitfields {
    friend class CXXBoolLiteralExpr;

    unsigned : NumExprBits;

    /// The value of the boolean literal.
    unsigned Value : 1;

    /// The location of the boolean literal.
    SourceLocation Loc;
  };

  class CXXNullPtrLiteralExprBitfields {
    friend class CXXNullPtrLiteralExpr;

    unsigned : NumExprBits;

    /// The location of the null pointer literal.
    SourceLocation Loc;
  };

  class CXXThisExprBitfields {
    friend class CXXThisExpr;

    unsigned : NumExprBits;

    /// Whether this is an implicit "this".
    unsigned IsImplicit : 1;

    /// The location of the "this".
    SourceLocation Loc;
  };

  class CXXThrowExprBitfields {
    friend class ASTStmtReader;
    friend class CXXThrowExpr;

    unsigned : NumExprBits;

    /// Whether the thrown variable (if any) is in scope.
    unsigned IsThrownVariableInScope : 1;

    /// The location of the "throw".
    SourceLocation ThrowLoc;
  };

  class CXXDefaultArgExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultArgExpr;

    unsigned : NumExprBits;

    /// The location where the default argument expression was used.
    SourceLocation Loc;
  };

  class CXXDefaultInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultInitExpr;

    unsigned : NumExprBits;

    /// The location where the default initializer expression was used.
    SourceLocation Loc;
  };

  class CXXScalarValueInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXScalarValueInitExpr;

    unsigned : NumExprBits;

    /// The location of the ")".
    SourceLocation RParenLoc;
  };

  class CXXNewExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class CXXNewExpr;

    unsigned : NumExprBits;

    /// Was the usage ::new, i.e. is the global new to be used?
    unsigned IsGlobalNew : 1;

    /// Do we allocate an array? If so, the first trailing "Stmt *" is the
    /// size expression.
    unsigned IsArray : 1;

    /// Should the alignment be passed to the allocation function?
    unsigned ShouldPassAlignment : 1;

    /// If this is an array allocation, does the usual deallocation
    /// function for the allocated type want to know the allocated size?
    unsigned UsualArrayDeleteWantsSize : 1;

    /// What kind of initializer do we have? Could be none, parens, or braces.
    /// In storage, we distinguish between "none, and no initializer expr", and
    /// "none, but an implicit initializer expr".
    unsigned StoredInitializationStyle : 2;

    /// True if the allocated type was expressed as a parenthesized type-id.
    unsigned IsParenTypeId : 1;

    /// The number of placement new arguments.
    unsigned NumPlacementArgs;
  };

  class CXXDeleteExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDeleteExpr;

    unsigned : NumExprBits;

    /// Is this a forced global delete, i.e. "::delete"?
    unsigned GlobalDelete : 1;

    /// Is this the array form of delete, i.e. "delete[]"?
    unsigned ArrayForm : 1;

    /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
    /// applied to pointer-to-array type (ArrayFormAsWritten will be false
    /// while ArrayForm will be true).
    unsigned ArrayFormAsWritten : 1;

    /// Does the usual deallocation function for the element type require
    /// a size_t argument?
    unsigned UsualArrayDeleteWantsSize : 1;

    /// Location of the expression.
    SourceLocation Loc;
  };

  class TypeTraitExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class TypeTraitExpr;

    unsigned : NumExprBits;

    /// The kind of type trait, which is a value of a TypeTrait enumerator.
    unsigned Kind : 8;

    /// If this expression is not value-dependent, this indicates whether
    /// the trait evaluated true or false.
    unsigned Value : 1;

    /// The number of arguments to this type trait.
    unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
  };

  class DependentScopeDeclRefExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class DependentScopeDeclRefExpr;

    unsigned : NumExprBits;

    /// Whether the name includes info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;
  };

  class CXXConstructExprBitfields {
    friend class ASTStmtReader;
    friend class CXXConstructExpr;

    unsigned : NumExprBits;

    // Construction flags; see the corresponding accessors on
    // CXXConstructExpr for their meaning.
    unsigned Elidable : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned ListInitialization : 1;
    unsigned StdInitListInitialization : 1;
    unsigned ZeroInitialization : 1;
    unsigned ConstructionKind : 3;

    /// The location of this construction.
    SourceLocation Loc;
  };

  class ExprWithCleanupsBitfields {
    friend class ASTStmtReader; // deserialization
    friend class ExprWithCleanups;

    unsigned : NumExprBits;

    // When false, it must not have side effects.
    unsigned CleanupsHaveSideEffects : 1;

    unsigned NumObjects : 32 - 1 - NumExprBits;
  };

  class CXXUnresolvedConstructExprBitfields {
    friend class ASTStmtReader;
    friend class CXXUnresolvedConstructExpr;

    unsigned : NumExprBits;

    /// The number of arguments used to construct the type.
    unsigned NumArgs;
  };

  class CXXDependentScopeMemberExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDependentScopeMemberExpr;

    unsigned : NumExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether this member expression has info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// See getFirstQualifierFoundInScope() and the comment listing
    /// the trailing objects.
    unsigned HasFirstQualifierFoundInScope : 1;

    /// The location of the '->' or '.' operator.
    SourceLocation OperatorLoc;
  };

  class OverloadExprBitfields {
    friend class ASTStmtReader;
    friend class OverloadExpr;

    unsigned : NumExprBits;

    /// Whether the name includes info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// Padding used by the derived classes to store various bits. If you
    /// need to add some data here, shrink this padding and add your data
    /// above. NumOverloadExprBits also needs to be updated.
    unsigned : 32 - NumExprBits - 1;

    /// The number of results.
    unsigned NumResults;
  };

  /// Bits consumed by OverloadExprBitfields before the padding; derived
  /// classes (UnresolvedLookupExpr, UnresolvedMemberExpr) start their own
  /// fields after this many bits.
  enum { NumOverloadExprBits = NumExprBits + 1 };

  class UnresolvedLookupExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedLookupExpr;

    unsigned : NumOverloadExprBits;

    /// True if these lookup results should be extended by
    /// argument-dependent lookup if this is the operand of a function call.
    unsigned RequiresADL : 1;

    /// True if these lookup results are overloaded.  This is pretty trivially
    /// rederivable if we urgently need to kill this field.
    unsigned Overloaded : 1;
  };
  // NOTE(review): the two concatenated message literals below join without a
  // space, so the emitted diagnostic reads "...bytes toavoid trashing...".
  // Harmless to behavior, but worth fixing upstream.
  static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
                "UnresolvedLookupExprBitfields must be <= than 4 bytes to"
                "avoid trashing OverloadExprBitfields::NumResults!");

  class UnresolvedMemberExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedMemberExpr;

    unsigned : NumOverloadExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether the lookup results contain an unresolved using declaration.
    unsigned HasUnresolvedUsing : 1;
  };
  // NOTE(review): same missing space between the concatenated literals as in
  // the static_assert above.
  static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
                "UnresolvedMemberExprBitfields must be <= than 4 bytes to"
                "avoid trashing OverloadExprBitfields::NumResults!");

  class CXXNoexceptExprBitfields {
    friend class ASTStmtReader;
    friend class CXXNoexceptExpr;

    unsigned : NumExprBits;

    /// The result of the noexcept expression, when not value-dependent.
    unsigned Value : 1;
  };

  class SubstNonTypeTemplateParmExprBitfields {
    friend class ASTStmtReader;
    friend class SubstNonTypeTemplateParmExpr;

    unsigned : NumExprBits;

    /// The location of the non-type template parameter reference.
    SourceLocation NameLoc;
  };

  //===--- C++ Coroutines TS bitfields classes ---===//

  class CoawaitExprBitfields {
    friend class CoawaitExpr;

    unsigned : NumExprBits;

    unsigned IsImplicit : 1;
  };

  //===--- Obj-C Expression bitfields classes ---===//

  class ObjCIndirectCopyRestoreExprBitfields {
    friend class ObjCIndirectCopyRestoreExpr;

    unsigned : NumExprBits;

    unsigned ShouldCopy : 1;
  };

  //===--- Clang Extensions bitfields classes ---===//

  class OpaqueValueExprBitfields {
    friend class ASTStmtReader;
    friend class OpaqueValueExpr;

    unsigned : NumExprBits;

    /// The OVE is a unique semantic reference to its source expression if this
    /// bit is set to true.
    unsigned IsUnique : 1;

    SourceLocation Loc;
  };

  /// The per-node-class storage: exactly one member is active, selected by
  /// the dynamic type of the node (StmtBits.sClass is valid for all of them
  /// because every member begins with the same leading bits).
  union {
    // Same order as in StmtNodes.td.
    // Statements
    StmtBitfields StmtBits;
    NullStmtBitfields NullStmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    LabelStmtBitfields LabelStmtBits;
    AttributedStmtBitfields AttributedStmtBits;
    IfStmtBitfields IfStmtBits;
    SwitchStmtBitfields SwitchStmtBits;
    WhileStmtBitfields WhileStmtBits;
    DoStmtBitfields DoStmtBits;
    ForStmtBitfields ForStmtBits;
    GotoStmtBitfields GotoStmtBits;
    ContinueStmtBitfields ContinueStmtBits;
    BreakStmtBitfields BreakStmtBits;
    ReturnStmtBitfields ReturnStmtBits;
    SwitchCaseBitfields SwitchCaseBits;

    // Expressions
    ExprBitfields ExprBits;
    PredefinedExprBitfields PredefinedExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    StringLiteralBitfields StringLiteralBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    UnaryOperatorBitfields UnaryOperatorBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    ArraySubscriptExprBitfields ArraySubscriptExprBits;
    CallExprBitfields CallExprBits;
    MemberExprBitfields MemberExprBits;
    CastExprBitfields CastExprBits;
    BinaryOperatorBitfields BinaryOperatorBits;
    InitListExprBitfields InitListExprBits;
    ParenListExprBitfields ParenListExprBits;
    GenericSelectionExprBitfields GenericSelectionExprBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;

    // C++ Expressions
    CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
    CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
    CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
    CXXThisExprBitfields CXXThisExprBits;
    CXXThrowExprBitfields CXXThrowExprBits;
    CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
    CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
    CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
    CXXNewExprBitfields CXXNewExprBits;
    CXXDeleteExprBitfields CXXDeleteExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
    DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
    CXXConstructExprBitfields CXXConstructExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
    CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
    OverloadExprBitfields OverloadExprBits;
    UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
    UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
    CXXNoexceptExprBitfields CXXNoexceptExprBits;
    SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;

    // C++ Coroutines TS expressions
    CoawaitExprBitfields CoawaitBits;

    // Obj-C Expressions
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

    // Clang Extensions
    OpaqueValueExprBitfields OpaqueValueExprBits;
  };

public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  /// Convenience overload forwarding to the ASTContext-reference form.
  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  /// Placement new: caller provides the memory.
  void *operator new(size_t bytes, void *mem) noexcept { return mem; }

  // Matching placement-delete overloads; ASTContext-allocated memory is
  // never individually freed, so these are no-ops.
  void operator delete(void *, const ASTContext &, unsigned) noexcept {}
  void operator delete(void *, const ASTContext *, unsigned) noexcept {}
  void operator delete(void *, size_t) noexcept {}
  void operator delete(void *, void *) noexcept {}

public:
  /// A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell {};

protected:
  /// Iterator for iterating over Stmt * arrays that contain only T *.
  ///
  /// This is needed because AST nodes use Stmt* arrays to store
  /// references to children (to be compatible with StmtIterator).
  template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
  struct CastIterator
      : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
                                    std::random_access_iterator_tag, TPtr> {
    using Base = typename CastIterator::iterator_adaptor_base;

    CastIterator() : Base(nullptr) {}
    CastIterator(StmtPtr *I) : Base(I) {}

    /// Dereference casts the stored Stmt* down to T*.
    typename Base::value_type operator*() const {
      return cast<T>(*this->I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only T *.
  template <typename T>
  using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;

  using ExprIterator = CastIterator<Expr>;
  using ConstExprIterator = ConstCastIterator<Expr>;

private:
  /// Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt(StmtClass SC) {
    // Stmt holds only the bitfield union; these asserts keep the node
    // header from silently growing or losing pointer alignment.
    static_assert(sizeof(*this) <= 8,
                  "changing bitfields changed sizeof(Stmt)");
    static_assert(sizeof(*this) % alignof(void *) == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  /// Return the dynamic class of this statement.
  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }

  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getBeginLoc() const LLVM_READONLY;
  SourceLocation getEndLoc() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// \return Unique reproducible object identifier
  int64_t getID(const ASTContext &Context) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   StringRef NewlineSymbol = "\n",
                   const ASTContext *Context = nullptr) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz.  Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpessions of an
  /// AST node.  This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  /// Const children are obtained by delegating to the non-const overload.
  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  /// The declaration(s) this statement carries.
  DeclGroupRef DG;

  /// Source range of the statement.
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  // Accessors for the single declaration; see DeclGroupRef::getSingleDecl
  // for the precondition.
  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  // Iterators over the declarations themselves (as opposed to the child
  // statements/expressions above).
  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  // The semi-colon location is stored in the NullStmtBits member of the
  // bitfield union in Stmt.
  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  /// Whether the null statement was preceded by an empty macro expansion.
  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  // A null statement consists of the semi-colon only, so both ends of its
  // range are the semi-colon location.
  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  /// A null statement has no children; return an empty range.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
                           private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  // Private: creation goes through the Create/CreateEmpty factories so the
  // trailing Stmt* array can be allocated alongside the node.
  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  /// Create a CompoundStmt holding \p Stmts, allocated in \p C.
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  // The statements are stored as trailing objects immediately after the
  // CompoundStmt itself.
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  /// Replace the last statement; the body must be non-empty.
  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    body_begin()[size() - 1] = S;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Total "Stmt *" trailing objects: LHS + substatement, plus the optional
  // RHS slot when this is a GNU range case (bool converts to 0 or 1).
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  // The "..." SourceLocation is stored only for GNU range cases.
  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Indices into the trailing "Stmt *" array. The optional RHS slot sits
  // between the LHS and the substatement, shifting the later offsets by one.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      // The flag must be set before these: both assert caseStmtIsGNURange().
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  /// Returns an invalid SourceLocation for a non-range case.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  // The LHS expression is stored in a "Stmt *" slot; cast back on access.
  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  // The RHS slot exists only for GNU range cases; otherwise return null.
  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;
    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// Represents the "default:" label of a switch; holds the labeled
/// substatement and the keyword/colon locations via SwitchCase.
class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  const Stmt *getSubStmt() const { return SubStmt; }
  Stmt *getSubStmt() { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  // The "default" keyword location is the base class's keyword location.
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }
  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return DefaultStmtClass == T->getStmtClass();
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
};
// Dispatch to the concrete subclass; a SwitchCase is always exactly one of
// CaseStmt or DefaultStmt.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
// Dispatch to the concrete subclass; a SwitchCase is always exactly one of
// CaseStmt or DefaultStmt.
Stmt *SwitchCase::getSubStmt() {
  if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// LabelStmt - Represents a label together with the statement it labels.
/// For example:
/// foo: return;
class LabelStmt : public Stmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : Stmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    // The identifier location lives in LabelStmtBits, not a member.
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) {}

  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }

  void setDecl(LabelDecl *D) { TheDecl = D; }
  LabelDecl *getDecl() const { return TheDecl; }
  const char *getName() const;

  void setSubStmt(Stmt *SS) { SubStmt = SS; }
  const Stmt *getSubStmt() const { return SubStmt; }
  Stmt *getSubStmt() { return SubStmt; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return LabelStmtClass == T->getStmtClass();
  }
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public Stmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;

  // The attribute count and location are stored in the AttributedStmtBits
  // bitfield; the "const Attr *" array itself is a trailing object.
  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : Stmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  // Deserialization placeholder: records the count and null-initializes the
  // trailing attribute array (the reader fills it in later).
  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : Stmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }

  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  // Only the substatement is a child; the attributes are not Stmt nodes.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at the end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  // Mandatory slots (condition + then), plus one per optional piece stored
  // (each bool converts to 0 or 1).
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  // The "else" keyword location is stored only when an else branch exists.
  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Indices into the trailing "Stmt *" array; each optional leading slot
  // shifts the offsets of everything stored after it.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  // The condition is stored in a "Stmt *" slot; cast back on access.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  // The else slot exists only when hasElseStorage(); otherwise return null.
  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Mandatory slots (condition + body), plus one per optional piece stored.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Indices into the trailing "Stmt *" array; each optional leading slot
  // shifts the offsets of everything stored after it.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  // The condition is stored in a "Stmt *" slot; cast back on access.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Cases are chained through SwitchCase::NextSwitchCase; new cases are
  // pushed onto the front of the list.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Indices into the trailing "Stmt *" array; the optional condition-variable
  // slot, when present, shifts the condition and body offsets by one.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  // Mandatory slots (condition + body), plus the optional variable slot.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  // The condition is stored in a "Stmt *" slot; cast back on access.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  // Fixed-size child array: the body and the controlling condition.
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    // The "do" keyword location lives in DoStmtBits.
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  // The condition is stored as a "Stmt *"; cast on access.
  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }

  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  Stmt *getBody() { return SubExprs[BODY]; }

  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return DoStmtClass == T->getStmtClass();
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;
public:
ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP);
/// Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}
Stmt *getInit() { return SubExprs[INIT]; }
/// Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
/// \code
/// for (int x = random(); int y = mangle(x); ++x) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getForLoc(); }
SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    // The "goto" keyword location lives in GotoStmtBits.
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  void setLabel(LabelDecl *D) { Label = D; }
  LabelDecl *getLabel() const { return Label; }

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  // Iterators: a direct goto has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const Stmt *T) {
    return GotoStmtClass == T->getStmtClass();
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
SourceLocation StarLoc;
Stmt *Target;
public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
: Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
setTarget(target);
setGotoLoc(gotoLoc);
}
/// Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
: Stmt(IndirectGotoStmtClass, Empty) {}
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
const Expr *getTarget() const {
return reinterpret_cast<const Expr *>(Target);
}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }
/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
}
SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
// Iterators
child_range children() { return child_range(&Target, &Target + 1); }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
setContinueLoc(CL);
}
/// Build an empty continue statement.
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}
SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }
SourceLocation getBeginLoc() const { return getContinueLoc(); }
SourceLocation getEndLoc() const { return getContinueLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ContinueStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
setBreakLoc(BL);
}
/// Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}
SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }
SourceLocation getBeginLoc() const { return getBreakLoc(); }
SourceLocation getEndLoc() const { return getBreakLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
: public Stmt,
private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
friend TrailingObjects;
/// The return expression.
Stmt *RetExpr;
// ReturnStmt is followed optionally by a trailing "const VarDecl *"
// for the NRVO candidate. Present if and only if hasNRVOCandidate().
/// True if this ReturnStmt has storage for an NRVO candidate.
bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }
unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
return hasNRVOCandidate();
}
/// Build a return statement.
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);
/// Build an empty return statement.
explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);
public:
/// Create a return statement.
static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
const VarDecl *NRVOCandidate);
/// Create an empty return statement, optionally with
/// storage for an NRVO candidate.
static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);
Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }
/// Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const {
return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
: nullptr;
}
/// Set the variable that might be used for the named return value
/// optimization. The return statement must have storage for it,
/// which is the case if and only if hasNRVOCandidate() is true.
void setNRVOCandidate(const VarDecl *Var) {
assert(hasNRVOCandidate() &&
"This return statement has no storage for an NRVO candidate!");
*getTrailingObjects<const VarDecl *>() = Var;
}
SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }
SourceLocation getBeginLoc() const { return getReturnLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
// Iterators
child_range children() {
if (RetExpr)
return child_range(&RetExpr, &RetExpr + 1);
return child_range(child_iterator(), child_iterator());
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;
SourceLocation AsmLoc;
/// True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
Stmt **Exprs = nullptr;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
: Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs),
NumClobbers(numclobbers) {}
public:
/// Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
SourceLocation getEndLoc() const LLVM_READONLY { return {}; }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators.
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators.
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
//// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
private:
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children, Children+2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getEndLoc(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children, Children+2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variable captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the implicit the outlined function and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
compiler_cgen.c | /* Generated by Nim Compiler v0.15.0 */
/* (c) 2016 Andreas Rumpf */
/* The generated code is subject to the original license. */
#define NIM_INTBITS 64
#include "nimbase.h"
#include <string.h>
typedef struct Tcgen529027 Tcgen529027;
typedef struct TNimType TNimType;
typedef struct TNimNode TNimNode;
typedef struct Ropeobj178006 Ropeobj178006;
typedef struct NimStringDesc NimStringDesc;
typedef struct TGenericSeq TGenericSeq;
typedef struct Cell47304 Cell47304;
typedef struct Cellseq47320 Cellseq47320;
typedef struct Gcheap49818 Gcheap49818;
typedef struct Gcstack49816 Gcstack49816;
typedef struct Memregion29486 Memregion29486;
typedef struct Smallchunk29440 Smallchunk29440;
typedef struct Llchunk29480 Llchunk29480;
typedef struct Bigchunk29442 Bigchunk29442;
typedef struct Intset29414 Intset29414;
typedef struct Trunk29410 Trunk29410;
typedef struct Avlnode29484 Avlnode29484;
typedef struct Gcstat49814 Gcstat49814;
typedef struct Cellset47316 Cellset47316;
typedef struct Pagedesc47312 Pagedesc47312;
typedef struct Ttypeseq292836 Ttypeseq292836;
typedef struct Ttype292840 Ttype292840;
typedef struct Intset268030 Intset268030;
typedef struct Trunk268026 Trunk268026;
typedef struct Trunkseq268028 Trunkseq268028;
typedef struct Tpasscontext341002 Tpasscontext341002;
typedef struct Tsym292834 Tsym292834;
typedef struct Tidobj199004 Tidobj199004;
typedef struct TNimObject TNimObject;
typedef struct TY292929 TY292929;
typedef struct Tstrtable292806 Tstrtable292806;
typedef struct Tsymseq292804 Tsymseq292804;
typedef struct Tident199010 Tident199010;
typedef struct Tlineinfo191336 Tlineinfo191336;
typedef struct Tnode292802 Tnode292802;
typedef struct Tloc292816 Tloc292816;
typedef struct Tlib292820 Tlib292820;
typedef struct TY529153 TY529153;
typedef struct TY203018 TY203018;
typedef struct Tidtable292850 Tidtable292850;
typedef struct Tidpairseq292848 Tidpairseq292848;
typedef struct Tlinkedlist147013 Tlinkedlist147013;
typedef struct Tlistentry147007 Tlistentry147007;
typedef struct Tcproc529021 Tcproc529021;
typedef struct Tnodetable292862 Tnodetable292862;
typedef struct Tnodepairseq292860 Tnodepairseq292860;
typedef struct Debuginfo203009 Debuginfo203009;
typedef struct TY203021 TY203021;
typedef struct TY203023 TY203023;
typedef struct Tnodeseq292796 Tnodeseq292796;
typedef struct TY191350 TY191350;
typedef struct TY529095 TY529095;
typedef struct Trodreader332021 Trodreader332021;
typedef struct TY292960 TY292960;
typedef struct TY203017 TY203017;
typedef struct Enumdesc203007 Enumdesc203007;
typedef struct Tinfocc273008 Tinfocc273008;
typedef struct Tblock529019 Tblock529019;
typedef struct Ttraversalclosure537019 Ttraversalclosure537019;
typedef struct TY135002 TY135002;
typedef struct Tbitset339004 Tbitset339004;
typedef struct TY191612 TY191612;
typedef struct Tfileinfo191334 Tfileinfo191334;
typedef struct Tinfoos176035 Tinfoos176035;
typedef struct Tinfocpu176476 Tinfocpu176476;
typedef struct Tstrentry147009 Tstrentry147009;
typedef struct TY128506 TY128506;
typedef struct Basechunk29438 Basechunk29438;
typedef struct Freecell29430 Freecell29430;
typedef struct Tinstantiation292824 Tinstantiation292824;
typedef struct Tidpair292846 Tidpair292846;
typedef struct Tnodepair292858 Tnodepair292858;
typedef struct Filenamemapping203005 Filenamemapping203005;
typedef struct TY332033 TY332033;
typedef struct Tindex332019 Tindex332019;
typedef struct Tiitable299142 Tiitable299142;
typedef struct Tiipairseq299140 Tiipairseq299140;
typedef struct Table332054 Table332054;
typedef struct Keyvaluepairseq332057 Keyvaluepairseq332057;
typedef struct Memfile330202 Memfile330202;
typedef struct TY292961 TY292961;
typedef struct Tiipair299138 Tiipair299138;
typedef struct Keyvaluepair332060 Keyvaluepair332060;
typedef NU8 Tnimkind3403;
typedef NU8 Tnimtypeflag3409Set;
typedef N_NIMCALL_PTR(void, TY3489) (void* p0, NI op0);
typedef N_NIMCALL_PTR(void*, TY3494) (void* p0);
/* NOTE(review): this chunk is Nim-compiler-generated C ("nimcache" output).
 * Numeric suffixes on identifiers are compiler-assigned ids.  Struct field
 * order and layout are ABI for the rest of the generated code — do not
 * reorder or rename anything here. */
/* Run-time type information record for a Nim type. */
struct TNimType {
NI size;                      /* byte size of the described type */
Tnimkind3403 kind;            /* type-kind discriminator */
Tnimtypeflag3409Set flags;
TNimType* base;               /* base type pointer (e.g. object inheritance) */
TNimNode* node;               /* field/variant layout tree (see TNimNode) */
void* finalizer;
TY3489 marker;                /* marker proc: (void* p, NI op) — see TY3489 */
TY3494 deepcopy;              /* deepcopy proc: void* -> void* — see TY3494 */
};
typedef NU8 Tnimnodekind3405;
/* One node of an RTTI layout tree; children live in sons[0..len-1]. */
struct TNimNode {
Tnimnodekind3405 kind;
NI offset;                    /* byte offset of the field within its object */
TNimType* typ;
NCSTRING name;
NI len;                       /* number of entries in `sons` */
TNimNode** sons;
};
typedef N_NIMCALL_PTR(void, Globalmarkerproc55802) (void);
/* Header shared by all Nim seq/string payloads. */
struct TGenericSeq {
NI len;                       /* current length */
NI reserved;                  /* allocated capacity — presumably may carry
                               * flag bits; TODO confirm against runtime */
};
/* Nim string: seq header followed by character payload. */
struct NimStringDesc {
TGenericSeq Sup;
NIM_CHAR data[SEQ_DECL_SIZE]; /* flexible-array-style payload */
};
/* GC bookkeeping cell placed in front of each heap object. */
struct Cell47304 {
NI refcount;
TNimType* typ;
};
/* Growable array of cell pointers (used for ZCT / decref stacks below). */
struct Cellseq47320 {
NI len;
NI cap;
Cell47304** d;
};
typedef Smallchunk29440* TY29501[512];
typedef Trunk29410* Trunkbuckets29412[256];
/* Hash-bucketed integer set used by the allocator (chunk-start tracking). */
struct Intset29414 {
Trunkbuckets29412 data;
};
/* Per-region allocator state: small-chunk free lists, large-chunk free
 * list, accounting counters, and an AVL tree (root/deleted/last/freeavlnodes)
 * — presumably for address-range lookups; confirm against the runtime. */
struct Memregion29486 {
NI minlargeobj;
NI maxlargeobj;
TY29501 freesmallchunks;
Llchunk29480* llmem;
NI currmem;
NI maxmem;
NI freemem;
NI lastsize;
Bigchunk29442* freechunkslist;
Intset29414 chunkstarts;
Avlnode29484* root;
Avlnode29484* deleted;
Avlnode29484* last;
Avlnode29484* freeavlnodes;
NIM_BOOL locked;
};
/* GC statistics counters. */
struct Gcstat49814 {
NI stackscans;
NI cyclecollections;
NI maxthreshold;
NI maxstacksize;
NI maxstackcells;
NI cycletablesize;
NI64 maxpause;
};
/* Paged set of cells (used as the `marked` set of the heap). */
struct Cellset47316 {
NI counter;
NI max;
Pagedesc47312* head;
Pagedesc47312** data;
};
/* Top-level GC heap: stacks to scan, ZCT/decref/temp cell stacks, the
 * allocator region, statistics and the mark set. */
struct Gcheap49818 {
Gcstack49816* stack;
void* stackbottom;
NI cyclethreshold;
Cellseq47320 zct;
Cellseq47320 decstack;
Cellseq47320 tempstack;
NI recgclock;
Memregion29486 region;
Gcstat49814 stat;
Cellset47316 marked;
Cellseq47320 additionalroots;
};
/* Compiler-side integer set (distinct from the allocator's Intset29414). */
struct Intset268030 {
NI counter;
NI max;
Trunk268026* head;
Trunkseq268028* data;
};
/* Root of the Nim object hierarchy: carries the RTTI pointer. */
struct TNimObject {
TNimType* m_type;
};
/* Object with a unique integer id (base of symbols, types, identifiers). */
struct Tidobj199004 {
TNimObject Sup;
NI id;
};
typedef NU8 Tsymkind292435;
/* Symbol table: entry count plus backing seq of symbol pointers. */
struct Tstrtable292806 {
NI counter;
Tsymseq292804* data;
};
typedef NU16 Tmagic292524;
/* Source position: 16-bit line/column plus an index into the file table. */
struct Tlineinfo191336 {
NI16 line;
NI16 col;
NI32 fileindex;
};
typedef NU32 Tsymflag292184Set;
typedef NU32 Toption169009Set;
typedef NU8 Tlockind292808;
typedef NU8 Tstorageloc292812;
typedef NU16 Tlocflag292810Set;
/* Code-generator "location": kind `k`, storage class `s`, the Nim type
 * `t` and the rope `r` holding the C code fragment that names the value. */
struct Tloc292816 {
Tlockind292808 k;
Tstorageloc292812 s;
Tlocflag292810Set flags;
Ttype292840* t;
Ropeobj178006* r;
};
/* Compiler symbol.  `kindU` is the C rendering of a Nim object variant:
 * which of S1..S4 is valid is discriminated by `kind`. */
struct Tsym292834 {
Tidobj199004 Sup;
Tsymkind292435 kind;
union{
struct {Ttypeseq292836* typeinstcache;
} S1;
struct {TY292929* procinstcache;
Tsym292834* gcunsafetyreason;
} S2;
struct {TY292929* usedgenerics;
Tstrtable292806 tab;
} S3;
struct {Tsym292834* guard;
NI bitsize;
} S4;
} kindU;
Tmagic292524 magic;
Ttype292840* typ;
Tident199010* name;
Tlineinfo191336 info;           /* declaration position */
Tsym292834* owner;
Tsymflag292184Set flags;
Tnode292802* ast;               /* the symbol's AST, if any */
Toption169009Set options;
NI position;
NI offset;
Tloc292816 loc;                 /* where codegen placed this symbol */
Tlib292820* annex;
Tnode292802* constraint;
};
/* (string, int) tuple rendered as a generated pair struct. */
struct TY203018 {
NimStringDesc* Field0;
NI Field1;
};
/* Base of compiler-pass contexts. */
struct Tpasscontext341002 {
TNimObject Sup;
NIM_BOOL fromcache;
};
typedef Ropeobj178006* Tcfilesections529009[18]; /* per-section output ropes of a .c file */
typedef NU8 Codegenflag529025Set;
/* Table keyed by object id (see Tidpair292846). */
struct Tidtable292850 {
NI counter;
Tidpairseq292848* data;
};
/* Intrusive doubly-linked list (entries derive from Tlistentry147007). */
struct Tlinkedlist147013 {
Tlistentry147007* head;
Tlistentry147007* tail;
NI counter;
};
/* Table keyed by AST node (see Tnodepair292858). */
struct Tnodetable292862 {
NI counter;
Tnodepairseq292860* data;
};
typedef Ropeobj178006* TY529136[10];
/* Per-module C code generator state: output sections, type caches,
 * declared-things sets, the init/pre-init/post-init procs and misc
 * counters used while emitting one C file. */
struct Tcgen529027 {
Tpasscontext341002 Sup;
Tcfilesections529009 s;         /* generated C, split by file section */
Codegenflag529025Set flags;
Tsym292834* module;
NimStringDesc* filename;
NimStringDesc* cfilename;
Ropeobj178006* tmpbase;
Tidtable292850 typecache;       /* Nim type -> emitted C type name */
Tidtable292850 forwtypecache;
Intset268030 declaredthings;    /* ids already declared in this file */
Intset268030 declaredprotos;
Tlinkedlist147013 headerfiles;
Intset268030 typeinfomarker;
Tcproc529021* initproc;
Tcproc529021* postinitproc;
Tcproc529021* preinitproc;
Ttypeseq292836* typestack;
Tnodetable292862 datacache;
Tsymseq292804* forwardedprocs;
NI typenodes;
NI nimtypes;
Ropeobj178006* typenodesname;
Ropeobj178006* nimtypesname;
NI labels;                      /* label counter for unique C names */
TY529136 extensionloaders;
Ropeobj178006* injectstmt;
};
/* Debug-info accumulator (files + enums registered during codegen). */
struct Debuginfo203009 {
NI version;
TY203021* files;
TY203023* enums;
NIM_BOOL conflicts;
};
/* Interned identifier: string, hash `h`, and hash-chain `next` link. */
struct Tident199010 {
Tidobj199004 Sup;
NimStringDesc* s;
Tident199010* next;
NI h;
};
/* Per-proc code generator state: nesting of try/except/blocks, label
 * counter, options, and back-pointer to the owning module generator. */
struct Tcproc529021 {
Tsym292834* prc;                /* the Nim proc being generated */
NIM_BOOL beforeretneeded;
NIM_BOOL threadvaraccessed;
Tlineinfo191336 lastlineinfo;
Tnodeseq292796* nestedtrystmts;
NI inexceptblock;
TY191350* finallysafepoints;
NI labels;
TY529095* blocks;               /* open block/loop stack */
NI breakidx;
Toption169009Set options;
NI maxframelen;
Tcgen529027* module;
NI withinloop;
NI splitdecls;
NI gcframeid;
Ropeobj178006* gcframetype;
};
typedef NU8 Tsymflag292184;
typedef NU8 Codegenflag529025;
typedef NU8 Toption169009;
typedef NU64 Tglobaloption169013Set;
typedef NU8 Tglobaloption169013;
typedef NU8 Tcommands169076;
typedef NU16 Tnodeflag292427Set;
typedef NU8 Tnodekind292020;
/* AST node.  Which union member (S1..S6) is valid is discriminated by
 * `kind`: int / float / string literal, symbol, identifier, or child
 * node list. */
struct Tnode292802 {
Ttype292840* typ;
Tlineinfo191336 info;
Tnodeflag292427Set flags;
Tnodekind292020 kind;
union{
struct {NI64 intval;
} S1;
struct {NF floatval;
} S2;
struct {NimStringDesc* strval;
} S3;
struct {Tsym292834* sym;
} S4;
struct {Tident199010* ident;
} S5;
struct {Tnodeseq292796* sons;
} S6;
} kindU;
NimStringDesc* comment;
};
typedef Ropeobj178006* TY533289[1];
typedef NU8 Tlocflag292810;
/* Base entry of the intrusive doubly-linked list Tlinkedlist147013. */
struct Tlistentry147007 {
TNimObject Sup;
Tlistentry147007* prev;
Tlistentry147007* next;
};
typedef NU8 Tlibkind292818;
/* Imported/dynamic library description. */
struct Tlib292820 {
Tlistentry147007 Sup;
Tlibkind292818 kind;
NIM_BOOL generated;
NIM_BOOL isoverriden;
Ropeobj178006* name;
Tnode292802* path;
};
typedef NU8 Tcfilesection529005;
typedef NU8 Ttypekind292244;
typedef NU8 Tcallingconvention292002;
typedef NU32 Ttypeflag292431Set;
/* Compiler type: kind/callconv/flags, child types in `sons`, associated
 * node `n`, owning and naming symbols, lifted destructor/deepcopy/
 * assignment hooks, and the cached size/alignment. */
struct Ttype292840 {
Tidobj199004 Sup;
Ttypekind292244 kind;
Tcallingconvention292002 callconv;
Ttypeflag292431Set flags;
Ttypeseq292836* sons;
Tnode292802* n;
Tsym292834* owner;
Tsym292834* sym;
Tsym292834* destructor;
Tsym292834* deepcopy;
Tsym292834* assignment;
TY292960* methods;
NI64 size;                      /* cached byte size (-1 presumably = unknown; confirm) */
NI16 align;
NI16 locklevel;
Tloc292816 loc;
};
typedef Ropeobj178006* TY532811[2];
typedef NU8 Tctypekind529007;
typedef NU64 Ttypekind292244Set;
typedef NU8 Ttypeflag292431;
typedef NimStringDesc* TY533943[14];
typedef NU8 Tprefereddesc320011;
typedef Ropeobj178006* TY178507[1];
/* Enum description recorded into Debuginfo203009 (see registerenum). */
struct Enumdesc203007 {
NI size;
NU32 owner;
NI id;
NimStringDesc* name;
TY203017* values;               /* seq of (name, ordinal) pairs */
};
typedef Ropeobj178006* TY535235[4];
typedef NimStringDesc* TY292016[10];
typedef Ropeobj178006* TY535238[3];
/* Rope: binary concatenation tree of string fragments; leaves carry
 * `data`, interior nodes carry left/right; `length` is the total length. */
struct Ropeobj178006 {
TNimObject Sup;
Ropeobj178006* left;
Ropeobj178006* right;
NI length;
NimStringDesc* data;
};
typedef NU8 Tinfoccprop273004Set;
/* C-compiler description tuple (one row of the compiler table TY273427):
 * twenty string fields (names/command templates — exact meaning not
 * visible here) plus a property set. */
struct Tinfocc273008 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
NimStringDesc* Field3;
NimStringDesc* Field4;
NimStringDesc* Field5;
NimStringDesc* Field6;
NimStringDesc* Field7;
NimStringDesc* Field8;
NimStringDesc* Field9;
NimStringDesc* Field10;
NimStringDesc* Field11;
NimStringDesc* Field12;
NimStringDesc* Field13;
NimStringDesc* Field14;
NimStringDesc* Field15;
NimStringDesc* Field16;
NimStringDesc* Field17;
NimStringDesc* Field18;
NimStringDesc* Field19;
Tinfoccprop273004Set Field20;
};
typedef Tinfocc273008 TY273427[13];
typedef NU8 Tsystemcc273002;
typedef NU8 Tnodeflag292427;
typedef NU8 Tcprocsection529011;
typedef Ropeobj178006* Tcprocsections529013[3];
/* One open C block/loop during proc generation (stacked in TY529095). */
struct Tblock529019 {
NI id;
Ropeobj178006* label;
Tcprocsections529013 sections;
NIM_BOOL isloop;
NI16 nestedtrystmts;
NI16 nestedexceptstmts;
NI16 framelen;
};
typedef NU8 Tgcmode169080;
typedef NU8 Ttypeinforeason537016;
/* Closure threaded through the GC-traverse-proc generators. */
struct Ttraversalclosure537019 {
Tcproc529021* p;
NimStringDesc* visitorfrmt;     /* format string for the visitor call */
};
typedef NU8 Ttypefieldresult320145;
typedef NU8 Tinfoccprop273004;
typedef Ropeobj178006* TY536847[6];
typedef Ropeobj178006* TY536401[7];
typedef Ropeobj178006* TY536475[5];
typedef NU16 Tmsgkind191002;
typedef NU8 Tassignmentflag538302Set;
typedef NU8 Tassignmentflag538302;
typedef NimStringDesc* TY552655[19];
typedef NimStringDesc* TY551642[3];
typedef NimStringDesc* TY556765[4];
typedef NimStringDesc* TY551828[42];
typedef NimStringDesc* TY551281[7];
typedef NU8 Trenderflag311004Set;
typedef NimStringDesc* TY557052[2];
typedef NU8 Tclosuretypekind535681;
typedef NimStringDesc* TY556428[6];
typedef NU8 Tanalysisresult473003;
typedef NU8 char136Set[32];
typedef NU8 Tdistinctcompare324427;
typedef NU8 Ttypecmpflag324429Set;
typedef NU16 Tspecialword275003;
typedef NU8 Tsystemos176004;
/* Per-source-file record: various path renderings plus the cached lines. */
struct Tfileinfo191334 {
NimStringDesc* fullpath;
NimStringDesc* projpath;
NimStringDesc* shortname;
Ropeobj178006* quotedname;
Ropeobj178006* quotedfullname;
TY191350* lines;
NimStringDesc* dirtyfile;
};
typedef NU8 Tinfoosprop176031Set;
/* Operating-system description tuple (one row of TY176082). */
struct Tinfoos176035 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
NimStringDesc* Field3;
NimStringDesc* Field4;
NimStringDesc* Field5;
NimStringDesc* Field6;
NimStringDesc* Field7;
NimStringDesc* Field8;
NimStringDesc* Field9;
NimStringDesc* Field10;
NimStringDesc* Field11;
Tinfoosprop176031Set Field12;
};
typedef Tinfoos176035 TY176082[24];
typedef NU8 Tendian176474;
/* CPU description tuple: name, an int, endianness, two more ints
 * (presumably word/pointer sizes — confirm against the platform table). */
struct Tinfocpu176476 {
NimStringDesc* Field0;
NI Field1;
Tendian176474 Field2;
NI Field3;
NI Field4;
};
typedef Tinfocpu176476 TY176510[19];
typedef NU8 Tsystemcpu176452;
/* Linked-list entry carrying a string payload. */
struct Tstrentry147009 {
Tlistentry147007 Sup;
NimStringDesc* data;
};
/* Three-string tuple. */
struct TY128506 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
};
/* One scannable thread stack segment (linked list of registered stacks). */
struct Gcstack49816 {
Gcstack49816* prev;
Gcstack49816* next;
void* starts;
void* pos;
NI maxstacksize;
};
/* Common header of all heap chunks. */
struct Basechunk29438 {
NI prevsize;
NI size;
NIM_BOOL used;
};
/* Chunk holding many small fixed-size cells; `freelist` chains free cells. */
struct Smallchunk29440 {
Basechunk29438 Sup;
Smallchunk29440* next;
Smallchunk29440* prev;
Freecell29430* freelist;
NI free;
NI acc;
NF data;                        /* NF used as max-align start of payload */
};
/* Low-level (OS-obtained) memory chunk list node. */
struct Llchunk29480 {
NI size;
NI acc;
Llchunk29480* next;
};
/* Chunk holding one large allocation. */
struct Bigchunk29442 {
Basechunk29438 Sup;
Bigchunk29442* next;
Bigchunk29442* prev;
NI align;
NF data;                        /* NF used as max-align start of payload */
};
typedef NI TY29419[8];
/* Bit-set trunk: key plus 8 machine words of bits (allocator variant). */
struct Trunk29410 {
Trunk29410* next;
NI key;
TY29419 bits;
};
typedef Avlnode29484* TY29491[2];
/* AVL tree node with child links indexed 0/1 and an upper-bound key. */
struct Avlnode29484 {
TY29491 link;
NI key;
NI upperbound;
NI level;
};
/* Page descriptor used by Cellset47316 (same bit-trunk shape as Trunk29410). */
struct Pagedesc47312 {
Pagedesc47312* next;
NI key;
TY29419 bits;
};
/* Bit-set trunk for the compiler-side Intset268030. */
struct Trunk268026 {
Trunk268026* next;
NI key;
TY29419 bits;
};
/* Entry of Tidtable292850: id-object key -> arbitrary object value. */
struct Tidpair292846 {
Tidobj199004* key;
TNimObject* val;
};
/* Entry of Tnodetable292862: cached hash `h`, node key, int value. */
struct Tnodepair292858 {
NI h;
Tnode292802* key;
NI val;
};
/* (package, file) -> mangled 32-bit id mapping used by the debug info. */
struct Filenamemapping203005 {
NimStringDesc* package;
NimStringDesc* file;
NU32 mangled;
};
typedef NU8 Treasonforrecompile332002;
/* int -> int table (counter + backing pair seq). */
struct Tiitable299142 {
NI counter;
Tiipairseq299140* data;
};
/* Index section of a rod (symbol cache) file. */
struct Tindex332019 {
NI lastidxkey;
NI lastidxval;
Tiitable299142 tab;
NimStringDesc* r;
NI offset;
};
/* id -> symbol table used by the rod reader. */
struct Table332054 {
Keyvaluepairseq332057* data;
NI counter;
};
/* Memory-mapped file handle. */
struct Memfile330202 {
void* mem;
NI size;
int handle;
};
/* Reader state for a rod (compilation cache) file: cursor `pos` into the
 * raw text `s`, section offsets (dataidx..methodsidx), the two index
 * tables and the backing memory-mapped file. */
struct Trodreader332021 {
TNimObject Sup;
NI pos;                         /* current read position in `s` */
NCSTRING s;                     /* raw file contents (points into memfile) */
Toption169009Set options;
Treasonforrecompile332002 reason;
TY332033* moddeps;
TY332033* files;
NI dataidx;
NI convertersidx;
NI initidx;
NI interfidx;
NI compilerprocsidx;
NI methodsidx;
NimStringDesc* filename;
Tindex332019 index;
Tindex332019 imports;
NI readerindex;
NI line;
NI moduleid;
Table332054 syms;
Memfile330202 memfile;
Tsymseq292804* methods;
NimStringDesc* origfile;
NIM_BOOL inviewmode;
};
/* (int, symbol) tuple (element of TY292960, i.e. Ttype.methods). */
struct TY292961 {
NI Field0;
Tsym292834* Field1;
};
/* Free-list cell inside a Smallchunk29440. */
struct Freecell29430 {
Freecell29430* next;
NI zerofield;
};
/* One generic instantiation: instantiated sym + its concrete type args. */
struct Tinstantiation292824 {
Tsym292834* sym;
Ttypeseq292836* concretetypes;
NI compilesid;
};
/* int -> int pair (entry of Tiitable299142). */
struct Tiipair299138 {
NI key;
NI val;
};
/* (int, int, symbol) entry of Table332054. */
struct Keyvaluepair332060 {
NI Field0;
NI Field1;
Tsym292834* Field2;
};
/* Concrete seq instantiations: each is the common TGenericSeq header
 * followed by an element payload array (SEQ_DECL_SIZE is a flexible-array
 * idiom; the real length is Sup.len). */
struct Ttypeseq292836 {
TGenericSeq Sup;
Ttype292840* data[SEQ_DECL_SIZE];
};
struct TY529153 {
TGenericSeq Sup;
Tcgen529027* data[SEQ_DECL_SIZE];
};
struct Tsymseq292804 {
TGenericSeq Sup;
Tsym292834* data[SEQ_DECL_SIZE];
};
struct TY203017 {
TGenericSeq Sup;
TY203018 data[SEQ_DECL_SIZE];
};
struct TY135002 {
TGenericSeq Sup;
NimStringDesc* data[SEQ_DECL_SIZE];
};
struct Tbitset339004 {
TGenericSeq Sup;
NI8 data[SEQ_DECL_SIZE];
};
struct TY529095 {
TGenericSeq Sup;
Tblock529019 data[SEQ_DECL_SIZE];
};
struct TY191350 {
TGenericSeq Sup;
Ropeobj178006* data[SEQ_DECL_SIZE];
};
struct Tnodeseq292796 {
TGenericSeq Sup;
Tnode292802* data[SEQ_DECL_SIZE];
};
struct TY191612 {
TGenericSeq Sup;
Tfileinfo191334 data[SEQ_DECL_SIZE];
};
struct Trunkseq268028 {
TGenericSeq Sup;
Trunk268026* data[SEQ_DECL_SIZE];
};
struct TY292929 {
TGenericSeq Sup;
Tinstantiation292824* data[SEQ_DECL_SIZE];
};
struct Tidpairseq292848 {
TGenericSeq Sup;
Tidpair292846 data[SEQ_DECL_SIZE];
};
struct Tnodepairseq292860 {
TGenericSeq Sup;
Tnodepair292858 data[SEQ_DECL_SIZE];
};
struct TY203021 {
TGenericSeq Sup;
Filenamemapping203005 data[SEQ_DECL_SIZE];
};
struct TY203023 {
TGenericSeq Sup;
Enumdesc203007 data[SEQ_DECL_SIZE];
};
struct TY292960 {
TGenericSeq Sup;
TY292961 data[SEQ_DECL_SIZE];
};
struct TY332033 {
TGenericSeq Sup;
NI32 data[SEQ_DECL_SIZE];
};
struct Tiipairseq299140 {
TGenericSeq Sup;
Tiipair299138 data[SEQ_DECL_SIZE];
};
struct Keyvaluepairseq332057 {
TGenericSeq Sup;
Keyvaluepair332060 data[SEQ_DECL_SIZE];
};
N_NIMCALL(void, nimGCvisit)(void* d0, NI op0);
N_NIMCALL(void, T839829468_2)(void);
N_NIMCALL(void, nimRegisterGlobalMarker)(Globalmarkerproc55802 markerproc0);
N_NIMCALL(void, T839829468_3)(void);
N_NIMCALL(Ropeobj178006*, rope_178277_2381377266)(NimStringDesc* s0);
static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0);
static N_INLINE(Cell47304*, usrtocell_51440_1689653243)(void* usr0);
static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47304* c0);
N_NOINLINE(void, addzct_51417_1689653243)(Cellseq47320* s0, Cell47304* c0);
N_NIMCALL(void, T839829468_5)(void);
N_NIMCALL(void, T839829468_6)(void);
static N_INLINE(void, nimGCunrefNoCycle)(void* p0);
N_NIMCALL(void*, newSeqRC1)(TNimType* typ0, NI len0);
N_NIMCALL(void, T839829468_7)(void);
N_NIMCALL(void, initintset_268885_2627731572)(Intset268030* Result);
N_NOINLINE(void, chckNil)(void* p0);
N_NIMCALL(void, genericReset)(void* dest0, TNimType* mt0);
N_NIMCALL(void, T839829468_8)(void);
N_NIMCALL(Tcgen529027*, newmodule_563044_839829468)(Tsym292834* module0);
N_NIMCALL(Tcgen529027*, getcgenmodule_532226_839829468)(Tsym292834* s0);
N_NIMCALL(void, internalerror_196113_155036129)(NimStringDesc* errmsg0);
N_NIMCALL(NimStringDesc*, HEX24_196185_1689653243)(TY203018 x0);
N_NIMCALL(Tcgen529027*, rawnewmodule_563038_839829468)(Tsym292834* module0);
N_NIMCALL(Tcgen529027*, rawnewmodule_562663_839829468)(Tsym292834* module0, NimStringDesc* filename0);
N_NIMCALL(void*, newObj)(TNimType* typ0, NI size0);
static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0);
static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0);
N_NIMCALL(NimStringDesc*, HEX24_8401_1689653243)(NU64 x0);
N_NIMCALL(NU32, hashowner_532977_839829468)(Tsym292834* s0);
N_NIMCALL(NU32, register_203121_1926258066)(Debuginfo203009* self0, NimStringDesc* package0, NimStringDesc* file0);
N_NIMCALL(NimStringDesc*, rawNewString)(NI space0);
N_NIMCALL(void, initlinkedlist_147031_3771138726)(Tlinkedlist147013* list0);
N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src0);
N_NIMCALL(void, initidtable_296019_850551059)(Tidtable292850* x0);
N_NIMCALL(Tcproc529021*, newproc_529206_3723162438)(Tsym292834* prc0, Tcgen529027* module0);
static N_INLINE(void, asgnRef)(void** dest0, void* src0);
static N_INLINE(void, incref_53419_1689653243)(Cell47304* c0);
static N_INLINE(void, decref_53001_1689653243)(Cell47304* c0);
N_NIMCALL(Toption169009Set, initprocoptions_562635_839829468)(Tcgen529027* m0);
N_NIMCALL(Tcproc529021*, newpreinitproc_562625_839829468)(Tcgen529027* m0);
N_NIMCALL(Tcproc529021*, newpostinitproc_562630_839829468)(Tcgen529027* m0);
N_NIMCALL(void, initnodetable_296085_850551059)(Tnodetable292862* x0);
N_NIMCALL(Ropeobj178006*, gettempname_533598_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, HEX26_178418_2381377266)(Ropeobj178006* a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, rope_178401_2381377266)(NI64 i0);
N_NIMCALL(NimStringDesc*, tofullpath_192261_155036129)(NI32 fileidx0);
N_NIMCALL(TGenericSeq*, setLengthSeq)(TGenericSeq* seq0, NI elemsize0, NI newlen0);
N_NIMCALL(NimStringDesc*, tofilename_192257_155036129)(NI32 fileidx0);
N_NIMCALL(NimStringDesc*, noschangeFileExt)(NimStringDesc* filename0, NimStringDesc* ext0);
N_NIMCALL(NimStringDesc*, completecfilepath_273854_2528170400)(NimStringDesc* cfile0, NIM_BOOL createsubdir0);
N_NIMCALL(void, readmergeinfo_530613_2760143328)(NimStringDesc* cfilename0, Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, getcfile_563201_839829468)(Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, copyString)(NimStringDesc* src0);
N_NIMCALL(NimStringDesc*, withpackagename_170073_2607990831)(NimStringDesc* path0);
static N_INLINE(NIM_BOOL, skipcodegen_341085_2355241294)(Tnode292802* n0);
N_NIMCALL(void, genstmts_539244_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, expr_539248_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, fillprocloc_539201_839829468)(Tsym292834* sym0);
N_NIMCALL(void, fillloc_532282_839829468)(Tloc292816* a0, Tlockind292808 k0, Ttype292840* typ0, Ropeobj178006* r0, Tstorageloc292812 s0);
N_NIMCALL(void, unsureAsgnRef)(void** dest0, void* src0);
N_NIMCALL(Ropeobj178006*, manglename_533205_839829468)(Tsym292834* s0);
N_NIMCALL(NIM_BOOL, iskeyword_532960_839829468)(Tident199010* w0);
N_NIMCALL(NimStringDesc*, mangle_528847_2036603609)(NimStringDesc* name0);
N_NIMCALL(void, add_178487_2381377266)(Ropeobj178006** a0, NimStringDesc* b0);
N_NIMCALL(void, add_178482_2381377266)(Ropeobj178006** a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, HEX25_178905_2381377266)(NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, genprocprototype_539254_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, useheader_532369_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(NIM_BOOL, includestr_147249_3771138726)(Tlinkedlist147013* list0, NimStringDesc* data0);
N_NIMCALL(NimStringDesc*, getstr_297230_850551059)(Tnode292802* a0);
N_NIMCALL(Tsym292834*, getmodule_299123_2984716966)(Tsym292834* s0);
N_NIMCALL(NIM_BOOL, containsorincl_268862_2627731572)(Intset268030* s0, NI key0);
N_NIMCALL(Ropeobj178006*, ropecg_532407_839829468)(Tcgen529027* m0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NimStringDesc*, nimIntToStr)(NI x0);
static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0);
N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI start_79210_1689653243, NI last0);
N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI first0, NI last0);
N_NIMCALL(Ropeobj178006*, cgsym_532403_839829468)(Tcgen529027* m0, NimStringDesc* name0);
N_NIMCALL(Tsym292834*, getcompilerproc_338748_3937434831)(NimStringDesc* name0);
N_NIMCALL(void, genproc_532951_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(NIM_BOOL, isactivated_561431_839829468)(Tsym292834* prc0);
N_NIMCALL(void, addforwardedproc_532203_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(TGenericSeq*, incrSeqV2)(TGenericSeq* seq0, NI elemsize0);
N_NIMCALL(void, genprocnoforward_560906_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(void, genprocaux_560284_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(Ropeobj178006*, genprocheader_535867_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(void, genclinedir_532813_839829468)(Ropeobj178006** r0, Tlineinfo191336 info0);
N_NIMCALL(void, genclinedir_532725_839829468)(Ropeobj178006** r0, NimStringDesc* filename0, NI line0);
N_NIMCALL(void, addf_179205_2381377266)(Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NimStringDesc*, makesinglelinecstring_528835_2036603609)(NimStringDesc* s0);
N_NIMCALL(NI, safelinenm_532721_839829468)(Tlineinfo191336 info0);
static N_INLINE(NI, tolinenumber_192415_155036129)(Tlineinfo191336 info0);
N_NIMCALL(void, genprocparams_534115_839829468)(Tcgen529027* m0, Ttype292840* t0, Ropeobj178006** rettype0, Ropeobj178006** params0, Intset268030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0);
N_NIMCALL(NIM_BOOL, isinvalidreturntype_533550_839829468)(Ttype292840* rettype0);
N_NIMCALL(Tctypekind529007, maptype_533394_839829468)(Ttype292840* typ0);
N_NIMCALL(Tctypekind529007, mapsettype_533389_839829468)(Ttype292840* typ0);
N_NIMCALL(NI64, getsize_320135_3876443242)(Ttype292840* typ0);
N_NIMCALL(Ttype292840*, lastson_295377_850551059)(Ttype292840* n0);
N_NIMCALL(NI64, firstord_320001_3876443242)(Ttype292840* t0);
N_NIMCALL(Ttype292840*, skiptypes_296099_850551059)(Ttype292840* t0, Ttypekind292244Set kinds0);
N_NIMCALL(NIM_BOOL, isimportedcpptype_533478_839829468)(Ttype292840* t0);
N_NIMCALL(NIM_BOOL, needscomplexassignment_533511_839829468)(Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, containsgarbagecollectedref_320117_3876443242)(Ttype292840* typ0);
static N_INLINE(NIM_BOOL, isobjlackingtypefield_533515_839829468)(Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, ispureobject_320138_3876443242)(Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, gettypedescaux_533505_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0);
N_NIMCALL(Ttype292840*, getuniquetype_528640_2036603609)(Ttype292840* key0);
N_NIMCALL(Ropeobj178006*, gettypepre_533972_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, getsimpletypedesc_533936_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, typenameorliteral_533898_839829468)(Ttype292840* t0, NimStringDesc* literal0);
N_NIMCALL(Ropeobj178006*, gettypename_533313_839829468)(Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, typename_533292_839829468)(Ttype292840* typ0);
N_NIMCALL(NimStringDesc*, reprEnum)(NI e0, TNimType* typ0);
N_NIMCALL(Ropeobj178006*, cachegettype_533593_839829468)(Tidtable292850 tab0, Ttype292840* key0);
N_NIMCALL(TNimObject*, idtableget_299086_2984716966)(Tidtable292850 t0, Tidobj199004* key0);
N_NIMCALL(NimStringDesc*, typetostring_320017_3876443242)(Ttype292840* typ0, Tprefereddesc320011 prefer0);
N_NIMCALL(Ttype292840*, elemtype_320394_3876443242)(Ttype292840* t0);
N_NIMCALL(Ropeobj178006*, HEX26_178447_2381377266)(Ropeobj178006* a0, NimStringDesc* b0);
N_NIMCALL(Ropeobj178006*, gettypeforward_534039_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, isimportedtype_533451_839829468)(Ttype292840* t0);
N_NIMCALL(NimStringDesc*, getforwardstructformat_534015_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, structorunion_534001_839829468)(Ttype292840* t0);
N_NIMCALL(void, idtableput_299094_2984716966)(Tidtable292850* t0, Tidobj199004* key0, TNimObject* val0);
N_NIMCALL(void, pushtype_533958_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, gettypedescweak_534079_839829468)(Tcgen529027* m0, Ttype292840* t0, Intset268030* check0);
N_NIMCALL(void, internalerror_196100_155036129)(Tlineinfo191336 info0, NimStringDesc* errmsg0);
N_NIMCALL(NIM_BOOL, hasenum_203230_1926258066)(Debuginfo203009* self0, NimStringDesc* ename0, NI id0, NU32 owner0);
N_NIMCALL(void*, newSeq)(TNimType* typ0, NI len0);
static N_INLINE(NI, len_293081_850551059)(Tnode292802* n0);
N_NIMCALL(void, registerenum_203419_1926258066)(Debuginfo203009* self0, Enumdesc203007* ed0);
N_NIMCALL(void, genericSeqAssign)(void* dest0, void* src_86404_1689653243, TNimType* mt0);
N_NIMCALL(void, appcg_532632_839829468)(Tcgen529027* m0, Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NI64, lengthord_320007_3876443242)(Ttype292840* t0);
N_NIMCALL(NIM_BOOL, scancppgenericslot_534827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0);
N_NIMCALL(Ttype292840*, resolvestarsincpptype_534891_839829468)(Ttype292840* typ0, NI idx0, NI stars0);
N_NIMCALL(NI, len_295339_850551059)(Ttype292840* n0);
N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI start0);
N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI first0);
N_NIMCALL(Ropeobj178006*, getrecorddesc_534643_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0);
N_NIMCALL(Ropeobj178006*, getrecordfields_534636_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0);
N_NIMCALL(Ropeobj178006*, genrecordfieldsaux_534421_839829468)(Tcgen529027* m0, Tnode292802* n0, Ropeobj178006* accessexpr0, Ttype292840* rectype0, Intset268030* check0);
N_NIMCALL(NI, sonslen_295351_850551059)(Tnode292802* n0);
N_NIMCALL(Tnode292802*, lastson_295364_850551059)(Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, HEX26_178452_2381377266)(NimStringDesc* a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, manglerecfieldname_534361_839829468)(Tsym292834* field0, Ttype292840* rectype0);
N_NIMCALL(NimStringDesc*, manglefield_532973_839829468)(Tident199010* name0);
N_NIMCALL(NIM_CHAR, nsuToUpperAsciiChar)(NIM_CHAR c0);
N_NIMCALL(Ropeobj178006*, gettupledesc_534777_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0);
N_NIMCALL(NI, sonslen_295327_850551059)(Ttype292840* n0);
N_NIMCALL(void, excl_268841_2627731572)(Intset268030* s0, NI key0);
static N_INLINE(NIM_BOOL, iscompiletimeonly_328706_3876443242)(Ttype292840* t0);
N_NIMCALL(Tstorageloc292812, paramstorageloc_534098_839829468)(Tsym292834* param0);
N_NIMCALL(NIM_BOOL, ccgintroducedptr_533611_839829468)(Tsym292834* s0);
N_NIMCALL(Tctypekind529007, mapreturntype_533447_839829468)(Ttype292840* typ0);
N_NIMCALL(Tnode292802*, easyresultasgn_560191_839829468)(Tnode292802* n0);
static N_INLINE(Tnode292802*, HEX5BHEX5D_293238_850551059)(Tnode292802* n0, NI i0);
N_NIMCALL(Tnode292802*, getbody_335226_1724185294)(Tsym292834* s0);
N_NIMCALL(Ropeobj178006*, localvardecl_538532_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(Ropeobj178006*, gettypedesc_535673_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(void, initlocexprsingleuse_539289_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0);
N_NIMCALL(void, initloc_532273_839829468)(Tloc292816* result0, Tlockind292808 k0, Ttype292840* typ0, Tstorageloc292812 s0);
N_NIMCALL(void, linefmt_532714_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
static N_INLINE(Ropeobj178006**, s_529179_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0);
N_NIMCALL(Ropeobj178006*, indentline_532656_839829468)(Tcproc529021* p0, Ropeobj178006* r0);
N_NIMCALL(void, prepend_178893_2381377266)(Ropeobj178006** a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, rdloc_538188_839829468)(Tloc292816* a0);
N_NIMCALL(void, assignlocalvar_538614_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, line_532690_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, Ropeobj178006* r0);
N_NIMCALL(void, localdebuginfo_538449_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, linef_532700_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, makecstring_191638_155036129)(NimStringDesc* s0);
N_NIMCALL(NimStringDesc*, nsuNormalize)(NimStringDesc* s0);
N_NIMCALL(Ropeobj178006*, gentypeinfo_535941_839829468)(Tcgen529027* m0, Ttype292840* t_535944_839829468);
N_NIMCALL(Tcgen529027*, bmod_529201_3723162438)(Tsym292834* module0);
N_NIMCALL(void, gentypeinfoauxbase_535960_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0, Ropeobj178006* base0);
N_NIMCALL(NIM_BOOL, canformacycle_320123_3876443242)(Ttype292840* typ0);
N_NIMCALL(void, gentupleinfo_536551_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(Ropeobj178006*, getnimnode_535945_839829468)(Tcgen529027* m0);
N_NIMCALL(Ttype292840*, fakeclosuretype_537010_839829468)(Tsym292834* owner0);
N_NIMCALL(Ttype292840*, newtype_295107_850551059)(Ttypekind292244 kind0, Tsym292834* owner0);
N_NIMCALL(void, rawaddson_296394_850551059)(Ttype292840* father0, Ttype292840* son0);
N_NIMCALL(void, gentypeinfoaux_536027_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0);
N_NIMCALL(Ropeobj178006*, gentraverseproc_537632_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttypeinforeason537016 reason0);
N_NIMCALL(void, gentraverseprocseq_537399_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ0);
N_NIMCALL(void, gettemp_537032_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* result0, NIM_BOOL needsinit0);
N_NIMCALL(void, constructloc_538388_839829468)(Tcproc529021* p0, Tloc292816* loc0, NIM_BOOL istemp0);
static N_INLINE(NIM_BOOL, iscomplexvaluetype_538317_839829468)(Ttype292840* t0);
N_NIMCALL(void, usestringh_532345_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, addrloc_538204_839829468)(Tloc292816* a0);
N_NIMCALL(void, genobjectinit_538242_839829468)(Tcproc529021* p0, Tcprocsection529011 section0, Ttype292840* t0, Tloc292816* a0, NIM_BOOL takeaddr0);
N_NIMCALL(Ttypefieldresult320145, analyseobjectwithtypefield_320149_3876443242)(Ttype292840* t0);
N_NIMCALL(Ttype292840*, getsystype_338150_3937434831)(Ttypekind292244 kind0);
N_NIMCALL(void, gentraverseproc_537022_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ_537027_839829468);
static N_INLINE(Ropeobj178006*, parentobj_537257_839829468)(Ropeobj178006* accessor0, Tcgen529027* m0);
N_NIMCALL(void, gentraverseproc_537039_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Tnode292802* n0);
N_NIMCALL(void, gencaserange_537028_839829468)(Tcproc529021* p0, Tnode292802* branch0);
N_NIMCALL(Ropeobj178006*, genliteral_539273_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, genliteral_549476_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* ty0);
N_NIMCALL(Ropeobj178006*, intliteral_539270_839829468)(NI64 i0);
N_NIMCALL(Ropeobj178006*, int64literal_549430_839829468)(NI64 i0);
N_NIMCALL(Ropeobj178006*, uint64literal_549442_839829468)(NU64 i0);
N_NIMCALL(NI, nodetabletestorset_342682_1142335848)(Tnodetable292862* t0, Tnode292802* key0, NI val0);
N_NIMCALL(Ropeobj178006*, getstrlit_549468_839829468)(Tcgen529027* m0, NimStringDesc* s0);
N_NIMCALL(NimStringDesc*, tostrmaxprecision_298007_3471544153)(NF f0);
N_NIMCALL(Tnode292802*, copynode_296528_850551059)(Tnode292802* src0);
N_NIMCALL(void, linecg_532707_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, genarrayinfo_537005_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, gensetinfo_536867_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, genenuminfo_536599_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, genobjectinfo_536508_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0);
N_NIMCALL(void, genobjectfields_536104_839829468)(Tcgen529027* m0, Ttype292840* typ0, Tnode292802* n0, Ropeobj178006* expr0);
N_NIMCALL(Ropeobj178006*, discriminatortablename_536057_839829468)(Tcgen529027* m0, Ttype292840* objtype_536060_839829468, Tsym292834* d0);
N_NIMCALL(Tsym292834*, lookupinrecord_299119_2984716966)(Tnode292802* n0, Tident199010* field0);
N_NIMCALL(NI64, getordvalue_320129_3876443242)(Tnode292802* n0);
N_NIMCALL(void, gendeepcopyproc_538066_839829468)(Tcgen529027* m0, Tsym292834* s0, Ropeobj178006* result0);
N_NIMCALL(void, initlocalvar_538398_839829468)(Tcproc529021* p0, Tsym292834* v0, NIM_BOOL immediateasgn0);
N_NIMCALL(void, fillresult_533865_839829468)(Tsym292834* param0);
N_NIMCALL(void, assignparam_538994_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, closuresetup_560158_839829468)(Tcproc529021* p0, Tsym292834* prc0);
N_NIMCALL(Ropeobj178006*, initgcframe_538435_839829468)(Tcproc529021* p0);
N_NIMCALL(Ropeobj178006*, initframe_560140_839829468)(Tcproc529021* p0, Ropeobj178006* procname0, Ropeobj178006* filename0);
N_NIMCALL(Ropeobj178006*, quotedfilename_196818_155036129)(Tlineinfo191336 i0);
N_NIMCALL(void, appcg_532648_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, deinitgcframe_538441_839829468)(Tcproc529021* p0);
N_NIMCALL(Ropeobj178006*, deinitframe_560150_839829468)(Tcproc529021* p0);
N_NIMCALL(Tcgen529027*, findpendingmodule_532241_839829468)(Tcgen529027* m0, Tsym292834* s0);
N_NIMCALL(void, symindynamiclib_559929_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(NIM_BOOL, isgetprocaddr_559443_839829468)(Tlib292820* lib0);
N_NIMCALL(void, loaddynamiclib_559481_839829468)(Tcgen529027* m0, Tlib292820* lib0);
N_NIMCALL(void, libcandidates_170605_2607990831)(NimStringDesc* s0, TY135002** dest0);
N_NIMCALL(void, rawmessage_194612_155036129)(Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(void, initlocexpr_539283_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0);
N_NIMCALL(Ropeobj178006*, mangledynlibproc_538816_839829468)(Tsym292834* sym0);
N_NIMCALL(NimStringDesc*, HEX24_178856_2381377266)(Ropeobj178006* r0);
N_NIMCALL(void, symindynamiclibpartial_560071_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, genvarprototype_539236_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, genvarprototypeaux_544254_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, declarethreadvar_538676_839829468)(Tcgen529027* m0, Tsym292834* s0, NIM_BOOL isextern0);
static N_INLINE(NIM_BOOL, emulatedthreadvars_532949_839829468)(void);
static N_INLINE(NIM_BOOL, crossescppboundary_560754_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, putlocintodest_539258_839829468)(Tcproc529021* p0, Tloc292816* d0, Tloc292816* s0);
N_NIMCALL(void, genassignment_539264_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0);
N_NIMCALL(void, genrefassign_538311_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0);
static N_INLINE(NIM_BOOL, usesnativegc_169177_2607990831)(void);
N_NIMCALL(void, optasgnloc_549789_839829468)(Tloc292816* a0, Ttype292840* t0, Ropeobj178006* field0, Tloc292816* Result);
N_NIMCALL(void, genoptasgntuple_550001_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0);
N_NIMCALL(void, gengenericasgn_550167_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0);
N_NIMCALL(NI, asgncomplexity_549751_839829468)(Tnode292802* n0);
N_NIMCALL(void, genoptasgnobject_550084_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0, Tnode292802* t0);
N_NIMCALL(void, genericAssign)(void* dest0, void* src0, TNimType* mt0);
N_NIMCALL(void, localerror_196085_155036129)(Tlineinfo191336 info0, NimStringDesc* arg0);
N_NIMCALL(NIM_BOOL, issimpleconst_532311_839829468)(Ttype292840* typ0);
N_NIMCALL(void, putintodest_550468_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0, Tstorageloc292812 s0);
N_NIMCALL(void, gencomplexconst_558249_839829468)(Tcproc529021* p0, Tsym292834* sym0, Tloc292816* d0);
N_NIMCALL(void, requestconstimpl_539240_839829468)(Tcproc529021* p0, Tsym292834* sym0);
N_NIMCALL(Ropeobj178006*, genconstexpr_554849_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, tobitset_340001_452470228)(Tnode292802* s0, Tbitset339004** b0);
N_NIMCALL(Ropeobj178006*, genrawsetdata_549629_839829468)(Tbitset339004* cs0, NI size0);
N_NIMCALL(NimStringDesc*, nsuToHex)(NI64 x0, NI len0);
N_NIMCALL(NI64, bitsettoword_549578_839829468)(Tbitset339004* s0, NI size0);
N_NIMCALL(Ropeobj178006*, genconstseq_559371_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* t0);
N_NIMCALL(void, appcg_532640_839829468)(Tcgen529027* m0, Tcfilesection529005 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, genconstsimplelist_559299_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, gennamedconstexpr_559284_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, accessthreadlocalvar_532945_839829468)(Tcproc529021* p0, Tsym292834* s0);
static N_INLINE(Ropeobj178006**, procsec_529194_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0);
static N_INLINE(NIM_BOOL, isemptytype_297441_850551059)(Ttype292840* t0);
N_NIMCALL(void, putdataintodest_550436_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0);
N_NIMCALL(void, genlinedir_532823_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Ropeobj178006*, sourceline_192065_155036129)(Tlineinfo191336 i0);
N_NIMCALL(NIM_BOOL, freshlineinfo_532818_839829468)(Tcproc529021* p0, Tlineinfo191336 info0);
N_NIMCALL(void, genmagicexpr_557033_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, genandor_554311_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(Ropeobj178006*, getlabel_539217_839829468)(Tcproc529021* p0);
N_NIMCALL(void, fixlabel_539230_839829468)(Tcproc529021* p0, Ropeobj178006* labl0);
N_NIMCALL(void, unaryarith_552646_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, unaryarithoverflow_551633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(void, binaryfloatarith_556729_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(void, binaryarith_551819_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, geneqproc_552214_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, binaryarithoverflow_551262_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(Ropeobj178006*, binaryarithoverflowraw_551235_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* a0, Tloc292816* b0, NimStringDesc* frmt0);
N_NIMCALL(Ropeobj178006*, rdcharloc_538227_839829468)(Tloc292816* a0);
N_NIMCALL(NI64, lastord_320004_3876443242)(Ttype292840* t0);
N_NIMCALL(void, genrepr_555339_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, lenfield_539305_839829468)(Tcproc529021* p0);
N_NIMCALL(void, gcusage_554439_839829468)(Tnode292802* n0);
N_NIMCALL(void, message_196095_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(NimStringDesc*, rendertree_311044_382274130)(Tnode292802* n0, Trenderflag311004Set renderflags0);
N_NIMCALL(void, gengettypeinfo_555383_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genswap_555638_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, unaryexpr_551209_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, binarystmt_550501_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genstrconcat_554452_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genstrappend_554554_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genseqelemappend_554683_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genstrequals_556667_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, binaryexpr_550549_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genisnil_552620_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gendollar_555391_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genof_555331_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genof_555201_839829468)(Tcproc529021* p0, Tnode292802* x0, Ttype292840* typ0, Tloc292816* d0);
N_NIMCALL(void, globalerror_196071_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(Ropeobj178006*, genofhelper_555140_839829468)(Tcproc529021* p0, Ttype292840* dest0, Ropeobj178006* a0);
N_NIMCALL(void, gennew_554782_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, rawgennew_554741_839829468)(Tcproc529021* p0, Tloc292816* a0, Ropeobj178006* sizeexpr_554745_839829468);
N_NIMCALL(void, gennewfinalize_555111_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gennewseq_554824_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gennewseqaux_554795_839829468)(Tcproc529021* p0, Tloc292816* dest0, Ropeobj178006* length0);
N_NIMCALL(void, gennewseqofcap_554836_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensomecast_556481_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, getclosuretype_535685_839829468)(Tcgen529027* m0, Ttype292840* t0, Tclosuretypekind535681 kind0);
N_NIMCALL(void, genord_556475_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, unaryexprchar_551222_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genarraylen_555415_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, unarystmt_550527_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, gensetlengthstr_555632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensetlengthseq_555500_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensetop_556419_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, binarystmtinexcl_555858_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(Ropeobj178006*, rdsetelemloc_555662_839829468)(Tloc292816* a0, Ttype292840* settype0);
N_NIMCALL(void, binaryexprchar_550809_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, geninop_556009_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, fewcmps_555803_839829468)(Tnode292802* s0);
N_NIMCALL(void, geninexpraux_553496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0);
N_NIMCALL(void, binaryexprin_555837_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, gencall_543632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genclosurecall_540452_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, genarg_539787_839829468)(Tcproc529021* p0, Tnode292802* n_539790_839829468, Tsym292834* param0, Tnode292802* call0);
static N_INLINE(Ropeobj178006*, genargstringtocstring_539776_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, openarrayloc_539665_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tnode292802*, skipconv_328882_3876443242)(Tnode292802* n0);
N_NIMCALL(Tmagic292524, getmagic_318502_2616423590)(Tnode292802* op0);
N_NIMCALL(Ropeobj178006*, genargnoparam_539938_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, getrawproctype_540459_839829468)(Tcproc529021* p0, Ttype292840* t0);
N_NIMCALL(NIM_BOOL, leftappearsonrightside_539329_839829468)(Tnode292802* le0, Tnode292802* ri0);
N_NIMCALL(Tanalysisresult473003, ispartof_473340_788060399)(Tnode292802* a0, Tnode292802* b0);
static N_INLINE(NIM_BOOL, hasnoinit_539383_839829468)(Tnode292802* call0);
N_NIMCALL(void, resetloc_538350_839829468)(Tcproc529021* p0, Tloc292816* loc0);
N_NIMCALL(Ropeobj178006*, addcomma_540464_839829468)(Ropeobj178006* r0);
N_NIMCALL(void, geninfixcall_541929_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, contains_110056_4286263276)(NimStringDesc* s0, char136Set chars0);
N_NIMCALL(Ropeobj178006*, genpatterncall_541699_839829468)(Tcproc529021* p0, Tnode292802* ri_541702_839829468, NimStringDesc* pat0, Ttype292840* typ_541704_839829468);
N_NIMCALL(Ropeobj178006*, genotherarg_539277_839829468)(Tcproc529021* p0, Tnode292802* ri0, NI i0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, genthisarg_541475_839829468)(Tcproc529021* p0, Tnode292802* ri_541478_839829468, NI i0, Ttype292840* typ0);
N_NIMCALL(Tnode292802*, skipaddrderef_541433_839829468)(Tnode292802* node0);
N_NIMCALL(void, fixupcall_539410_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0, Ropeobj178006* callee0, Ropeobj178006* params0);
N_NIMCALL(void, gennamedparamcall_542616_839829468)(Tcproc529021* p0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, contains_110046_4286263276)(NimStringDesc* s0, NIM_CHAR c0);
N_NIMCALL(void, genprefixcall_539960_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
static N_INLINE(void, poststmtactions_532942_839829468)(Tcproc529021* p0);
N_NIMCALL(void, genreset_554731_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genecho_554369_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(NimStringDesc*, nsuRepeatStr)(NimStringDesc* s0, NI n0);
N_NIMCALL(void, genarrtoseq_555046_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, genseqconstr_555004_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, localerror_196080_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(Tnode292802*, wrapprocforspawn_435501_2218250499)(Tsym292834* owner0, Tnode292802* spawnexpr0, Ttype292840* rettype0, Tnode292802* barrier0, Tnode292802* dest0);
N_NIMCALL(Tnode292802*, liftparallel_478822_1773027539)(Tsym292834* owner0, Tnode292802* n0);
N_NIMCALL(void, gendeepcopy_550374_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0);
N_NIMCALL(NIM_BOOL, isdeepconstexpr_318566_2616423590)(Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, gensetnode_549664_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gensetconstr_557496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x0);
N_NIMCALL(void, exprcomplexconst_558684_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genarrayconstr_558207_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, handleconstexpr_554853_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, gentupleconstr_557618_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genobjconstr_554903_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Tsym292834*, lookupfieldagain_553154_839829468)(Tcproc529021* p0, Ttype292840* ty_553157_839829468, Tsym292834* field0, Ropeobj178006** r0);
N_NIMCALL(void, genfieldcheck_553504_839829468)(Tcproc529021* p0, Tnode292802* e0, Ropeobj178006* obj0, Tsym292834* field0, Ttype292840* origty0);
N_NIMCALL(Tnode292802*, newstrnode_293677_850551059)(Tnodekind292020 kind0, NimStringDesc* strval0);
N_NIMCALL(void, gencast_556538_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genconv_556633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, comparetypes_326214_3876443242)(Ttype292840* x0, Ttype292840* y0, Tdistinctcompare324427 cmp0, Ttypecmpflag324429Set flags0);
N_NIMCALL(void, genaddr_553051_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
static N_INLINE(NIM_BOOL, iscppref_552807_839829468)(Tcproc529021* p0, Ttype292840* typ0);
N_NIMCALL(void, genbracketexpr_554277_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genarrayelem_554093_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, isconstexpr_318510_2616423590)(Tnode292802* n0);
N_NIMCALL(void, genopenarrayelem_554169_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, genseqelem_554205_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, gencstringelem_554144_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, gentupleelem_553124_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genderef_543921_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NIM_BOOL enforcederef0);
N_NIMCALL(void, genrecordfield_553448_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ttype292840*, genrecordfieldaux_553096_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tloc292816* a0);
N_NIMCALL(void, gencheckedrecordfield_554046_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genblock_546083_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, startblock_543978_839829468)(Tcproc529021* p0, NimStringDesc* start0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, endblock_544060_839829468)(Tcproc529021* p0);
N_NIMCALL(void, endblock_544035_839829468)(Tcproc529021* p0, Ropeobj178006* blockend0);
N_NIMCALL(Ropeobj178006*, blockbody_544025_839829468)(Tblock529019* b0);
N_NIMCALL(void, genstmtlistexpr_558402_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genif_544982_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, downconv_558581_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, inheritancediff_326252_3876443242)(Ttype292840* a0, Ttype292840* b0);
N_NIMCALL(void, upconv_558431_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genrangechck_556591_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* magic0);
N_NIMCALL(void, convstrtocstr_556643_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, convcstrtostr_556655_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genclosure_557836_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
static N_INLINE(NIM_BOOL, isconstclosure_557810_839829468)(Tnode292802* n0);
static N_INLINE(NIM_BOOL, isroutine_297324_850551059)(Tsym292834* s0);
N_NIMCALL(void, genwhilestmt_545985_839829468)(Tcproc529021* p0, Tnode292802* t0);
static N_INLINE(Ropeobj178006*, assignlabel_544020_839829468)(Tblock529019* b0);
N_NIMCALL(NIM_BOOL, stmtscontainpragma_528083_2036603609)(Tnode292802* n0, Tspecialword275003 w0);
N_NIMCALL(void, gencomputedgoto_545744_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genvarstmt_544854_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gensinglevar_544276_839829468)(Tcproc529021* p0, Tnode292802* a0);
N_NIMCALL(void, gengotovar_544258_839829468)(Tcproc529021* p0, Tnode292802* value0);
N_NIMCALL(void, assignglobalvar_538819_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, varindynamiclib_538812_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, registergcroot_543762_839829468)(Tcproc529021* p0, Tsym292834* v0);
N_NIMCALL(Ropeobj178006*, gentraverseprocforglobal_538032_839829468)(Tcgen529027* m0, Tsym292834* s0);
static N_INLINE(NIM_BOOL, isassignedimmediately_543781_839829468)(Tnode292802* n0);
N_NIMCALL(NIM_BOOL, containshiddenpointer_320120_3876443242)(Ttype292840* typ0);
static N_INLINE(void, loadinto_543928_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* a0);
N_NIMCALL(void, genasgncall_543695_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(void, genclosurevar_544832_839829468)(Tcproc529021* p0, Tnode292802* a0);
N_NIMCALL(void, genvartuple_543794_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tnode292802*, lowertupleunpacking_433037_2218250499)(Tnode292802* n0, Tsym292834* owner0);
N_NIMCALL(void, genconststmt_544909_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(NIM_BOOL, containscompiletimeonly_328721_3876443242)(Ttype292840* t0);
static N_INLINE(NIM_BOOL, emitlazily_532248_839829468)(Tsym292834* s0);
N_NIMCALL(void, gencase_547827_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, genstringcase_547417_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(NI, nextpoweroftwo_101629_1009420244)(NI x0);
N_NIMCALL(void, gencasestringbranch_547100_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, Ropeobj178006* labl0, Ropeobj178006** branches0, NI branches0Len0);
N_NIMCALL(NI64, hashstring_528100_2036603609)(NimStringDesc* s0);
N_NIMCALL(Ropeobj178006*, gencasesecondpass_546965_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NI labid0, NI until0);
N_NIMCALL(void, exprblock_544103_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, gencasegeneric_547087_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0);
N_NIMCALL(Ropeobj178006*, genifforcaseuntil_547021_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc292816* a0);
N_NIMCALL(void, gencasegenericbranch_546910_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj178006* labl0);
N_NIMCALL(void, gengotoforcase_545673_839829468)(Tcproc529021* p0, Tnode292802* casestmt0);
N_NIMCALL(void, genordinalcase_547725_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, ifswitchsplitpoint_547616_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(NIM_BOOL, branchhastoobigrange_547576_839829468)(Tnode292802* b0);
N_NIMCALL(void, genreturnstmt_545617_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, blockleaveactions_545442_839829468)(Tcproc529021* p0, NI howmanytrys0, NI howmanyexcepts0);
static N_INLINE(Tnode292802*, pop_318246_1689653243)(Tnodeseq292796** s0);
N_NIMCALL(void, genbreakstmt_546444_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genasgn_549239_839829468)(Tcproc529021* p0, Tnode292802* e0, NIM_BOOL fastasgn0);
N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_549080_839829468)(Tcproc529021* p0, Tnode292802* asgn0);
N_NIMCALL(void, asgnfielddiscriminant_549209_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gendiscriminantcheck_549144_839829468)(Tcproc529021* p0, Tloc292816* a0, Tloc292816* tmp0, Ttype292840* objtype0, Tsym292834* field0);
N_NIMCALL(Ropeobj178006*, discriminatortabledecl_536094_839829468)(Tcgen529027* m0, Ttype292840* objtype0, Tsym292834* d0);
N_NIMCALL(void, genasmstmt_548659_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Ropeobj178006*, genasmoremitstmt_548529_839829468)(Tcproc529021* p0, Tnode292802* t0, NIM_BOOL isasmstmt0);
N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest0, NI addlen0);
N_NIMCALL(void, gentrycpp_547866_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
static N_INLINE(void, gensimpleblock_544095_839829468)(Tcproc529021* p0, Tnode292802* stmts0);
N_NIMCALL(void, gentry_548114_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, isdefined_200011_1967573533)(NimStringDesc* symbol0);
N_NIMCALL(void, line_532695_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* r0);
static N_INLINE(Ropeobj178006*, pop_178530_1689653243)(TY191350** s0);
N_NIMCALL(void, genraisestmt_546828_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(NimStringDesc*, getraisefrmt_546824_839829468)(Tcproc529021* p0);
N_NIMCALL(void, gentypesection_538184_839829468)(Tcgen529027* m0, Tnode292802* n0);
N_NIMCALL(void, genpragma_549039_839829468)(Tcproc529021* p_549041_839829468, Tnode292802* n0);
N_NIMCALL(Tspecialword275003, whichpragma_318911_2616423590)(Tnode292802* n0);
N_NIMCALL(void, genemit_548839_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Tcfilesection529005, determinesection_548819_839829468)(Tnode292802* n0);
N_NIMCALL(NIM_BOOL, nsuStartsWith)(NimStringDesc* s0, NimStringDesc* prefix0);
N_NIMCALL(void, genbreakpoint_548862_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genwatchpoint_549016_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tsym292834*, skipgenericowner_297280_850551059)(Tsym292834* s0);
N_NIMCALL(void, genparforstmt_546208_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genstate_544117_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gengotostate_544144_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genbreakstate_544229_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, registermoduletomain_562243_839829468)(Tsym292834* m0);
N_NIMCALL(Ropeobj178006*, getinitname_562235_839829468)(Tsym292834* m0);
N_NIMCALL(Ropeobj178006*, getsomeinitname_561904_839829468)(Tsym292834* m0, NimStringDesc* suffix0);
N_NIMCALL(Ropeobj178006*, getdatinitname_562239_839829468)(Tsym292834* m0);
N_NIMCALL(Tnode292802*, generatemethoddispatchers_432151_3853300031)(void);
N_NIMCALL(void, genmainproc_561729_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, genfilenames_561688_839829468)(Tcgen529027* m0);
N_NIMCALL(void, finishmodule_563420_839829468)(Tcgen529027* m0);
N_NIMCALL(void, updatecachedmodule_563813_839829468)(Tcgen529027* m0);
N_NIMCALL(NIM_BOOL, mergerequired_530832_2760143328)(Tcgen529027* m0);
N_NIMCALL(void, mergefiles_531241_2760143328)(NimStringDesc* cfilename0, Tcgen529027* m0);
N_NIMCALL(void, geninitcode_562286_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, gensectionstart_530081_2760143328)(Tcprocsection529011 ps0);
N_NIMCALL(Ropeobj178006*, gensectionend_530116_2760143328)(Tcprocsection529011 ps0);
N_NIMCALL(Ropeobj178006*, gensectionstart_530015_2760143328)(Tcfilesection529005 fs0);
N_NIMCALL(Ropeobj178006*, gensectionend_530050_2760143328)(Tcfilesection529005 fs0);
N_NIMCALL(void, finishtypedescriptions_535842_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, genmodule_562491_839829468)(Tcgen529027* m0, NimStringDesc* cfile0);
N_NIMCALL(Ropeobj178006*, getfileheader_561683_839829468)(NimStringDesc* cfile0);
N_NIMCALL(Ropeobj178006*, getcopyright_561665_839829468)(NimStringDesc* cfile0);
N_NIMCALL(NimStringDesc*, getcompilecfilecmd_274284_2528170400)(NimStringDesc* cfilename0, NIM_BOOL isexternal0);
static N_INLINE(void, addinttypes_561659_839829468)(Ropeobj178006** result0);
N_NIMCALL(Ropeobj178006*, genmergeinfo_530203_2760143328)(Tcgen529027* m0);
N_NIMCALL(void, generatethreadlocalstorage_538717_839829468)(Tcgen529027* m0);
N_NIMCALL(void, generateheaders_560104_839829468)(Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, nsuReplaceChar)(NimStringDesc* s0, NIM_CHAR sub0, NIM_CHAR by0);
N_NIMCALL(void, writerope_178836_2381377266)(Ropeobj178006* head0, NimStringDesc* filename0, NIM_BOOL usewarning0);
N_NIMCALL(void, addfiletocompile_273863_2528170400)(NimStringDesc* filename0);
N_NIMCALL(void, addfiletolink_273872_2528170400)(NimStringDesc* filename0);
N_NIMCALL(void, writemodule_563637_839829468)(Tcgen529027* m0, NIM_BOOL pending0);
N_NIMCALL(void, generatethreadvarssize_538771_839829468)(Tcgen529027* m0);
N_NIMCALL(NIM_BOOL, shouldrecompile_563621_839829468)(Ropeobj178006* code0, NimStringDesc* cfile0);
N_NIMCALL(NimStringDesc*, toobjfile_273859_2528170400)(NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, writeropeifnotequal_179511_2381377266)(Ropeobj178006* r0, NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, nosexistsFile)(NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, nosfileNewer)(NimStringDesc* a0, NimStringDesc* b0);
N_NIMCALL(void, writemapping_274789_2528170400)(Ropeobj178006* gsymbolmapping0);
N_NIMCALL(void, writeheader_563149_839829468)(Tcgen529027* m0);
N_NIMCALL(void, nossplitFile)(NimStringDesc* path0, TY128506* Result);
N_NIMCALL(void, resetmodule_562763_839829468)(Tcgen529027* m0);
N_NIMCALL(void, nullify_562833_839829468)(Ropeobj178006** arr0);
N_NIMCALL(void, nullify_562858_839829468)(Ropeobj178006** arr0);
STRING_LITERAL(T839829468_4, "\011", 1);
STRING_LITERAL(T839829468_10, "compiler/cgen.nim", 17);
NIM_CONST TY203018 T839829468_9 = {((NimStringDesc*) &T839829468_10),
((NI) 1158)}
;
STRING_LITERAL(T839829468_11, "T", 1);
STRING_LITERAL(T839829468_12, "_", 1);
STRING_LITERAL(T839829468_13, "added pending module twice: ", 28);
STRING_LITERAL(T839829468_14, ".h", 2);
STRING_LITERAL(T839829468_15, ".cpp", 4);
STRING_LITERAL(T839829468_16, ".m", 2);
STRING_LITERAL(T839829468_17, ".c", 2);
STRING_LITERAL(T839829468_18, "0", 1);
STRING_LITERAL(T839829468_19, "$", 1);
STRING_LITERAL(T839829468_20, "ropes: invalid format string $", 30);
STRING_LITERAL(T839829468_21, "$N#line $2 $1$N", 15);
STRING_LITERAL(T839829468_22, "N_LIB_IMPORT ", 13);
STRING_LITERAL(T839829468_23, "N_LIB_EXPORT ", 13);
STRING_LITERAL(T839829468_24, "static ", 7);
STRING_LITERAL(T839829468_25, "mapType", 7);
STRING_LITERAL(T839829468_26, "void", 4);
STRING_LITERAL(T839829468_27, "getTypeDescAux: t == nil", 24);
STRING_LITERAL(T839829468_28, "TY", 2);
STRING_LITERAL(T839829468_29, "getTypeName: ", 13);
STRING_LITERAL(T839829468_30, "void*", 5);
STRING_LITERAL(T839829468_31, "NimStringDesc", 13);
STRING_LITERAL(T839829468_32, "NimStringDesc*", 14);
STRING_LITERAL(T839829468_33, "NCSTRING", 8);
STRING_LITERAL(T839829468_34, "NIM_BOOL", 8);
STRING_LITERAL(T839829468_35, "NIM_CHAR", 8);
STRING_LITERAL(T839829468_36, "NI", 2);
STRING_LITERAL(T839829468_37, "NI8", 3);
STRING_LITERAL(T839829468_38, "NI16", 4);
STRING_LITERAL(T839829468_39, "NI32", 4);
STRING_LITERAL(T839829468_40, "NI64", 4);
STRING_LITERAL(T839829468_41, "NF", 2);
STRING_LITERAL(T839829468_42, "NF32", 4);
STRING_LITERAL(T839829468_43, "NF64", 4);
STRING_LITERAL(T839829468_44, "NF128", 5);
STRING_LITERAL(T839829468_45, "NU", 2);
STRING_LITERAL(T839829468_46, "NU8", 3);
STRING_LITERAL(T839829468_47, "NU16", 4);
STRING_LITERAL(T839829468_48, "NU32", 4);
STRING_LITERAL(T839829468_49, "NU64", 4);
NIM_CONST TY533943 Numericaltypetostr_533941_839829468 = {((NimStringDesc*) &T839829468_36),
((NimStringDesc*) &T839829468_37),
((NimStringDesc*) &T839829468_38),
((NimStringDesc*) &T839829468_39),
((NimStringDesc*) &T839829468_40),
((NimStringDesc*) &T839829468_41),
((NimStringDesc*) &T839829468_42),
((NimStringDesc*) &T839829468_43),
((NimStringDesc*) &T839829468_44),
((NimStringDesc*) &T839829468_45),
((NimStringDesc*) &T839829468_46),
((NimStringDesc*) &T839829468_47),
((NimStringDesc*) &T839829468_48),
((NimStringDesc*) &T839829468_49)}
;
STRING_LITERAL(T839829468_50, "tyStatic for getSimpleTypeDesc", 30);
STRING_LITERAL(T839829468_51, "cannot generate C type for: ", 28);
STRING_LITERAL(T839829468_52, "&", 1);
STRING_LITERAL(T839829468_53, "*", 1);
STRING_LITERAL(T839829468_54, "$1 $2;$n", 8);
STRING_LITERAL(T839829468_55, "typedef $1 $2 $2;$n", 19);
STRING_LITERAL(T839829468_56, "union", 5);
STRING_LITERAL(T839829468_57, "struct", 6);
STRING_LITERAL(T839829468_58, "getTypeForward(", 15);
STRING_LITERAL(T839829468_59, "typedef NI32 $1;$n", 18);
STRING_LITERAL(T839829468_60, "typedef NU8 $1;$n", 17);
STRING_LITERAL(T839829468_61, "typedef NU16 $1;$n", 18);
STRING_LITERAL(T839829468_62, "typedef NI64 $1;$n", 18);
STRING_LITERAL(T839829468_63, "getTypeDescAux: enum", 20);
STRING_LITERAL(T839829468_64, "typedef $1_PTR($2, $3) $4;$n", 28);
STRING_LITERAL(T839829468_65, "N_NIMCALL", 9);
STRING_LITERAL(T839829468_66, "N_STDCALL", 9);
STRING_LITERAL(T839829468_67, "N_CDECL", 7);
STRING_LITERAL(T839829468_68, "N_SAFECALL", 10);
STRING_LITERAL(T839829468_69, "N_SYSCALL", 9);
STRING_LITERAL(T839829468_70, "N_INLINE", 8);
STRING_LITERAL(T839829468_71, "N_NOINLINE", 10);
STRING_LITERAL(T839829468_72, "N_FASTCALL", 10);
STRING_LITERAL(T839829468_73, "N_CLOSURE", 9);
STRING_LITERAL(T839829468_74, "N_NOCONV", 8);
NIM_CONST TY292016 Callingconvtostr_533587_839829468 = {((NimStringDesc*) &T839829468_65),
((NimStringDesc*) &T839829468_66),
((NimStringDesc*) &T839829468_67),
((NimStringDesc*) &T839829468_68),
((NimStringDesc*) &T839829468_69),
((NimStringDesc*) &T839829468_70),
((NimStringDesc*) &T839829468_71),
((NimStringDesc*) &T839829468_72),
((NimStringDesc*) &T839829468_73),
((NimStringDesc*) &T839829468_74)}
;
STRING_LITERAL(T839829468_75, "typedef struct {$nN_NIMCALL_PTR($2, ClPrc) $3;$nvoid* ClEnv;$n}"
" $1;$n", 69);
STRING_LITERAL(T839829468_76, "struct $2 : #TGenericSeq {$n", 28);
STRING_LITERAL(T839829468_77, "struct $2 {$n #TGenericSeq Sup;$n", 34);
STRING_LITERAL(T839829468_78, " $1 data[SEQ_DECL_SIZE];$n};$n", 31);
STRING_LITERAL(T839829468_79, "TGenericSeq", 11);
STRING_LITERAL(T839829468_80, "typedef $1 $2[$3];$n", 20);
STRING_LITERAL(T839829468_81, "invalid apostrophe type parameter index", 39);
STRING_LITERAL(T839829468_82, "<", 1);
STRING_LITERAL(T839829468_83, " COMMA ", 7);
STRING_LITERAL(T839829468_84, "> ", 2);
extern NIM_CONST TY273427 Cc_273413_2528170400;
STRING_LITERAL(T839829468_85, " {$n", 4);
STRING_LITERAL(T839829468_86, " {$n#TNimType* m_type;$n", 24);
STRING_LITERAL(T839829468_87, " : public $1 {$n", 16);
STRING_LITERAL(T839829468_88, " {$n $1 Sup;$n", 15);
STRING_LITERAL(T839829468_89, "genRecordFieldsAux", 18);
STRING_LITERAL(T839829468_90, "$1.$2", 5);
STRING_LITERAL(T839829468_91, "S", 1);
STRING_LITERAL(T839829468_92, "struct {", 8);
STRING_LITERAL(T839829468_93, "} $1;$n", 7);
STRING_LITERAL(T839829468_94, "genRecordFieldsAux(record case branch)", 38);
STRING_LITERAL(T839829468_95, "union{$n$1} $2;$n", 17);
STRING_LITERAL(T839829468_96, "mangleRecFieldName", 18);
STRING_LITERAL(T839829468_97, "$1 $2[SEQ_DECL_SIZE];$n", 23);
STRING_LITERAL(T839829468_98, "$1 $2:$3;$n", 11);
STRING_LITERAL(T839829468_99, "genRecordFieldsAux()", 20);
STRING_LITERAL(T839829468_100, "char dummy;$n", 13);
STRING_LITERAL(T839829468_101, "};", 2);
STRING_LITERAL(T839829468_102, "$1 $2 {$n", 9);
STRING_LITERAL(T839829468_103, "$1 Field$2;$n", 13);
STRING_LITERAL(T839829468_104, "char dummy;", 11);
STRING_LITERAL(T839829468_105, "Set", 3);
STRING_LITERAL(T839829468_106, "typedef NU$2 $1;$n", 18);
STRING_LITERAL(T839829468_107, "typedef NU8 $1[$2];$n", 21);
STRING_LITERAL(T839829468_108, "getTypeDescAux(", 15);
STRING_LITERAL(T839829468_109, "genProcParams", 13);
STRING_LITERAL(T839829468_110, ", ", 2);
STRING_LITERAL(T839829468_111, " ", 1);
STRING_LITERAL(T839829468_112, ", NI $1Len$2", 12);
STRING_LITERAL(T839829468_113, " Result", 7);
STRING_LITERAL(T839829468_114, "void* ClEnv", 11);
STRING_LITERAL(T839829468_115, "...", 3);
STRING_LITERAL(T839829468_116, "void)", 5);
STRING_LITERAL(T839829468_117, ")", 1);
STRING_LITERAL(T839829468_118, "(", 1);
STRING_LITERAL(T839829468_119, "$1($2, $3)$4", 12);
STRING_LITERAL(T839829468_120, "proc has no result symbol", 25);
STRING_LITERAL(T839829468_121, " register", 9);
STRING_LITERAL(T839829468_122, " volatile", 9);
STRING_LITERAL(T839829468_123, "$1 = $2;$n", 10);
STRING_LITERAL(T839829468_124, "(*$1)", 5);
STRING_LITERAL(T839829468_125, ";", 1);
STRING_LITERAL(T839829468_126, "FR.s[$1].address = (void*)$3; FR.s[$1].typ = $4; FR.s[$1].name "
"= $2;$n", 70);
STRING_LITERAL(T839829468_127, "NTI$1", 5);
STRING_LITERAL(T839829468_128, "(&", 2);
STRING_LITERAL(T839829468_129, "TNimType", 8);
STRING_LITERAL(T839829468_130, "TNimNode", 8);
STRING_LITERAL(T839829468_131, "extern TNimType $1; /* $2 */$n", 30);
STRING_LITERAL(T839829468_132, "0", 1);
STRING_LITERAL(T839829468_133, "void*", 5);
STRING_LITERAL(T839829468_134, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53);
STRING_LITERAL(T839829468_135, "$1.flags = $2;$n", 16);
STRING_LITERAL(T839829468_136, "TNimType $1; /* $2 */$n", 23);
STRING_LITERAL(T839829468_137, "genTypeInfo(", 12);
STRING_LITERAL(T839829468_138, "$1[$2]", 6);
STRING_LITERAL(T839829468_139, "static TNimNode* $1[$2];$n", 26);
STRING_LITERAL(T839829468_140, "$1[$2] = &$3;$n", 15);
STRING_LITERAL(T839829468_141, "$1.kind = 1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$"
"n$1.name = \"Field$3\";$n", 86);
STRING_LITERAL(T839829468_142, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45);
STRING_LITERAL(T839829468_143, "$1.len = $2; $1.kind = 2;$n", 27);
STRING_LITERAL(T839829468_144, "$1.node = &$2;$n", 16);
STRING_LITERAL(T839829468_145, "#nimGCvisit((void*)$1, op);$n", 29);
STRING_LITERAL(T839829468_146, "N_NIMCALL(void, $1)(void* p, NI op)", 35);
STRING_LITERAL(T839829468_147, "$1 a;$n", 7);
STRING_LITERAL(T839829468_148, "a = ($1)p;$n", 12);
STRING_LITERAL(T839829468_149, "LOC", 3);
STRING_LITERAL(T839829468_150, "$1 = ($2)0;$n", 13);
STRING_LITERAL(T839829468_151, "<string.h>", 10);
STRING_LITERAL(T839829468_152, "memset((void*)$1, 0, sizeof($2));$n", 35);
STRING_LITERAL(T839829468_153, ".Sup", 4);
STRING_LITERAL(T839829468_154, "$1.m_type = $2;$n", 17);
STRING_LITERAL(T839829468_155, "#objectInit($1, $2);$n", 22);
STRING_LITERAL(T839829468_156, "for ($1 = 0; $1 < $2->$3; $1++) {$n", 35);
STRING_LITERAL(T839829468_157, "len", 3);
STRING_LITERAL(T839829468_158, "Sup.len", 7);
STRING_LITERAL(T839829468_159, "for ($1 = 0; $1 < $2; $1++) {$n", 31);
STRING_LITERAL(T839829468_160, "}$n", 3);
STRING_LITERAL(T839829468_161, "$1.Sup", 6);
STRING_LITERAL(T839829468_162, "genTraverseProc", 15);
STRING_LITERAL(T839829468_163, "switch ($1.$2) {$n", 18);
STRING_LITERAL(T839829468_164, "case $1 ... $2:$n", 17);
STRING_LITERAL(T839829468_165, "genLiteral: ty is nil", 21);
STRING_LITERAL(T839829468_166, "(-2147483647 -1)", 16);
STRING_LITERAL(T839829468_167, "IL64($1)", 8);
STRING_LITERAL(T839829468_168, "(IL64(-9223372036854775807) - IL64(1))", 38);
STRING_LITERAL(T839829468_169, "NIM_TRUE", 8);
STRING_LITERAL(T839829468_170, "NIM_FALSE", 9);
STRING_LITERAL(T839829468_171, "ULL", 3);
STRING_LITERAL(T839829468_172, "(($1) $2)", 9);
STRING_LITERAL(T839829468_173, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45);
STRING_LITERAL(T839829468_174, "NIM_NIL", 7);
STRING_LITERAL(T839829468_175, "((#NimStringDesc*) NIM_NIL)", 27);
STRING_LITERAL(T839829468_176, "((#NimStringDesc*) &$1)", 23);
STRING_LITERAL(T839829468_177, "STRING_LITERAL($1, $2, $3);$n", 29);
STRING_LITERAL(T839829468_178, "((#NimStringDesc*) &$1$2)", 25);
STRING_LITERAL(T839829468_179, "genLiteral(", 11);
STRING_LITERAL(T839829468_180, "case $1:$n", 10);
STRING_LITERAL(T839829468_181, "default:$n", 10);
STRING_LITERAL(T839829468_182, "break;$n", 8);
STRING_LITERAL(T839829468_183, "} $n", 4);
STRING_LITERAL(T839829468_184, "genTraverseProc()", 17);
STRING_LITERAL(T839829468_185, "$1.Field$2", 10);
STRING_LITERAL(T839829468_186, "$1.ClEnv", 8);
STRING_LITERAL(T839829468_187, "$1->data[$2]", 12);
STRING_LITERAL(T839829468_188, "a", 1);
STRING_LITERAL(T839829468_189, "(*a)", 4);
STRING_LITERAL(T839829468_190, "$1 {$n$2$3$4}$n", 15);
STRING_LITERAL(T839829468_191, "$1;$n", 5);
STRING_LITERAL(T839829468_192, "$1.marker = $2;$n", 17);
STRING_LITERAL(T839829468_193, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43);
STRING_LITERAL(T839829468_194, "$1.offset = $2;$n", 17);
STRING_LITERAL(T839829468_195, "NI $1;$n", 8);
STRING_LITERAL(T839829468_196, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41);
STRING_LITERAL(T839829468_197, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o"
"ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127);
STRING_LITERAL(T839829468_198, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61);
STRING_LITERAL(T839829468_199, "$1.flags = 1<<2;$n", 18);
STRING_LITERAL(T839829468_200, "anonymous obj with discriminator", 32);
STRING_LITERAL(T839829468_201, "NimDT_$1_$2", 11);
STRING_LITERAL(T839829468_202, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107);
STRING_LITERAL(T839829468_203, "TNimNode* $1[$2];$n", 19);
STRING_LITERAL(T839829468_204, "genObjectFields; nkOfBranch broken", 34);
STRING_LITERAL(T839829468_205, "genObjectFields(nkRecCase)", 26);
STRING_LITERAL(T839829468_206, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n", 74);
STRING_LITERAL(T839829468_207, "genObjectFields", 15);
STRING_LITERAL(T839829468_208, "$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49);
STRING_LITERAL(T839829468_209, "\011return $1;$n", 13);
STRING_LITERAL(T839829468_210, "Result", 6);
STRING_LITERAL(T839829468_211, "closure generation failed", 25);
STRING_LITERAL(T839829468_212, "$1 = ($2) ClEnv;$n", 18);
STRING_LITERAL(T839829468_213, "__declspec(noreturn) ", 21);
STRING_LITERAL(T839829468_214, "__declspec(naked) ", 18);
STRING_LITERAL(T839829468_215, "$N$1 {$n$2$3$4}$N$N", 19);
STRING_LITERAL(T839829468_216, "$N$1 {$N", 8);
STRING_LITERAL(T839829468_217, "struct {$1} GCFRAME;$n", 22);
STRING_LITERAL(T839829468_218, "nimFrame", 8);
STRING_LITERAL(T839829468_219, "VarSlot", 7);
STRING_LITERAL(T839829468_220, "\011nimfrs($1, $2, $3, $4)$N", 25);
STRING_LITERAL(T839829468_221, "\011nimfr($1, $2)$N", 16);
STRING_LITERAL(T839829468_222, "\011#nimProfile();$n", 17);
STRING_LITERAL(T839829468_223, "{", 1);
STRING_LITERAL(T839829468_224, "\011}BeforeRet: ;$n", 16);
STRING_LITERAL(T839829468_225, "if (((NU)&GCFRAME) < 4096) #nimGCFrame(&GCFRAME);$n", 51);
STRING_LITERAL(T839829468_226, "\011#popFrame();$n", 15);
STRING_LITERAL(T839829468_227, "}$N", 3);
STRING_LITERAL(T839829468_228, "static void* $1;$n", 18);
STRING_LITERAL(T839829468_229, "||", 2);
STRING_LITERAL(T839829468_230, "($1 = #nimLoadLibrary((#NimStringDesc*) &$2))$n", 47);
STRING_LITERAL(T839829468_231, "if (!($1)) #nimLoadLibraryError((#NimStringDesc*) &$2);$n", 57);
STRING_LITERAL(T839829468_232, "if (!($1 = #nimLoadLibrary($2))) #nimLoadLibraryError($2);$n", 60);
STRING_LITERAL(T839829468_233, "loadDynamicLib", 14);
STRING_LITERAL(T839829468_234, "Dl_$1", 5);
STRING_LITERAL(T839829468_235, "\011$1 = ($2) ($3$4));$n", 21);
NIM_CONST TY203018 T839829468_236 = {((NimStringDesc*) &T839829468_10),
((NI) 535)}
;
STRING_LITERAL(T839829468_237, "wrong index: ", 13);
STRING_LITERAL(T839829468_238, "\011$1 = ($2) #nimGetProcAddr($3, $4);$n", 37);
STRING_LITERAL(T839829468_239, "$2 $1;$n", 8);
STRING_LITERAL(T839829468_240, "extern ", 7);
STRING_LITERAL(T839829468_241, "NIM_THREADVAR ", 14);
STRING_LITERAL(T839829468_242, " $1;$n", 6);
STRING_LITERAL(T839829468_243, "cgsym: ", 7);
STRING_LITERAL(T839829468_244, ": ", 2);
STRING_LITERAL(T839829468_245, "extern $1 $2;$n", 15);
STRING_LITERAL(T839829468_246, "extern \"C\" ", 11);
STRING_LITERAL(T839829468_247, " __attribute__((naked))", 23);
STRING_LITERAL(T839829468_248, " __attribute__((noreturn))", 26);
STRING_LITERAL(T839829468_249, "#asgnRef((void**) $1, $2);$n", 28);
STRING_LITERAL(T839829468_250, "#asgnRefNoCycle((void**) $1, $2);$n", 35);
STRING_LITERAL(T839829468_251, "#unsureAsgnRef((void**) $1, $2);$n", 34);
STRING_LITERAL(T839829468_252, "#genericSeqAssign($1, $2, $3);$n", 32);
STRING_LITERAL(T839829468_253, "$1 = #copyString($2);$n", 23);
STRING_LITERAL(T839829468_254, "$3 = $1; $1 = #copyStringRC1($2);$n", 35);
STRING_LITERAL(T839829468_255, "if ($1) #nimGCunrefNoCycle($1);$n", 33);
STRING_LITERAL(T839829468_256, "#unsureAsgnRef((void**) $1, #copyString($2));$n", 47);
STRING_LITERAL(T839829468_257, ".", 1);
STRING_LITERAL(T839829468_258, "ClEnv", 5);
STRING_LITERAL(T839829468_259, "$1.ClPrc = $2.ClPrc;$n", 22);
STRING_LITERAL(T839829468_260, "Field$1", 7);
STRING_LITERAL(T839829468_261, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($3));$n", 53);
STRING_LITERAL(T839829468_262, "#genericShallowAssign((void*)$1, (void*)$2, $3);$n", 50);
STRING_LITERAL(T839829468_263, "#genericAssign((void*)$1, (void*)$2, $3);$n", 43);
STRING_LITERAL(T839829468_265, "compiler/ccgexprs.nim", 21);
NIM_CONST TY203018 T839829468_264 = {((NimStringDesc*) &T839829468_265),
((NI) 320)}
;
STRING_LITERAL(T839829468_266, "#genericAssignOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 60);
STRING_LITERAL(T839829468_267, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($1[0])*$1Len0);$n", 63);
STRING_LITERAL(T839829468_268, "memcpy((void*)$1, (NIM_CONST void*)$2, $3);$n", 45);
STRING_LITERAL(T839829468_269, "genAssignment: ", 15);
STRING_LITERAL(T839829468_270, "request to generate code for .compileTime proc: ", 48);
STRING_LITERAL(T839829468_271, "expr: proc not init ", 20);
STRING_LITERAL(T839829468_272, "NIM_CONST $1 $2 = $3;$n", 23);
STRING_LITERAL(T839829468_273, "{$n", 3);
STRING_LITERAL(T839829468_274, "0x$1,$n", 7);
STRING_LITERAL(T839829468_275, "0x$1, ", 6);
STRING_LITERAL(T839829468_276, "0x$1}$n", 7);
STRING_LITERAL(T839829468_277, "{{$1, $1}", 9);
STRING_LITERAL(T839829468_278, ", {", 3);
STRING_LITERAL(T839829468_279, ",$n", 3);
STRING_LITERAL(T839829468_280, "}", 1);
STRING_LITERAL(T839829468_281, "NIM_CONST struct {$n #TGenericSeq Sup;$n $1 data[$2];$n} $3 ="
" $4;$n", 69);
STRING_LITERAL(T839829468_282, "(($1)&$2)", 9);
STRING_LITERAL(T839829468_283, "$1,$n", 5);
STRING_LITERAL(T839829468_284, "extern NIM_CONST $1 $2;$n", 25);
STRING_LITERAL(T839829468_285, "expr: var not init ", 19);
STRING_LITERAL(T839829468_286, "\011NimThreadVars* NimTV;$n", 24);
STRING_LITERAL(T839829468_287, "\011NimTV = (NimThreadVars*) #GetThreadLocalVars();$n", 50);
STRING_LITERAL(T839829468_288, "NimTV->", 7);
STRING_LITERAL(T839829468_289, "expr: temp not init ", 20);
STRING_LITERAL(T839829468_290, "expr: param not init ", 21);
STRING_LITERAL(T839829468_291, "expr(", 5);
STRING_LITERAL(T839829468_292, "); unknown symbol", 17);
STRING_LITERAL(T839829468_293, "//", 2);
STRING_LITERAL(T839829468_294, "#endb($1, $2);$n", 16);
STRING_LITERAL(T839829468_295, "nimln($1, $2);$n", 16);
STRING_LITERAL(T839829468_296, "LA", 2);
STRING_LITERAL(T839829468_297, "if ($1) goto $2;$n", 18);
STRING_LITERAL(T839829468_298, "if (!($1)) goto $2;$n", 21);
STRING_LITERAL(T839829468_299, "$1: ;$n", 7);
STRING_LITERAL(T839829468_300, "!($1)", 5);
STRING_LITERAL(T839829468_301, "$1", 2);
STRING_LITERAL(T839829468_302, "($3)((NU$2) ~($1))", 18);
STRING_LITERAL(T839829468_303, "-($1)", 5);
STRING_LITERAL(T839829468_304, "($1 > 0? ($1) : -($1))", 22);
STRING_LITERAL(T839829468_305, "(($3)(NU)(NU8)($1))", 19);
STRING_LITERAL(T839829468_306, "(($3)(NU64)(NU8)($1))", 21);
STRING_LITERAL(T839829468_307, "(($3)(NU)(NU16)($1))", 20);
STRING_LITERAL(T839829468_308, "(($3)(NU64)(NU16)($1))", 22);
STRING_LITERAL(T839829468_309, "(($3)(NU64)(NU32)($1))", 22);
STRING_LITERAL(T839829468_310, "(($3)(NU64)(NU)($1))", 20);
STRING_LITERAL(T839829468_311, "(($3)(NU8)(NU)($1))", 19);
STRING_LITERAL(T839829468_312, "(($3)(NU16)(NU)($1))", 20);
STRING_LITERAL(T839829468_313, "(($3)(NU32)(NU64)($1))", 22);
STRING_LITERAL(T839829468_314, "((double) ($1))", 15);
STRING_LITERAL(T839829468_315, "float64ToInt32($1)", 18);
STRING_LITERAL(T839829468_316, "float64ToInt64($1)", 18);
NIM_CONST TY552655 unarithtab_552653_839829468 = {((NimStringDesc*) &T839829468_300),
((NimStringDesc*) &T839829468_301),
((NimStringDesc*) &T839829468_302),
((NimStringDesc*) &T839829468_301),
((NimStringDesc*) &T839829468_303),
((NimStringDesc*) &T839829468_304),
((NimStringDesc*) &T839829468_305),
((NimStringDesc*) &T839829468_306),
((NimStringDesc*) &T839829468_307),
((NimStringDesc*) &T839829468_308),
((NimStringDesc*) &T839829468_309),
((NimStringDesc*) &T839829468_310),
((NimStringDesc*) &T839829468_311),
((NimStringDesc*) &T839829468_312),
((NimStringDesc*) &T839829468_313),
((NimStringDesc*) &T839829468_314),
((NimStringDesc*) &T839829468_314),
((NimStringDesc*) &T839829468_315),
((NimStringDesc*) &T839829468_316)}
;
STRING_LITERAL(T839829468_317, "if ($1 == $2) #raiseOverflow();$n", 33);
STRING_LITERAL(T839829468_318, "((NI$2)-($1))", 13);
NIM_CONST TY551642 opr_551640_839829468 = {((NimStringDesc*) &T839829468_318),
((NimStringDesc*) &T839829468_303),
((NimStringDesc*) &T839829468_304)}
;
STRING_LITERAL(T839829468_319, "(($4)($2) $1 ($4)($3))", 22);
STRING_LITERAL(T839829468_320, "+", 1);
STRING_LITERAL(T839829468_321, "-", 1);
STRING_LITERAL(T839829468_322, "/", 1);
NIM_CONST TY556765 opr_556763_839829468 = {((NimStringDesc*) &T839829468_320),
((NimStringDesc*) &T839829468_321),
((NimStringDesc*) &T839829468_53),
((NimStringDesc*) &T839829468_322)}
;
STRING_LITERAL(T839829468_323, "#nanCheck($1);$n", 16);
STRING_LITERAL(T839829468_324, "#infCheck($1);$n", 16);
STRING_LITERAL(T839829468_325, "(($4)($1) + ($4)($2))", 21);
STRING_LITERAL(T839829468_326, "(($4)($1) - ($4)($2))", 21);
STRING_LITERAL(T839829468_327, "(($4)($1) * ($4)($2))", 21);
STRING_LITERAL(T839829468_328, "(($4)($1) / ($4)($2))", 21);
STRING_LITERAL(T839829468_329, "($4)((NU$3)($1) >> (NU$3)($2))", 30);
STRING_LITERAL(T839829468_330, "($4)((NU$3)($1) << (NU$3)($2))", 30);
STRING_LITERAL(T839829468_331, "($4)($1 & $2)", 13);
STRING_LITERAL(T839829468_332, "($4)($1 | $2)", 13);
STRING_LITERAL(T839829468_333, "($4)($1 ^ $2)", 13);
STRING_LITERAL(T839829468_334, "(($1 <= $2) ? $1 : $2)", 22);
STRING_LITERAL(T839829468_335, "(($1 >= $2) ? $1 : $2)", 22);
STRING_LITERAL(T839829468_336, "($4)((NU$3)($1) + (NU$3)($2))", 29);
STRING_LITERAL(T839829468_337, "($4)((NU$3)($1) - (NU$3)($2))", 29);
STRING_LITERAL(T839829468_338, "($4)((NU$3)($1) * (NU$3)($2))", 29);
STRING_LITERAL(T839829468_339, "($4)((NU$3)($1) / (NU$3)($2))", 29);
STRING_LITERAL(T839829468_340, "($4)((NU$3)($1) % (NU$3)($2))", 29);
STRING_LITERAL(T839829468_341, "($1 == $2)", 10);
STRING_LITERAL(T839829468_342, "($1 <= $2)", 10);
STRING_LITERAL(T839829468_343, "($1 < $2)", 9);
STRING_LITERAL(T839829468_344, "((NU$3)($1) <= (NU$3)($2))", 26);
STRING_LITERAL(T839829468_345, "((NU$3)($1) < (NU$3)($2))", 25);
STRING_LITERAL(T839829468_346, "((NU64)($1) <= (NU64)($2))", 26);
STRING_LITERAL(T839829468_347, "((NU64)($1) < (NU64)($2))", 25);
STRING_LITERAL(T839829468_348, "((NU8)($1) == (NU8)($2))", 24);
STRING_LITERAL(T839829468_349, "((NU8)($1) <= (NU8)($2))", 24);
STRING_LITERAL(T839829468_350, "((NU8)($1) < (NU8)($2))", 23);
STRING_LITERAL(T839829468_351, "($1 != $2)", 10);
NIM_CONST TY551828 binarithtab_551826_839829468 = {((NimStringDesc*) &T839829468_325),
((NimStringDesc*) &T839829468_326),
((NimStringDesc*) &T839829468_327),
((NimStringDesc*) &T839829468_328),
((NimStringDesc*) &T839829468_329),
((NimStringDesc*) &T839829468_330),
((NimStringDesc*) &T839829468_331),
((NimStringDesc*) &T839829468_332),
((NimStringDesc*) &T839829468_333),
((NimStringDesc*) &T839829468_334),
((NimStringDesc*) &T839829468_335),
((NimStringDesc*) &T839829468_334),
((NimStringDesc*) &T839829468_335),
((NimStringDesc*) &T839829468_336),
((NimStringDesc*) &T839829468_337),
((NimStringDesc*) &T839829468_338),
((NimStringDesc*) &T839829468_339),
((NimStringDesc*) &T839829468_340),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_344),
((NimStringDesc*) &T839829468_345),
((NimStringDesc*) &T839829468_346),
((NimStringDesc*) &T839829468_347),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_348),
((NimStringDesc*) &T839829468_349),
((NimStringDesc*) &T839829468_350),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_351)}
;
STRING_LITERAL(T839829468_352, "($1.ClPrc == $2.ClPrc && $1.ClEnv == $2.ClEnv)", 46);
STRING_LITERAL(T839829468_353, "($#)($# + $#)", 13);
STRING_LITERAL(T839829468_354, "($#)($# - $#)", 13);
STRING_LITERAL(T839829468_355, "($#)($# * $#)", 13);
STRING_LITERAL(T839829468_356, "($#)($# / $#)", 13);
STRING_LITERAL(T839829468_357, "($#)($# % $#)", 13);
NIM_CONST TY551281 opr_551279_839829468 = {((NimStringDesc*) &T839829468_353),
((NimStringDesc*) &T839829468_354),
((NimStringDesc*) &T839829468_355),
((NimStringDesc*) &T839829468_356),
((NimStringDesc*) &T839829468_357),
((NimStringDesc*) &T839829468_353),
((NimStringDesc*) &T839829468_354)}
;
STRING_LITERAL(T839829468_358, "((NU8)($1))", 11);
STRING_LITERAL(T839829468_359, "if ($1 < $2 || $1 > $3) #raiseOverflow();$n", 43);
STRING_LITERAL(T839829468_360, "$# = #addInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_361, "$# = #subInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_362, "$# = #mulInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_363, "$# = #divInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_364, "$# = #modInt64($#, $#);$n", 25);
NIM_CONST TY551281 prc64_551274_839829468 = {((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361),
((NimStringDesc*) &T839829468_362),
((NimStringDesc*) &T839829468_363),
((NimStringDesc*) &T839829468_364),
((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361)}
;
STRING_LITERAL(T839829468_365, "$# = #addInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_366, "$# = #subInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_367, "$# = #mulInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_368, "$# = #divInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_369, "$# = #modInt($#, $#);$n", 23);
NIM_CONST TY551281 prc_551269_839829468 = {((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366),
((NimStringDesc*) &T839829468_367),
((NimStringDesc*) &T839829468_368),
((NimStringDesc*) &T839829468_369),
((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366)}
;
STRING_LITERAL(T839829468_370, "($#)($#)", 8);
STRING_LITERAL(T839829468_371, "#reprInt((NI64)$1)", 18);
STRING_LITERAL(T839829468_372, "#reprFloat($1)", 14);
STRING_LITERAL(T839829468_373, "#reprBool($1)", 13);
STRING_LITERAL(T839829468_374, "#reprChar($1)", 13);
STRING_LITERAL(T839829468_375, "#reprEnum((NI)$1, $2)", 21);
STRING_LITERAL(T839829468_376, "#reprStr($1)", 12);
STRING_LITERAL(T839829468_377, "#reprSet($1, $2)", 16);
STRING_LITERAL(T839829468_378, "$1, $1Len0", 10);
STRING_LITERAL(T839829468_379, "$1->data, $1->$2", 16);
STRING_LITERAL(T839829468_380, "$1, $2", 6);
STRING_LITERAL(T839829468_381, "genRepr()", 9);
STRING_LITERAL(T839829468_382, "#reprOpenArray($1, $2)", 22);
STRING_LITERAL(T839829468_383, "#reprAny($1, $2)", 16);
STRING_LITERAL(T839829468_384, "\'repr\' doesn\'t support \'void\' type", 34);
STRING_LITERAL(T839829468_385, "($1 - 1)", 8);
STRING_LITERAL(T839829468_386, "#subInt($1, 1)", 14);
STRING_LITERAL(T839829468_387, "binaryStmt", 10);
STRING_LITERAL(T839829468_388, "$1 += $2;$n", 11);
STRING_LITERAL(T839829468_389, "$1 -= $2;$n", 11);
NIM_CONST TY557052 opr_557050_839829468 = {((NimStringDesc*) &T839829468_388),
((NimStringDesc*) &T839829468_389)}
;
NIM_CONST TY557052 fun64_557055_839829468 = {((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361)}
;
NIM_CONST TY557052 fun_557060_839829468 = {((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366)}
;
STRING_LITERAL(T839829468_390, "#appendChar($1, $2);$n", 22);
STRING_LITERAL(T839829468_391, "$1->$2 + ", 9);
STRING_LITERAL(T839829468_392, "#appendString($1, $2);$n", 24);
STRING_LITERAL(T839829468_393, "$1 = #rawNewString($2$3);$n", 27);
STRING_LITERAL(T839829468_394, "$1 = #addChar($1, $2);$n", 24);
STRING_LITERAL(T839829468_395, "$1 = #resizeString($1, $2$3);$n", 31);
STRING_LITERAL(T839829468_396, "$1 = ($2) #incrSeqV2(&($1)->Sup, sizeof($3));$n", 47);
STRING_LITERAL(T839829468_397, "$1 = ($2) #incrSeqV2($1, sizeof($3));$n", 39);
STRING_LITERAL(T839829468_398, "$1->data[$1->$2]", 16);
STRING_LITERAL(T839829468_399, "++$1->$2;$n", 11);
STRING_LITERAL(T839829468_400, "(($1) && ($1)->$2 == 0)", 23);
STRING_LITERAL(T839829468_401, "#eqStrings($1, $2)", 18);
STRING_LITERAL(T839829468_402, "(#cmpStrings($1, $2) <= 0)", 26);
STRING_LITERAL(T839829468_403, "(#cmpStrings($1, $2) < 0)", 25);
STRING_LITERAL(T839829468_404, "$1.ClPrc == 0", 13);
STRING_LITERAL(T839829468_405, "$1 == 0", 7);
STRING_LITERAL(T839829468_406, "#nimIntToStr($1)", 16);
STRING_LITERAL(T839829468_407, "#nimInt64ToStr($1)", 18);
STRING_LITERAL(T839829468_408, "#nimBoolToStr($1)", 17);
STRING_LITERAL(T839829468_409, "#nimCharToStr($1)", 17);
STRING_LITERAL(T839829468_410, "#nimFloatToStr($1)", 18);
STRING_LITERAL(T839829468_411, "#cstrToNimstr($1)", 17);
STRING_LITERAL(T839829468_412, "no \'of\' operator available for pure objects", 43);
STRING_LITERAL(T839829468_413, "(($1) && ($2))", 14);
STRING_LITERAL(T839829468_414, "$1.m_type == $2", 15);
STRING_LITERAL(T839829468_415, "Nim_OfCheck_CACHE", 17);
STRING_LITERAL(T839829468_416, "static TNimType* $#[2];$n", 25);
STRING_LITERAL(T839829468_417, "#isObjWithCache($#.m_type, $#, $#)", 34);
STRING_LITERAL(T839829468_418, "($1)", 4);
STRING_LITERAL(T839829468_419, "sizeof($1)", 10);
STRING_LITERAL(T839829468_420, "if ($1) #nimGCunref($1);$n", 26);
STRING_LITERAL(T839829468_421, "($1) #newObjRC1($2, $3)", 23);
STRING_LITERAL(T839829468_422, "($1) #newObj($2, $3)", 20);
STRING_LITERAL(T839829468_423, "$1->finalizer = (void*)$2;$n", 28);
STRING_LITERAL(T839829468_424, "($1) #newObj($2, sizeof($3))", 28);
STRING_LITERAL(T839829468_425, "($1) #newSeqRC1($2, $3)", 23);
STRING_LITERAL(T839829468_426, "($1) #newSeq($2, $3)", 20);
STRING_LITERAL(T839829468_427, "($1)#nimNewSeqOfCap($2, $3)", 27);
STRING_LITERAL(T839829468_428, "((NI)sizeof($1))", 16);
STRING_LITERAL(T839829468_429, "(*($1*) ($2))", 13);
STRING_LITERAL(T839829468_430, "(($1) ($2))", 11);
STRING_LITERAL(T839829468_431, "($1Len0-1)", 10);
STRING_LITERAL(T839829468_432, "$1Len0", 6);
STRING_LITERAL(T839829468_433, "($1 ? (strlen($1)-1) : -1)", 26);
STRING_LITERAL(T839829468_434, "($1 ? strlen($1) : 0)", 21);
STRING_LITERAL(T839829468_435, "($1 ? ($1->Sup.len-1) : -1)", 27);
STRING_LITERAL(T839829468_436, "($1 ? $1->Sup.len : 0)", 22);
STRING_LITERAL(T839829468_437, "($1 ? ($1->len-1) : -1)", 23);
STRING_LITERAL(T839829468_438, "($1 ? $1->len : 0)", 18);
STRING_LITERAL(T839829468_439, "genArrayLen()", 13);
STRING_LITERAL(T839829468_440, "($1->Sup.len)", 13);
STRING_LITERAL(T839829468_441, "$1->len", 7);
STRING_LITERAL(T839829468_442, "unaryStmt", 9);
STRING_LITERAL(T839829468_443, "#nimGCref($1);$n", 16);
STRING_LITERAL(T839829468_444, "#nimGCunref($1);$n", 18);
STRING_LITERAL(T839829468_445, "$1 = #setLengthStr($1, $2);$n", 29);
STRING_LITERAL(T839829468_446, "$1 = ($3) #setLengthSeq(&($1)->Sup, sizeof($4), $2);$n", 54);
STRING_LITERAL(T839829468_447, "$1 = ($3) #setLengthSeq($1, sizeof($4), $2);$n", 46);
STRING_LITERAL(T839829468_448, "($1- $2)", 8);
STRING_LITERAL(T839829468_449, "$1 |= ((", 8);
STRING_LITERAL(T839829468_450, ")1)<<(($2)%(sizeof(", 19);
STRING_LITERAL(T839829468_451, ")*8));$n", 8);
STRING_LITERAL(T839829468_452, "$1 &= ~(((", 10);
STRING_LITERAL(T839829468_453, ")1) << (($2) % (sizeof(", 23);
STRING_LITERAL(T839829468_454, ")*8)));$n", 9);
STRING_LITERAL(T839829468_455, "#countBits32($1)", 16);
STRING_LITERAL(T839829468_456, "#countBits64($1)", 16);
STRING_LITERAL(T839829468_457, "(($1 & ~ $2 ==0)&&($1 != $2))", 29);
STRING_LITERAL(T839829468_458, "(($1 & ~ $2)==0)", 16);
STRING_LITERAL(T839829468_459, "($1 & $2)", 9);
STRING_LITERAL(T839829468_460, "($1 | $2)", 9);
STRING_LITERAL(T839829468_461, "($1 & ~ $2)", 11);
STRING_LITERAL(T839829468_462, "($1 ^ $2)", 9);
STRING_LITERAL(T839829468_463, "fewCmps", 7);
STRING_LITERAL(T839829468_464, "$1 >= $2 && $1 <= $3", 20);
STRING_LITERAL(T839829468_465, "$1 == $2", 8);
STRING_LITERAL(T839829468_466, " || ", 4);
STRING_LITERAL(T839829468_467, "(($1 &(1U<<((NU)($2)&7U)))!=0)", 30);
STRING_LITERAL(T839829468_468, "(($1 &(1U<<((NU)($2)&15U)))!=0)", 31);
STRING_LITERAL(T839829468_469, "(($1 &(1U<<((NU)($2)&31U)))!=0)", 31);
STRING_LITERAL(T839829468_470, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36);
STRING_LITERAL(T839829468_471, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43);
STRING_LITERAL(T839829468_472, "genSetOp()", 10);
/* Nim-compiler-generated data section (do not edit by hand): C-emission
 * template strings used by the Nim C code generator. "$1".."$9"/"$#" are
 * positional format holes, "$n"/"$N" are newline markers, and "#name"
 * references a runtime library symbol. The numeric second argument of
 * STRING_LITERAL is the string's byte length. */
STRING_LITERAL(T839829468_473, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34);
STRING_LITERAL(T839829468_474, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36);
STRING_LITERAL(T839829468_475, "#cardSet($1, ", 13);
STRING_LITERAL(T839829468_476, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == "
"0);$n if (!$3) break;}$n", 88);
STRING_LITERAL(T839829468_477, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == "
"0);$n if (!$3) break;}$nif ($3) $3 = (memcmp($4, $5, $2) != 0);"
"$n", 129);
STRING_LITERAL(T839829468_478, "|", 1);
STRING_LITERAL(T839829468_479, "& ~", 3);
STRING_LITERAL(T839829468_480, "^", 1);
/* Lookup table of set-operation templates, indexed by the set operator. */
NIM_CONST TY556428 lookupopr_556426_839829468 = {((NimStringDesc*) &T839829468_476),
((NimStringDesc*) &T839829468_477),
((NimStringDesc*) &T839829468_52),
((NimStringDesc*) &T839829468_478),
((NimStringDesc*) &T839829468_479),
((NimStringDesc*) &T839829468_480)}
;
STRING_LITERAL(T839829468_481, "(memcmp($1, $2, ", 16);
STRING_LITERAL(T839829468_482, ")==0)", 5);
STRING_LITERAL(T839829468_483, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60);
STRING_LITERAL(T839829468_484, "genSetOp", 8);
STRING_LITERAL(T839829468_485, "$1->data", 8);
STRING_LITERAL(T839829468_486, "($1)+($2), ($3)-($2)+1", 22);
STRING_LITERAL(T839829468_487, "(*$1)->data+($2), ($3)-($2)+1", 29);
STRING_LITERAL(T839829468_488, "$1->data+($2), ($3)-($2)+1", 26);
STRING_LITERAL(T839829468_489, "openArrayLoc: ", 14);
STRING_LITERAL(T839829468_490, "", 0);
STRING_LITERAL(T839829468_491, "(*$1)->data, (*$1)->$2", 22);
STRING_LITERAL(T839829468_492, "$1.ClPrc($3$1.ClEnv)", 20);
STRING_LITERAL(T839829468_493, "$1.ClEnv? $1.ClPrc($3$1.ClEnv):(($4)($1.ClPrc))($2)", 51);
STRING_LITERAL(T839829468_494, "$1 = 0;$n", 9);
STRING_LITERAL(T839829468_495, "#chckNil((void*)$1);$n", 22);
STRING_LITERAL(T839829468_496, "#genericReset((void*)$1, $2);$n", 31);
STRING_LITERAL(T839829468_497, ";$n", 3);
/* (file, line) pairs below are internal-error source locations inside the
 * Nim compiler sources themselves. */
STRING_LITERAL(T839829468_499, "compiler/ccgcalls.nim", 21);
NIM_CONST TY203018 T839829468_498 = {((NimStringDesc*) &T839829468_499),
((NI) 423)}
;
static NIM_CONST char136Set T839829468_500 = {
0x00, 0x00, 0x00, 0x00, 0x88, 0x01, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
;
STRING_LITERAL(T839829468_501, "wrong argument count", 20);
STRING_LITERAL(T839829468_502, "call expression expected for C++ pattern", 40);
NIM_CONST TY203018 T839829468_503 = {((NimStringDesc*) &T839829468_499),
((NI) 328)}
;
STRING_LITERAL(T839829468_504, "->", 2);
STRING_LITERAL(T839829468_505, ");$n", 4);
STRING_LITERAL(T839829468_506, "[", 1);
NIM_CONST TY203018 T839829468_507 = {((NimStringDesc*) &T839829468_499),
((NI) 472)}
;
STRING_LITERAL(T839829468_508, "varargs for objective C method?", 31);
STRING_LITERAL(T839829468_509, "Result: ", 8);
STRING_LITERAL(T839829468_510, "];$n", 4);
STRING_LITERAL(T839829468_511, "]", 1);
NIM_CONST TY203018 T839829468_512 = {((NimStringDesc*) &T839829468_265),
((NI) 925)}
;
STRING_LITERAL(T839829468_513, "<stdio.h>", 9);
STRING_LITERAL(T839829468_514, ", \"nil\"", 7);
STRING_LITERAL(T839829468_515, ", $1? ($1)->data:\"nil\"", 22);
STRING_LITERAL(T839829468_516, "printf($1$2);$n", 15);
STRING_LITERAL(T839829468_517, "%s", 2);
STRING_LITERAL(T839829468_518, "fflush(stdout);$n", 17);
STRING_LITERAL(T839829468_519, "#genericDeepCopy((void*)$1, (void*)$2, $3);$n", 45);
STRING_LITERAL(T839829468_520, "#genericSeqDeepCopy($1, $2, $3);$n", 34);
STRING_LITERAL(T839829468_521, "#genericDeepCopyOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 62);
STRING_LITERAL(T839829468_522, "genDeepCopy: ", 13);
STRING_LITERAL(T839829468_523, "genMagicExpr: ", 14);
STRING_LITERAL(T839829468_524, "static NIM_CONST $1 $2 = $3;$n", 30);
STRING_LITERAL(T839829468_525, "memset($1, 0, sizeof($1));$n", 28);
STRING_LITERAL(T839829468_526, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1"
")&7U));$n", 72);
STRING_LITERAL(T839829468_527, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40);
STRING_LITERAL(T839829468_528, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=((", 39);
STRING_LITERAL(T839829468_529, ")(1)<<(($1)%(sizeof(", 20);
STRING_LITERAL(T839829468_530, "$1 |=((", 7);
STRING_LITERAL(T839829468_531, ")(1)<<(($2)%(sizeof(", 20);
STRING_LITERAL(T839829468_532, "genCheckedRecordField", 21);
STRING_LITERAL(T839829468_533, "genObjConstr", 12);
STRING_LITERAL(T839829468_534, "if ($1) #raiseFieldError(((#NimStringDesc*) &$2));$n", 52);
STRING_LITERAL(T839829468_535, "if (!($1)) #raiseFieldError(((#NimStringDesc*) &$2));$n", 55);
STRING_LITERAL(T839829468_536, "LOC$1.source", 12);
STRING_LITERAL(T839829468_537, "union { $1 source; $2 dest; } LOC$3;$n", 38);
STRING_LITERAL(T839829468_538, "LOC$#.dest", 10);
STRING_LITERAL(T839829468_539, "if ((NU)($1) > (NU)($2)) #raiseIndexError();$n", 46);
STRING_LITERAL(T839829468_540, "if ($1 < $2 || $1 > $3) #raiseIndexError();$n", 45);
STRING_LITERAL(T839829468_541, "$1[($2)- $3]", 12);
STRING_LITERAL(T839829468_542, "if ((NU)($1) >= (NU)($2Len0)) #raiseIndexError();$n", 51);
STRING_LITERAL(T839829468_543, "if ((NU)($1) > (NU)($2->$3)) #raiseIndexError();$n", 50);
STRING_LITERAL(T839829468_544, "if ((NU)($1) >= (NU)($2->$3)) #raiseIndexError();$n", 51);
STRING_LITERAL(T839829468_545, "genTupleElem", 12);
STRING_LITERAL(T839829468_546, ".Field$1", 8);
STRING_LITERAL(T839829468_547, "expr(nkBracketExpr, ", 20);
STRING_LITERAL(T839829468_548, "genDeref ", 9);
STRING_LITERAL(T839829468_549, "genRecordFieldAux", 17);
STRING_LITERAL(T839829468_550, "genRecordField 3", 16);
STRING_LITERAL(T839829468_551, ".$1", 3);
STRING_LITERAL(T839829468_552, "} $1: ;$n", 9);
STRING_LITERAL(T839829468_553, "FR.len-=$1;$n", 13);
STRING_LITERAL(T839829468_554, "FR.len+=$1;$n", 13);
STRING_LITERAL(T839829468_555, "if (!$1) goto $2;$n", 19);
STRING_LITERAL(T839829468_556, "goto $1;$n", 10);
STRING_LITERAL(T839829468_557, "genIf()", 7);
STRING_LITERAL(T839829468_558, "->Sup", 5);
STRING_LITERAL(T839829468_559, "$1 = &$2;$n", 11);
STRING_LITERAL(T839829468_560, "if ($1) #chckObj($2.m_type, $3);$n", 34);
STRING_LITERAL(T839829468_561, "#chckObj($1.m_type, $2);$n", 26);
STRING_LITERAL(T839829468_562, "(($1)#$5($2, $3, $4))", 21);
STRING_LITERAL(T839829468_563, "chckRangeF", 10);
STRING_LITERAL(T839829468_564, "chckRange64", 11);
STRING_LITERAL(T839829468_565, "chckRange", 9);
STRING_LITERAL(T839829468_566, "CNSTCLOSURE", 11);
STRING_LITERAL(T839829468_567, "closure to closure created", 26);
STRING_LITERAL(T839829468_568, "$1.ClPrc = $2; $1.ClEnv = $3;$n", 31);
STRING_LITERAL(T839829468_569, "while (1) {$n", 13);
STRING_LITERAL(T839829468_570, "case statement must be exhaustive for computed goto", 51);
STRING_LITERAL(T839829468_571, "case statement has too many cases for computed goto", 51);
STRING_LITERAL(T839829468_572, "case statement has to start at 0 for computed goto", 50);
STRING_LITERAL(T839829468_573, "no case statement found for computed goto", 41);
STRING_LITERAL(T839829468_574, "TMP$1", 5);
STRING_LITERAL(T839829468_575, "static void* $#[$#] = {", 23);
STRING_LITERAL(T839829468_576, "&&TMP$#, ", 9);
STRING_LITERAL(T839829468_577, "&&TMP$#};$n", 11);
STRING_LITERAL(T839829468_578, "goto *$#[$#];$n", 15);
STRING_LITERAL(T839829468_579, "range notation not available for computed goto", 46);
STRING_LITERAL(T839829468_580, "TMP$#:$n", 8);
STRING_LITERAL(T839829468_581, "#nimProfile();$n", 16);
STRING_LITERAL(T839829468_582, "\'goto\' target must be a literal value", 37);
STRING_LITERAL(T839829468_583, "goto NIMSTATE_$#;$n", 19);
STRING_LITERAL(T839829468_584, "$1 = ($2*) #nimGetProcAddr($3, $4);$n", 37);
STRING_LITERAL(T839829468_585, "$2* $1;$n", 9);
STRING_LITERAL(T839829468_586, "#dbgRegisterGlobal($1, &$2, $3);$n", 34);
STRING_LITERAL(T839829468_587, "#nimGCvisit((void*)$1, 0);$n", 28);
STRING_LITERAL(T839829468_588, "N_NIMCALL(void, $1)(void)", 25);
STRING_LITERAL(T839829468_589, "#nimRegisterGlobalMarker($1);$n", 31);
STRING_LITERAL(T839829468_590, "$#($#);$n", 9);
STRING_LITERAL(T839829468_591, "$# = $#;$n", 10);
STRING_LITERAL(T839829468_592, "genVarTuple", 11);
STRING_LITERAL(T839829468_593, "genConstStmt", 12);
STRING_LITERAL(T839829468_594, "for statement not eliminated", 28);
STRING_LITERAL(T839829468_595, "if (#eqStrings($1, $2)) goto $3;$n", 34);
STRING_LITERAL(T839829468_596, "switch (#hashString($1) & $2) {$n", 33);
STRING_LITERAL(T839829468_597, "case $1: $n$2break;$n", 21);
STRING_LITERAL(T839829468_598, "goto LA$1;$n", 12);
STRING_LITERAL(T839829468_599, "LA$1: ;$n", 9);
STRING_LITERAL(T839829468_600, "if ($1 >= $2 && $1 <= $3) goto $4;$n", 36);
STRING_LITERAL(T839829468_601, "if ($1 == $2) goto $3;$n", 24);
STRING_LITERAL(T839829468_602, "NIMSTATE_$#:$n", 14);
STRING_LITERAL(T839829468_603, "switch ($1) {$n", 15);
STRING_LITERAL(T839829468_604, "default: __assume(0);$n", 23);
STRING_LITERAL(T839829468_605, "#popSafePoint();$n", 18);
STRING_LITERAL(T839829468_606, "#popCurrentException();$n", 25);
STRING_LITERAL(T839829468_607, "if ($1.status != 0) #popCurrentException();$n", 45);
STRING_LITERAL(T839829468_608, "goto BeforeRet;$n", 17);
STRING_LITERAL(T839829468_609, "no loop to break", 16);
STRING_LITERAL(T839829468_610, "extern $1", 9);
STRING_LITERAL(T839829468_611, "#FieldDiscriminantCheck((NI)(NU)($1), (NI)(NU)($2), $3, $4);$n", 62);
STRING_LITERAL(T839829468_612, "genAsmOrEmitStmt()", 18);
STRING_LITERAL(T839829468_613, "\"", 1);
STRING_LITERAL(T839829468_614, "\\n\"\012", 4);
STRING_LITERAL(T839829468_615, "Exception", 9);
STRING_LITERAL(T839829468_616, "E_Base", 6);
STRING_LITERAL(T839829468_617, "try {$n", 7);
STRING_LITERAL(T839829468_618, "} catch (NimException& $1) {$n", 30);
STRING_LITERAL(T839829468_619, "#setFrame((TFrame*)&FR);$n", 26);
STRING_LITERAL(T839829468_620, "else ", 5);
STRING_LITERAL(T839829468_621, "#isObj($1.exp->m_type, $2)", 26);
STRING_LITERAL(T839829468_622, "if ($1) ", 8);
STRING_LITERAL(T839829468_623, "throw;$n", 8);
STRING_LITERAL(T839829468_624, "<setjmp.h>", 10);
STRING_LITERAL(T839829468_625, "#TSafePoint $1;$n", 17);
STRING_LITERAL(T839829468_626, "#pushSafePoint(&$1);$n", 22);
STRING_LITERAL(T839829468_627, "nimStdSetjmp", 12);
STRING_LITERAL(T839829468_628, "$1.status = setjmp($1.context);$n", 33);
STRING_LITERAL(T839829468_629, "nimSigSetjmp", 12);
STRING_LITERAL(T839829468_630, "$1.status = sigsetjmp($1.context, 0);$n", 39);
STRING_LITERAL(T839829468_631, "nimRawSetjmp", 12);
STRING_LITERAL(T839829468_632, "$1.status = _setjmp($1.context);$n", 34);
STRING_LITERAL(T839829468_633, "if ($1.status == 0) {$n", 23);
STRING_LITERAL(T839829468_634, "else {$n", 8);
STRING_LITERAL(T839829468_635, "else", 4);
STRING_LITERAL(T839829468_636, "$1.status = 0;$n", 16);
STRING_LITERAL(T839829468_637, "#isObj(#getCurrentException()->Sup.m_type, $1)", 46);
STRING_LITERAL(T839829468_638, "#isObj(#getCurrentException()->m_type, $1)", 42);
STRING_LITERAL(T839829468_639, "if ($1) {$n", 11);
STRING_LITERAL(T839829468_640, "if ($1.status != 0) #reraiseException();$n", 42);
STRING_LITERAL(T839829468_641, "#raiseException((#Exception*)$1, $2);$n", 39);
STRING_LITERAL(T839829468_642, "#reraiseException();$n", 22);
STRING_LITERAL(T839829468_643, "/*TYPESECTION*/", 15);
STRING_LITERAL(T839829468_644, "/*VARSECTION*/", 14);
STRING_LITERAL(T839829468_645, "/*INCLUDESECTION*/", 18);
STRING_LITERAL(T839829468_646, "bp", 2);
STRING_LITERAL(T839829468_647, "#dbgRegisterBreakpoint($1, (NCSTRING)$2, (NCSTRING)$3);$n", 57);
STRING_LITERAL(T839829468_648, "#dbgRegisterWatchpoint($1, (NCSTRING)$2, $3);$n", 47);
STRING_LITERAL(T839829468_649, "#pragma omp parallel for $4$nfor ($1 = $2; $1 <= $3; ++$1)", 58);
STRING_LITERAL(T839829468_651, "compiler/ccgstmts.nim", 21);
NIM_CONST TY203018 T839829468_650 = {((NimStringDesc*) &T839829468_651),
((NI) 145)}
;
STRING_LITERAL(T839829468_652, "STATE$1: ;$n", 12);
STRING_LITERAL(T839829468_653, "case -1: goto BeforeRet;$n", 26);
STRING_LITERAL(T839829468_654, "case $1: goto STATE$1;$n", 24);
STRING_LITERAL(T839829468_655, "if (((NI*) $1)[0] < 0) break;$n", 31);
STRING_LITERAL(T839829468_656, "if ((((NI*) $1.ClEnv)[0]) < 0) break;$n", 39);
STRING_LITERAL(T839829468_657, "); unknown node kind", 20);
NIM_CONST TY203018 T839829468_658 = {((NimStringDesc*) &T839829468_651),
((NI) 1122)}
;
/* Templates for the generated program's entry points (NimMain/WinMain/
 * DllMain/PreMain) and file headers. */
STRING_LITERAL(T839829468_659, "Init000", 7);
STRING_LITERAL(T839829468_660, "DatInit000", 10);
STRING_LITERAL(T839829468_661, "NIM_EXTERNC N_NOINLINE(void, $1)(void);$N", 41);
STRING_LITERAL(T839829468_662, "\011$1();$N", 8);
STRING_LITERAL(T839829468_663, "N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDECL(void, NimMa"
"in)(void) {$N\011void (*volatile inner)();$N\011PreMain();$N\011inner = N"
"imMainInner;$N$2\011(*inner)();$N}$N$N", 162);
STRING_LITERAL(T839829468_664, "N_STDCALL(int, WinMain)(HINSTANCE hCurInstance, $N "
" HINSTANCE hPrevInstance, $N LP"
"STR lpCmdLine, int nCmdShow) {$N\011NimMain();$N\011return nim_program"
"_result;$N}$N$N", 206);
STRING_LITERAL(T839829468_665, "N_LIB_EXPORT N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDEC"
"L(void, NimMain)(void) {$N\011void (*volatile inner)();$N\011PreMain()"
";$N\011inner = NimMainInner;$N$2\011(*inner)();$N}$N$N", 175);
STRING_LITERAL(T839829468_666, "BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fwdreason, $N "
" LPVOID lpvReserved) {$N\011if(fwdreason == DLL_PROC"
"ESS_ATTACH) {$N\011NimMain();$N}$N\011return 1;$N}$N$N", 175);
STRING_LITERAL(T839829468_667, "<windows.h>", 11);
STRING_LITERAL(T839829468_668, "void NIM_POSIX_INIT NimMainInit(void) {$N\011NimMain();$N}$N$N", 59);
STRING_LITERAL(T839829468_669, "int cmdCount;$Nchar** cmdLine;$Nchar** gEnv;$NN_CDECL(void, Nim"
"MainInner)(void) {$N$1}$N$NN_CDECL(void, NimMain)(void) {$N\011void"
" (*volatile inner)();$N\011PreMain();$N\011inner = NimMainInner;$N$2\011("
"*inner)();$N}$N$N", 208);
STRING_LITERAL(T839829468_670, "int main(void) {$N\011NimMain();$N\011return 0;$N}$N$N", 48);
STRING_LITERAL(T839829468_671, "int main(int argc, char** args, char** env) {$N\011cmdLine = args;"
"$N\011cmdCount = argc;$N\011gEnv = env;$N\011NimMain();$N\011return nim_prog"
"ram_result;$N}$N$N", 145);
STRING_LITERAL(T839829468_672, "dbgRegisterBreakpoint", 21);
STRING_LITERAL(T839829468_673, "dbgRegisterFilename", 19);
STRING_LITERAL(T839829468_674, "dbgRegisterFilename($1);$N", 26);
STRING_LITERAL(T839829468_675, "\011#initStackBottomWith((void *)&inner);$N", 40);
STRING_LITERAL(T839829468_676, "void PreMainInner() {$N\011systemInit000();$N$1$2$3}$N$Nvoid PreMa"
"in() {$N\011void (*volatile inner)();$N\011systemDatInit000();$N\011inner"
" = PreMainInner;$N$4$5\011(*inner)();$N}$N$N", 168);
STRING_LITERAL(T839829468_677, "\011#initThreadVarsEmulation();$N", 30);
STRING_LITERAL(T839829468_678, "still forwarded: ", 17);
STRING_LITERAL(T839829468_679, "NIM_EXTERNC N_NOINLINE(void, $1)(void) {$N", 42);
STRING_LITERAL(T839829468_680, "static #TNimNode $1[$2];$n", 26);
STRING_LITERAL(T839829468_681, "static #TNimType $1[$2];$n", 26);
STRING_LITERAL(T839829468_682, "\011TFrame FR; FR.len = 0;$N", 25);
STRING_LITERAL(T839829468_683, "}$N$N", 5);
STRING_LITERAL(T839829468_684, "N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 46);
STRING_LITERAL(T839829468_685, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N", 131);
STRING_LITERAL(T839829468_686, "0.15.0", 6);
STRING_LITERAL(T839829468_687, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n"
" $5 */$N", 201);
extern NIM_CONST TY176082 Os_176068_4151366050;
extern NIM_CONST TY176510 Cpu_176496_4151366050;
STRING_LITERAL(T839829468_688, "#define NIM_INTBITS $1", 22);
STRING_LITERAL(T839829468_689, "typedef struct {$1} NimThreadVars;$n", 36);
STRING_LITERAL(T839829468_690, "#include \"nimbase.h\"", 20);
STRING_LITERAL(T839829468_691, "#include \"$1\"$N", 15);
STRING_LITERAL(T839829468_692, "#include $1$N", 13);
STRING_LITERAL(T839829468_693, "extern \"C\"", 10);
STRING_LITERAL(T839829468_694, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61);
STRING_LITERAL(T839829468_695, "__$1__", 6);
STRING_LITERAL(T839829468_696, "#ifndef $1$n#define $1$n", 24);
STRING_LITERAL(T839829468_697, "N_CDECL(void, NimMain)(void);$n", 31);
STRING_LITERAL(T839829468_698, "#endif /* $1 */$n", 17);
/* Module-level globals owned by this translation unit plus extern
 * declarations of state defined in sibling generated units (type-info
 * descriptors NTI*, compiler option sets, rope caches, target info). */
Tcgen529027* generatedheader_532201_839829468;
extern TNimType NTI529015; /* BModule */
Ropeobj178006* indent_532655_839829468;
extern TNimType NTI178004; /* Rope */
extern Gcheap49818 gch_49858_1689653243;
Ropeobj178006* nimtv_538656_839829468;
Ttypeseq292836* nimtvdeps_538674_839829468;
extern TNimType NTI292836; /* TTypeSeq */
Intset268030 nimtvdeclared_538675_839829468;
extern TNimType NTI268030; /* IntSet */
NI breakpointid_548860_839829468;
Ropeobj178006* gbreakpoints_548861_839829468;
extern TY529153* gmodules_529170_3723162438;
extern TNimType NTI529027; /* TCGen */
extern Debuginfo203009 gdebuginfo_203470_1926258066;
extern Toption169009Set goptions_169128_2607990831;
extern TNimType NTI292804; /* TSymSeq */
extern Tglobaloption169013Set gglobaloptions_169130_2607990831;
extern NimStringDesc* headerfile_169138_2607990831;
extern NimStringDesc* gprojectfull_169211_2607990831;
extern Tcommands169076 gcmd_169132_2607990831;
extern NI gerrorcounter_192069_155036129;
extern Ropeobj178006* rnl_178903_2381377266;
extern NI gforwardedprocscounter_529171_3723162438;
extern TNimType NTI292244; /* TTypeKind */
extern TNimType NTI203017; /* seq[(string, int)] */
extern Tsystemcc273002 ccompiler_273431_2528170400;
extern NimStringDesc* tnl_176644_4151366050;
extern NI floatsize_176642_4151366050;
extern Tgcmode169080 gselectedgc_169133_2607990831;
extern TNimType NTI292020; /* TNodeKind */
extern TNimType NTI135002; /* seq[string] */
extern TNimType NTI292435; /* TSymKind */
extern TNimType NTI292816; /* TLoc */
extern NI intsize_176641_4151366050;
extern TNimType NTI292524; /* TMagic */
extern TNimType NTI191350; /* seq[Rope] */
extern TNimType NTI292796; /* TNodeSeq */
extern Ropeobj178006* mainmodprocs_529148_3723162438;
extern Ropeobj178006* maindatinit_529151_3723162438;
extern Ropeobj178006* mainmodinit_529149_3723162438;
extern Ropeobj178006* othermodsinit_529150_3723162438;
extern Tsystemos176004 targetos_176629_4151366050;
extern TY191612* fileinfos_191629_155036129;
extern Tsystemcpu176452 targetcpu_176627_4151366050;
extern Ropeobj178006* gmapping_529152_3723162438;
/* GC marker proc: lets the Nim GC trace the generatedheader global root. */
N_NIMCALL(void, T839829468_2)(void) {
nimGCvisit((void*)generatedheader_532201_839829468, 0);
}
/* GC marker proc for the `indent` global rope root. */
N_NIMCALL(void, T839829468_3)(void) {
nimGCvisit((void*)indent_532655_839829468, 0);
}
/* Map a user-visible object pointer back to its GC cell header, which is
 * laid out immediately before the payload (subtract sizeof(Cell47304)). */
static N_INLINE(Cell47304*, usrtocell_51440_1689653243)(void* usr0) {
Cell47304* result0;
result0 = (Cell47304*)0;
result0 = ((Cell47304*) ((NI)((NU64)(((NI) (usr0))) - (NU64)(((NI)sizeof(Cell47304))))));
return result0;
}
/* Queue a cell on the GC's zero-count table (ZCT) for later collection. */
static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47304* c0) {
addzct_51417_1689653243((&gch_49858_1689653243.zct), c0);
}
/* Reference assignment for acyclic refs: bump the new referent's refcount,
 * drop the old referent's, and push the old cell onto the ZCT once its
 * count falls below one unit. Counts move in steps of 8 — the low bits
 * presumably hold GC flag/color bits (TODO confirm against the Nim GC).
 * The increment MUST happen before the decrement so dest == src is safe. */
static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0) {
{
Cell47304* c0;
if (!!((src0 == NIM_NIL))) goto LA3;
c0 = usrtocell_51440_1689653243(src0);
(*c0).refcount += ((NI) 8);
}
LA3: ;
{
Cell47304* c0;
if (!!(((*dest0) == NIM_NIL))) goto LA7;
c0 = usrtocell_51440_1689653243((*dest0));
{
(*c0).refcount -= ((NI) 8);
if (!((NU64)((*c0).refcount) < (NU64)(((NI) 8)))) goto LA11;
rtladdzct_52601_1689653243(c0);
}
LA11: ;
}
LA7: ;
(*dest0) = src0;
}
/* GC marker proc for the `nimtv` global rope root. */
N_NIMCALL(void, T839829468_5)(void) {
nimGCvisit((void*)nimtv_538656_839829468, 0);
}
/* GC marker proc for the `nimtvdeps` global seq root. */
N_NIMCALL(void, T839829468_6)(void) {
nimGCvisit((void*)nimtvdeps_538674_839829468, 0);
}
/* Drop one reference (one unit = 8) from p0's cell; queue it on the ZCT
 * when the count reaches zero units. Caller guarantees p0 is non-nil. */
static N_INLINE(void, nimGCunrefNoCycle)(void* p0) {
Cell47304* c0;
c0 = usrtocell_51440_1689653243(p0);
{
(*c0).refcount -= ((NI) 8);
if (!((NU64)((*c0).refcount) < (NU64)(((NI) 8)))) goto LA3;
rtladdzct_52601_1689653243(c0);
}
LA3: ;
}
/* GC marker proc for the `nimtvdeclared` IntSet: visits both of its
 * heap-allocated parts (hash head and data array). */
N_NIMCALL(void, T839829468_7)(void) {
nimGCvisit((void*)nimtvdeclared_538675_839829468.head, 0);
nimGCvisit((void*)nimtvdeclared_538675_839829468.data, 0);
}
/* GC marker proc for the `gbreakpoints` global rope root. */
N_NIMCALL(void, T839829468_8)(void) {
nimGCvisit((void*)gbreakpoints_548861_839829468, 0);
}
/* Look up the code-gen module registered for symbol s0 by its position
 * index in the global gmodules seq; returns nil when the position is out
 * of range (bounds-checked with short-circuit evaluation via LA4). */
N_NIMCALL(Tcgen529027*, getcgenmodule_532226_839829468)(Tsym292834* s0) {
Tcgen529027* result0;
result0 = (Tcgen529027*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (((NI) 0) <= (*s0).position);
if (!(LOC3)) goto LA4;
LOC3 = ((*s0).position < (gmodules_529170_3723162438 ? gmodules_529170_3723162438->Sup.len : 0));
LA4: ;
if (!LOC3) goto LA5;
result0 = gmodules_529170_3723162438->data[(*s0).position];
}
goto LA1;
LA5: ;
{
result0 = NIM_NIL;
}
LA1: ;
return result0;
}
/* Copy size0 bytes from source0 to dest0; thin wrapper over memcpy.
 * The regions must not overlap (memcpy semantics). */
static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0) {
memcpy(dest0, source0, ((size_t) (size0)));
}
/* Append src0 onto dest0 in place. Copies len+1 bytes so the NUL
 * terminator comes along, then bumps dest0's length. Caller must have
 * reserved enough capacity (e.g. via rawNewString). */
static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0) {
copymem_7485_1689653243(((void*) ((&(*dest0).data[((*dest0).Sup.len)- 0]))), ((void*) ((*src0).data)), ((NI) ((NI)((*src0).Sup.len + ((NI) 1)))));
(*dest0).Sup.len += (*src0).Sup.len;
}
/* Compute a stable 32-bit hash identifying the module that owns s0.
 * Walks the owner chain until a symbol of kind 6 is found (presumably
 * skModule in the TSymKind ordering — confirm against ast.nim), then
 * registers (package name, module name) with the global debug info and
 * returns the resulting id. Assumes a module owner always exists. */
N_NIMCALL(NU32, hashowner_532977_839829468)(Tsym292834* s0) {
NU32 result0;
Tsym292834* m0;
Tsym292834* p0;
result0 = (NU32)0;
m0 = s0;
{
while (1) {
if (!!(((*m0).kind == ((Tsymkind292435) 6)))) goto LA2;
m0 = (*m0).owner;
} LA2: ;
}
p0 = (*m0).owner;
result0 = register_203121_1926258066((&gdebuginfo_203470_1926258066), (*(*p0).name).s, (*(*m0).name).s);
return result0;
}
/* Add one reference unit (8) to a GC cell's refcount. */
static N_INLINE(void, incref_53419_1689653243)(Cell47304* c0) {
(*c0).refcount = (NI)((NU64)((*c0).refcount) + (NU64)(((NI) 8)));
}
/* Remove one reference unit (8); queue the cell on the ZCT when the
 * count drops below one unit. */
static N_INLINE(void, decref_53001_1689653243)(Cell47304* c0) {
{
(*c0).refcount -= ((NI) 8);
if (!((NU64)((*c0).refcount) < (NU64)(((NI) 8)))) goto LA3;
rtladdzct_52601_1689653243(c0);
}
LA3: ;
}
/* General reference assignment (may participate in cycles): incref the
 * new referent before decref'ing the old one so self-assignment and
 * dest == src cases stay safe, then store. */
static N_INLINE(void, asgnRef)(void** dest0, void* src0) {
{
Cell47304* LOC5;
if (!!((src0 == NIM_NIL))) goto LA3;
LOC5 = (Cell47304*)0;
LOC5 = usrtocell_51440_1689653243(src0);
incref_53419_1689653243(LOC5);
}
LA3: ;
{
Cell47304* LOC10;
if (!!(((*dest0) == NIM_NIL))) goto LA8;
LOC10 = (Cell47304*)0;
LOC10 = usrtocell_51440_1689653243((*dest0));
decref_53001_1689653243(LOC10);
}
LA8: ;
(*dest0) = src0;
}
/* Compute the option set for procs generated in module m0: normally the
 * global options verbatim; when the module carries symbol flag bit 13
 * (a compiler-internal flag — presumably the system module marker,
 * TODO confirm), option bit 15 (mask 32768) is stripped. */
N_NIMCALL(Toption169009Set, initprocoptions_562635_839829468)(Tcgen529027* m0) {
Toption169009Set result0;
memset((void*)(&result0), 0, sizeof(result0));
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA3;
result0 = (goptions_169128_2607990831 & ~ 32768);
}
goto LA1;
LA3: ;
{
result0 = goptions_169128_2607990831;
}
LA1: ;
return result0;
}
/* Create the module's pre-init proc context. Its label counter starts at
 * 100000 so temporaries it emits cannot collide with the main init proc. */
N_NIMCALL(Tcproc529021*, newpreinitproc_562625_839829468)(Tcgen529027* m0) {
Tcproc529021* preproc0 = newproc_529206_3723162438(NIM_NIL, m0);
(*preproc0).labels = ((NI) 100000);
return preproc0;
}
/* Create the module's post-init proc context. Its label counter starts at
 * 200000, above the pre-init proc's range, to keep temp names disjoint. */
N_NIMCALL(Tcproc529021*, newpostinitproc_562630_839829468)(Tcgen529027* m0) {
Tcproc529021* postproc0 = newproc_529206_3723162438(NIM_NIL, m0);
(*postproc0).labels = ((NI) 200000);
return postproc0;
}
/* Produce a fresh unique temporary name for module m0: tmpbase & labels,
 * then post-increment the label counter (order matters). */
N_NIMCALL(Ropeobj178006*, gettempname_533598_839829468)(Tcgen529027* m0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178401_2381377266(((NI64) ((*m0).labels)));
result0 = HEX26_178418_2381377266((*m0).tmpbase, LOC1);
(*m0).labels += ((NI) 1);
return result0;
}
/* Allocate and fully initialize a TCGen (per-module C code-gen state) for
 * `module0` writing to `filename0`:
 *  - tmpbase = a prefix + hashowner(module0) rendered in hex, giving every
 *    temp name a module-unique stem (prefix/suffix literals T.._11/_12 are
 *    defined earlier in the file; exact text not visible here);
 *  - empty header-file list, declared-things/protos int sets, type caches,
 *    type-info marker set, data cache;
 *  - cfilename/filename both set to copies of filename0 (old values
 *    unref'd first);
 *  - init/preinit/postinit proc contexts (preinit/postinit use offset
 *    label counters so emitted temps never collide);
 *  - fresh empty typestack and forwardedprocs seqs, plus names for the
 *    type-node/type-info arrays via gettempname.
 * When the module carries symbol flag bit 13, code-gen flag bit 0 is set
 * and option bit 15 is cleared on both pre/post init procs (mirrors
 * initprocoptions). */
N_NIMCALL(Tcgen529027*, rawnewmodule_562663_839829468)(Tsym292834* module0, NimStringDesc* filename0) {
Tcgen529027* result0;
NimStringDesc* LOC1;
NU32 LOC2;
NimStringDesc* LOC3;
NimStringDesc* LOC4;
NimStringDesc* LOC5;
result0 = (Tcgen529027*)0;
result0 = (Tcgen529027*) newObj((&NTI529015), sizeof(Tcgen529027));
(*result0).Sup.Sup.m_type = (&NTI529027);
LOC1 = (NimStringDesc*)0;
LOC2 = (NU32)0;
LOC2 = hashowner_532977_839829468(module0);
LOC3 = (NimStringDesc*)0;
LOC3 = HEX24_8401_1689653243(((NU64) (LOC2)));
LOC1 = rawNewString(LOC3->Sup.len + 2);
appendString(LOC1, ((NimStringDesc*) &T839829468_11));
appendString(LOC1, LOC3);
appendString(LOC1, ((NimStringDesc*) &T839829468_12));
asgnRefNoCycle((void**) (&(*result0).tmpbase), rope_178277_2381377266(LOC1));
initlinkedlist_147031_3771138726((&(*result0).headerfiles));
initintset_268885_2627731572((&(*result0).declaredthings));
initintset_268885_2627731572((&(*result0).declaredprotos));
LOC4 = (NimStringDesc*)0;
LOC4 = (*result0).cfilename; (*result0).cfilename = copyStringRC1(filename0);
if (LOC4) nimGCunrefNoCycle(LOC4);
LOC5 = (NimStringDesc*)0;
LOC5 = (*result0).filename; (*result0).filename = copyStringRC1(filename0);
if (LOC5) nimGCunrefNoCycle(LOC5);
initidtable_296019_850551059((&(*result0).typecache));
initidtable_296019_850551059((&(*result0).forwtypecache));
asgnRefNoCycle((void**) (&(*result0).module), module0);
initintset_268885_2627731572((&(*result0).typeinfomarker));
asgnRef((void**) (&(*result0).initproc), newproc_529206_3723162438(NIM_NIL, result0));
(*(*result0).initproc).options = initprocoptions_562635_839829468(result0);
asgnRef((void**) (&(*result0).preinitproc), newpreinitproc_562625_839829468(result0));
asgnRef((void**) (&(*result0).postinitproc), newpostinitproc_562630_839829468(result0));
initnodetable_296085_850551059((&(*result0).datacache));
if ((*result0).typestack) nimGCunrefNoCycle((*result0).typestack);
(*result0).typestack = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
if ((*result0).forwardedprocs) nimGCunrefNoCycle((*result0).forwardedprocs);
(*result0).forwardedprocs = (Tsymseq292804*) newSeqRC1((&NTI292804), 0);
asgnRefNoCycle((void**) (&(*result0).typenodesname), gettempname_533598_839829468(result0));
asgnRefNoCycle((void**) (&(*result0).nimtypesname), gettempname_533598_839829468(result0));
{
if (!(((*module0).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA8;
(*result0).flags |= ((NU8)1)<<((((Codegenflag529025) 0))%(sizeof(NU8)*8));
(*(*result0).preinitproc).options &= ~(((NU32)1) << ((((Toption169009) 15)) % (sizeof(NU32)*8)));
(*(*result0).postinitproc).options &= ~(((NU32)1) << ((((Toption169009) 15)) % (sizeof(NU32)*8)));
}
LA8: ;
return result0;
}
/* Convenience overload: derive the output filename from the module's
 * position via toFullPath, then delegate to the filename-taking variant. */
N_NIMCALL(Tcgen529027*, rawnewmodule_563038_839829468)(Tsym292834* module0) {
NimStringDesc* path0 = tofullpath_192261_155036129(((NI32) ((*module0).position)));
return rawnewmodule_562663_839829468(module0, path0);
}
/* Create a TCGen for module0 and register it in the global gmodules table
 * at the module's position index (growing the seq when needed). Raises an
 * internal error if a module is already registered at that position, and
 * — under a specific global option (bit 2) combined with module flag bit
 * 25 — reports an internal error naming the offending file (message text
 * T839829468_13 is defined earlier in the file). */
N_NIMCALL(Tcgen529027*, newmodule_563044_839829468)(Tsym292834* module0) {
Tcgen529027* result0;
result0 = (Tcgen529027*)0;
{
Tcgen529027* LOC3;
NimStringDesc* LOC6;
LOC3 = (Tcgen529027*)0;
LOC3 = getcgenmodule_532226_839829468(module0);
if (!!((LOC3 == NIM_NIL))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_9);
internalerror_196113_155036129(LOC6);
}
LA4: ;
result0 = rawnewmodule_563038_839829468(module0);
{
if (!((gmodules_529170_3723162438 ? gmodules_529170_3723162438->Sup.len : 0) <= (*module0).position)) goto LA9;
gmodules_529170_3723162438 = (TY529153*) setLengthSeq(&(gmodules_529170_3723162438)->Sup, sizeof(Tcgen529027*), ((NI) ((NI)((*module0).position + ((NI) 1)))));
}
LA9: ;
asgnRef((void**) (&gmodules_529170_3723162438->data[(*module0).position]), result0);
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0)) goto LA13;
{
NimStringDesc* LOC19;
NimStringDesc* LOC20;
if (!(((*module0).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0)) goto LA17;
LOC19 = (NimStringDesc*)0;
LOC20 = (NimStringDesc*)0;
LOC20 = tofilename_192257_155036129(((NI32) ((*module0).position)));
LOC19 = rawNewString(LOC20->Sup.len + 28);
appendString(LOC19, ((NimStringDesc*) &T839829468_13));
appendString(LOC19, LOC20);
internalerror_196113_155036129(LOC19);
}
LA17: ;
}
LA13: ;
return result0;
}
/* Compiler pass entry point: open code generation for module0 and return
 * its pass context (the TCGen upcast to TPassContext). On the first module
 * compiled with the header-generation global option (bit 27) set, also
 * creates the `generatedheader` module: its path is derived from
 * --header's value when non-empty, else from the project path, run through
 * completeCFilePath and changeFileExt (extension literal T839829468_14
 * defined earlier), and code-gen flag bit 3 is set on it. */
N_NIMCALL(Tpasscontext341002*, myopen_563112_839829468)(Tsym292834* module0) {
Tpasscontext341002* result0;
Tcgen529027* LOC1;
result0 = (Tpasscontext341002*)0;
LOC1 = (Tcgen529027*)0;
LOC1 = newmodule_563044_839829468(module0);
result0 = &LOC1->Sup;
{
NIM_BOOL LOC4;
NimStringDesc* f0;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
LOC4 = (NIM_BOOL)0;
LOC4 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 27))&63U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = (generatedheader_532201_839829468 == NIM_NIL);
LA5: ;
if (!LOC4) goto LA6;
{
if (!(((NI) 0) < (headerfile_169138_2607990831 ? headerfile_169138_2607990831->Sup.len : 0))) goto LA10;
f0 = headerfile_169138_2607990831;
}
goto LA8;
LA10: ;
{
f0 = gprojectfull_169211_2607990831;
}
LA8: ;
LOC13 = (NimStringDesc*)0;
LOC13 = completecfilepath_273854_2528170400(f0, NIM_TRUE);
LOC14 = (NimStringDesc*)0;
LOC14 = noschangeFileExt(LOC13, ((NimStringDesc*) &T839829468_14));
asgnRef((void**) (&generatedheader_532201_839829468), rawnewmodule_562663_839829468(module0, LOC14));
(*generatedheader_532201_839829468).flags |= ((NU8)1)<<((((Codegenflag529025) 3))%(sizeof(NU8)*8));
}
LA6: ;
return result0;
}
/* Compute the output C file path for module m0. The extension is picked
 * by backend: command 2 or module flag 27 -> literal T.._15, command 3 or
 * module flag 28 -> T.._16, else the default T.._17 (these presumably
 * correspond to .cpp / .m / .c — the literal texts are defined earlier in
 * the file). The base path is cfilename run through withPackageName and
 * completeCFilePath, then changeFileExt. */
N_NIMCALL(NimStringDesc*, getcfile_563201_839829468)(Tcgen529027* m0) {
NimStringDesc* result0;
NimStringDesc* ext0;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
result0 = (NimStringDesc*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
ext0 = copyString(((NimStringDesc*) &T839829468_15));
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = (gcmd_169132_2607990831 == ((Tcommands169076) 3));
if (LOC8) goto LA9;
LOC8 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0);
LA9: ;
if (!LOC8) goto LA10;
ext0 = copyString(((NimStringDesc*) &T839829468_16));
}
goto LA1;
LA10: ;
{
ext0 = copyString(((NimStringDesc*) &T839829468_17));
}
LA1: ;
LOC13 = (NimStringDesc*)0;
LOC13 = withpackagename_170073_2607990831((*m0).cfilename);
LOC14 = (NimStringDesc*)0;
LOC14 = completecfilepath_273854_2528170400(LOC13, NIM_TRUE);
result0 = noschangeFileExt(LOC14, ext0);
return result0;
}
/* Pass entry point for a module restored from the rod cache: create the
 * code-gen module as usual, then merge previously generated C code from
 * its output file. rd0 (the rod reader) is accepted but unused here. */
N_NIMCALL(Tpasscontext341002*, myopencached_563246_839829468)(Tsym292834* module0, Trodreader332021* rd0) {
Tcgen529027* mod0 = newmodule_563044_839829468(module0);
NimStringDesc* cfile0 = getcfile_563201_839829468(mod0);
readmergeinfo_530613_2760143328(cfile0, mod0);
return &mod0->Sup;
}
/* Code generation is skipped for any node once an error has been
 * reported (global error counter > 0). n0 is intentionally unused. */
static N_INLINE(NIM_BOOL, skipcodegen_341085_2355241294)(Tnode292802* n0) {
return (NIM_BOOL)(((NI) 0) < gerrorcounter_192069_155036129);
}
/* Initialize the location descriptor a0, but only if it is still unset
 * (kind 0 — presumably locNone). Kind, type and storage are always
 * written; the rope r0 only when no name was assigned yet, so an
 * existing name is never clobbered. */
N_NIMCALL(void, fillloc_532282_839829468)(Tloc292816* a0, Tlockind292808 k0, Ttype292840* typ0, Ropeobj178006* r0, Tstorageloc292812 s0) {
{
if (!((*a0).k == ((Tlockind292808) 0))) goto LA3;
(*a0).k = k0;
unsureAsgnRef((void**) (&(*a0).t), typ0);
(*a0).s = s0;
{
if (!((*a0).r == NIM_NIL)) goto LA7;
unsureAsgnRef((void**) (&(*a0).r), r0);
}
LA7: ;
}
LA3: ;
}
/* True when identifier w0's interned id falls in one of the reserved
 * keyword id ranges (presumably C/C++ keyword ids in the ident cache —
 * confirm against the compiler's ident tables). Uses the GCC/Clang
 * case-range extension (`case a ... b:`). */
N_NIMCALL(NIM_BOOL, iskeyword_532960_839829468)(Tident199010* w0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
switch ((*w0).Sup.id) {
case ((NI) 200) ... ((NI) 262):
case ((NI) 4) ... ((NI) 70):
case ((NI) 138):
{
result0 = NIM_TRUE;
goto BeforeRet;
}
break;
default:
{
result0 = NIM_FALSE;
goto BeforeRet;
}
break;
}
}BeforeRet: ;
return result0;
}
/* Return (computing and caching on first use in s0.loc.r) the mangled C
 * name for symbol s0. The name may stay close to the original when the
 * symbol kind is in a whitelist mask (2824), none of the flag bits in
 * mask 2149580812 are set, and the name is not a C keyword; in that case
 * only suffix literal T.._18 is appended. Otherwise the name is made
 * globally unique by appending separator + symbol id + separator +
 * owner-module hash (separator literal T.._12 defined earlier). */
N_NIMCALL(Ropeobj178006*, manglename_533205_839829468)(Tsym292834* s0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = (*s0).loc.r;
{
NIM_BOOL keeporigname0;
NIM_BOOL LOC5;
NIM_BOOL LOC6;
NIM_BOOL LOC9;
NimStringDesc* LOC10;
if (!(result0 == NIM_NIL)) goto LA3;
LOC5 = (NIM_BOOL)0;
LOC6 = (NIM_BOOL)0;
LOC6 = ((2824 &(1U<<((NU)((*s0).kind)&31U)))!=0);
if (!(LOC6)) goto LA7;
LOC6 = ((IL64(2149580812) & (*s0).flags) == 0);
LA7: ;
LOC5 = LOC6;
if (!(LOC5)) goto LA8;
LOC9 = (NIM_BOOL)0;
LOC9 = iskeyword_532960_839829468((*s0).name);
LOC5 = !(LOC9);
LA8: ;
keeporigname0 = LOC5;
LOC10 = (NimStringDesc*)0;
LOC10 = mangle_528847_2036603609((*(*s0).name).s);
result0 = rope_178277_2381377266(LOC10);
{
if (!keeporigname0) goto LA13;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_18));
}
goto LA11;
LA13: ;
{
/* LOC16/LOC19 are empty varargs arrays for the `%` rope-format calls. */
TY533289 LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC18;
TY533289 LOC19;
Ropeobj178006* LOC20;
NU32 LOC21;
Ropeobj178006* LOC22;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_12), LOC16, 0);
add_178482_2381377266(&result0, LOC17);
LOC18 = (Ropeobj178006*)0;
LOC18 = rope_178401_2381377266(((NI64) ((*s0).Sup.id)));
add_178482_2381377266(&result0, LOC18);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (Ropeobj178006*)0;
LOC20 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_12), LOC19, 0);
add_178482_2381377266(&result0, LOC20);
LOC21 = (NU32)0;
LOC21 = hashowner_532977_839829468(s0);
LOC22 = (Ropeobj178006*)0;
LOC22 = rope_178401_2381377266(((NI64) (LOC21)));
add_178482_2381377266(&result0, LOC22);
}
LA11: ;
asgnRefNoCycle((void**) (&(*s0).loc.r), result0);
}
LA3: ;
return result0;
}
/* Lazily assign a location to a proc symbol: if its loc is still unset
 * (kind 0), fill it with loc kind 7, the proc's type, the mangled name
 * and storage class 2 (enum meanings per the compiler's TLocKind /
 * TStorageLoc — not visible here). */
N_NIMCALL(void, fillprocloc_539201_839829468)(Tsym292834* sym0) {
{
Ropeobj178006* LOC5;
if (!((*sym0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 7), (*sym0).typ, LOC5, ((Tstorageloc292812) 2));
}
LA3: ;
}
/* If sym0 carries loc flag bit 6 (presumably lfHeader — confirm), add
 * the header path stored in its annex to m0's header-file list (the
 * include-or-not result LOC6 is deliberately discarded). */
N_NIMCALL(void, useheader_532369_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
{
NimStringDesc* LOC5;
NIM_BOOL LOC6;
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 6))&15U)))!=0)) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = getstr_297230_850551059((*(*sym0).annex).path);
LOC6 = (NIM_BOOL)0;
LOC6 = includestr_147249_3771138726((&(*m0).headerfiles), LOC5);
}
LA3: ;
}
/* Append a single char to a Nim string in place and keep it
 * NUL-terminated.  Caller must have reserved capacity (no growth here). */
static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0) {
  NI oldlen = (*dest0).Sup.len;
  (*dest0).data[oldlen] = c0;
  (*dest0).data[oldlen + ((NI) 1)] = 0;
  (*dest0).Sup.len = oldlen + ((NI) 1);
}
/* A proc counts as "activated" once its type slot has been filled in. */
N_NIMCALL(NIM_BOOL, isactivated_561431_839829468)(Tsym292834* prc0) {
  return !((*prc0).typ == NIM_NIL);
}
/* Append prc0 to the module's forwardedprocs seq and bump the global
 * forwarded-proc counter.  This is the standard Nim seq-append expansion:
 * grow the backing seq, store the ref through the GC write barrier at the
 * current length, then increment the length.  Statement order is
 * significant and must not be changed. */
N_NIMCALL(void, addforwardedproc_532203_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
(*m0).forwardedprocs = (Tsymseq292804*) incrSeqV2(&((*m0).forwardedprocs)->Sup, sizeof(Tsym292834*));
asgnRefNoCycle((void**) (&(*m0).forwardedprocs->data[(*m0).forwardedprocs->Sup.len]), prc0);
++(*m0).forwardedprocs->Sup.len;
gforwardedprocscounter_529171_3723162438 += ((NI) 1);
}
/* Emit a C #line-style directive (format string T839829468_21) for the
 * given filename/line into r0, but only when global option bit 10
 * (line-dir emission, presumably — confirm against Nim's options module)
 * is enabled. */
N_NIMCALL(void, genclinedir_532725_839829468)(Ropeobj178006** r0, NimStringDesc* filename0, NI line0) {
  if ((goptions_169128_2607990831 & (1U << ((NU)(((Toption169009) 10)) & 31U))) != 0) {
    TY532811 fmtargs;
    NimStringDesc* fname;
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fname = makesinglelinecstring_528835_2036603609(filename0);
    fmtargs[0] = rope_178277_2381377266(fname);
    fmtargs[1] = rope_178401_2381377266(((NI64) (line0)));
    addf_179205_2381377266(r0, ((NimStringDesc*) &T839829468_21), fmtargs, 2);
  }
}
/* Widen the stored line field of a line-info record to NI. */
static N_INLINE(NI, tolinenumber_192415_155036129)(Tlineinfo191336 info0) {
  return ((NI) (info0.line));
}
/* Line number clamped to be non-negative (negative values map to 0). */
N_NIMCALL(NI, safelinenm_532721_839829468)(Tlineinfo191336 info0) {
  NI line = tolinenumber_192415_155036129(info0);
  return (line < ((NI) 0)) ? ((NI) 0) : line;
}
/* Convenience overload: resolve the full path and clamped line number
 * from a line-info record, then emit the line directive. */
N_NIMCALL(void, genclinedir_532813_839829468)(Ropeobj178006** r0, Tlineinfo191336 info0) {
  NimStringDesc* fullpath = tofullpath_192261_155036129(info0.fileindex);
  NI line = safelinenm_532721_839829468(info0);
  genclinedir_532725_839829468(r0, fullpath, line);
}
/* Map a Nim set type to a C type kind based on its byte size:
 * 1/2/4/8-byte sets map to integer-like kinds 4/5/6/7; anything larger
 * falls back to kind 17 (the generic/struct-like kind, presumably —
 * ordinals are opaque here). */
N_NIMCALL(Tctypekind529007, mapsettype_533389_839829468)(Ttype292840* typ0) {
  NI64 bytes = getsize_320135_3876443242(typ0);
  switch (((NI) (bytes))) {
  case ((NI) 1):
    return ((Tctypekind529007) 4);
  case ((NI) 2):
    return ((Tctypekind529007) 5);
  case ((NI) 4):
    return ((Tctypekind529007) 6);
  case ((NI) 8):
    return ((Tctypekind529007) 7);
  default:
    return ((Tctypekind529007) 17);
  }
}
/* Recursively map a Nim type to its C type kind (Tctypekind).  The switch
 * is over opaque generated enum ordinals of Ttypekind; transparent
 * wrapper kinds (the 10..13/15/46/47/49/8 group) recurse into their last
 * son, distinct/range kinds select by first-ordinal sign and byte size,
 * and pointer-like kinds dispatch on the skipped base type.  Unknown or
 * unsupported kinds raise an internal error (T839829468_25). */
N_NIMCALL(Tctypekind529007, maptype_533394_839829468)(Ttype292840* typ0) {
Tctypekind529007 result0;
result0 = (Tctypekind529007)0;
switch ((*typ0).kind) {
case ((Ttypekind292244) 0):
case ((Ttypekind292244) 7):
{
result0 = ((Tctypekind529007) 0);
}
break;
case ((Ttypekind292244) 1):
{
result0 = ((Tctypekind529007) 2);
}
break;
case ((Ttypekind292244) 2):
{
result0 = ((Tctypekind529007) 1);
}
break;
case ((Ttypekind292244) 19):
{
/* set type: kind depends on the set's byte size */
result0 = mapsettype_533389_839829468(typ0);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 48):
{
result0 = ((Tctypekind529007) 17);
}
break;
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
result0 = ((Tctypekind529007) 19);
}
break;
case ((Ttypekind292244) 10):
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 12):
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 15):
case ((Ttypekind292244) 46):
case ((Ttypekind292244) 47):
case ((Ttypekind292244) 49):
case ((Ttypekind292244) 8):
{
/* transparent wrappers: recurse into the underlying (last son) type */
Ttype292840* LOC8;
LOC8 = (Ttype292840*)0;
LOC8 = lastson_295377_850551059(typ0);
result0 = maptype_533394_839829468(LOC8);
}
break;
case ((Ttypekind292244) 14):
{
{
/* range/ordinal-like kind: signed ranges map to kind 6 ... */
NI64 LOC12;
LOC12 = (NI64)0;
LOC12 = firstord_320001_3876443242(typ0);
if (!(LOC12 < IL64(0))) goto LA13;
result0 = ((Tctypekind529007) 6);
}
goto LA10;
LA13: ;
{
/* ... unsigned ranges map by byte size; other sizes are internal errors */
NI64 LOC16;
LOC16 = (NI64)0;
LOC16 = getsize_320135_3876443242(typ0);
switch (((NI) (LOC16))) {
case ((NI) 1):
{
result0 = ((Tctypekind529007) 13);
}
break;
case ((NI) 2):
{
result0 = ((Tctypekind529007) 14);
}
break;
case ((NI) 4):
{
result0 = ((Tctypekind529007) 6);
}
break;
case ((NI) 8):
{
result0 = ((Tctypekind529007) 7);
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
break;
}
}
LA10: ;
}
break;
case ((Ttypekind292244) 20):
{
/* recurse into the first son */
result0 = maptype_533394_839829468((*typ0).sons->data[((NI) 0)]);
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 23):
case ((Ttypekind292244) 22):
{
/* pointer-like kinds: classify by the skipped base type */
Ttype292840* base0;
Ttype292840* LOC24;
LOC24 = (Ttype292840*)0;
LOC24 = lastson_295377_850551059(typ0);
base0 = skiptypes_296099_850551059(LOC24, IL64(211106232576256));
switch ((*base0).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 48):
{
result0 = ((Tctypekind529007) 18);
}
break;
default:
{
result0 = ((Tctypekind529007) 20);
}
break;
}
}
break;
case ((Ttypekind292244) 26):
{
result0 = ((Tctypekind529007) 20);
}
break;
case ((Ttypekind292244) 24):
{
result0 = ((Tctypekind529007) 22);
}
break;
case ((Ttypekind292244) 25):
{
{
/* proc type: calling convention 8 (closure, presumably) gets its own kind */
if (!!(((*typ0).callconv == ((Tcallingconvention292002) 8)))) goto LA32;
result0 = ((Tctypekind529007) 23);
}
goto LA30;
LA32: ;
{
result0 = ((Tctypekind529007) 19);
}
LA30: ;
}
break;
case ((Ttypekind292244) 28):
{
result0 = ((Tctypekind529007) 21);
}
break;
case ((Ttypekind292244) 29):
{
result0 = ((Tctypekind529007) 24);
}
break;
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
{
/* numeric kinds map linearly: kind ordinal - 31 + 3 */
result0 = ((Tctypekind529007) ((NI)(((NI) ((NI)(((NI) ((*typ0).kind)) - ((NI) 31)))) + ((NI) 3))));
}
break;
case ((Ttypekind292244) 59):
{
{
/* resolved late-bound type: recurse into last son when n is set */
Ttype292840* LOC43;
if (!!(((*typ0).n == NIM_NIL))) goto LA41;
LOC43 = (Ttype292840*)0;
LOC43 = lastson_295377_850551059(typ0);
result0 = maptype_533394_839829468(LOC43);
}
goto LA39;
LA41: ;
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
LA39: ;
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
break;
}
return result0;
}
/* True when the type has a symbol whose flags carry bit 27 (the
 * importcpp marker, presumably — ordinal is opaque here). */
N_NIMCALL(NIM_BOOL, isimportedcpptype_533478_839829468)(Ttype292840* t0) {
  if ((*t0).sym == NIM_NIL) {
    return NIM_FALSE;
  }
  return (((*(*t0).sym).flags & (1U << ((NU)(((Tsymflag292184) 27)) & 31U))) != 0);
}
/* A type needs a non-trivial (GC-aware) assignment exactly when it
 * contains a garbage-collected reference. */
N_NIMCALL(NIM_BOOL, needscomplexassignment_533511_839829468)(Ttype292840* typ0) {
  return containsgarbagecollectedref_320117_3876443242(typ0);
}
/* An object type (kind 17) lacks the hidden type-header field when it is
 * either (a) flagged with type-flag bit 2 and has no base type
 * (sons[0] == nil), or (b) a "pure" object.  Non-object kinds always
 * return false.  Short-circuit order matches the original. */
static N_INLINE(NIM_BOOL, isobjlackingtypefield_533515_839829468)(Ttype292840* typ0) {
  NIM_BOOL flagged_no_base;
  if (!((*typ0).kind == ((Ttypekind292244) 17))) {
    return NIM_FALSE;
  }
  flagged_no_base = (((*typ0).flags & (1U << ((NU)(((Ttypeflag292431) 2)) & 31U))) != 0);
  if (flagged_no_base) {
    flagged_no_base = ((*typ0).sons->data[((NI) 0)] == NIM_NIL);
  }
  if (flagged_no_base) {
    return NIM_TRUE;
  }
  return ispureobject_320138_3876443242(typ0);
}
/* Decide whether a proc's return type cannot be returned by value from C
 * and must be turned into a hidden result parameter instead.  nil return
 * types are trivially "invalid" (void).  For C kind 17 (array-like),
 * validity depends on the skipped type's kind being in the bitmask
 * 14680064.  For C kind 19 (struct-like), imported C++ types are always
 * valid, otherwise the type is invalid when it needs a complex (GC-aware)
 * assignment or is an object that still carries a type-header field. */
N_NIMCALL(NIM_BOOL, isinvalidreturntype_533550_839829468)(Ttype292840* rettype0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
if (!(rettype0 == NIM_NIL)) goto LA3;
result0 = NIM_TRUE;
}
goto LA1;
LA3: ;
{
Tctypekind529007 LOC6;
LOC6 = (Tctypekind529007)0;
LOC6 = maptype_533394_839829468(rettype0);
switch (LOC6) {
case ((Tctypekind529007) 17):
{
/* array-like: valid only for kinds inside bitmask 14680064 */
Ttype292840* LOC8;
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059(rettype0, IL64(211106232576256));
result0 = !(((14680064 &((NU64)1<<((NU)((*LOC8).kind)&63U)))!=0));
}
break;
case ((Tctypekind529007) 19):
{
Ttype292840* t0;
NIM_BOOL LOC16;
NIM_BOOL LOC18;
NIM_BOOL LOC20;
t0 = skiptypes_296099_850551059(rettype0, IL64(211106232576256));
{
/* imported C++ types can be returned by value directly */
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = isimportedcpptype_533478_839829468(rettype0);
if (LOC12) goto LA13;
LOC12 = isimportedcpptype_533478_839829468(t0);
LA13: ;
if (!LOC12) goto LA14;
result0 = NIM_FALSE;
goto BeforeRet;
}
LA14: ;
/* invalid when GC-aware assignment is needed, or when the object
 * still has a type-header field */
LOC16 = (NIM_BOOL)0;
LOC16 = needscomplexassignment_533511_839829468(t0);
if (LOC16) goto LA17;
LOC18 = (NIM_BOOL)0;
LOC18 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC18)) goto LA19;
LOC20 = (NIM_BOOL)0;
LOC20 = isobjlackingtypefield_533515_839829468(t0);
LOC18 = !(LOC20);
LA19: ;
LOC16 = LOC18;
LA17: ;
result0 = LOC16;
}
break;
default:
{
result0 = NIM_FALSE;
}
break;
}
}
LA1: ;
}BeforeRet: ;
return result0;
}
/* Rope with the mangled name of the type's symbol, or the fallback
 * literal T839829468_28 (formatted with no arguments) for anonymous
 * types. */
N_NIMCALL(Ropeobj178006*, typename_533292_839829468)(Ttype292840* typ0) {
  if (!((*typ0).sym == NIM_NIL)) {
    NimStringDesc* mangled = mangle_528847_2036603609((*(*(*typ0).sym).name).s);
    return rope_178277_2381377266(mangled);
  } else {
    TY533289 noargs;
    memset((void*)noargs, 0, sizeof(noargs));
    return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_28), noargs, 0);
  }
}
/* Return (and cache) the C name rope for a type.  Symbols carrying either
 * of the flag bits in mask 96 use the symbol's own pre-assigned loc.r;
 * otherwise the name is built once from typename & the type id and cached
 * in typ.loc.r.  A nil result at the end is a compiler bug and raises an
 * internal error naming the type kind. */
N_NIMCALL(Ropeobj178006*, gettypename_533313_839829468)(Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((*typ0).sym == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = !(((96 & (*(*typ0).sym).flags) == 0));
LA4: ;
if (!LOC3) goto LA5;
result0 = (*(*typ0).sym).loc.r;
}
goto LA1;
LA5: ;
{
{
/* first request for this type: build "name + id" and cache it */
Ropeobj178006* LOC12;
Ropeobj178006* LOC13;
if (!((*typ0).loc.r == NIM_NIL)) goto LA10;
LOC12 = (Ropeobj178006*)0;
LOC12 = typename_533292_839829468(typ0);
LOC13 = (Ropeobj178006*)0;
LOC13 = rope_178401_2381377266(((NI64) ((*typ0).Sup.id)));
asgnRefNoCycle((void**) (&(*typ0).loc.r), HEX26_178418_2381377266(LOC12, LOC13));
}
LA10: ;
result0 = (*typ0).loc.r;
}
LA1: ;
{
/* should be unreachable: a type must always yield a name */
NimStringDesc* LOC18;
if (!(result0 == NIM_NIL)) goto LA16;
LOC18 = (NimStringDesc*)0;
LOC18 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC18, ((NimStringDesc*) &T839829468_29));
appendString(LOC18, reprEnum((NI)(*typ0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC18);
}
LA16: ;
return result0;
}
/* For a non-magic type whose symbol carries flag bit 5 (imported,
 * presumably), use its proper C name; otherwise emit the given literal
 * C type spelling as a rope. */
N_NIMCALL(Ropeobj178006*, typenameorliteral_533898_839829468)(Ttype292840* t0, NimStringDesc* literal0) {
  NIM_BOOL use_name = !(((*t0).sym == NIM_NIL));
  if (use_name) {
    use_name = (((*(*t0).sym).flags & (1U << ((NU)(((Tsymflag292184) 5)) & 31U))) != 0);
  }
  if (use_name) {
    use_name = ((*(*t0).sym).magic == ((Tmagic292524) 0));
  }
  if (use_name) {
    return gettypename_533313_839829468(t0);
  }
  return rope_178277_2381377266(literal0);
}
/* Return the C type name rope for "simple" Nim types that map directly to
 * a builtin/literal C spelling, recursing through transparent wrapper
 * kinds.  Returns nil for compound types that need a full declaration
 * (handled elsewhere by gettypedescaux). */
N_NIMCALL(Ropeobj178006*, getsimpletypedesc_533936_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
switch ((*typ0).kind) {
case ((Ttypekind292244) 26):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_30));
}
break;
case ((Ttypekind292244) 28):
{
/* the string kind additionally pulls in a runtime symbol via cgsym;
 * the cgsym result itself is intentionally discarded */
Ropeobj178006* LOC3;
LOC3 = (Ropeobj178006*)0;
LOC3 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_31));
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_32));
}
break;
case ((Ttypekind292244) 29):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_33));
}
break;
case ((Ttypekind292244) 1):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_34));
}
break;
case ((Ttypekind292244) 2):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_35));
}
break;
case ((Ttypekind292244) 5):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_18));
}
break;
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
{
/* numeric kinds index a static table of C type spellings */
result0 = typenameorliteral_533898_839829468(typ0, Numericaltypetostr_533941_839829468[((*typ0).kind)- 31]);
}
break;
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 15):
{
/* transparent wrappers: use the first son's description */
result0 = getsimpletypedesc_533936_839829468(m0, (*typ0).sons->data[((NI) 0)]);
}
break;
case ((Ttypekind292244) 59):
{
{
/* late-bound type: must be resolved (n != nil) by now */
Ttype292840* LOC15;
if (!!(((*typ0).n == NIM_NIL))) goto LA13;
LOC15 = (Ttype292840*)0;
LOC15 = lastson_295377_850551059(typ0);
result0 = getsimpletypedesc_533936_839829468(m0, LOC15);
}
goto LA11;
LA13: ;
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_50));
}
LA11: ;
}
break;
case ((Ttypekind292244) 11):
{
Ttype292840* LOC18;
LOC18 = (Ttype292840*)0;
LOC18 = lastson_295377_850551059(typ0);
result0 = getsimpletypedesc_533936_839829468(m0, LOC18);
}
break;
default:
{
/* compound type: no simple C spelling exists */
result0 = NIM_NIL;
}
break;
}
return result0;
}
/* Look up the cached C name rope for a type in an id table; nil when the
 * type has not been emitted yet. */
N_NIMCALL(Ropeobj178006*, cachegettype_533593_839829468)(Tidtable292850 tab0, Ttype292840* key0) {
  TNimObject* cached = idtableget_299086_2984716966(tab0, &key0->Sup);
  return ((Ropeobj178006*) (cached));
}
/* Quick type-name lookup: nil types get the void spelling
 * (T839829468_26), otherwise try the simple-type mapping and fall back to
 * the module's type cache.  May still return nil for uncached compound
 * types. */
N_NIMCALL(Ropeobj178006*, gettypepre_533972_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
  Ropeobj178006* name;
  if (typ0 == NIM_NIL) {
    return rope_178277_2381377266(((NimStringDesc*) &T839829468_26));
  }
  name = getsimpletypedesc_533936_839829468(m0, typ0);
  if (name == NIM_NIL) {
    name = cachegettype_533593_839829468((*m0).typecache, typ0);
  }
  return name;
}
/* True when the type has a symbol whose flags carry bit 5 (the importc
 * marker, presumably — ordinal is opaque here). */
N_NIMCALL(NIM_BOOL, isimportedtype_533451_839829468)(Ttype292840* t0) {
  if ((*t0).sym == NIM_NIL) {
    return NIM_FALSE;
  }
  return (((*(*t0).sym).flags & (1U << ((NU)(((Tsymflag292184) 5)) & 31U))) != 0);
}
/* Format string for forward-declaring a struct: C++ mode (command 2 or a
 * module flagged with bit 27) uses T839829468_54, plain C uses
 * T839829468_55. */
N_NIMCALL(NimStringDesc*, getforwardstructformat_534015_839829468)(Tcgen529027* m0) {
  NIM_BOOL cpp_mode = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
  if (!cpp_mode) {
    cpp_mode = (((*(*m0).module).flags & (1U << ((NU)(((Tsymflag292184) 27)) & 31U))) != 0);
  }
  if (cpp_mode) {
    return copyString(((NimStringDesc*) &T839829468_54));
  }
  return copyString(((NimStringDesc*) &T839829468_55));
}
/* "union" vs "struct" keyword rope: type-flag bit 1 selects the union
 * spelling (T839829468_56), everything else the struct spelling
 * (T839829468_57). */
N_NIMCALL(Ropeobj178006*, structorunion_534001_839829468)(Ttype292840* t0) {
  NIM_BOOL is_union = (((*t0).flags & (1U << ((NU)(((Ttypeflag292431) 1)) & 31U))) != 0);
  return rope_178277_2381377266(is_union
    ? ((NimStringDesc*) &T839829468_56)
    : ((NimStringDesc*) &T839829468_57));
}
/* Return the C name of a type, emitting a forward declaration the first
 * time an object/tuple/sequence-like kind (24/18/17) is seen.  The name
 * is looked up in the forward-decl cache first, then the simple-type
 * path; a cache miss writes the forward "struct/union NAME;" line into
 * file section 2 (for non-imported types) and records the name in
 * forwtypecache.  Any other type kind here is a compiler bug. */
N_NIMCALL(Ropeobj178006*, gettypeforward_534039_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
{ result0 = (Ropeobj178006*)0;
result0 = cachegettype_533593_839829468((*m0).forwtypecache, typ0);
{
if (!!((result0 == NIM_NIL))) goto LA3;
goto BeforeRet;
}
LA3: ;
result0 = gettypepre_533972_839829468(m0, typ0);
{
if (!!((result0 == NIM_NIL))) goto LA7;
goto BeforeRet;
}
LA7: ;
switch ((*typ0).kind) {
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 18):
case ((Ttypekind292244) 17):
{
Tidobj199004* LOC17;
TNimObject* LOC18;
result0 = gettypename_533313_839829468(typ0);
{
/* imported types already exist in a header; only emit a forward
 * declaration for types we generate ourselves */
NIM_BOOL LOC12;
NimStringDesc* LOC15;
TY532811 LOC16;
LOC12 = (NIM_BOOL)0;
LOC12 = isimportedtype_533451_839829468(typ0);
if (!!(LOC12)) goto LA13;
LOC15 = (NimStringDesc*)0;
LOC15 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = structorunion_534001_839829468(typ0);
LOC16[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC15, LOC16, 2);
}
LA13: ;
/* remember that the forward declaration has been emitted */
LOC17 = (Tidobj199004*)0;
LOC17 = &typ0->Sup;
LOC18 = (TNimObject*)0;
LOC18 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC17, LOC18);
}
break;
default:
{
/* unreachable for well-formed input: report the offending kind */
NimStringDesc* LOC20;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 16);
appendString(LOC20, ((NimStringDesc*) &T839829468_58));
appendString(LOC20, reprEnum((NI)(*typ0).kind, (&NTI292244)));
appendChar(LOC20, 41);
internalerror_196113_155036129(LOC20);
}
break;
}
}BeforeRet: ;
return result0;
}
/* Push typ0 onto the module's pending type stack.  Standard Nim
 * seq-append expansion: grow the backing seq, store the ref through the
 * GC write barrier, then bump the length.  Statement order matters. */
N_NIMCALL(void, pushtype_533958_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
(*m0).typestack = (Ttypeseq292836*) incrSeqV2(&((*m0).typestack)->Sup, sizeof(Ttype292840*));
asgnRefNoCycle((void**) (&(*m0).typestack->data[(*m0).typestack->Sup.len]), typ0);
++(*m0).typestack->Sup.len;
}
/* "Weak" type description: for object/tuple kinds (17/18) and sequences
 * (24) only a forward declaration is required, so emit that and defer the
 * full declaration by pushing the unique type onto the type stack.
 * Imported C++ types accessed through kind-11 wrappers, and every other
 * kind, fall back to the full gettypedescaux declaration. */
N_NIMCALL(Ropeobj178006*, gettypedescweak_534079_839829468)(Tcgen529027* m0, Ttype292840* t0, Intset268030* check0) {
Ropeobj178006* result0;
Ttype292840* etb0;
result0 = (Ropeobj178006*)0;
etb0 = skiptypes_296099_850551059(t0, IL64(211106232576256));
switch ((*etb0).kind) {
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
/* imported C++ types through a kind-11 wrapper need the full path */
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
LOC4 = isimportedcpptype_533478_839829468(etb0);
if (!(LOC4)) goto LA5;
LOC4 = ((*t0).kind == ((Ttypekind292244) 11));
LA5: ;
if (!LOC4) goto LA6;
result0 = gettypedescaux_533505_839829468(m0, t0, check0);
}
goto LA2;
LA6: ;
{
/* otherwise a forward declaration suffices; defer the full decl */
Ttype292840* x0;
x0 = getuniquetype_528640_2036603609(etb0);
result0 = gettypeforward_534039_839829468(m0, x0);
pushtype_533958_839829468(m0, x0);
}
LA2: ;
}
break;
case ((Ttypekind292244) 24):
{
/* sequences: forward-declare and append the suffix T839829468_53
 * (a pointer spelling, presumably) */
Ttype292840* x0;
Ropeobj178006* LOC10;
x0 = getuniquetype_528640_2036603609(etb0);
LOC10 = (Ropeobj178006*)0;
LOC10 = gettypeforward_534039_839829468(m0, x0);
result0 = HEX26_178447_2381377266(LOC10, ((NimStringDesc*) &T839829468_53));
pushtype_533958_839829468(m0, x0);
}
break;
default:
{
result0 = gettypedescaux_533505_839829468(m0, t0, check0);
}
break;
}
return result0;
}
/* Number of sons of an AST node; a nil sons seq counts as empty.  (The
 * original's `!sons == 0` comparison and the inner ternary both reduce to
 * this plain nil check.) */
static N_INLINE(NI, len_293081_850551059)(Tnode292802* n0) {
  if ((*n0).kindU.S6.sons == 0) {
    return ((NI) 0);
  }
  return (*n0).kindU.S6.sons->Sup.len;
}
/* Format the args through the code-generator rope formatter and append
 * the result to *c0. */
N_NIMCALL(void, appcg_532632_839829468)(Tcgen529027* m0, Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
  Ropeobj178006* formatted = ropecg_532407_839829468(m0, frmt0, args0, args0Len0);
  add_178482_2381377266(c0, formatted);
}
/* Scan a `'*...N` slot in an importcpp pattern string.  On entry *cursor0
 * points at the quote; the scanner skips it, counts consecutive '*'
 * characters into *outstars0, and expects a single decimal digit naming
 * the generic-parameter index (*outidx0).  Returns true and advances past
 * the digit on success; false (cursor left after the stars) otherwise. */
N_NIMCALL(NIM_BOOL, scancppgenericslot_534827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0) {
  NI star_start;
  NU8 ch;
  (*cursor0) += ((NI) 1);
  star_start = (*cursor0);
  while ((NU8)(pat0->data[(*cursor0)]) == (NU8)(42)) {
    (*cursor0) += ((NI) 1);
  }
  ch = (NU8)(pat0->data[(*cursor0)]);
  if (ch >= ((NU8)(48)) && ch <= ((NU8)(57))) {
    (*outidx0) = ((NI) ((NI)(((NI) (ch)) - ((NI) 48))));
    (*outstars0) = (NI)((*cursor0) - star_start);
    (*cursor0) += ((NI) 1);
    return NIM_TRUE;
  }
  return NIM_FALSE;
}
/* Resolve a `*...N` importcpp slot: start from son idx0 of typ0 and
 * dereference `stars0` levels — a kind-11 wrapper steps to its second
 * son, anything else to its element type.  Steps are skipped once the
 * current type is nil or has no sons.  An out-of-range idx0 is an
 * internal error (T839829468_81). */
N_NIMCALL(Ttype292840*, resolvestarsincpptype_534891_839829468)(Ttype292840* typ0, NI idx0, NI stars0) {
  Ttype292840* cur;
  NI step;
  if (len_295339_850551059(typ0) <= idx0) {
    internalerror_196113_155036129(((NimStringDesc*) &T839829468_81));
  }
  cur = (*typ0).sons->data[idx0];
  for (step = ((NI) 1); step <= stars0; step += ((NI) 1)) {
    NIM_BOOL can_descend = !((cur == NIM_NIL));
    if (can_descend) {
      can_descend = (((NI) 0) < len_295339_850551059(cur));
    }
    if (can_descend) {
      if ((*cur).kind == ((Ttypekind292244) 11)) {
        cur = (*cur).sons->data[((NI) 1)];
      } else {
        cur = elemtype_320394_3876443242(cur);
      }
    }
  }
  return cur;
}
/* Mangle a record-field identifier for C.  If the name is a C keyword,
 * its first character is upper-cased in place to dodge the collision. */
N_NIMCALL(NimStringDesc*, manglefield_532973_839829468)(Tident199010* name0) {
  NimStringDesc* mangled = mangle_528847_2036603609((*name0).s);
  if (iskeyword_532960_839829468(name0)) {
    mangled->data[((NI) 0)] = nsuToUpperAsciiChar(mangled->data[((NI) 0)]);
  }
  return mangled;
}
/* C name for a record field.  When the enclosing record's symbol carries
 * either flag bit in mask 96 (imported, presumably), the field keeps its
 * pre-assigned loc.r; otherwise the field identifier is mangled.  A nil
 * result is a compiler bug and raises an internal error (T839829468_96)
 * at the field's source position. */
N_NIMCALL(Ropeobj178006*, manglerecfieldname_534361_839829468)(Tsym292834* field0, Ttype292840* rectype0) {
  Ropeobj178006* fieldname;
  NIM_BOOL use_loc = !(((*rectype0).sym == NIM_NIL));
  if (use_loc) {
    use_loc = !(((96 & (*(*rectype0).sym).flags) == 0));
  }
  if (use_loc) {
    fieldname = (*field0).loc.r;
  } else {
    NimStringDesc* mangled = manglefield_532973_839829468((*field0).name);
    fieldname = rope_178277_2381377266(mangled);
  }
  if (fieldname == NIM_NIL) {
    internalerror_196100_155036129((*field0).info, ((NimStringDesc*) &T839829468_96));
  }
  return fieldname;
}
/* Recursively emit the C field declarations for a record AST node.
 * Node kind 138 is a record list (emit each son); kind 139 is a record
 * case (variant): the discriminator is emitted first, then each branch
 * becomes an anonymous struct inside a union named after the
 * discriminator with a "U" suffix; kind 3 is a single field symbol:
 * mangle its name, fill its loc with the access expression, and emit a
 * declaration whose exact form depends on the field type (unchecked
 * array, sequence, bit-field, or plain).  accessexpr0 is the dotted
 * access path built up through variant nesting (nil at top level). */
N_NIMCALL(Ropeobj178006*, genrecordfieldsaux_534421_839829468)(Tcgen529027* m0, Tnode292802* n0, Ropeobj178006* accessexpr0, Ttype292840* rectype0, Intset268030* check0) {
Ropeobj178006* result0;
Ropeobj178006* ae0;
Ropeobj178006* uname0;
Ropeobj178006* sname0;
Ropeobj178006* a0;
Tnode292802* k0;
Tsym292834* field0;
{ result0 = (Ropeobj178006*)0;
ae0 = (Ropeobj178006*)0;
uname0 = (Ropeobj178006*)0;
sname0 = (Ropeobj178006*)0;
a0 = (Ropeobj178006*)0;
k0 = (Tnode292802*)0;
field0 = (Tsym292834*)0;
result0 = NIM_NIL;
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
{
/* record list: concatenate the declarations of every son */
{
NI i_534447_839829468;
NI HEX3Atmp_534620_839829468;
NI LOC3;
NI res_534623_839829468;
i_534447_839829468 = (NI)0;
HEX3Atmp_534620_839829468 = (NI)0;
LOC3 = (NI)0;
LOC3 = sonslen_295351_850551059(n0);
HEX3Atmp_534620_839829468 = (NI)(LOC3 - ((NI) 1));
res_534623_839829468 = ((NI) 0);
{
while (1) {
Ropeobj178006* LOC6;
if (!(res_534623_839829468 <= HEX3Atmp_534620_839829468)) goto LA5;
i_534447_839829468 = res_534623_839829468;
LOC6 = (Ropeobj178006*)0;
LOC6 = genrecordfieldsaux_534421_839829468(m0, (*n0).kindU.S6.sons->data[i_534447_839829468], accessexpr0, rectype0, check0);
add_178482_2381377266(&result0, LOC6);
res_534623_839829468 += ((NI) 1);
} LA5: ;
}
}
}
break;
case ((Tnodekind292020) 139):
{
/* record case (variant part) */
Ropeobj178006* LOC12;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
Ropeobj178006* unionbody0;
{
/* the first son must be the discriminator field symbol */
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)))) goto LA10;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_89));
}
LA10: ;
/* emit the discriminator field itself */
LOC12 = (Ropeobj178006*)0;
LOC12 = genrecordfieldsaux_534421_839829468(m0, (*n0).kindU.S6.sons->data[((NI) 0)], accessexpr0, rectype0, check0);
add_178482_2381377266(&result0, LOC12);
/* the union is named after the discriminator plus a 'U' (85) suffix */
LOC13 = (NimStringDesc*)0;
LOC14 = (NimStringDesc*)0;
LOC14 = mangle_528847_2036603609((*(*(*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s);
LOC13 = rawNewString(LOC14->Sup.len + 1);
appendString(LOC13, LOC14);
appendChar(LOC13, 85);
uname0 = rope_178277_2381377266(LOC13);
{
/* extend the access path with the union member */
TY532811 LOC19;
if (!!((accessexpr0 == NIM_NIL))) goto LA17;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = accessexpr0;
LOC19[1] = uname0;
ae0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC19, 2);
}
goto LA15;
LA17: ;
{
ae0 = uname0;
}
LA15: ;
unionbody0 = NIM_NIL;
{
/* walk the branches (sons 1..len-1) */
NI i_534491_839829468;
NI HEX3Atmp_534629_839829468;
NI LOC22;
NI res_534632_839829468;
i_534491_839829468 = (NI)0;
HEX3Atmp_534629_839829468 = (NI)0;
LOC22 = (NI)0;
LOC22 = sonslen_295351_850551059(n0);
HEX3Atmp_534629_839829468 = (NI)(LOC22 - ((NI) 1));
res_534632_839829468 = ((NI) 1);
{
while (1) {
if (!(res_534632_839829468 <= HEX3Atmp_534629_839829468)) goto LA24;
i_534491_839829468 = res_534632_839829468;
switch ((*(*n0).kindU.S6.sons->data[i_534491_839829468]).kind) {
case ((Tnodekind292020) 85):
case ((Tnodekind292020) 88):
{
/* of/else branch: the branch body is the last son */
k0 = lastson_295364_850551059((*n0).kindU.S6.sons->data[i_534491_839829468]);
{
/* multi-field branch: wrap its fields in a named struct member */
Ropeobj178006* LOC30;
TY532811 LOC31;
Ropeobj178006* LOC32;
if (!!(((*k0).kind == ((Tnodekind292020) 3)))) goto LA28;
LOC30 = (Ropeobj178006*)0;
LOC30 = rope_178401_2381377266(((NI64) (i_534491_839829468)));
sname0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_91), LOC30);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = ae0;
LOC31[1] = sname0;
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC31, 2);
a0 = genrecordfieldsaux_534421_839829468(m0, k0, LOC32, rectype0, check0);
{
TY178507 LOC37;
if (!!((a0 == NIM_NIL))) goto LA35;
add_178487_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_92));
add_178482_2381377266(&unionbody0, a0);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = sname0;
addf_179205_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_93), LOC37, 1);
}
LA35: ;
}
goto LA26;
LA28: ;
{
/* single-field branch: emit it directly into the union body */
Ropeobj178006* LOC39;
LOC39 = (Ropeobj178006*)0;
LOC39 = genrecordfieldsaux_534421_839829468(m0, k0, ae0, rectype0, check0);
add_178482_2381377266(&unionbody0, LOC39);
}
LA26: ;
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_94));
}
break;
}
res_534632_839829468 += ((NI) 1);
} LA24: ;
}
}
{
/* close the union if any branch produced fields */
TY532811 LOC45;
if (!!((unionbody0 == NIM_NIL))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = unionbody0;
LOC45[1] = uname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_95), LOC45, 2);
}
LA43: ;
}
break;
case ((Tnodekind292020) 3):
{
/* a single field symbol */
field0 = (*n0).kindU.S4.sym;
{
/* fields of kind-62 type (void-like, presumably) are dropped */
if (!((*(*field0).typ).kind == ((Ttypekind292244) 62))) goto LA49;
goto BeforeRet;
}
LA49: ;
sname0 = manglerecfieldname_534361_839829468(field0, rectype0);
{
/* build the full access expression for this field */
TY532811 LOC55;
if (!!((accessexpr0 == NIM_NIL))) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = accessexpr0;
LOC55[1] = sname0;
ae0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC55, 2);
}
goto LA51;
LA53: ;
{
ae0 = sname0;
}
LA51: ;
fillloc_532282_839829468((&(*field0).loc), ((Tlockind292808) 5), (*field0).typ, ae0, ((Tstorageloc292812) 0));
{
/* imported C++ records declare their own fields; skip emission */
NIM_BOOL LOC59;
Ttype292840* fieldtype0;
LOC59 = (NIM_BOOL)0;
LOC59 = isimportedcpptype_533478_839829468(rectype0);
if (!!(LOC59)) goto LA60;
fieldtype0 = skiptypes_296099_850551059((*field0).loc.t, IL64(211106232576256));
{
/* unchecked array (kind 16 with flag bit 0): flexible-array form */
NIM_BOOL LOC64;
TY532811 LOC68;
Ttype292840* LOC69;
LOC64 = (NIM_BOOL)0;
LOC64 = ((*fieldtype0).kind == ((Ttypekind292244) 16));
if (!(LOC64)) goto LA65;
LOC64 = (((*fieldtype0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0);
LA65: ;
if (!LOC64) goto LA66;
memset((void*)LOC68, 0, sizeof(LOC68));
LOC69 = (Ttype292840*)0;
LOC69 = elemtype_320394_3876443242(fieldtype0);
LOC68[0] = gettypedescaux_533505_839829468(m0, LOC69, check0);
LOC68[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_97), LOC68, 2);
}
goto LA62;
LA66: ;
{
/* sequence field: weak (forward) type description is enough */
TY532811 LOC73;
if (!((*fieldtype0).kind == ((Ttypekind292244) 24))) goto LA71;
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = gettypedescweak_534079_839829468(m0, (*field0).loc.t, check0);
LOC73[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC73, 2);
}
goto LA62;
LA71: ;
{
/* bit-field: emit "type name : bits" */
TY535238 LOC77;
NimStringDesc* LOC78;
if (!!(((*field0).kindU.S4.bitsize == ((NI) 0)))) goto LA75;
memset((void*)LOC77, 0, sizeof(LOC77));
LOC77[0] = gettypedescaux_533505_839829468(m0, (*field0).loc.t, check0);
LOC77[1] = sname0;
LOC78 = (NimStringDesc*)0;
LOC78 = nimIntToStr((*field0).kindU.S4.bitsize);
LOC77[2] = rope_178277_2381377266(LOC78);
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_98), LOC77, 3);
}
goto LA62;
LA75: ;
{
/* plain field declaration */
TY532811 LOC80;
memset((void*)LOC80, 0, sizeof(LOC80));
LOC80[0] = gettypedescaux_533505_839829468(m0, (*field0).loc.t, check0);
LOC80[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC80, 2);
}
LA62: ;
}
LA60: ;
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_99));
}
break;
}
}BeforeRet: ;
return result0;
}
/* Emit the field declarations for the record body of typ0, starting with
 * no access-path prefix. */
N_NIMCALL(Ropeobj178006*, getrecordfields_534636_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0) {
  return genrecordfieldsaux_534421_839829468(m0, (*typ0).n, NIM_NIL, typ0, check0);
}
/* Build the full C struct/union declaration for a record type.  The
 * header is formatted through the active C compiler's record template
 * (Field18), optionally with a packing attribute (Field19) when
 * type-flag bit 21 is set.  For object types (kind 17) the base-type /
 * type-header member is emitted first; records with no base and no
 * fields get a dummy member so the struct is never empty.  The field
 * list from getrecordfields and the closing brace (T839829468_101 +
 * newline) finish the declaration. */
N_NIMCALL(Ropeobj178006*, getrecorddesc_534643_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0) {
Ropeobj178006* result0;
NIM_BOOL hasfield0;
Ropeobj178006* attribute0;
TY535238 LOC6;
Ropeobj178006* desc0;
NimStringDesc* LOC46;
result0 = (Ropeobj178006*)0;
hasfield0 = NIM_FALSE;
{
/* type-flag bit 21 requests the compiler-specific packing attribute */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 21))&31U)))!=0)) goto LA3;
attribute0 = rope_178277_2381377266(Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field19);
}
goto LA1;
LA3: ;
{
attribute0 = NIM_NIL;
}
LA1: ;
/* record header: struct/union keyword, name, optional attribute */
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = structorunion_534001_839829468(typ0);
LOC6[1] = name0;
LOC6[2] = attribute0;
result0 = ropecg_532407_839829468(m0, Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field18, LOC6, 3);
{
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA9;
{
/* object with no base type */
if (!((*typ0).sons->data[((NI) 0)] == NIM_NIL)) goto LA13;
{
/* "final"/pure objects (symbol flag 9 or type flag 2): no type
 * header, just open the body */
NIM_BOOL LOC17;
NIM_BOOL LOC18;
TY533289 LOC23;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = !(((*typ0).sym == NIM_NIL));
if (!(LOC18)) goto LA19;
LOC18 = (((*(*typ0).sym).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0);
LA19: ;
LOC17 = LOC18;
if (LOC17) goto LA20;
LOC17 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
LA20: ;
if (!LOC17) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_85), LOC23, 0);
}
goto LA15;
LA21: ;
{
/* root object: embed the hidden runtime type-header member */
TY532811 LOC25;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = name0;
LOC25[1] = attribute0;
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_86), LOC25, 2);
hasfield0 = NIM_TRUE;
}
LA15: ;
}
goto LA11;
LA13: ;
{
/* derived object, C++ mode (command 2 or importcpp module): use
 * C++ inheritance syntax for the base */
NIM_BOOL LOC27;
TY178507 LOC31;
Ttype292840* LOC32;
LOC27 = (NIM_BOOL)0;
LOC27 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC27) goto LA28;
LOC27 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA28: ;
if (!LOC27) goto LA29;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ttype292840*)0;
LOC32 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360));
LOC31[0] = gettypedescaux_533505_839829468(m0, LOC32, check0);
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_87), LOC31, 1);
hasfield0 = NIM_TRUE;
}
goto LA11;
LA29: ;
{
/* derived object, plain C: embed the base type as the first member */
TY178507 LOC34;
Ttype292840* LOC35;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC35 = (Ttype292840*)0;
LOC35 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360));
LOC34[0] = gettypedescaux_533505_839829468(m0, LOC35, check0);
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_88), LOC34, 1);
hasfield0 = NIM_TRUE;
}
LA11: ;
}
goto LA7;
LA9: ;
{
/* non-object record (tuple/union): just open the body */
TY178507 LOC37;
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = name0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_85), LOC37, 1);
}
LA7: ;
desc0 = getrecordfields_534636_839829468(m0, typ0, check0);
{
/* completely empty struct: add a dummy member (T839829468_100) so the
 * declaration stays legal C */
NIM_BOOL LOC40;
TY533289 LOC44;
LOC40 = (NIM_BOOL)0;
LOC40 = (desc0 == NIM_NIL);
if (!(LOC40)) goto LA41;
LOC40 = !(hasfield0);
LA41: ;
if (!LOC40) goto LA42;
memset((void*)LOC44, 0, sizeof(LOC44));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_100), LOC44, 0);
}
goto LA38;
LA42: ;
{
add_178482_2381377266(&result0, desc0);
}
LA38: ;
/* closing brace plus platform newline */
LOC46 = (NimStringDesc*)0;
LOC46 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC46, ((NimStringDesc*) &T839829468_101));
appendString(LOC46, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC46);
return result0;
}
/* Build the C struct declaration for a tuple type: header via format
 * T839829468_102, then one positionally-named member (format
 * T839829468_103) per son.  Empty tuples get a dummy body
 * (T839829468_104) so the struct is never empty.  Closed with
 * T839829468_101 plus the platform newline. */
N_NIMCALL(Ropeobj178006*, gettupledesc_534777_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0) {
Ropeobj178006* result0;
TY532811 LOC1;
Ropeobj178006* desc0;
NimStringDesc* LOC13;
result0 = (Ropeobj178006*)0;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = structorunion_534001_839829468(typ0);
LOC1[1] = name0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_102), LOC1, 2);
desc0 = NIM_NIL;
{
/* one member per tuple element, named by its index */
NI i_534799_839829468;
NI HEX3Atmp_534820_839829468;
NI LOC3;
NI res_534823_839829468;
i_534799_839829468 = (NI)0;
HEX3Atmp_534820_839829468 = (NI)0;
LOC3 = (NI)0;
LOC3 = sonslen_295327_850551059(typ0);
HEX3Atmp_534820_839829468 = (NI)(LOC3 - ((NI) 1));
res_534823_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC6;
if (!(res_534823_839829468 <= HEX3Atmp_534820_839829468)) goto LA5;
i_534799_839829468 = res_534823_839829468;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = gettypedescaux_533505_839829468(m0, (*typ0).sons->data[i_534799_839829468], check0);
LOC6[1] = rope_178401_2381377266(((NI64) (i_534799_839829468)));
addf_179205_2381377266(&desc0, ((NimStringDesc*) &T839829468_103), LOC6, 2);
res_534823_839829468 += ((NI) 1);
} LA5: ;
}
}
{
/* empty tuple: keep the struct legal with a dummy body */
NimStringDesc* LOC11;
if (!(desc0 == NIM_NIL)) goto LA9;
LOC11 = (NimStringDesc*)0;
LOC11 = rawNewString(tnl_176644_4151366050->Sup.len + 11);
appendString(LOC11, ((NimStringDesc*) &T839829468_104));
appendString(LOC11, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC11);
}
goto LA7;
LA9: ;
{
add_178482_2381377266(&result0, desc0);
}
LA7: ;
/* closing brace plus platform newline */
LOC13 = (NimStringDesc*)0;
LOC13 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC13, ((NimStringDesc*) &T839829468_101));
appendString(LOC13, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC13);
return result0;
}
/* Core recursive type-descriptor routine: returns (as a rope) the C type
 * name for Nim type `typ0`, emitting any typedef/struct declarations it
 * needs into module `m0`'s output sections and caching results in
 * m0.typecache / m0.forwtypecache.
 * `check0` holds the ids of types currently being generated; re-entering a
 * non-imported type already in that set is reported as an internal error
 * (would be infinite recursion). The id is removed from `check0` again just
 * before the final return.
 * The numeric `(Ttypekind292244) N` case labels correspond to Nim type
 * kinds; their symbolic names are not visible in this generated file, so
 * per-case comments below are inferred from the emitted code and hedged.
 * NOTE(review): Nim-compiler-generated C — labels, goto targets and exact
 * statement order are load-bearing; do not hand-edit the logic. */
N_NIMCALL(Ropeobj178006*, gettypedescaux_533505_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0) {
Ropeobj178006* result0;
Ttype292840* t_534942_839829468;
{ result0 = (Ropeobj178006*)0;
/* Resolve to the canonical/unique instance of the type; nil is a bug. */
t_534942_839829468 = getuniquetype_528640_2036603609(typ0);
{
if (!(t_534942_839829468 == NIM_NIL)) goto LA3;
internalerror_196113_155036129(((NimStringDesc*) &T839829468_27));
}
LA3: ;
{
/* If the type has a symbol, pull in any header it was imported from. */
if (!!(((*t_534942_839829468).sym == NIM_NIL))) goto LA7;
useheader_532369_839829468(m0, (*t_534942_839829468).sym);
}
LA7: ;
/* Fast path: primitive/already-cached types resolve without recursion. */
result0 = gettypepre_533972_839829468(m0, t_534942_839829468);
{
if (!!((result0 == NIM_NIL))) goto LA11;
goto BeforeRet;
}
LA11: ;
{
/* Recursion guard: containsorincl both tests membership and inserts.
 * Hitting an id already present is only legal for imported C++ types. */
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = containsorincl_268862_2627731572(check0, (*t_534942_839829468).Sup.id);
if (!LOC15) goto LA16;
{
NIM_BOOL LOC20;
NimStringDesc* LOC24;
NimStringDesc* LOC25;
LOC20 = (NIM_BOOL)0;
LOC20 = isimportedcpptype_533478_839829468(typ0);
if (LOC20) goto LA21;
LOC20 = isimportedcpptype_533478_839829468(t_534942_839829468);
LA21: ;
if (!!(LOC20)) goto LA22;
LOC24 = (NimStringDesc*)0;
LOC25 = (NimStringDesc*)0;
LOC25 = typetostring_320017_3876443242(typ0, ((Tprefereddesc320011) 0));
LOC24 = rawNewString(LOC25->Sup.len + 28);
appendString(LOC24, ((NimStringDesc*) &T839829468_51));
appendString(LOC24, LOC25);
internalerror_196113_155036129(LOC24);
}
LA22: ;
}
LA16: ;
switch ((*t_534942_839829468).kind) {
/* Kinds 21/22/23: pointer-like types (presumably ptr/ref/var — confirm
 * against Ttypekind enum). `star0` is the pointer-suffix string appended
 * to the element type's C name. */
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 23):
{
NimStringDesc* star0;
Ttype292840* et0;
Ttype292840* LOC38;
Ttype292840* etb0;
{
/* Kind-23 types without type-flag 18, when gcmd==2 or module flag 27 is
 * set, use the alternative suffix T839829468_52 instead of T839829468_53
 * (the latter is used as the plain "*" suffix throughout this file). */
NIM_BOOL LOC29;
NIM_BOOL LOC30;
NIM_BOOL LOC33;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((*t_534942_839829468).kind == ((Ttypekind292244) 23));
if (!(LOC30)) goto LA31;
LOC30 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA32;
LOC33 = (NIM_BOOL)0;
LOC33 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC33) goto LA34;
LOC33 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA34: ;
LOC29 = LOC33;
LA32: ;
if (!LOC29) goto LA35;
star0 = copyString(((NimStringDesc*) &T839829468_52));
}
goto LA27;
LA35: ;
{
star0 = copyString(((NimStringDesc*) &T839829468_53));
}
LA27: ;
LOC38 = (Ttype292840*)0;
LOC38 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
et0 = lastson_295377_850551059(LOC38);
etb0 = skiptypes_296099_850551059(et0, IL64(211106232576256));
{
/* If the element kind is in this bit set, unwrap one more level and turn
 * the suffix's first char into '*' (ASCII 42). */
if (!((IL64(281475110993936) &((NU64)1<<((NU)((*etb0).kind)&63U)))!=0)) goto LA41;
et0 = elemtype_320394_3876443242(etb0);
etb0 = skiptypes_296099_850551059(et0, IL64(211106232576256));
star0->data[((NI) 0)] = 42;
}
LA41: ;
switch ((*etb0).kind) {
/* Element kinds 17/18 (object/tuple — see the top-level cases below):
 * use only a forward declaration of the target so pointer cycles work,
 * cache the result, and queue the target for full generation. */
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC46;
Ropeobj178006* LOC50;
LOC46 = (NIM_BOOL)0;
LOC46 = isimportedcpptype_533478_839829468(etb0);
if (!(LOC46)) goto LA47;
LOC46 = ((*et0).kind == ((Ttypekind292244) 11));
LA47: ;
if (!LOC46) goto LA48;
LOC50 = (Ropeobj178006*)0;
LOC50 = gettypedescaux_533505_839829468(m0, et0, check0);
result0 = HEX26_178447_2381377266(LOC50, star0);
}
goto LA44;
LA48: ;
{
Ttype292840* x0;
Ropeobj178006* name0;
Tidobj199004* LOC52;
TNimObject* LOC53;
x0 = getuniquetype_528640_2036603609(etb0);
name0 = gettypeforward_534039_839829468(m0, x0);
result0 = HEX26_178447_2381377266(name0, star0);
LOC52 = (Tidobj199004*)0;
LOC52 = &t_534942_839829468->Sup;
LOC53 = (TNimObject*)0;
LOC53 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC52, LOC53);
pushtype_533958_839829468(m0, x0);
}
LA44: ;
}
break;
/* Element kind 24 (sequence-like, see top-level case 24): forward name
 * plus an extra level of indirection before the pointer suffix. */
case ((Ttypekind292244) 24):
{
Ttype292840* x0;
Ropeobj178006* name0;
Ropeobj178006* LOC55;
Tidobj199004* LOC56;
TNimObject* LOC57;
x0 = getuniquetype_528640_2036603609(etb0);
name0 = gettypeforward_534039_839829468(m0, x0);
LOC55 = (Ropeobj178006*)0;
LOC55 = HEX26_178447_2381377266(name0, ((NimStringDesc*) &T839829468_53));
result0 = HEX26_178447_2381377266(LOC55, star0);
LOC56 = (Tidobj199004*)0;
LOC56 = &t_534942_839829468->Sup;
LOC57 = (TNimObject*)0;
LOC57 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC56, LOC57);
pushtype_533958_839829468(m0, x0);
}
break;
/* Any other element kind: generate the element type fully, then suffix. */
default:
{
Ropeobj178006* LOC59;
Tidobj199004* LOC60;
TNimObject* LOC61;
LOC59 = (Ropeobj178006*)0;
LOC59 = gettypedescaux_533505_839829468(m0, et0, check0);
result0 = HEX26_178447_2381377266(LOC59, star0);
LOC60 = (Tidobj199004*)0;
LOC60 = &t_534942_839829468->Sup;
LOC61 = (TNimObject*)0;
LOC61 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC60, LOC61);
}
break;
}
}
break;
/* Kinds 27/48: element type via the weak-dependency path, plus pointer
 * suffix; result cached. */
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
Ropeobj178006* LOC63;
Tidobj199004* LOC64;
TNimObject* LOC65;
LOC63 = (Ropeobj178006*)0;
LOC63 = gettypedescweak_534079_839829468(m0, (*t_534942_839829468).sons->data[((NI) 0)], check0);
result0 = HEX26_178447_2381377266(LOC63, ((NimStringDesc*) &T839829468_53));
LOC64 = (Tidobj199004*)0;
LOC64 = &t_534942_839829468->Sup;
LOC65 = (TNimObject*)0;
LOC65 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC64, LOC65);
}
break;
/* Kinds 20/14: enum types (grounded by the registerenum/Enumdesc debug
 * machinery below). Kind 20 unwraps to its last son first. Emits a
 * typedef sized 1/2/4/8 bytes (signed 4-byte form when firstord < 0),
 * then registers name/value pairs with the debug-info table once. */
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 14):
{
Ttype292840* t0;
{
if (!((*t_534942_839829468).kind == ((Ttypekind292244) 20))) goto LA69;
t0 = lastson_295377_850551059(t_534942_839829468);
}
goto LA67;
LA69: ;
{
t0 = t_534942_839829468;
}
LA67: ;
result0 = cachegettype_533593_839829468((*m0).typecache, t0);
{
if (!(result0 == NIM_NIL)) goto LA74;
result0 = gettypename_533313_839829468(t0);
{
NIM_BOOL LOC78;
NIM_BOOL LOC80;
Tidobj199004* LOC84;
TNimObject* LOC85;
NI size0;
NU32 owner0;
LOC78 = (NIM_BOOL)0;
LOC78 = isimportedcpptype_533478_839829468(t0);
if (LOC78) goto LA79;
LOC80 = (NIM_BOOL)0;
LOC80 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0);
if (!(LOC80)) goto LA81;
LOC80 = ((*(*t0).sym).magic == ((Tmagic292524) 0));
LA81: ;
LOC78 = LOC80;
LA79: ;
if (!!(LOC78)) goto LA82;
LOC84 = (Tidobj199004*)0;
LOC84 = &t0->Sup;
LOC85 = (TNimObject*)0;
LOC85 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC84, LOC85);
size0 = (NI)0;
{
/* Negative first ordinal: force the signed 4-byte typedef form. */
NI64 LOC88;
TY178507 LOC91;
LOC88 = (NI64)0;
LOC88 = firstord_320001_3876443242(t0);
if (!(LOC88 < IL64(0))) goto LA89;
memset((void*)LOC91, 0, sizeof(LOC91));
LOC91[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC91, 1);
size0 = ((NI) 4);
}
goto LA86;
LA89: ;
{
/* Otherwise pick the typedef format by the type's byte size. */
NI64 LOC93;
LOC93 = (NI64)0;
LOC93 = getsize_320135_3876443242(t0);
size0 = ((NI) (LOC93));
switch (size0) {
case ((NI) 1):
{
TY178507 LOC95;
memset((void*)LOC95, 0, sizeof(LOC95));
LOC95[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_60), LOC95, 1);
}
break;
case ((NI) 2):
{
TY178507 LOC97;
memset((void*)LOC97, 0, sizeof(LOC97));
LOC97[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_61), LOC97, 1);
}
break;
case ((NI) 4):
{
TY178507 LOC99;
memset((void*)LOC99, 0, sizeof(LOC99));
LOC99[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC99, 1);
}
break;
case ((NI) 8):
{
TY178507 LOC101;
memset((void*)LOC101, 0, sizeof(LOC101));
LOC101[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_62), LOC101, 1);
}
break;
default:
{
internalerror_196100_155036129((*(*t0).sym).info, ((NimStringDesc*) &T839829468_63));
}
break;
}
}
LA86: ;
/* Register the enum's (name, position) pairs with the debug-info table,
 * keyed by symbol name, declaration line and owner hash — once only. */
owner0 = hashowner_532977_839829468((*t0).sym);
{
NIM_BOOL LOC105;
TY203017* vals0;
Enumdesc203007 LOC114;
LOC105 = (NIM_BOOL)0;
LOC105 = hasenum_203230_1926258066((&gdebuginfo_203470_1926258066), (*(*(*t0).sym).name).s, ((NI) ((*(*t0).sym).info.line)), owner0);
if (!!(LOC105)) goto LA106;
vals0 = (TY203017*) newSeq((&NTI203017), 0);
{
NI i_535144_839829468;
NI HEX3Atmp_535649_839829468;
NI LOC109;
NI res_535652_839829468;
i_535144_839829468 = (NI)0;
HEX3Atmp_535649_839829468 = (NI)0;
LOC109 = (NI)0;
LOC109 = len_293081_850551059((*t0).n);
HEX3Atmp_535649_839829468 = (NI)(LOC109 - ((NI) 1));
res_535652_839829468 = ((NI) 0);
{
while (1) {
Tsym292834* field0;
TY203018 LOC112;
NimStringDesc* LOC113;
if (!(res_535652_839829468 <= HEX3Atmp_535649_839829468)) goto LA111;
i_535144_839829468 = res_535652_839829468;
field0 = (*(*(*t0).n).kindU.S6.sons->data[i_535144_839829468]).kindU.S4.sym;
memset((void*)(&LOC112), 0, sizeof(LOC112));
LOC112.Field0 = copyString((*(*field0).name).s);
LOC112.Field1 = (*field0).position;
vals0 = (TY203017*) incrSeqV2(&(vals0)->Sup, sizeof(TY203018));
LOC113 = (NimStringDesc*)0;
LOC113 = vals0->data[vals0->Sup.len].Field0; vals0->data[vals0->Sup.len].Field0 = copyStringRC1(LOC112.Field0);
if (LOC113) nimGCunrefNoCycle(LOC113);
vals0->data[vals0->Sup.len].Field1 = LOC112.Field1;
++vals0->Sup.len;
res_535652_839829468 += ((NI) 1);
} LA111: ;
}
}
/* NOTE(review): duplicated memset below — a harmless codegen artifact,
 * the second call is redundant. */
memset((void*)(&LOC114), 0, sizeof(LOC114));
memset((void*)(&LOC114), 0, sizeof(LOC114));
LOC114.size = size0;
LOC114.owner = owner0;
LOC114.id = (*(*t0).sym).Sup.id;
LOC114.name = copyString((*(*(*t0).sym).name).s);
genericSeqAssign((&LOC114.values), vals0, (&NTI203017));
registerenum_203419_1926258066((&gdebuginfo_203470_1926258066), (&LOC114));
}
LA106: ;
}
LA82: ;
}
LA74: ;
}
break;
/* Kind 25: proc type (grounded by genprocparams below). Emits a function
 * pointer typedef — one format for ordinary calling conventions, another
 * for callconv 8 (closure-like, also special-cased in genprocparams). */
case ((Ttypekind292244) 25):
{
Tidobj199004* LOC116;
TNimObject* LOC117;
Ropeobj178006* rettype0;
Ropeobj178006* desc0;
result0 = gettypename_533313_839829468(t_534942_839829468);
LOC116 = (Tidobj199004*)0;
LOC116 = &t_534942_839829468->Sup;
LOC117 = (TNimObject*)0;
LOC117 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC116, LOC117);
rettype0 = (Ropeobj178006*)0;
desc0 = (Ropeobj178006*)0;
genprocparams_534115_839829468(m0, t_534942_839829468, &rettype0, &desc0, check0, NIM_TRUE, NIM_TRUE);
{
NIM_BOOL LOC120;
LOC120 = (NIM_BOOL)0;
LOC120 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC120)) goto LA121;
{
TY535235 LOC127;
if (!!(((*t_534942_839829468).callconv == ((Tcallingconvention292002) 8)))) goto LA125;
memset((void*)LOC127, 0, sizeof(LOC127));
LOC127[0] = rope_178277_2381377266(Callingconvtostr_533587_839829468[((*t_534942_839829468).callconv)- 0]);
LOC127[1] = rettype0;
LOC127[2] = result0;
LOC127[3] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC127, 4);
}
goto LA123;
LA125: ;
{
TY535238 LOC129;
memset((void*)LOC129, 0, sizeof(LOC129));
LOC129[0] = result0;
LOC129[1] = rettype0;
LOC129[2] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC129, 3);
}
LA123: ;
}
LA121: ;
}
break;
/* Kind 24: sequence-like type. Forward-declares the struct once (cached in
 * forwtypecache), caches "<name>*" as the user-visible C type, then — for
 * non-imported types whose element kind is not 3 (presumably void/empty) —
 * emits the payload struct into section 4. */
case ((Ttypekind292244) 24):
{
Tidobj199004* LOC144;
Ropeobj178006* LOC145;
TNimObject* LOC146;
result0 = cachegettype_533593_839829468((*m0).forwtypecache, t_534942_839829468);
{
Tidobj199004* LOC142;
TNimObject* LOC143;
if (!(result0 == NIM_NIL)) goto LA133;
result0 = gettypename_533313_839829468(t_534942_839829468);
{
NIM_BOOL LOC137;
NimStringDesc* LOC140;
TY532811 LOC141;
LOC137 = (NIM_BOOL)0;
LOC137 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC137)) goto LA138;
LOC140 = (NimStringDesc*)0;
LOC140 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC141, 0, sizeof(LOC141));
LOC141[0] = structorunion_534001_839829468(t_534942_839829468);
LOC141[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC140, LOC141, 2);
}
LA138: ;
LOC142 = (Tidobj199004*)0;
LOC142 = &t_534942_839829468->Sup;
LOC143 = (TNimObject*)0;
LOC143 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC142, LOC143);
}
LA133: ;
LOC144 = (Tidobj199004*)0;
LOC144 = &t_534942_839829468->Sup;
LOC145 = (Ropeobj178006*)0;
LOC145 = HEX26_178447_2381377266(result0, ((NimStringDesc*) &T839829468_53));
LOC146 = (TNimObject*)0;
LOC146 = &LOC145->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC144, LOC146);
{
NIM_BOOL LOC149;
LOC149 = (NIM_BOOL)0;
LOC149 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC149)) goto LA150;
{
Ttype292840* LOC154;
NimStringDesc* LOC157;
NimStringDesc* LOC158;
TY532811 LOC166;
LOC154 = (Ttype292840*)0;
LOC154 = skiptypes_296099_850551059((*t_534942_839829468).sons->data[((NI) 0)], IL64(211106232576256));
if (!!(((*LOC154).kind == ((Ttypekind292244) 3)))) goto LA155;
LOC157 = (NimStringDesc*)0;
LOC158 = (NimStringDesc*)0;
{
/* Same gcmd==2 / module-flag-27 switch as the pointer case above picks
 * between two struct-prefix strings (T839829468_76 vs _77). */
NIM_BOOL LOC161;
LOC161 = (NIM_BOOL)0;
LOC161 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC161) goto LA162;
LOC161 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA162: ;
if (!LOC161) goto LA163;
LOC158 = copyString(((NimStringDesc*) &T839829468_76));
}
goto LA159;
LA163: ;
{
LOC158 = copyString(((NimStringDesc*) &T839829468_77));
}
LA159: ;
LOC157 = rawNewString(LOC158->Sup.len + 31);
appendString(LOC157, LOC158);
appendString(LOC157, ((NimStringDesc*) &T839829468_78));
memset((void*)LOC166, 0, sizeof(LOC166));
LOC166[0] = gettypedescaux_533505_839829468(m0, (*t_534942_839829468).sons->data[((NI) 0)], check0);
LOC166[1] = result0;
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 4))- 0], LOC157, LOC166, 2);
}
goto LA152;
LA155: ;
{
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_79));
}
LA152: ;
}
LA150: ;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_53));
}
break;
/* Kinds 4/16: fixed-size array types. Length clamped to >= 1 (C forbids
 * zero-length arrays); emits "typedef <elem> <name>[<n>]"-style text via
 * format T839829468_80 for non-imported types. */
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
NI64 n0;
Tidobj199004* LOC173;
TNimObject* LOC174;
n0 = lengthord_320007_3876443242(t_534942_839829468);
{
if (!(n0 <= IL64(0))) goto LA171;
n0 = IL64(1);
}
LA171: ;
result0 = gettypename_533313_839829468(t_534942_839829468);
LOC173 = (Tidobj199004*)0;
LOC173 = &t_534942_839829468->Sup;
LOC174 = (TNimObject*)0;
LOC174 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC173, LOC174);
{
NIM_BOOL LOC177;
Ropeobj178006* foo0;
TY535238 LOC180;
LOC177 = (NIM_BOOL)0;
LOC177 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC177)) goto LA178;
foo0 = gettypedescaux_533505_839829468(m0, (*t_534942_839829468).sons->data[((NI) 1)], check0);
memset((void*)LOC180, 0, sizeof(LOC180));
LOC180[0] = foo0;
LOC180[1] = result0;
LOC180[2] = rope_178401_2381377266(n0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_80), LOC180, 3);
}
LA178: ;
}
break;
/* Kinds 17/18: object/tuple types (grounded by getrecorddesc /
 * gettupledesc calls below). Two paths:
 *  - imported C++ generic instantiation: scan the C++ name for '\''-marked
 *    generic slots and splice in the resolved Nim type arguments;
 *  - ordinary: forward-declare once, cache, then emit the full record or
 *    tuple descriptor into section 3. */
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC184;
Ropeobj178006* cppname0;
NI i0;
NI chunkstart0;
Ropeobj178006* LOC226;
LOC184 = (NIM_BOOL)0;
LOC184 = isimportedcpptype_533478_839829468(t_534942_839829468);
if (!(LOC184)) goto LA185;
LOC184 = ((*typ0).kind == ((Ttypekind292244) 11));
LA185: ;
if (!LOC184) goto LA186;
cppname0 = gettypename_533313_839829468(t_534942_839829468);
i0 = ((NI) 0);
chunkstart0 = ((NI) 0);
{
while (1) {
if (!(i0 < ((*cppname0).data ? (*cppname0).data->Sup.len : 0))) goto LA189;
{
NI chunkend0;
NI idx0;
NI stars0;
/* ASCII 39 = '\'' marks a generic-parameter slot in the imported name. */
if (!((NU8)((*cppname0).data->data[i0]) == (NU8)(39))) goto LA192;
chunkend0 = (i0 - 1);
idx0 = (NI)0;
stars0 = (NI)0;
{
NIM_BOOL LOC196;
NimStringDesc* LOC199;
Ttype292840* typeinslot0;
LOC196 = (NIM_BOOL)0;
LOC196 = scancppgenericslot_534827_839829468((*cppname0).data, (&i0), (&idx0), (&stars0));
if (!LOC196) goto LA197;
LOC199 = (NimStringDesc*)0;
LOC199 = copyStrLast((*cppname0).data, chunkstart0, chunkend0);
add_178487_2381377266(&result0, LOC199);
chunkstart0 = i0;
typeinslot0 = resolvestarsincpptype_534891_839829468(typ0, (NI)(idx0 + ((NI) 1)), stars0);
{
NIM_BOOL LOC202;
TY533289 LOC206;
Ropeobj178006* LOC207;
LOC202 = (NIM_BOOL)0;
LOC202 = (typeinslot0 == NIM_NIL);
if (LOC202) goto LA203;
LOC202 = ((*typeinslot0).kind == ((Ttypekind292244) 62));
LA203: ;
if (!LOC202) goto LA204;
memset((void*)LOC206, 0, sizeof(LOC206));
LOC207 = (Ropeobj178006*)0;
LOC207 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC206, 0);
add_178482_2381377266(&result0, LOC207);
}
goto LA200;
LA204: ;
{
Ropeobj178006* LOC209;
LOC209 = (Ropeobj178006*)0;
LOC209 = gettypedescaux_533505_839829468(m0, typeinslot0, check0);
add_178482_2381377266(&result0, LOC209);
}
LA200: ;
}
LA197: ;
}
goto LA190;
LA192: ;
{
i0 += ((NI) 1);
}
LA190: ;
} LA189: ;
}
{
NimStringDesc* LOC215;
if (!!((chunkstart0 == ((NI) 0)))) goto LA213;
LOC215 = (NimStringDesc*)0;
LOC215 = copyStr((*cppname0).data, chunkstart0);
add_178487_2381377266(&result0, LOC215);
}
goto LA211;
LA213: ;
{
/* No slot markers: render "<cppname><T839829468_82>arg1,arg2,...<_84>"
 * from sons 1..len-2 (presumably "<...>" generic brackets — confirm). */
result0 = HEX26_178447_2381377266(cppname0, ((NimStringDesc*) &T839829468_82));
{
NI i_535516_839829468;
NI HEX3Atmp_535665_839829468;
NI LOC218;
NI res_535668_839829468;
i_535516_839829468 = (NI)0;
HEX3Atmp_535665_839829468 = (NI)0;
LOC218 = (NI)0;
LOC218 = len_295339_850551059(typ0);
HEX3Atmp_535665_839829468 = (NI)(LOC218 - ((NI) 2));
res_535668_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC225;
if (!(res_535668_839829468 <= HEX3Atmp_535665_839829468)) goto LA220;
i_535516_839829468 = res_535668_839829468;
{
if (!(((NI) 1) < i_535516_839829468)) goto LA223;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_83));
}
LA223: ;
LOC225 = (Ropeobj178006*)0;
LOC225 = gettypedescaux_533505_839829468(m0, (*typ0).sons->data[i_535516_839829468], check0);
add_178482_2381377266(&result0, LOC225);
res_535668_839829468 += ((NI) 1);
} LA220: ;
}
}
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_84));
}
LA211: ;
/* NOTE(review): LOC226's value is discarded — getrecorddesc is called only
 * for its side effects (recursing into the fields). Codegen artifact. */
LOC226 = (Ropeobj178006*)0;
LOC226 = getrecorddesc_534643_839829468(m0, t_534942_839829468, result0, check0);
}
goto LA182;
LA186: ;
{
Tidobj199004* LOC241;
TNimObject* LOC242;
Ropeobj178006* recdesc0;
result0 = cachegettype_533593_839829468((*m0).forwtypecache, t_534942_839829468);
{
Tidobj199004* LOC239;
TNimObject* LOC240;
if (!(result0 == NIM_NIL)) goto LA230;
result0 = gettypename_533313_839829468(t_534942_839829468);
{
NIM_BOOL LOC234;
NimStringDesc* LOC237;
TY532811 LOC238;
LOC234 = (NIM_BOOL)0;
LOC234 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC234)) goto LA235;
LOC237 = (NimStringDesc*)0;
LOC237 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC238, 0, sizeof(LOC238));
LOC238[0] = structorunion_534001_839829468(t_534942_839829468);
LOC238[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC237, LOC238, 2);
}
LA235: ;
LOC239 = (Tidobj199004*)0;
LOC239 = &t_534942_839829468->Sup;
LOC240 = (TNimObject*)0;
LOC240 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC239, LOC240);
}
LA230: ;
LOC241 = (Tidobj199004*)0;
LOC241 = &t_534942_839829468->Sup;
LOC242 = (TNimObject*)0;
LOC242 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC241, LOC242);
{
/* Kind 18 is the tuple variant; everything else uses the record path. */
if (!!(((*t_534942_839829468).kind == ((Ttypekind292244) 18)))) goto LA245;
recdesc0 = getrecorddesc_534643_839829468(m0, t_534942_839829468, result0, check0);
}
goto LA243;
LA245: ;
{
recdesc0 = gettupledesc_534777_839829468(m0, t_534942_839829468, result0, check0);
}
LA243: ;
{
NIM_BOOL LOC250;
LOC250 = (NIM_BOOL)0;
LOC250 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC250)) goto LA251;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], recdesc0);
}
LA251: ;
}
LA182: ;
}
break;
/* Kind 19: presumably the set type — small sets get a sized-integer
 * typedef (1/2/4/8 bytes, bit count = size*8), larger ones a byte-array
 * typedef (formats T839829468_106 / _107) — confirm against ccgtypes. */
case ((Ttypekind292244) 19):
{
Ttype292840* LOC254;
Ropeobj178006* LOC255;
Tidobj199004* LOC256;
TNimObject* LOC257;
LOC254 = (Ttype292840*)0;
LOC254 = lastson_295377_850551059(t_534942_839829468);
LOC255 = (Ropeobj178006*)0;
LOC255 = gettypename_533313_839829468(LOC254);
result0 = HEX26_178447_2381377266(LOC255, ((NimStringDesc*) &T839829468_105));
LOC256 = (Tidobj199004*)0;
LOC256 = &t_534942_839829468->Sup;
LOC257 = (TNimObject*)0;
LOC257 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC256, LOC257);
{
NIM_BOOL LOC260;
NI s0;
NI64 LOC263;
LOC260 = (NIM_BOOL)0;
LOC260 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC260)) goto LA261;
LOC263 = (NI64)0;
LOC263 = getsize_320135_3876443242(t_534942_839829468);
s0 = ((NI) (LOC263));
switch (s0) {
case ((NI) 1):
case ((NI) 2):
case ((NI) 4):
case ((NI) 8):
{
TY532811 LOC265;
memset((void*)LOC265, 0, sizeof(LOC265));
LOC265[0] = result0;
LOC265[1] = rope_178401_2381377266(((NI64) ((NI)(s0 * ((NI) 8)))));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_106), LOC265, 2);
}
break;
default:
{
TY532811 LOC267;
NI64 LOC268;
memset((void*)LOC267, 0, sizeof(LOC267));
LOC267[0] = result0;
LOC268 = (NI64)0;
LOC268 = getsize_320135_3876443242(t_534942_839829468);
LOC267[1] = rope_178401_2381377266(LOC268);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_107), LOC267, 2);
}
break;
}
}
LA261: ;
}
break;
/* Transparent/alias kinds: recurse on the last son unchanged. */
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 15):
case ((Ttypekind292244) 46):
case ((Ttypekind292244) 47):
case ((Ttypekind292244) 49):
case ((Ttypekind292244) 8):
{
Ttype292840* LOC270;
LOC270 = (Ttype292840*)0;
LOC270 = lastson_295377_850551059(t_534942_839829468);
result0 = gettypedescaux_533505_839829468(m0, LOC270, check0);
}
break;
/* Any other kind is a codegen bug: report with the kind's enum name. */
default:
{
NimStringDesc* LOC272;
LOC272 = (NimStringDesc*)0;
LOC272 = rawNewString(reprEnum((NI)(*t_534942_839829468).kind, (&NTI292244))->Sup.len + 16);
appendString(LOC272, ((NimStringDesc*) &T839829468_108));
appendString(LOC272, reprEnum((NI)(*t_534942_839829468).kind, (&NTI292244)));
appendChar(LOC272, 41);
internalerror_196113_155036129(LOC272);
result0 = NIM_NIL;
}
break;
}
/* Leaving this type's generation: drop its id from the recursion set. */
excl_268841_2627731572(check0, (*t_534942_839829468).Sup.id);
}BeforeRet: ;
return result0;
}
/* True when the type's kind bit is in the compile-time-only kind set,
 * which is encoded as the 64-bit mask below (one bit per Ttypekind). */
static N_INLINE(NIM_BOOL, iscompiletimeonly_328706_3876443242)(Ttype292840* t0) {
NU64 kindbit = (NU64)1 << ((NU)((*t0).kind) & 63U);
return (NIM_BOOL)((IL64(576460752303423744) & kindbit) != 0);
}
/* Chooses the storage location for a proc parameter: after skipping the
 * wrapper kinds selected by mask 8388864, parameters whose kind is NOT in
 * the 64-bit kind set below get storage location 2, the rest get 0. */
N_NIMCALL(Tstorageloc292812, paramstorageloc_534098_839829468)(Tsym292834* param0) {
Ttype292840* skipped = skiptypes_296099_850551059((*param0).typ, 8388864);
NIM_BOOL inSet = ((IL64(281475110993936) & ((NU64)1 << ((NU)((*skipped).kind) & 63U))) != 0);
if (!inSet) {
return ((Tstorageloc292812) 2);
}
return ((Tstorageloc292812) 0);
}
/* Decides whether parameter symbol `s0` is implicitly passed through a
 * hidden pointer in the generated C.
 * Type flag 13 forces true, type flag 12 forces false; otherwise the
 * decision depends on the (skipped) type kind:
 *  - kind 17 (object): by pointer, unless it is a pure inheritance root
 *    (flag 2 set and no base in sons[0]); also forced by option bit 18 or
 *    when the size exceeds 2 * floatsize;
 *  - kind 18 (tuple): by pointer when larger than 2 * floatsize or when
 *    option bit 18 is set;
 *  - everything else: by value.
 * (Enum member names for the flag/option bits are not visible here.)
 * NOTE(review): Nim-compiler-generated C; do not hand-edit the logic. */
N_NIMCALL(NIM_BOOL, ccgintroducedptr_533611_839829468)(Tsym292834* s0) {
NIM_BOOL result0;
Ttype292840* pt0;
{ result0 = (NIM_BOOL)0;
pt0 = skiptypes_296099_850551059((*s0).typ, IL64(211106232576256));
{
/* Type flag 13: always a hidden pointer. */
if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 13))&31U)))!=0)) goto LA3;
result0 = NIM_TRUE;
goto BeforeRet;
}
goto LA1;
LA3: ;
{
/* Type flag 12: never a hidden pointer. */
if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 12))&31U)))!=0)) goto LA6;
result0 = NIM_FALSE;
goto BeforeRet;
}
goto LA1;
LA6: ;
LA1: ;
switch ((*pt0).kind) {
case ((Ttypekind292244) 17):
{
{
/* Object: pointer when option bit 18 is set or size > 2*floatsize. */
NIM_BOOL LOC11;
NI64 LOC13;
LOC11 = (NIM_BOOL)0;
LOC11 = (((*s0).options &(1U<<((NU)(((Toption169009) 18))&31U)))!=0);
if (LOC11) goto LA12;
LOC13 = (NI64)0;
LOC13 = getsize_320135_3876443242(pt0);
LOC11 = (((NI64) ((NI)(floatsize_176642_4151366050 * ((NI) 2)))) < LOC13);
LA12: ;
if (!LOC11) goto LA14;
result0 = NIM_TRUE;
}
goto LA9;
LA14: ;
{
/* Flag 2 set with a nil base slot: by value. */
NIM_BOOL LOC17;
LOC17 = (NIM_BOOL)0;
LOC17 = (((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
if (!(LOC17)) goto LA18;
LOC17 = ((*pt0).sons->data[((NI) 0)] == NIM_NIL);
LA18: ;
if (!LOC17) goto LA19;
result0 = NIM_FALSE;
}
goto LA9;
LA19: ;
{
/* All remaining objects: by pointer. */
result0 = NIM_TRUE;
}
LA9: ;
}
break;
case ((Ttypekind292244) 18):
{
/* Tuple: pointer when size > 2*floatsize or option bit 18 is set. */
NIM_BOOL LOC23;
NI64 LOC24;
LOC23 = (NIM_BOOL)0;
LOC24 = (NI64)0;
LOC24 = getsize_320135_3876443242(pt0);
LOC23 = (((NI64) ((NI)(floatsize_176642_4151366050 * ((NI) 2)))) < LOC24);
if (LOC23) goto LA25;
LOC23 = (((*s0).options &(1U<<((NU)(((Toption169009) 18))&31U)))!=0);
LA25: ;
result0 = LOC23;
}
break;
default:
{
result0 = NIM_FALSE;
}
break;
}
}BeforeRet: ;
return result0;
}
/* Maps a Nim return type to its C type-kind classification; currently a
 * plain delegation to the general type-mapping routine. */
N_NIMCALL(Tctypekind529007, mapreturntype_533447_839829468)(Ttype292840* typ0) {
return maptype_533394_839829468(typ0);
}
/* Builds the C return type (`*rettype0`) and parameter list (`*params0`)
 * for proc type `t0`.
 * Handled here, in order:
 *  - nil / "invalid" return types render via format T839829468_26
 *    (presumably "void" — confirm in rope table); others via gettypedescaux;
 *  - each parameter: skip compile-time-only types, mangle the name, fill
 *    its loc, pass by hidden pointer when ccgintroducedptr says so
 *    (suffix T839829468_53, used as the "*" suffix elsewhere in this file),
 *    or use only a weak type dependency when `weakdep0` is set;
 *  - open-array-like parameters additionally emit one extra
 *    length parameter per nesting level (format T839829468_112);
 *  - an invalid-but-present return type becomes a trailing hidden result
 *    parameter;
 *  - callconv 8 (closure) with `declareenvironment0` appends the hidden
 *    environment parameter (T839829468_114);
 *  - type flag 0 appends C varargs (T839829468_115);
 *  - finally the list is closed (T839829468_116 when empty, _117 otherwise)
 *    and prefixed with T839829468_118 (presumably the opening "(").
 * `check0` is the recursion-guard set threaded through gettypedescaux.
 * unsureAsgnRef performs GC-aware writes through the out parameters.
 * NOTE(review): Nim-compiler-generated C; do not hand-edit the logic. */
N_NIMCALL(void, genprocparams_534115_839829468)(Tcgen529027* m0, Ttype292840* t0, Ropeobj178006** rettype0, Ropeobj178006** params0, Intset268030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0) {
unsureAsgnRef((void**) (&(*params0)), NIM_NIL);
{
/* Return type: nil or invalid-for-C return types map to T839829468_26. */
NIM_BOOL LOC3;
TY533289 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*t0).sons->data[((NI) 0)] == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = isinvalidreturntype_533550_839829468((*t0).sons->data[((NI) 0)]);
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
unsureAsgnRef((void**) (&(*rettype0)), HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC7, 0));
}
goto LA1;
LA5: ;
{
unsureAsgnRef((void**) (&(*rettype0)), gettypedescaux_533505_839829468(m0, (*t0).sons->data[((NI) 0)], check0));
}
LA1: ;
{
/* Iterate the formal parameters (sons 1..len-1 of the proc's n node). */
NI i_534152_839829468;
NI HEX3Atmp_534353_839829468;
NI LOC10;
NI res_534356_839829468;
i_534152_839829468 = (NI)0;
HEX3Atmp_534353_839829468 = (NI)0;
LOC10 = (NI)0;
LOC10 = sonslen_295351_850551059((*t0).n);
HEX3Atmp_534353_839829468 = (NI)(LOC10 - ((NI) 1));
res_534356_839829468 = ((NI) 1);
{
while (1) {
if (!(res_534356_839829468 <= HEX3Atmp_534353_839829468)) goto LA12;
i_534152_839829468 = res_534356_839829468;
{
Tsym292834* param0;
Ropeobj178006* LOC29;
Tstorageloc292812 LOC30;
TY533289 LOC45;
Ropeobj178006* LOC46;
Ttype292840* arr0;
NI j0;
{
/* Each son must be a symbol node (Tnodekind 3). */
if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_534152_839829468]).kind == ((Tnodekind292020) 3)))) goto LA16;
internalerror_196100_155036129((*(*t0).n).info, ((NimStringDesc*) &T839829468_109));
}
LA16: ;
param0 = (*(*(*t0).n).kindU.S6.sons->data[i_534152_839829468]).kindU.S4.sym;
{
/* Compile-time-only parameters produce no C parameter at all. */
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = iscompiletimeonly_328706_3876443242((*param0).typ);
if (!LOC20) goto LA21;
goto LA13;
}
LA21: ;
{
/* Comma separator (T839829468_110) before every parameter but the first. */
TY533289 LOC27;
Ropeobj178006* LOC28;
if (!!(((*params0) == NIM_NIL))) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC27, 0);
add_178482_2381377266(params0, LOC28);
}
LA25: ;
LOC29 = (Ropeobj178006*)0;
LOC29 = manglename_533205_839829468(param0);
LOC30 = (Tstorageloc292812)0;
LOC30 = paramstorageloc_534098_839829468(param0);
fillloc_532282_839829468((&(*param0).loc), ((Tlockind292808) 4), (*param0).typ, LOC29, LOC30);
{
/* Hidden-pointer passing: weak type dep + the "*" suffix string, and the
 * loc gets flag 0 set and storage location reset to 0. */
NIM_BOOL LOC33;
Ropeobj178006* LOC36;
TY533289 LOC37;
Ropeobj178006* LOC38;
LOC33 = (NIM_BOOL)0;
LOC33 = ccgintroducedptr_533611_839829468(param0);
if (!LOC33) goto LA34;
LOC36 = (Ropeobj178006*)0;
LOC36 = gettypedescweak_534079_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC36);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC38 = (Ropeobj178006*)0;
LOC38 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_53), LOC37, 0);
add_178482_2381377266(params0, LOC38);
(*param0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
(*param0).loc.s = ((Tstorageloc292812) 0);
}
goto LA31;
LA34: ;
{
/* Weak dependency requested by the caller: forward-declarable type only. */
Ropeobj178006* LOC42;
if (!weakdep0) goto LA40;
LOC42 = (Ropeobj178006*)0;
LOC42 = gettypedescweak_534079_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC42);
}
goto LA31;
LA40: ;
{
/* Full type descriptor otherwise. */
Ropeobj178006* LOC44;
LOC44 = (Ropeobj178006*)0;
LOC44 = gettypedescaux_533505_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC44);
}
LA31: ;
/* Separator (T839829468_111, presumably a space) then the mangled name. */
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC45, 0);
add_178482_2381377266(params0, LOC46);
add_178482_2381377266(params0, (*param0).loc.r);
arr0 = (*param0).typ;
{
/* Kind 23 (var) wraps the real parameter type in sons[0]. */
if (!((*arr0).kind == ((Ttypekind292244) 23))) goto LA49;
arr0 = (*arr0).sons->data[((NI) 0)];
}
LA49: ;
j0 = ((NI) 0);
{
/* For each nesting level of open-array-like kinds, emit one extra hidden
 * length parameter (format T839829468_112, indexed by j0). */
while (1) {
TY532811 LOC57;
if (!((IL64(281475110928384) &((NU64)1<<((NU)((*arr0).kind)&63U)))!=0)) goto LA52;
{
if (!((*(*param0).typ).kind == ((Ttypekind292244) 23))) goto LA55;
(*param0).loc.s = ((Tstorageloc292812) 0);
}
LA55: ;
memset((void*)LOC57, 0, sizeof(LOC57));
LOC57[0] = (*param0).loc.r;
LOC57[1] = rope_178401_2381377266(((NI64) (j0)));
addf_179205_2381377266(params0, ((NimStringDesc*) &T839829468_112), LOC57, 2);
j0 += ((NI) 1);
arr0 = (*arr0).sons->data[((NI) 0)];
} LA52: ;
}
} LA13: ;
res_534356_839829468 += ((NI) 1);
} LA12: ;
}
}
{
/* Invalid-but-present return type: append it as a hidden result
 * parameter, by pointer unless mapreturntype says C kind 17 (array-ish). */
NIM_BOOL LOC60;
Ttype292840* arr0;
TY533289 LOC76;
LOC60 = (NIM_BOOL)0;
LOC60 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
if (!(LOC60)) goto LA61;
LOC60 = isinvalidreturntype_533550_839829468((*t0).sons->data[((NI) 0)]);
LA61: ;
if (!LOC60) goto LA62;
arr0 = (*t0).sons->data[((NI) 0)];
{
if (!!(((*params0) == NIM_NIL))) goto LA66;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA66: ;
{
Tctypekind529007 LOC70;
Ropeobj178006* LOC73;
LOC70 = (Tctypekind529007)0;
LOC70 = mapreturntype_533447_839829468((*t0).sons->data[((NI) 0)]);
if (!!((LOC70 == ((Tctypekind529007) 17)))) goto LA71;
LOC73 = (Ropeobj178006*)0;
LOC73 = gettypedescweak_534079_839829468(m0, arr0, check0);
add_178482_2381377266(params0, LOC73);
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_53));
}
goto LA68;
LA71: ;
{
Ropeobj178006* LOC75;
LOC75 = (Ropeobj178006*)0;
LOC75 = gettypedescaux_533505_839829468(m0, arr0, check0);
add_178482_2381377266(params0, LOC75);
}
LA68: ;
memset((void*)LOC76, 0, sizeof(LOC76));
addf_179205_2381377266(params0, ((NimStringDesc*) &T839829468_113), LOC76, 0);
}
LA62: ;
{
/* Closure calling convention: append the hidden environment parameter. */
NIM_BOOL LOC79;
LOC79 = (NIM_BOOL)0;
LOC79 = ((*t0).callconv == ((Tcallingconvention292002) 8));
if (!(LOC79)) goto LA80;
LOC79 = declareenvironment0;
LA80: ;
if (!LOC79) goto LA81;
{
if (!!(((*params0) == NIM_NIL))) goto LA85;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA85: ;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_114));
}
LA81: ;
{
/* Type flag 0: C-style varargs. */
if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)) goto LA89;
{
if (!!(((*params0) == NIM_NIL))) goto LA93;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA93: ;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_115));
}
LA89: ;
{
/* Close the list: T839829468_116 when no parameters (presumably "void)"),
 * T839829468_117 otherwise (presumably ")"). */
if (!((*params0) == NIM_NIL)) goto LA97;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_116));
}
goto LA95;
LA97: ;
{
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_117));
}
LA95: ;
/* Prepend the opening text (T839829468_118). */
unsureAsgnRef((void**) (&(*params0)), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_118), (*params0)));
}
/* Generates the C declaration header for proc symbol `prc0`: an optional
 * line directive, an optional prefix chosen from loc flag 5 / module flag 3
 * (presumably dll import/export) or calling convention 5, then the return
 * type and parameter list from genprocparams, rendered either with the
 * standard header format (T839829468_119) or — when the proc carries a
 * user-supplied constraint string (codegen pragma) — with that string as
 * the format.
 * NOTE(review): Nim-compiler-generated C; do not hand-edit the logic. */
N_NIMCALL(Ropeobj178006*, genprocheader_535867_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
Ropeobj178006* result0;
Ropeobj178006* rettype0;
Ropeobj178006* params0;
Intset268030 check0;
Ropeobj178006* LOC13;
result0 = (Ropeobj178006*)0;
rettype0 = (Ropeobj178006*)0;
params0 = (Ropeobj178006*)0;
genclinedir_532813_839829468(&result0, (*prc0).info);
{
/* Prefix selection: loc flag 5 picks T839829468_22/_23 depending on
 * module codegen flag 3; otherwise callconv 5 picks T839829468_24. */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 5))&15U)))!=0)) goto LA3;
{
if (!(((*m0).flags &(1U<<((NU)(((Codegenflag529025) 3))&7U)))!=0)) goto LA7;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_22));
}
goto LA5;
LA7: ;
{
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_23));
}
LA5: ;
}
goto LA1;
LA3: ;
{
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA11;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_24));
}
goto LA1;
LA11: ;
LA1: ;
/* NOTE(review): memset / chckNil / memset is a standard Nim codegen
 * object-initialization pattern; the repetition is a harmless artifact. */
memset((void*)(&check0), 0, sizeof(check0));
chckNil((void*)(&check0));
memset((void*)(&check0), 0, sizeof(check0));
initintset_268885_2627731572((&check0));
LOC13 = (Ropeobj178006*)0;
LOC13 = manglename_533205_839829468(prc0);
fillloc_532282_839829468((&(*prc0).loc), ((Tlockind292808) 7), (*prc0).typ, LOC13, ((Tstorageloc292812) 0));
genprocparams_534115_839829468(m0, (*prc0).typ, &rettype0, &params0, (&check0), NIM_TRUE, NIM_FALSE);
{
/* Precedence note: `!(*prc0).constraint == 0` parses as
 * `(!(*prc0).constraint) == 0`, i.e. the goto is taken when constraint is
 * non-nil — intentional in this generated code. */
TY535235 LOC18;
if (!(*prc0).constraint == 0) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rope_178277_2381377266(Callingconvtostr_533587_839829468[((*(*prc0).typ).callconv)- 0]);
LOC18[1] = rettype0;
LOC18[2] = (*prc0).loc.r;
LOC18[3] = params0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_119), LOC18, 4);
}
goto LA14;
LA16: ;
{
/* Constraint present: its string literal is the format itself. */
TY535238 LOC20;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rettype0;
LOC20[1] = (*prc0).loc.r;
LOC20[2] = params0;
result0 = HEX25_178905_2381377266((*(*prc0).constraint).kindU.S3.strval, LOC20, 3);
}
LA14: ;
return result0;
}
/* Mangled Nim `[]` operator: fetch the i0-th son of AST node n0.
   No bounds check is performed here; callers guard the index. */
static N_INLINE(Tnode292802*, HEX5BHEX5D_293238_850551059)(Tnode292802* n0, NI i0) {
return (*n0).kindU.S6.sons->data[i0];
}
/* Searches an AST subtree for a "trivially returnable" assignment to
   the result variable.  Returns the assigned expression node when one
   is found (marking the owning statement via node-flag bit 14), or
   NIL otherwise.  Kind/flag numbers are the Nim compiler's internal
   enum ordinals — confirm against compiler/ast.nim. */
N_NIMCALL(Tnode292802*, easyresultasgn_560191_839829468)(Tnode292802* n0) {
Tnode292802* result0;
{ result0 = (Tnode292802*)0;
switch ((*n0).kind) {
case ((Tnodekind292020) 115):
case ((Tnodekind292020) 126):
{
NI i0;
i0 = ((NI) 0);
{
/* Skip leading sons whose kinds are in the "ignorable" set
   (comments/pragmas/empty statements, presumably). */
while (1) {
NIM_BOOL LOC4;
NI LOC5;
Tnode292802* LOC7;
LOC4 = (NIM_BOOL)0;
LOC5 = (NI)0;
LOC5 = len_293081_850551059(n0);
LOC4 = (i0 < LOC5);
if (!(LOC4)) goto LA6;
LOC7 = (Tnode292802*)0;
LOC7 = HEX5BHEX5D_293238_850551059(n0, i0);
LOC4 = ((*LOC7).kind == ((Tnodekind292020) 1) || (*LOC7).kind >= ((Tnodekind292020) 79) && (*LOC7).kind <= ((Tnodekind292020) 81) || (*LOC7).kind == ((Tnodekind292020) 84) || (*LOC7).kind == ((Tnodekind292020) 98) || (*LOC7).kind == ((Tnodekind292020) 101) || (*LOC7).kind == ((Tnodekind292020) 125));
LA6: ;
if (!LOC4) goto LA3;
i0 += ((NI) 1);
} LA3: ;
}
{
/* Recurse into the first non-ignorable son, if any. */
NI LOC10;
Tnode292802* LOC13;
LOC10 = (NI)0;
LOC10 = len_293081_850551059(n0);
if (!(i0 < LOC10)) goto LA11;
LOC13 = (Tnode292802*)0;
LOC13 = HEX5BHEX5D_293238_850551059(n0, i0);
result0 = easyresultasgn_560191_839829468(LOC13);
}
LA11: ;
}
break;
case ((Tnodekind292020) 73):
case ((Tnodekind292020) 74):
{
{
/* Assignment node: accept it when the LHS is a symbol of kind 11
   (the result symbol, presumably) and return the RHS. */
NIM_BOOL LOC17;
Tnode292802* LOC18;
Tnode292802* LOC20;
LOC17 = (NIM_BOOL)0;
LOC18 = (Tnode292802*)0;
LOC18 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
LOC17 = ((*LOC18).kind == ((Tnodekind292020) 3));
if (!(LOC17)) goto LA19;
LOC20 = (Tnode292802*)0;
LOC20 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
LOC17 = (((Tsymkind292435) 11) == (*(*LOC20).kindU.S4.sym).kind);
LA19: ;
if (!LOC17) goto LA21;
(*n0).flags |= ((NU16)1)<<((((Tnodeflag292427) 14))%(sizeof(NU16)*8));
result0 = HEX5BHEX5D_293238_850551059(n0, ((NI) 1));
goto BeforeRet;
}
LA21: ;
}
break;
case ((Tnodekind292020) 109):
{
{
/* Wrapper node (e.g. a block): look inside its first son and
   propagate the "handled" flag on success. */
NI LOC26;
Tnode292802* LOC29;
LOC26 = (NI)0;
LOC26 = len_293081_850551059(n0);
if (!(((NI) 0) < LOC26)) goto LA27;
LOC29 = (Tnode292802*)0;
LOC29 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
result0 = easyresultasgn_560191_839829468(LOC29);
{
if (!!((result0 == NIM_NIL))) goto LA32;
(*n0).flags |= ((NU16)1)<<((((Tnodeflag292427) 14))%(sizeof(NU16)*8));
}
LA32: ;
}
LA27: ;
}
break;
default:
{
}
break;
}
}BeforeRet: ;
return result0;
}
/* Public entry to the C type-descriptor generator: sets up a fresh,
   empty int-set (used by gettypedescaux to break type recursion) and
   delegates.  Returns the rope holding the C type name for typ0. */
N_NIMCALL(Ropeobj178006*, gettypedesc_535673_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Intset268030 visited0;
/* Standard Nim zero-init sequence for a stack-allocated value. */
memset((void*)(&visited0), 0, sizeof(visited0));
chckNil((void*)(&visited0));
memset((void*)(&visited0), 0, sizeof(visited0));
initintset_268885_2627731572((&visited0));
return gettypedescaux_533505_839829468(m0, typ0, (&visited0));
}
/* Produces the C declaration text for local variable symbol s0 inside
   proc p0.  Lazily fills the symbol's location (mangled name, kind,
   storage) on first use, then renders "<type> [NIM_CONST] [volatile]
   <name>" or, when the symbol has an explicit codegen pattern
   (`constraint`), formats that pattern with type and name. */
N_NIMCALL(Ropeobj178006*, localvardecl_538532_839829468)(Tcproc529021* p0, Tsym292834* s0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
Ropeobj178006* LOC5;
/* Location kind 0 means "not yet assigned": fill it in now. */
if (!((*s0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(s0);
fillloc_532282_839829468((&(*s0).loc), ((Tlockind292808) 2), (*s0).typ, LOC5, ((Tstorageloc292812) 2));
{
/* Symbol kind 9 additionally gets loc flag bit 2 set
   (meaning inferred from usage — TODO confirm). */
if (!((*s0).kind == ((Tsymkind292435) 9))) goto LA8;
(*s0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 2))%(sizeof(NU16)*8));
}
LA8: ;
}
LA3: ;
result0 = gettypedesc_535673_839829468((*p0).module, (*s0).loc.t);
{
/* No constraint: default declaration layout. */
if (!(*s0).constraint == 0) goto LA12;
{
/* Symbol flag bit 8: append qualifier T..._121 (const-like). */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA16;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_121));
}
LA16: ;
{
/* Symbol flag bit 7: append qualifier T..._122 (volatile-like). */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA20;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_122));
}
LA20: ;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_111));
add_178482_2381377266(&result0, (*s0).loc.r);
}
goto LA10;
LA12: ;
{
TY532811 LOC23;
/* Constraint pattern formatted with {type, name}. */
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = result0;
LOC23[1] = (*s0).loc.r;
result0 = HEX25_178905_2381377266((*(*s0).constraint).kindU.S3.strval, LOC23, 2);
}
LA10: ;
return result0;
}
/* Reset every field of the location descriptor `result0`.  The two
   GC'd reference fields (t, r) are written through unsureAsgnRef so
   the runtime's write barrier is honoured. */
N_NIMCALL(void, initloc_532273_839829468)(Tloc292816* result0, Tlockind292808 k0, Ttype292840* typ0, Tstorageloc292812 s0) {
(*result0).flags = 0;
(*result0).k = k0;
(*result0).s = s0;
unsureAsgnRef((void**) (&(*result0).t), typ0);
unsureAsgnRef((void**) (&(*result0).r), NIM_NIL);
}
/* Initialise `result0` as a fresh, unassigned (kind 0) location typed
   after expression e0, mark it single-use (loc flag bit 8), then
   generate the code for e0 into it.  Call order is significant. */
N_NIMCALL(void, initlocexprsingleuse_539289_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0) {
initloc_532273_839829468(result0, ((Tlockind292808) 0), (*e0).typ, ((Tstorageloc292812) 0));
(*result0).flags |= ((NU16)1)<<((((Tlocflag292810) 8))%(sizeof(NU16)*8));
expr_539248_839829468(p0, e0, result0);
}
/* Address of output-section s0 of the innermost (last) open block of
   proc p0.  `blocks` may be nil, in which case the index underflows;
   callers are expected to have at least one block open. */
static N_INLINE(Ropeobj178006**, s_529179_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0) {
NI last0;
last0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
return &(*p0).blocks->data[last0].sections[(s0)- 0];
}
/* Indent rope r0 to the current block nesting depth of proc p0 by
   prepending the `indent` rope once per open block.  Returns the
   (possibly new) rope; r0 itself is not mutated. */
N_NIMCALL(Ropeobj178006*, indentline_532656_839829468)(Tcproc529021* p0, Ropeobj178006* r0) {
Ropeobj178006* line0;
NI depth0;
NI level0;
line0 = r0;
/* Highest valid block index; -1 when no blocks exist, which makes the
   loop below run zero times. */
depth0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
for (level0 = ((NI) 0); level0 <= depth0; level0++) {
prepend_178893_2381377266(&line0, indent_532655_839829468);
}
return line0;
}
/* Format frmt0 with args via ropecg (which also resolves compiler
   symbols), indent the result to the current block depth and append
   it to section s0 of the current proc. */
N_NIMCALL(void, linefmt_532714_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006** dest0;
Ropeobj178006* formatted0;
Ropeobj178006* indented0;
dest0 = s_529179_3723162438(p0, s0);
formatted0 = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
indented0 = indentline_532656_839829468(p0, formatted0);
add_178482_2381377266(dest0, indented0);
}
/* Read access to a location: its code rope `r`, wrapped by format
   T..._124 when loc flag bit 0 is set (an indirect location —
   presumably the wrap is a C dereference; confirm against cgen). */
N_NIMCALL(Ropeobj178006*, rdloc_538188_839829468)(Tloc292816* a0) {
Ropeobj178006* code0;
code0 = (*a0).r;
if ((((*a0).flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0)) {
TY178507 fmtArgs0;
memset((void*)fmtArgs0, 0, sizeof(fmtArgs0));
fmtArgs0[0] = code0;
code0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), fmtArgs0, 1);
}
return code0;
}
/* Append rope r0, indented to the current block depth, to section s0
   of the current proc. */
N_NIMCALL(void, line_532690_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, Ropeobj178006* r0) {
Ropeobj178006** dest0;
Ropeobj178006* indented0;
dest0 = s_529179_3723162438(p0, s0);
indented0 = indentline_532656_839829468(p0, r0);
add_178482_2381377266(dest0, indented0);
}
/* Like linefmt, but formats with the plain `%` substitution (HEX25)
   instead of ropecg — i.e. no compiler-symbol interpolation. */
N_NIMCALL(void, linef_532700_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006** dest0;
Ropeobj178006* formatted0;
Ropeobj178006* indented0;
dest0 = s_529179_3723162438(p0, s0);
formatted0 = HEX25_178905_2381377266(frmt0, args0, args0Len0);
indented0 = indentline_532656_839829468(p0, formatted0);
add_178482_2381377266(dest0, indented0);
}
/* Emits the common part of a runtime type-info (TNimType) definition
   for typ0 into module m0's sections: the init assignment (name,
   size, kind ordinal, base pointer), an optional flags assignment,
   and a debug/registration line keyed on the type's string form. */
N_NIMCALL(void, gentypeinfoauxbase_535960_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0, Ropeobj178006* base0) {
NI nimtypekind0;
Ropeobj178006* size0;
TY535235 LOC17;
NI flags0;
Ropeobj178006* LOC33;
TY532811 LOC34;
NimStringDesc* LOC35;
nimtypekind0 = (NI)0;
{
/* Objects lacking a type field are reported as kind 18 instead of
   their real kind (inferred from isobjlackingtypefield — confirm). */
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isobjlackingtypefield_533515_839829468(typ0);
if (!LOC3) goto LA4;
nimtypekind0 = ((NI) 18);
}
goto LA1;
LA4: ;
{
nimtypekind0 = ((NI) ((*typ0).kind));
}
LA1: ;
size0 = (Ropeobj178006*)0;
{
/* Type flag bit 0 set: use the fixed size text T..._133. */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)) goto LA9;
size0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_133));
}
goto LA7;
LA9: ;
{
/* Under command 2 or module sym-flag 27 measure the original type,
   otherwise the (possibly skipped) type itself. */
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC12) goto LA13;
LOC12 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA13: ;
if (!LOC12) goto LA14;
size0 = gettypedesc_535673_839829468(m0, origtype0);
}
goto LA7;
LA14: ;
{
size0 = gettypedesc_535673_839829468(m0, typ0);
}
LA7: ;
/* Main init statement {name, size, kind, base} into section 14. */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = name0;
LOC17[1] = size0;
LOC17[2] = rope_178401_2381377266(((NI64) (nimtypekind0)));
LOC17[3] = base0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_134), LOC17, 4);
flags0 = ((NI) 0);
{
/* Bit 1: type contains no GC'd references. */
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = containsgarbagecollectedref_320117_3876443242(typ0);
if (!!(LOC20)) goto LA21;
flags0 = (NI)(flags0 | ((NI) 1));
}
LA21: ;
{
/* Bit 2: type cannot participate in a reference cycle. */
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = canformacycle_320123_3876443242(typ0);
if (!!(LOC25)) goto LA26;
flags0 = (NI)(flags0 | ((NI) 2));
}
LA26: ;
{
TY532811 LOC32;
/* Only emit the flags assignment when some bit is set. */
if (!!((flags0 == ((NI) 0)))) goto LA30;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = name0;
LOC32[1] = rope_178401_2381377266(((NI64) (flags0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_135), LOC32, 2);
}
LA30: ;
/* Pull in runtime symbol T..._129, then emit a line into section 9
   pairing the type-info name with the type's string representation. */
LOC33 = (Ropeobj178006*)0;
LOC33 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_129));
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = name0;
LOC35 = (NimStringDesc*)0;
LOC35 = typetostring_320017_3876443242(typ0, ((Tprefereddesc320011) 0));
LOC34[1] = rope_178277_2381377266(LOC35);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_136), LOC34, 2);
}
/* Return a rope referencing the next free slot of module m0's static
   TNimNode array (format T..._138 with {array name, index}) and bump
   the slot counter.  Note: the rope is built BEFORE the increment. */
N_NIMCALL(Ropeobj178006*, getnimnode_535945_839829468)(Tcgen529027* m0) {
Ropeobj178006* slot0;
TY532811 fmtArgs0;
memset((void*)fmtArgs0, 0, sizeof(fmtArgs0));
fmtArgs0[0] = (*m0).typenodesname;
fmtArgs0[1] = rope_178401_2381377266(((NI64) ((*m0).typenodes)));
slot0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_138), fmtArgs0, 2);
(*m0).typenodes += ((NI) 1);
return slot0;
}
/* Emits runtime type-info for a tuple type: the base TNimType record
   (with a nil base, T..._18), then one TNimNode per field describing
   its offset/type, and finally links the node list into the record. */
N_NIMCALL(void, gentupleinfo_536551_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* LOC1;
Ropeobj178006* expr0;
NI length0;
TY532811 LOC15;
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
gentypeinfoauxbase_535960_839829468(m0, typ0, typ0, name0, LOC1);
expr0 = getnimnode_535945_839829468(m0);
length0 = sonslen_295327_850551059(typ0);
{
Ropeobj178006* tmp0;
TY532811 LOC6;
TY535238 LOC12;
/* Non-empty tuple: declare a node-pointer array (section 12) ... */
if (!(((NI) 0) < length0)) goto LA4;
tmp0 = gettempname_533598_839829468(m0);
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = tmp0;
LOC6[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC6, 2);
{
/* ... and fill one TNimNode per tuple field. */
NI i_536573_839829468;
NI HEX3Atmp_536592_839829468;
NI res_536595_839829468;
i_536573_839829468 = (NI)0;
HEX3Atmp_536592_839829468 = (NI)0;
HEX3Atmp_536592_839829468 = (NI)(length0 - ((NI) 1));
res_536595_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* a0;
Ropeobj178006* tmp20;
TY535238 LOC10;
TY535235 LOC11;
if (!(res_536595_839829468 <= HEX3Atmp_536592_839829468)) goto LA9;
i_536573_839829468 = res_536595_839829468;
a0 = (*typ0).sons->data[i_536573_839829468];
tmp20 = getnimnode_535945_839829468(m0);
/* Store the node pointer into the array slot (T..._140) ... */
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = tmp0;
LOC10[1] = rope_178401_2381377266(((NI64) (i_536573_839829468)));
LOC10[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC10, 3);
/* ... then fill the node with {tuple type desc, field index,
   field type info} (T..._141). */
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = tmp20;
LOC11[1] = gettypedesc_535673_839829468(m0, typ0);
LOC11[2] = rope_178401_2381377266(((NI64) (i_536573_839829468)));
LOC11[3] = gentypeinfo_535941_839829468(m0, a0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_141), LOC11, 4);
res_536595_839829468 += ((NI) 1);
} LA9: ;
}
}
/* Hook the node array into the root node (T..._142). */
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = expr0;
LOC12[1] = rope_178401_2381377266(((NI64) (length0)));
LOC12[2] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC12, 3);
}
goto LA2;
LA4: ;
{
TY532811 LOC14;
/* Empty tuple: root node with length 0 and no child array. */
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = expr0;
LOC14[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC14, 2);
}
LA2: ;
/* Attach the root node to the type-info record (T..._144). */
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = name0;
LOC15[1] = expr0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC15, 2);
}
/* Build a synthetic type used to describe a closure for the GC /
   type-info machinery — structurally roughly a tuple of
   {proc pointer, ref to an (empty) environment tuple}.
   (Kind ordinals 18/26/22 inferred from usage — confirm against
   compiler/ast.nim.) */
N_NIMCALL(Ttype292840*, fakeclosuretype_537010_839829468)(Tsym292834* owner0) {
Ttype292840* tup0;
Ttype292840* procpart0;
Ttype292840* refpart0;
Ttype292840* envtup0;
tup0 = newtype_295107_850551059(((Ttypekind292244) 18), owner0);
procpart0 = newtype_295107_850551059(((Ttypekind292244) 26), owner0);
rawaddson_296394_850551059(tup0, procpart0);
refpart0 = newtype_295107_850551059(((Ttypekind292244) 22), owner0);
envtup0 = newtype_295107_850551059(((Ttypekind292244) 18), owner0);
rawaddson_296394_850551059(refpart0, envtup0);
rawaddson_296394_850551059(tup0, refpart0);
return tup0;
}
/* Emits type-info for a type with an optional base (first son): the
   base's own type-info is generated first (skipping wrapper kinds for
   object types, kind 17), or the nil text T..._18 is used, and then
   the common record is emitted via gentypeinfoauxbase. */
N_NIMCALL(void, gentypeinfoaux_536027_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0) {
Ropeobj178006* base0;
base0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
NI LOC4;
Ttype292840* x0;
LOC3 = (NIM_BOOL)0;
LOC4 = (NI)0;
LOC4 = sonslen_295327_850551059(typ0);
LOC3 = (((NI) 0) < LOC4);
if (!(LOC3)) goto LA5;
LOC3 = !(((*typ0).sons->data[((NI) 0)] == NIM_NIL));
LA5: ;
if (!LOC3) goto LA6;
x0 = (*typ0).sons->data[((NI) 0)];
{
/* Object types (kind 17): skip abstract wrappers on the base
   (the mask encodes a set of skippable kinds). */
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA10;
x0 = skiptypes_296099_850551059(x0, IL64(211106247215360));
}
LA10: ;
base0 = gentypeinfo_535941_839829468(m0, x0);
}
goto LA1;
LA6: ;
{
/* No base: use the nil literal text. */
base0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
}
LA1: ;
gentypeinfoauxbase_535960_839829468(m0, typ0, origtype0, name0, base0);
}
/* A type is a "complex value type" (needs construction/zeroing rather
   than plain assignment) when its kind is in the bit-set 983056, or
   when it is kind 25 with calling convention 8 (presumably a closure
   proc type — confirm against cgen.nim). */
static N_INLINE(NIM_BOOL, iscomplexvaluetype_538317_839829468)(Ttype292840* t0) {
if (((983056 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0)) {
return NIM_TRUE;
}
/* Short-circuit mirrors the original: callconv is only inspected when
   the kind matches. */
return (NIM_BOOL)(((*t0).kind == ((Ttypekind292244) 25)) && ((*t0).callconv == ((Tcallingconvention292002) 8)));
}
/* Ensure header T..._151 (presumably <string.h>, given the name) is
   included exactly once for module m0; codegen flag bit 4 records
   that the include was already requested. */
N_NIMCALL(void, usestringh_532345_839829468)(Tcgen529027* m0) {
NIM_BOOL added0;
if (!((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 4))&7U)))!=0))) {
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 4))%(sizeof(NU8)*8));
/* Return value (whether newly added) is deliberately ignored. */
added0 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_151));
(void)added0;
}
}
/* Address of a location: wrap its code rope in an address-of
   expression (prefix T..._128, suffix T..._117) unless the location
   is already indirect (flag bit 0) or its C type maps to ctype kind
   17 (already pointer-like — TODO confirm ordinal). */
N_NIMCALL(Ropeobj178006*, addrloc_538204_839829468)(Tloc292816* a0) {
Ropeobj178006* code0;
code0 = (*a0).r;
if (!((((*a0).flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0))) {
/* maptype is only consulted for direct locations, matching the
   original short-circuit evaluation order. */
Tctypekind529007 ckind0;
ckind0 = maptype_533394_839829468((*a0).t);
if (!((ckind0 == ((Tctypekind529007) 17)))) {
Ropeobj178006* wrapped0;
wrapped0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_128), code0);
code0 = HEX26_178447_2381377266(wrapped0, ((NimStringDesc*) &T839829468_117));
}
}
return code0;
}
/* Emits code that initialises the hidden type field(s) of an object
   location, depending on what analyseobjectwithtypefield reports:
   0 = no type field, 1 = the object itself has one, 2 = some embedded
   part does (handled by a runtime call, format T..._155). */
N_NIMCALL(void, genobjectinit_538242_839829468)(Tcproc529021* p0, Tcprocsection529011 section0, Ttype292840* t0, Tloc292816* a0, NIM_BOOL takeaddr0) {
Ttypefieldresult320145 LOC1;
LOC1 = (Ttypefieldresult320145)0;
LOC1 = analyseobjectwithtypefield_320149_3876443242(t0);
switch (LOC1) {
case ((Ttypefieldresult320145) 0):
{
}
break;
case ((Ttypefieldresult320145) 1):
{
Ropeobj178006* r0;
Ttype292840* s0;
TY532811 LOC19;
r0 = rdloc_538188_839829468(a0);
{
TY178507 LOC8;
/* When the caller did not take the address, dereference via
   format T..._124. */
if (!!(takeaddr0)) goto LA6;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = r0;
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC8, 1);
}
LA6: ;
s0 = skiptypes_296099_850551059(t0, IL64(211106232576256));
{
/* Unless compiling via command 2 or module sym-flag 27 (where
   inheritance is native), walk up the inheritance chain and
   append an explicit super-field access (T..._153) per level. */
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC11) goto LA12;
LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA12: ;
if (!!(LOC11)) goto LA13;
{
while (1) {
NIM_BOOL LOC17;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*s0).kind == ((Ttypekind292244) 17));
if (!(LOC17)) goto LA18;
LOC17 = !(((*s0).sons->data[((NI) 0)] == NIM_NIL));
LA18: ;
if (!LOC17) goto LA16;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
s0 = skiptypes_296099_850551059((*s0).sons->data[((NI) 0)], IL64(211106247215360));
} LA16: ;
}
}
LA13: ;
/* Assign the type-info pointer (format T..._154). */
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = r0;
LOC19[1] = gentypeinfo_535941_839829468((*p0).module, t0);
linefmt_532714_839829468(p0, section0, ((NimStringDesc*) &T839829468_154), LOC19, 2);
}
break;
case ((Ttypefieldresult320145) 2):
{
Ropeobj178006* r0;
TY532811 LOC26;
{
if (!takeaddr0) goto LA23;
r0 = addrloc_538204_839829468(a0);
}
goto LA21;
LA23: ;
{
r0 = rdloc_538188_839829468(a0);
}
LA21: ;
/* Delegate to a runtime helper (format T..._155). */
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = r0;
LOC26[1] = gentypeinfo_535941_839829468((*p0).module, t0);
linefmt_532714_839829468(p0, section0, ((NimStringDesc*) &T839829468_155), LOC26, 2);
}
break;
}
}
/* Emits initialisation code for location loc0: simple value types get
   a plain zero-assignment (T..._150); complex value types get a
   memset-style clear (T..._152, via <string.h>) when needed, plus
   object type-field initialisation.  istemp0 suppresses the clear for
   temporaries that will be fully overwritten, unless the type holds
   GC'd references. */
N_NIMCALL(void, constructloc_538388_839829468)(Tcproc529021* p0, Tloc292816* loc0, NIM_BOOL istemp0) {
Ttype292840* typ0;
typ0 = skiptypes_296099_850551059((*loc0).t, IL64(211106233624832));
{
NIM_BOOL LOC3;
TY532811 LOC6;
LOC3 = (NIM_BOOL)0;
LOC3 = iscomplexvaluetype_538317_839829468(typ0);
if (!!(LOC3)) goto LA4;
/* Simple value type: "<loc> = (<type>)0;"-style line (T..._150). */
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rdloc_538188_839829468(loc0);
LOC6[1] = gettypedesc_535673_839829468((*p0).module, typ0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_150), LOC6, 2);
}
goto LA1;
LA4: ;
{
{
/* Clear when not a temp, or when the type contains GC'd refs
   (those must never hold garbage). */
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = !(istemp0);
if (LOC10) goto LA11;
LOC10 = containsgarbagecollectedref_320117_3876443242((*loc0).t);
LA11: ;
if (!LOC10) goto LA12;
{
/* Imported C++ types are constructed by C++ itself; skip. */
NIM_BOOL LOC16;
TY532811 LOC19;
LOC16 = (NIM_BOOL)0;
LOC16 = isimportedcpptype_533478_839829468(typ0);
if (!!(LOC16)) goto LA17;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(loc0);
LOC19[1] = rdloc_538188_839829468(loc0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_152), LOC19, 2);
}
LA17: ;
}
LA12: ;
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, loc0, NIM_TRUE);
}
LA1: ;
}
/* Allocates a fresh temporary of type t0 in proc p0: bumps the label
   counter to get a unique name (prefix T..._149), emits its
   declaration (format T..._54) into section 0, fills in result0's
   location fields, and constructs/initialises the temp.  Note the
   double negation: needsinit0 == true means the temp is NOT treated
   as a throwaway, so constructloc performs the full clear. */
N_NIMCALL(void, gettemp_537032_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* result0, NIM_BOOL needsinit0) {
Ropeobj178006* LOC1;
TY532811 LOC2;
(*p0).labels += ((NI) 1);
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178401_2381377266(((NI64) ((*p0).labels)));
/* GC write barrier for the rope stored in the location. */
unsureAsgnRef((void**) (&(*result0).r), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_149), LOC1));
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC2[1] = (*result0).r;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_54), LOC2, 2);
(*result0).k = ((Tlockind292808) 1);
unsureAsgnRef((void**) (&(*result0).t), t0);
(*result0).s = ((Tstorageloc292812) 2);
(*result0).flags = 0;
constructloc_538388_839829468(p0, (&(*result0)), !(needsinit0));
}
/* Accessor text for an object's inherited part.  When compiling via
   command 2 or when module sym-flag 27 is set (native inheritance is
   available), the accessor is used unchanged; otherwise format
   T..._161 wraps it in an explicit super-field access.
   (Flag semantics inferred from usage — TODO confirm.) */
static N_INLINE(Ropeobj178006*, parentobj_537257_839829468)(Ropeobj178006* accessor0, Tcgen529027* m0) {
NIM_BOOL nativeinh0;
nativeinh0 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (!(nativeinh0)) {
nativeinh0 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
}
if (nativeinh0) {
return accessor0;
}
{
TY178507 fmtArgs0;
memset((void*)fmtArgs0, 0, sizeof(fmtArgs0));
fmtArgs0[0] = accessor0;
return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_161), fmtArgs0, 1);
}
}
/* Render a signed integer as C literal text:
   - values strictly inside (INT32_MIN, INT32_MAX]: plain decimal;
   - exactly INT32_MIN: format T..._166 (the decimal form would not be
     a single valid C token);
   - other values above INT64_MIN: 64-bit literal format T..._167;
   - exactly INT64_MIN: format T..._168. */
N_NIMCALL(Ropeobj178006*, intliteral_539270_839829468)(NI64 i0) {
if ((IL64(-2147483648) < i0) && (i0 <= IL64(2147483647))) {
return rope_178401_2381377266(i0);
}
if (i0 == IL64(-2147483648)) {
TY533289 noargs0;
memset((void*)noargs0, 0, sizeof(noargs0));
return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_166), noargs0, 0);
}
if ((IL64(-9223372036854775807) - IL64(1)) < i0) {
TY178507 onearg0;
memset((void*)onearg0, 0, sizeof(onearg0));
onearg0[0] = rope_178401_2381377266(i0);
return ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), onearg0, 1);
}
{
TY533289 noargs0;
memset((void*)noargs0, 0, sizeof(noargs0));
return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_168), noargs0, 0);
}
}
/* Render i0 as a 64-bit C literal (format T..._167), except for
   INT64_MIN which needs the special form T..._168 because its decimal
   digits alone would overflow a C int64 token. */
N_NIMCALL(Ropeobj178006*, int64literal_549430_839829468)(NI64 i0) {
if ((IL64(-9223372036854775807) - IL64(1)) < i0) {
TY178507 onearg0;
memset((void*)onearg0, 0, sizeof(onearg0));
onearg0[0] = rope_178401_2381377266(i0);
return ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), onearg0, 1);
}
{
TY533289 noargs0;
memset((void*)noargs0, 0, sizeof(noargs0));
return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_168), noargs0, 0);
}
}
/* Render an unsigned 64-bit C literal: the decimal digits of i0
   followed by the 3-character suffix T..._171 (presumably "ULL"). */
N_NIMCALL(Ropeobj178006*, uint64literal_549442_839829468)(NU64 i0) {
NimStringDesc* digits0;
NimStringDesc* text0;
digits0 = HEX24_8401_1689653243(i0);
/* Pre-size for digits plus the 3-char suffix, then concatenate. */
text0 = rawNewString(digits0->Sup.len + 3);
appendString(text0, digits0);
appendString(text0, ((NimStringDesc*) &T839829468_171));
return rope_178277_2381377266(text0);
}
/* Emit a static string-literal definition (format T..._177, with the
   escaped C string and its length) into module data section 8 and
   return the fresh temp name that refers to it.  cgsym pulls in the
   runtime symbol T..._79 the definition depends on. */
N_NIMCALL(Ropeobj178006*, getstrlit_549468_839829468)(Tcgen529027* m0, NimStringDesc* s0) {
Ropeobj178006* name0;
Ropeobj178006* dep0;
TY535238 fmtArgs0;
dep0 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_79));
(void)dep0;
name0 = gettempname_533598_839829468(m0);
memset((void*)fmtArgs0, 0, sizeof(fmtArgs0));
fmtArgs0[0] = name0;
fmtArgs0[1] = makecstring_191638_155036129(s0);
fmtArgs0[2] = rope_178401_2381377266(((NI64) ((s0 ? s0->Sup.len : 0))));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_177), fmtArgs0, 3);
return name0;
}
/* Renders an AST literal node as C source text, dispatching on node
   kind: integer-like literals (5..15) by the target type's kind,
   nil literals (23, with a special static data slot for closures),
   string literals (20..22, either a cached Nim string object or a raw
   C string), float literals (16..18), and anything else is an
   internal error. */
N_NIMCALL(Ropeobj178006*, genliteral_549476_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* ty0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
if (!(ty0 == NIM_NIL)) goto LA3;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_165));
}
LA3: ;
switch ((*n0).kind) {
case ((Tnodekind292020) 5) ... ((Tnodekind292020) 15):
{
/* Integer-like literal: the concrete C spelling depends on the
   (abstract-skipped) target type kind. */
Ttype292840* LOC6;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
switch ((*LOC6).kind) {
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 5):
{
result0 = intliteral_539270_839829468((*n0).kindU.S1.intval);
}
break;
case ((Ttypekind292244) 1):
{
/* Boolean: T..._169 for nonzero, T..._170 for zero. */
{
TY533289 LOC13;
if (!!(((*n0).kindU.S1.intval == IL64(0)))) goto LA11;
memset((void*)LOC13, 0, sizeof(LOC13));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_169), LOC13, 0);
}
goto LA9;
LA11: ;
{
TY533289 LOC15;
memset((void*)LOC15, 0, sizeof(LOC15));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_170), LOC15, 0);
}
LA9: ;
}
break;
case ((Ttypekind292244) 35):
{
result0 = int64literal_549430_839829468((*n0).kindU.S1.intval);
}
break;
case ((Ttypekind292244) 44):
{
result0 = uint64literal_549442_839829468(((NU64) ((*n0).kindU.S1.intval)));
}
break;
default:
{
/* Other ordinal types: cast the int literal to the C type
   (format T..._172). */
TY532811 LOC19;
Ttype292840* LOC20;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (Ttype292840*)0;
LOC20 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
LOC19[0] = gettypedesc_535673_839829468((*p0).module, LOC20);
LOC19[1] = intliteral_539270_839829468((*n0).kindU.S1.intval);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_172), LOC19, 2);
}
break;
}
}
break;
case ((Tnodekind292020) 23):
{
/* Nil literal. */
Ttype292840* t0;
t0 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
{
/* Closure nil (proc type kind 25 with callconv 8) needs a
   static zeroed record: cache it in the module's datacache
   keyed by the node, emitting the definition only once. */
NIM_BOOL LOC24;
NI id0;
Ropeobj178006* LOC28;
LOC24 = (NIM_BOOL)0;
LOC24 = ((*t0).kind == ((Ttypekind292244) 25));
if (!(LOC24)) goto LA25;
LOC24 = ((*t0).callconv == ((Tcallingconvention292002) 8));
LA25: ;
if (!LOC24) goto LA26;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC28 = (Ropeobj178006*)0;
LOC28 = rope_178401_2381377266(((NI64) (id0)));
result0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC28);
{
TY532811 LOC33;
/* id0 equals the current label counter only when the entry
   was newly inserted: emit the definition then. */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA31;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC33[1] = result0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_173), LOC33, 2);
}
LA31: ;
}
goto LA22;
LA26: ;
{
/* Plain pointer nil: literal text T..._174. */
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_174));
}
LA22: ;
}
break;
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
/* String literal. */
{
TY533289 LOC40;
/* Nil string value: format T..._175. */
if (!(*n0).kindU.S3.strval == 0) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_175), LOC40, 0);
}
goto LA36;
LA38: ;
{
Ttype292840* LOC42;
NI id0;
LOC42 = (Ttype292840*)0;
LOC42 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
/* Nim string type (kind 28): cache the literal object per node;
   emit its definition on first use (T..._176), otherwise just
   reference the previously emitted temp (T..._178). */
if (!((*LOC42).kind == ((Ttypekind292244) 28))) goto LA43;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
{
TY178507 LOC49;
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA47;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = getstrlit_549468_839829468((*p0).module, (*n0).kindU.S3.strval);
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_176), LOC49, 1);
}
goto LA45;
LA47: ;
{
TY532811 LOC51;
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = (*(*p0).module).tmpbase;
LOC51[1] = rope_178401_2381377266(((NI64) (id0)));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_178), LOC51, 2);
}
LA45: ;
}
goto LA36;
LA43: ;
{
/* cstring: emit an escaped C string literal directly. */
result0 = makecstring_191638_155036129((*n0).kindU.S3.strval);
}
LA36: ;
}
break;
case ((Tnodekind292020) 16) ... ((Tnodekind292020) 18):
{
/* Float literal: maximum-precision decimal text. */
NimStringDesc* LOC54;
LOC54 = (NimStringDesc*)0;
LOC54 = tostrmaxprecision_298007_3471544153((*n0).kindU.S2.floatval);
result0 = rope_178277_2381377266(LOC54);
}
break;
default:
{
/* Not a literal node: report an internal error with the kind name. */
NimStringDesc* LOC56;
LOC56 = (NimStringDesc*)0;
LOC56 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI292020))->Sup.len + 12);
appendString(LOC56, ((NimStringDesc*) &T839829468_179));
appendString(LOC56, reprEnum((NI)(*n0).kind, (&NTI292020)));
appendChar(LOC56, 41);
internalerror_196100_155036129((*n0).info, LOC56);
result0 = NIM_NIL;
}
break;
}
return result0;
}
/* Convenience overload: render the literal using the node's own type. */
N_NIMCALL(Ropeobj178006*, genliteral_539273_839829468)(Tcproc529021* p0, Tnode292802* n0) {
return genliteral_549476_839829468(p0, n0, (*n0).typ);
}
/* Emits the `case` labels for one case-statement branch: for each
   label expression (all sons except the trailing body), a range node
   (kind 44) becomes either a GNU "case lo ... hi:" extension
   (T..._164, when the C compiler advertises support via its Field20
   property bit 0) or an unrolled sequence of single "case v:" labels
   (T..._180); a plain value becomes one "case v:" label. */
N_NIMCALL(void, gencaserange_537028_839829468)(Tcproc529021* p0, Tnode292802* branch0) {
NI length0;
length0 = len_293081_850551059(branch0);
{
NI j_547677_839829468;
NI HEX3Atmp_547718_839829468;
NI res_547721_839829468;
j_547677_839829468 = (NI)0;
HEX3Atmp_547718_839829468 = (NI)0;
/* length-2: the last son is the branch body, not a label. */
HEX3Atmp_547718_839829468 = (NI)(length0 - ((NI) 2));
res_547721_839829468 = ((NI) 0);
{
while (1) {
if (!(res_547721_839829468 <= HEX3Atmp_547718_839829468)) goto LA3;
j_547677_839829468 = res_547721_839829468;
{
Tnode292802* LOC6;
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
if (!((*LOC6).kind == ((Tnodekind292020) 44))) goto LA7;
{
/* Range label, compiler supports "case a ... b:". */
TY532811 LOC13;
Tnode292802* LOC14;
Tnode292802* LOC15;
Tnode292802* LOC16;
Tnode292802* LOC17;
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 0))&7U)))!=0)) goto LA11;
memset((void*)LOC13, 0, sizeof(LOC13));
LOC14 = (Tnode292802*)0;
LOC14 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC15 = (Tnode292802*)0;
LOC15 = HEX5BHEX5D_293238_850551059(LOC14, ((NI) 0));
LOC13[0] = genliteral_539273_839829468(p0, LOC15);
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC17 = (Tnode292802*)0;
LOC17 = HEX5BHEX5D_293238_850551059(LOC16, ((NI) 1));
LOC13[1] = genliteral_539273_839829468(p0, LOC17);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_164), LOC13, 2);
}
goto LA9;
LA11: ;
{
/* No range-case extension: unroll into one label per value,
   stepping a copied node's intval from lo to hi inclusive. */
Tnode292802* v0;
Tnode292802* LOC19;
Tnode292802* LOC20;
LOC19 = (Tnode292802*)0;
LOC19 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC20 = (Tnode292802*)0;
LOC20 = HEX5BHEX5D_293238_850551059(LOC19, ((NI) 0));
v0 = copynode_296528_850551059(LOC20);
{
while (1) {
Tnode292802* LOC23;
Tnode292802* LOC24;
TY178507 LOC25;
LOC23 = (Tnode292802*)0;
LOC23 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC24 = (Tnode292802*)0;
LOC24 = HEX5BHEX5D_293238_850551059(LOC23, ((NI) 1));
if (!((*v0).kindU.S1.intval <= (*LOC24).kindU.S1.intval)) goto LA22;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = genliteral_539273_839829468(p0, v0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_180), LOC25, 1);
(*v0).kindU.S1.intval += ((NI) 1);
} LA22: ;
}
}
LA9: ;
}
goto LA4;
LA7: ;
{
/* Single-value label. */
TY178507 LOC27;
Tnode292802* LOC28;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Tnode292802*)0;
LOC28 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC27[0] = genliteral_539273_839829468(p0, LOC28);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_180), LOC27, 1);
}
LA4: ;
res_547721_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Recursively emits GC-traversal code for an object's field layout
   AST: record lists (kind 138) recurse over all sons; case/variant
   sections (kind 139) emit a C switch on the discriminator with one
   case per variant branch; symbol nodes (kind 3) delegate to the
   type-level traversal twin (gentraverseproc_537022) for the field's
   type.  Any other node kind is an internal error. */
N_NIMCALL(void, gentraverseproc_537039_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Tnode292802* n0) {
{ {
if (!(n0 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
{
/* Record list: recurse into every son. */
{
NI i_537068_839829468;
NI HEX3Atmp_537239_839829468;
NI LOC7;
NI res_537242_839829468;
i_537068_839829468 = (NI)0;
HEX3Atmp_537239_839829468 = (NI)0;
LOC7 = (NI)0;
LOC7 = sonslen_295351_850551059(n0);
HEX3Atmp_537239_839829468 = (NI)(LOC7 - ((NI) 1));
res_537242_839829468 = ((NI) 0);
{
while (1) {
if (!(res_537242_839829468 <= HEX3Atmp_537239_839829468)) goto LA9;
i_537068_839829468 = res_537242_839829468;
gentraverseproc_537039_839829468(c0, accessor0, (*n0).kindU.S6.sons->data[i_537068_839829468]);
res_537242_839829468 += ((NI) 1);
} LA9: ;
}
}
}
break;
case ((Tnodekind292020) 139):
{
/* Variant record: son 0 is the discriminator symbol; remaining
   sons are the branches. */
Tcproc529021* p0;
Tsym292834* disc0;
TY532811 LOC15;
TY533289 LOC28;
{
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)))) goto LA13;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_162));
}
LA13: ;
p0 = (*c0).p;
disc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
/* Open "switch (accessor.disc) {" (format T..._163). */
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = accessor0;
LOC15[1] = (*disc0).loc.r;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_163), LOC15, 2);
{
NI i_537098_839829468;
NI HEX3Atmp_537249_839829468;
NI LOC17;
NI res_537252_839829468;
i_537098_839829468 = (NI)0;
HEX3Atmp_537249_839829468 = (NI)0;
LOC17 = (NI)0;
LOC17 = sonslen_295351_850551059(n0);
HEX3Atmp_537249_839829468 = (NI)(LOC17 - ((NI) 1));
/* Branches start at son 1 (son 0 is the discriminator). */
res_537252_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* branch0;
Tnode292802* LOC26;
TY533289 LOC27;
if (!(res_537252_839829468 <= HEX3Atmp_537249_839829468)) goto LA19;
i_537098_839829468 = res_537252_839829468;
branch0 = (*n0).kindU.S6.sons->data[i_537098_839829468];
{
/* of-branch (kind 85): emit its case labels; anything else
   is the else-branch (label T..._181, "default:"-like). */
if (!((*branch0).kind == ((Tnodekind292020) 85))) goto LA22;
gencaserange_537028_839829468((*c0).p, branch0);
}
goto LA20;
LA22: ;
{
TY533289 LOC25;
memset((void*)LOC25, 0, sizeof(LOC25));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_181), LOC25, 0);
}
LA20: ;
/* Traverse the branch body (its last son), then close the
   case with T..._182 (break;-like). */
LOC26 = (Tnode292802*)0;
LOC26 = lastson_295364_850551059(branch0);
gentraverseproc_537039_839829468(c0, accessor0, LOC26);
memset((void*)LOC27, 0, sizeof(LOC27));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_182), LOC27, 0);
res_537252_839829468 += ((NI) 1);
} LA19: ;
}
}
/* Close the switch (T..._183). */
memset((void*)LOC28, 0, sizeof(LOC28));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_183), LOC28, 0);
}
break;
case ((Tnodekind292020) 3):
{
/* Plain field: build "accessor.field" (format T..._90) and hand
   off to the type-level traversal generator. */
Tsym292834* field0;
TY532811 LOC34;
Ropeobj178006* LOC35;
field0 = (*n0).kindU.S4.sym;
{
if (!((*field0).loc.t == NIM_NIL)) goto LA32;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184));
}
LA32: ;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = accessor0;
LOC34[1] = (*field0).loc.r;
LOC35 = (Ropeobj178006*)0;
LOC35 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC34, 2);
gentraverseproc_537022_839829468(c0, LOC35, (*field0).loc.t);
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184));
}
break;
}
}BeforeRet: ;
}
/* Appends one formatted, indented line of generated code to section `s0`
 * of proc `p0`: `frmt0` and `args0` are expanded by the rope formatter,
 * the result is indented to the proc's current level, and concatenated
 * onto the section's rope. (Machine-generated C, Nim compiler output.) */
N_NIMCALL(void, linecg_532707_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006** sectionRope = s_529179_3723162438(p0, s0);
Ropeobj178006* formatted = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
Ropeobj178006* indented = indentline_532656_839829468(p0, formatted);
add_178482_2381377266(sectionRope, indented);
}
/* Machine-generated C (Nim compiler backend).  Emits GC-traversal code for a
 * value of type `typ_537027_839829468` reachable through the C expression
 * rope `accessor0`, dispatching on the (unique'd) type kind.
 * NOTE(review): Ttypekind constants are opaque here; the per-case notes are
 * inferred from the visible structure — confirm against the Nim sources. */
N_NIMCALL(void, gentraverseproc_537022_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ_537027_839829468) {
Ttype292840* typ_537302_839829468;
Tcproc529021* p0;
{ {
/* nil type: nothing to traverse */
if (!(typ_537027_839829468 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
typ_537302_839829468 = getuniquetype_528640_2036603609(typ_537027_839829468);
p0 = (*c0).p;
switch ((*typ_537302_839829468).kind) {
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 10):
case ((Ttypekind292244) 8):
{
/* wrapper-like kinds: skip to the last son and recurse */
Ttype292840* LOC6;
LOC6 = (Ttype292840*)0;
LOC6 = lastson_295377_850551059(typ_537302_839829468);
gentraverseproc_537022_839829468(c0, accessor0, LOC6);
}
break;
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
/* fixed-size array: emit a loop over lengthord(sons[0]) with a fresh
 * temp index i0 (T839829468_159 opens the loop, _160 closes it —
 * confirm), and recurse on the element type sons[1] through an
 * indexed accessor (T839829468_138). */
NI64 arraysize0;
Tloc292816 i0;
Ttype292840* LOC8;
TY532811 LOC9;
TY532811 LOC10;
Ropeobj178006* LOC11;
TY533289 LOC12;
arraysize0 = lengthord_320007_3876443242((*typ_537302_839829468).sons->data[((NI) 0)]);
memset((void*)(&i0), 0, sizeof(i0));
LOC8 = (Ttype292840*)0;
LOC8 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC8, (&i0), NIM_FALSE);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = i0.r;
LOC9[1] = rope_178401_2381377266(arraysize0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_159), LOC9, 2);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = accessor0;
LOC10[1] = i0.r;
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC10, 2);
gentraverseproc_537022_839829468(c0, LOC11, (*typ_537302_839829468).sons->data[((NI) 1)]);
memset((void*)LOC12, 0, sizeof(LOC12));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC12, 0);
}
break;
case ((Ttypekind292244) 17):
{
/* object: recurse into each son (base types), adjusting the accessor
 * via parentobj; then traverse the record node (*typ).n if present */
{
NI i_537325_839829468;
NI HEX3Atmp_537384_839829468;
NI LOC15;
NI res_537387_839829468;
i_537325_839829468 = (NI)0;
HEX3Atmp_537384_839829468 = (NI)0;
LOC15 = (NI)0;
LOC15 = sonslen_295327_850551059(typ_537302_839829468);
HEX3Atmp_537384_839829468 = (NI)(LOC15 - ((NI) 1));
res_537387_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* x0;
Ropeobj178006* LOC22;
if (!(res_537387_839829468 <= HEX3Atmp_537384_839829468)) goto LA17;
i_537325_839829468 = res_537387_839829468;
x0 = (*typ_537302_839829468).sons->data[i_537325_839829468];
{
if (!!((x0 == NIM_NIL))) goto LA20;
x0 = skiptypes_296099_850551059(x0, IL64(211106247215360));
}
LA20: ;
LOC22 = (Ropeobj178006*)0;
LOC22 = parentobj_537257_839829468(accessor0, (*(*c0).p).module);
gentraverseproc_537022_839829468(c0, LOC22, x0);
res_537387_839829468 += ((NI) 1);
} LA17: ;
}
}
{
if (!!(((*typ_537302_839829468).n == NIM_NIL))) goto LA25;
gentraverseproc_537039_839829468(c0, accessor0, (*typ_537302_839829468).n);
}
LA25: ;
}
break;
case ((Ttypekind292244) 18):
{
/* tuple: recurse per field through an index-based accessor
 * (format string T839829468_185 — confirm its shape) */
Ttype292840* typ0;
typ0 = getuniquetype_528640_2036603609(typ_537302_839829468);
{
NI i_537363_839829468;
NI HEX3Atmp_537392_839829468;
NI LOC29;
NI res_537395_839829468;
i_537363_839829468 = (NI)0;
HEX3Atmp_537392_839829468 = (NI)0;
LOC29 = (NI)0;
LOC29 = sonslen_295327_850551059(typ0);
HEX3Atmp_537392_839829468 = (NI)(LOC29 - ((NI) 1));
res_537395_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC32;
Ropeobj178006* LOC33;
if (!(res_537395_839829468 <= HEX3Atmp_537392_839829468)) goto LA31;
i_537363_839829468 = res_537395_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = accessor0;
LOC32[1] = rope_178401_2381377266(((NI64) (i_537363_839829468)));
LOC33 = (Ropeobj178006*)0;
LOC33 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_185), LOC32, 2);
gentraverseproc_537022_839829468(c0, LOC33, (*typ0).sons->data[i_537363_839829468]);
res_537395_839829468 += ((NI) 1);
} LA31: ;
}
}
}
break;
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
/* GC'ed pointer-like kinds: emit one call of the closure's visitor
 * format with the accessor as its single argument */
TY178507 LOC35;
memset((void*)LOC35, 0, sizeof(LOC35));
LOC35[0] = accessor0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), (*c0).visitorfrmt, LOC35, 1);
}
break;
case ((Ttypekind292244) 25):
{
/* proc type: only calling convention 8 (closure — confirm) carries a
 * GC'ed environment; visit it via accessor wrapped in T839829468_186 */
{
TY178507 LOC41;
TY178507 LOC42;
if (!((*typ_537302_839829468).callconv == ((Tcallingconvention292002) 8))) goto LA39;
memset((void*)LOC41, 0, sizeof(LOC41));
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = accessor0;
LOC41[0] = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_186), LOC42, 1);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), (*c0).visitorfrmt, LOC41, 1);
}
LA39: ;
}
break;
default:
{
/* all other type kinds contain nothing to traverse */
}
break;
}
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler backend).  Emits traversal code for a
 * seq-like value `accessor0` of type `typ0`: opens a loop over the seq's
 * length with a fresh temp counter i0, recurses on the element type
 * (sons[0]) through an indexed accessor, then closes the loop.
 * NOTE(review): the gcmd/flag test below selects between two length-field
 * format strings (T839829468_157 vs _158) — presumably the C++ vs C seq
 * layout; confirm against the Nim sources. */
N_NIMCALL(void, gentraverseprocseq_537399_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ0) {
Tcproc529021* p0;
Tloc292816 i0;
Ttype292840* LOC1;
TY535238 LOC2;
NimStringDesc* LOC3;
TY532811 LOC11;
Ropeobj178006* LOC12;
TY533289 LOC13;
p0 = (*c0).p;
/* fresh integer temp used as the loop counter */
memset((void*)(&i0), 0, sizeof(i0));
LOC1 = (Ttype292840*)0;
LOC1 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC1, (&i0), NIM_FALSE);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = i0.r;
LOC2[1] = accessor0;
LOC3 = (NimStringDesc*)0;
{
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
LOC6 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC6) goto LA7;
LOC6 = (((*(*(*(*c0).p).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA7: ;
if (!LOC6) goto LA8;
LOC3 = copyString(((NimStringDesc*) &T839829468_157));
}
goto LA4;
LA8: ;
{
LOC3 = copyString(((NimStringDesc*) &T839829468_158));
}
LA4: ;
/* open the loop: counter, accessor and chosen length expression */
LOC2[2] = rope_178277_2381377266(LOC3);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_156), LOC2, 3);
/* element accessor: accessor[i] (format T839829468_187 — confirm) */
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = accessor0;
LOC11[1] = i0.r;
LOC12 = (Ropeobj178006*)0;
LOC12 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_187), LOC11, 2);
gentraverseproc_537022_839829468(c0, LOC12, (*typ0).sons->data[((NI) 0)]);
/* close the loop */
memset((void*)LOC13, 0, sizeof(LOC13));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC13, 0);
}
/* Machine-generated C (Nim compiler backend).  Generates a complete GC
 * marker/traversal C procedure for `typ0`, appends its prototype and body
 * to module sections 7 and 10, and returns the generated proc's name rope.
 * Steps: fresh temp name -> header -> two prologue lines built from the
 * type descriptor -> body (seq path for kind 24, otherwise element type via
 * sons[0]) -> assemble header + proc sections 0..2 into the final text. */
N_NIMCALL(Ropeobj178006*, gentraverseproc_537632_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttypeinforeason537016 reason0) {
Ropeobj178006* result0;
Ttraversalclosure537019 c0;
Tcproc529021* p0;
Ropeobj178006* header0;
TY178507 LOC3;
Ropeobj178006* t0;
TY178507 LOC4;
TY178507 LOC5;
Ropeobj178006* generatedproc0;
TY535235 LOC20;
Ropeobj178006** LOC21;
Ropeobj178006** LOC22;
Ropeobj178006** LOC23;
TY178507 LOC24;
result0 = (Ropeobj178006*)0;
memset((void*)(&c0), 0, sizeof(c0));
p0 = newproc_529206_3723162438(NIM_NIL, m0);
result0 = gettempname_533598_839829468(m0);
switch (reason0) {
case ((Ttypeinforeason537016) 0):
{
/* reason 0 selects the visitor format string used per GC'ed field */
c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_145));
}
break;
default:
{
}
break;
}
/* proc header from the fresh name (format T839829468_146) */
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = result0;
header0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_146), LOC3, 1);
/* prologue lines parameterized by the C type descriptor of typ0 */
t0 = gettypedesc_535673_839829468(m0, typ0);
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = t0;
linef_532700_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_147), LOC4, 1);
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = t0;
linef_532700_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_148), LOC5, 1);
c0.p = p0;
{
/* kind 24 (seq — confirm): loop-based traversal of the elements */
Ropeobj178006* LOC10;
if (!((*typ0).kind == ((Ttypekind292244) 24))) goto LA8;
LOC10 = (Ropeobj178006*)0;
LOC10 = rope_178277_2381377266(((NimStringDesc*) &T839829468_188));
gentraverseprocseq_537399_839829468((&c0), LOC10, typ0);
}
goto LA6;
LA8: ;
{
{
/* pointee kind in the 65552 bitmask: dereferencing accessor _188,
 * otherwise plain accessor _189 — confirm the mask's members */
Ttype292840* LOC14;
Ropeobj178006* LOC17;
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106232576256));
if (!((65552 &((NU64)1<<((NU)((*LOC14).kind)&63U)))!=0)) goto LA15;
LOC17 = (Ropeobj178006*)0;
LOC17 = rope_178277_2381377266(((NimStringDesc*) &T839829468_188));
gentraverseproc_537022_839829468((&c0), LOC17, (*typ0).sons->data[((NI) 0)]);
}
goto LA12;
LA15: ;
{
Ropeobj178006* LOC19;
LOC19 = (Ropeobj178006*)0;
LOC19 = rope_178277_2381377266(((NimStringDesc*) &T839829468_189));
gentraverseproc_537022_839829468((&c0), LOC19, (*typ0).sons->data[((NI) 0)]);
}
LA12: ;
}
LA6: ;
/* stitch header + the proc's three sections into the final body */
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = header0;
LOC21 = (Ropeobj178006**)0;
LOC21 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC20[1] = (*LOC21);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC20[2] = (*LOC22);
LOC23 = (Ropeobj178006**)0;
LOC23 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC20[3] = (*LOC23);
generatedproc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_190), LOC20, 4);
/* prototype into file section 7, definition into file section 10 */
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = header0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC24, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
return result0;
}
/* Emits RTTI for an array type: generates the element type's typeinfo
 * (sons[1]) and feeds it as the base of the array's own typeinfo record.
 * (Machine-generated C, Nim compiler output.) */
N_NIMCALL(void, genarrayinfo_537005_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* elemInfo = gentypeinfo_535941_839829468(m0, (*typ0).sons->data[((NI) 1)]);
gentypeinfoauxbase_535960_839829468(m0, typ0, typ0, name0, elemInfo);
}
/* Emits RTTI for a set type: the common typeinfo record, plus one extra
 * formatted line (node rope, first ordinal of the base type, type name)
 * appended to file section 14.  (Machine-generated C, Nim compiler output.) */
N_NIMCALL(void, gensetinfo_536867_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
TY535238 fmtArgs;
gentypeinfoaux_536027_839829468(m0, typ0, typ0, name0);
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = getnimnode_535945_839829468(m0);
fmtArgs[1] = rope_178401_2381377266(firstord_320001_3876443242(typ0));
fmtArgs[2] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_193), fmtArgs, 3);
}
/* Machine-generated C (Nim compiler backend).  Emits RTTI for an enum type:
 * a node-pointer array sized by the number of enum members, a C string
 * literal array of the member names, per-member fix-up lines for members
 * whose ordinal differs from their index ("holes"), and the final node
 * wiring.  File sections 12 and 14 receive the declarations/statements.
 * NOTE(review): the T839829468_* format-string contents are not visible in
 * this translation unit; the section comments below should be confirmed. */
N_NIMCALL(void, genenuminfo_536599_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* nodeptrs0;
NI length0;
TY532811 LOC1;
Ropeobj178006* enumnames0;
Ropeobj178006* specialcases0;
NI firstnimnode0;
NIM_BOOL hasholes0;
Ropeobj178006* enumarray0;
Ropeobj178006* counter0;
TY178507 LOC24;
TY535238 LOC25;
TY536847 LOC26;
TY535235 LOC27;
gentypeinfoaux_536027_839829468(m0, typ0, typ0, name0);
/* declare the node-pointer array, one slot per enum member */
nodeptrs0 = gettempname_533598_839829468(m0);
length0 = sonslen_295351_850551059((*typ0).n);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = nodeptrs0;
LOC1[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC1, 2);
enumnames0 = (Ropeobj178006*)0;
specialcases0 = (Ropeobj178006*)0;
firstnimnode0 = (*m0).typenodes;
hasholes0 = NIM_FALSE;
{
/* collect member name literals and detect holes/position overrides */
NI i_536624_839829468;
NI HEX3Atmp_536860_839829468;
NI res_536863_839829468;
i_536624_839829468 = (NI)0;
HEX3Atmp_536860_839829468 = (NI)0;
HEX3Atmp_536860_839829468 = (NI)(length0 - ((NI) 1));
res_536863_839829468 = ((NI) 0);
{
while (1) {
Tsym292834* field0;
Ropeobj178006* elemnode0;
if (!(res_536863_839829468 <= HEX3Atmp_536860_839829468)) goto LA4;
i_536624_839829468 = res_536863_839829468;
field0 = (*(*(*typ0).n).kindU.S6.sons->data[i_536624_839829468]).kindU.S4.sym;
elemnode0 = getnimnode_535945_839829468(m0);
{
/* no ast: use the symbol's own name as the display string */
Ropeobj178006* LOC9;
if (!((*field0).ast == NIM_NIL)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = makecstring_191638_155036129((*(*field0).name).s);
add_178482_2381377266(&enumnames0, LOC9);
}
goto LA5;
LA7: ;
{
/* ast present: use the attached string value (renamed display name) */
Ropeobj178006* LOC11;
LOC11 = (Ropeobj178006*)0;
LOC11 = makecstring_191638_155036129((*(*field0).ast).kindU.S3.strval);
add_178482_2381377266(&enumnames0, LOC11);
}
LA5: ;
{
/* separator + newline between entries, except after the last one */
NimStringDesc* LOC16;
if (!(i_536624_839829468 < (NI)(length0 - ((NI) 1)))) goto LA14;
LOC16 = (NimStringDesc*)0;
LOC16 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC16, ((NimStringDesc*) &T839829468_110));
appendString(LOC16, tnl_176644_4151366050);
add_178487_2381377266(&enumnames0, LOC16);
}
LA14: ;
{
/* member position differs from its index, or the type is flagged
 * (Ttypeflag 5): emit a per-member ordinal fix-up line */
NIM_BOOL LOC19;
TY532811 LOC23;
LOC19 = (NIM_BOOL)0;
LOC19 = !(((*field0).position == i_536624_839829468));
if (LOC19) goto LA20;
LOC19 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 5))&31U)))!=0);
LA20: ;
if (!LOC19) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = elemnode0;
LOC23[1] = rope_178401_2381377266(((NI64) ((*field0).position)));
addf_179205_2381377266(&specialcases0, ((NimStringDesc*) &T839829468_194), LOC23, 2);
hasholes0 = NIM_TRUE;
}
LA21: ;
res_536863_839829468 += ((NI) 1);
} LA4: ;
}
}
/* declare the counter and the name-literal array (section 12) */
enumarray0 = gettempname_533598_839829468(m0);
counter0 = gettempname_533598_839829468(m0);
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = counter0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_195), LOC24, 1);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = enumarray0;
LOC25[1] = rope_178401_2381377266(((NI64) (length0)));
LOC25[2] = enumnames0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_196), LOC25, 3);
/* init loop wiring nodes to names (section 14), then the hole fix-ups */
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = counter0;
LOC26[1] = rope_178401_2381377266(((NI64) (length0)));
LOC26[2] = (*m0).typenodesname;
LOC26[3] = rope_178401_2381377266(((NI64) (firstnimnode0)));
LOC26[4] = enumarray0;
LOC26[5] = nodeptrs0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_197), LOC26, 6);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], specialcases0);
/* final node: attach length, node-pointer array and type name */
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = getnimnode_535945_839829468(m0);
LOC27[1] = rope_178401_2381377266(((NI64) (length0)));
LOC27[2] = nodeptrs0;
LOC27[3] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_198), LOC27, 4);
{
/* enums with holes get an extra marker line (format T839829468_199) */
TY178507 LOC32;
if (!hasholes0) goto LA30;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_199), LOC32, 1);
}
LA30: ;
}
/* Machine-generated C (Nim compiler backend).  Computes the C name of the
 * lookup table for a case-object discriminator `d0`: walks up the object
 * hierarchy (sons[0] chain) until the record containing d0's name is found,
 * errors if that object type has no symbol, and formats the result from
 * the owning type's id and the mangled discriminator name (T839829468_201).
 * NOTE(review): the loop assumes the discriminator exists somewhere in the
 * chain; a miss would dereference a nil sons[0] — inherited invariant. */
N_NIMCALL(Ropeobj178006*, discriminatortablename_536057_839829468)(Tcgen529027* m0, Ttype292840* objtype_536060_839829468, Tsym292834* d0) {
Ropeobj178006* result0;
Ttype292840* objtype0;
TY532811 LOC8;
NimStringDesc* LOC9;
result0 = (Ropeobj178006*)0;
objtype0 = objtype_536060_839829468;
{
/* ascend to the object type whose record actually declares d0 */
while (1) {
Tsym292834* LOC3;
LOC3 = (Tsym292834*)0;
LOC3 = lookupinrecord_299119_2984716966((*objtype0).n, (*d0).name);
if (!(LOC3 == NIM_NIL)) goto LA2;
objtype0 = (*objtype0).sons->data[((NI) 0)];
} LA2: ;
}
{
/* anonymous object types cannot name a table */
if (!((*objtype0).sym == NIM_NIL)) goto LA6;
internalerror_196100_155036129((*d0).info, ((NimStringDesc*) &T839829468_200));
}
LA6: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rope_178401_2381377266(((NI64) ((*objtype0).Sup.id)));
LOC9 = (NimStringDesc*)0;
LOC9 = mangle_528847_2036603609((*(*d0).name).s);
LOC8[1] = rope_178277_2381377266(LOC9);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_201), LOC8, 2);
return result0;
}
/* Machine-generated C (Nim compiler backend).  Recursively emits TNimNode
 * initialization code for an object's record layout `n0` into the node
 * named by `expr0`.  Kind 138 = record list (son array), kind 139 =
 * record-case (discriminator + branches + per-value lookup table),
 * kind 3 = plain field symbol.  Output goes to file sections 12/14/8.
 * NOTE(review): node/section constants are opaque here; comments below are
 * inferred from the visible structure and should be confirmed. */
N_NIMCALL(void, genobjectfields_536104_839829468)(Tcgen529027* m0, Ttype292840* typ0, Tnode292802* n0, Ropeobj178006* expr0) {
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
{
NI L0;
L0 = sonslen_295351_850551059(n0);
{
/* exactly one son: collapse into the parent node directly */
if (!(L0 == ((NI) 1))) goto LA4;
genobjectfields_536104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[((NI) 0)], expr0);
}
goto LA2;
LA4: ;
{
/* multiple sons: build a child-node array, recurse per son, then
 * wire expr0 to the array (formats _139/_140/_142) */
Ropeobj178006* tmp0;
TY532811 LOC9;
TY535238 LOC14;
if (!(((NI) 0) < L0)) goto LA7;
tmp0 = gettempname_533598_839829468(m0);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = tmp0;
LOC9[1] = rope_178401_2381377266(((NI64) (L0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC9, 2);
{
NI i_536127_839829468;
NI HEX3Atmp_536482_839829468;
NI res_536485_839829468;
i_536127_839829468 = (NI)0;
HEX3Atmp_536482_839829468 = (NI)0;
HEX3Atmp_536482_839829468 = (NI)(L0 - ((NI) 1));
res_536485_839829468 = ((NI) 0);
{
while (1) {
Ropeobj178006* tmp20;
TY535238 LOC13;
if (!(res_536485_839829468 <= HEX3Atmp_536482_839829468)) goto LA12;
i_536127_839829468 = res_536485_839829468;
tmp20 = getnimnode_535945_839829468(m0);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = tmp0;
LOC13[1] = rope_178401_2381377266(((NI64) (i_536127_839829468)));
LOC13[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC13, 3);
genobjectfields_536104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[i_536127_839829468], tmp20);
res_536485_839829468 += ((NI) 1);
} LA12: ;
}
}
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = expr0;
LOC14[1] = rope_178401_2381377266(((NI64) (L0)));
LOC14[2] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC14, 3);
}
goto LA2;
LA7: ;
{
/* empty record list: emit the zero-length node form (_143) */
TY532811 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = expr0;
LOC16[1] = rope_178401_2381377266(((NI64) (L0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC16, 2);
}
LA2: ;
}
break;
case ((Tnodekind292020) 139):
{
/* record-case: declare the discriminator lookup table sized by the
 * discriminator type's ordinal length (+1), then fill one table slot
 * per branch value */
Tsym292834* field0;
Ropeobj178006* tmp0;
NI64 L0;
TY536401 LOC18;
TY532811 LOC19;
field0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
tmp0 = discriminatortablename_536057_839829468(m0, typ0, field0);
L0 = lengthord_320007_3876443242((*field0).typ);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = expr0;
LOC18[1] = gettypedesc_535673_839829468(m0, typ0);
LOC18[2] = (*field0).loc.r;
LOC18[3] = gentypeinfo_535941_839829468(m0, (*field0).typ);
LOC18[4] = makecstring_191638_155036129((*(*field0).name).s);
LOC18[5] = tmp0;
LOC18[6] = rope_178401_2381377266(L0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_202), LOC18, 7);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = tmp0;
LOC19[1] = rope_178401_2381377266((NI64)(L0 + IL64(1)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_203), LOC19, 2);
{
/* branches are sons 1..len-1 */
NI i_536421_839829468;
NI HEX3Atmp_536501_839829468;
NI LOC21;
NI res_536504_839829468;
i_536421_839829468 = (NI)0;
HEX3Atmp_536501_839829468 = (NI)0;
LOC21 = (NI)0;
LOC21 = sonslen_295351_850551059(n0);
HEX3Atmp_536501_839829468 = (NI)(LOC21 - ((NI) 1));
res_536504_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* b0;
Ropeobj178006* tmp20;
Tnode292802* LOC24;
if (!(res_536504_839829468 <= HEX3Atmp_536501_839829468)) goto LA23;
i_536421_839829468 = res_536504_839829468;
b0 = (*n0).kindU.S6.sons->data[i_536421_839829468];
tmp20 = getnimnode_535945_839829468(m0);
LOC24 = (Tnode292802*)0;
LOC24 = lastson_295364_850551059(b0);
genobjectfields_536104_839829468(m0, typ0, LOC24, tmp20);
switch ((*b0).kind) {
case ((Tnodekind292020) 85):
{
/* of-branch with explicit values: needs at least a value + body */
{
NI LOC28;
LOC28 = (NI)0;
LOC28 = sonslen_295351_850551059(b0);
if (!(LOC28 < ((NI) 2))) goto LA29;
internalerror_196100_155036129((*b0).info, ((NimStringDesc*) &T839829468_204));
}
LA29: ;
{
/* fill a table slot for every value (kind 44 = range a..b,
 * expanded one ordinal at a time; otherwise a single ordinal) */
NI j_536436_839829468;
NI HEX3Atmp_536494_839829468;
NI LOC32;
NI res_536497_839829468;
j_536436_839829468 = (NI)0;
HEX3Atmp_536494_839829468 = (NI)0;
LOC32 = (NI)0;
LOC32 = sonslen_295351_850551059(b0);
HEX3Atmp_536494_839829468 = (NI)(LOC32 - ((NI) 2));
res_536497_839829468 = ((NI) 0);
{
while (1) {
if (!(res_536497_839829468 <= HEX3Atmp_536494_839829468)) goto LA34;
j_536436_839829468 = res_536497_839829468;
{
NI x0;
NI64 LOC39;
NI y0;
NI64 LOC40;
if (!((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kind == ((Tnodekind292020) 44))) goto LA37;
LOC39 = (NI64)0;
LOC39 = getordvalue_320129_3876443242((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kindU.S6.sons->data[((NI) 0)]);
x0 = ((NI) (LOC39));
LOC40 = (NI64)0;
LOC40 = getordvalue_320129_3876443242((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kindU.S6.sons->data[((NI) 1)]);
y0 = ((NI) (LOC40));
{
while (1) {
TY535238 LOC43;
if (!(x0 <= y0)) goto LA42;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = tmp0;
LOC43[1] = rope_178401_2381377266(((NI64) (x0)));
LOC43[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC43, 3);
x0 += ((NI) 1);
} LA42: ;
}
}
goto LA35;
LA37: ;
{
TY535238 LOC45;
NI64 LOC46;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = tmp0;
LOC46 = (NI64)0;
LOC46 = getordvalue_320129_3876443242((*b0).kindU.S6.sons->data[j_536436_839829468]);
LOC45[1] = rope_178401_2381377266(LOC46);
LOC45[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC45, 3);
}
LA35: ;
res_536497_839829468 += ((NI) 1);
} LA34: ;
}
}
}
break;
case ((Tnodekind292020) 88):
{
/* else-branch: stored at table index L0 (one past the last ordinal) */
TY535238 LOC48;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC48[0] = tmp0;
LOC48[1] = rope_178401_2381377266(L0);
LOC48[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC48, 3);
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_205));
}
break;
}
res_536504_839829468 += ((NI) 1);
} LA23: ;
}
}
}
break;
case ((Tnodekind292020) 3):
{
/* plain field: skipped entirely when bitsize != 0 (bitfields have no
 * addressable offset) */
Tsym292834* field0;
field0 = (*n0).kindU.S4.sym;
{
TY536475 LOC55;
if (!((*field0).kindU.S4.bitsize == ((NI) 0))) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = expr0;
LOC55[1] = gettypedesc_535673_839829468(m0, typ0);
LOC55[2] = (*field0).loc.r;
LOC55[3] = gentypeinfo_535941_839829468(m0, (*field0).typ);
LOC55[4] = makecstring_191638_155036129((*(*field0).name).s);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_206), LOC55, 5);
}
LA53: ;
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_207));
}
break;
}
}
/* Machine-generated C (Nim compiler backend).  Emits RTTI for an object
 * type: base typeinfo record (with-base form for non-kind-17, i.e.
 * probably ref-to-object — confirm), the full field-node tree unless the
 * type is an imported C++ type, and finally marks every base type in the
 * inheritance chain with Ttypeflag 5 (presumably "has RTTI/holes-checked";
 * confirm against the Nim sources). */
N_NIMCALL(void, genobjectinfo_536508_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0) {
Ropeobj178006* tmp0;
TY532811 LOC12;
Ttype292840* t0;
{
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA3;
gentypeinfoaux_536027_839829468(m0, typ0, origtype0, name0);
}
goto LA1;
LA3: ;
{
Ropeobj178006* LOC6;
LOC6 = (Ropeobj178006*)0;
LOC6 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
gentypeinfoauxbase_535960_839829468(m0, typ0, origtype0, name0, LOC6);
}
LA1: ;
tmp0 = getnimnode_535945_839829468(m0);
{
/* imported C++ types get no field tree — their layout is foreign */
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
LOC9 = isimportedcpptype_533478_839829468(typ0);
if (!!(LOC9)) goto LA10;
genobjectfields_536104_839829468(m0, typ0, (*typ0).n, tmp0);
}
LA10: ;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = name0;
LOC12[1] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC12, 2);
/* walk the base-type chain (sons[0]) and set flag 5 on each link */
t0 = (*typ0).sons->data[((NI) 0)];
{
while (1) {
if (!!((t0 == NIM_NIL))) goto LA14;
t0 = skiptypes_296099_850551059(t0, IL64(211106247215360));
(*t0).flags |= ((NU32)1)<<((((Ttypeflag292431) 5))%(sizeof(NU32)*8));
t0 = (*t0).sons->data[((NI) 0)];
} LA14: ;
}
}
/* Generates the code for a user-defined deepCopy proc `s0` and wires its
 * location rope into the typeinfo record `result0` via a formatted line
 * appended to file section 14.  (Machine-generated C, Nim compiler output.) */
N_NIMCALL(void, gendeepcopyproc_538066_839829468)(Tcgen529027* m0, Tsym292834* s0, Ropeobj178006* result0) {
TY532811 fmtArgs;
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
genproc_532951_839829468(m0, s0);
fmtArgs[0] = result0;
fmtArgs[1] = (*s0).loc.r;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_208), fmtArgs, 2);
}
/* Machine-generated C (Nim compiler backend).  Main RTTI entry point:
 * returns the C expression rope for the typeinfo of `t_535944_839829468`,
 * generating the typeinfo on first use.  Flow: compute the unique type and
 * its "NTI<id>"-style name (format T839829468_127) -> bail out early if
 * already generated (typeinfomarker set) -> skip kind-13 wrappers ->
 * forward to the owning module if the type belongs elsewhere (emitting an
 * extern declaration locally) -> dispatch per type kind -> hook up any
 * deepCopy override -> wrap the name (formats _128/_117, presumably the
 * surrounding "(...)"/cast text — confirm).
 * NOTE(review): Ttypekind constants are opaque here; per-case notes are
 * inferences from the called helpers. */
N_NIMCALL(Ropeobj178006*, gentypeinfo_535941_839829468)(Tcgen529027* m0, Ttype292840* t_535944_839829468) {
Ropeobj178006* result0;
Ttype292840* origtype0;
Ttype292840* t0;
TY178507 LOC1;
Tsym292834* owner0;
Ttype292840* LOC12;
Ropeobj178006* LOC66;
Ropeobj178006* LOC67;
Ropeobj178006* LOC68;
{ result0 = (Ropeobj178006*)0;
origtype0 = t_535944_839829468;
t0 = getuniquetype_528640_2036603609(t_535944_839829468);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rope_178401_2381377266(((NI64) ((*t0).Sup.id)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_127), LOC1, 1);
{
/* already generated for this module? just wrap and return the name */
NIM_BOOL LOC4;
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
LOC4 = (NIM_BOOL)0;
LOC4 = containsorincl_268862_2627731572((&(*m0).typeinfomarker), (*t0).Sup.id);
if (!LOC4) goto LA5;
LOC7 = (Ropeobj178006*)0;
LOC7 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC8 = (Ropeobj178006*)0;
LOC8 = HEX26_178418_2381377266(LOC7, result0);
LOC9 = (Ropeobj178006*)0;
LOC9 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC8, LOC9);
goto BeforeRet;
}
LA5: ;
{
/* skip kind-13 wrapper types down to their last son */
while (1) {
if (!((*t0).kind == ((Ttypekind292244) 13))) goto LA11;
t0 = lastson_295377_850551059(t0);
} LA11: ;
}
LOC12 = (Ttype292840*)0;
LOC12 = skiptypes_296099_850551059(t0, IL64(211106247256320));
owner0 = getmodule_299123_2984716966((*LOC12).owner);
{
/* type owned by another module: generate it there, emit an extern
 * declaration here (section 9), and return the wrapped name */
Tcgen529027* LOC17;
Ropeobj178006* LOC18;
Ropeobj178006* LOC19;
Ropeobj178006* LOC20;
TY532811 LOC21;
NimStringDesc* LOC22;
Ropeobj178006* LOC23;
Ropeobj178006* LOC24;
Ropeobj178006* LOC25;
if (!!((owner0 == (*m0).module))) goto LA15;
LOC17 = (Tcgen529027*)0;
LOC17 = bmod_529201_3723162438(owner0);
LOC18 = (Ropeobj178006*)0;
LOC18 = gentypeinfo_535941_839829468(LOC17, t0);
LOC19 = (Ropeobj178006*)0;
LOC19 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_129));
LOC20 = (Ropeobj178006*)0;
LOC20 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_130));
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = result0;
LOC22 = (NimStringDesc*)0;
LOC22 = typetostring_320017_3876443242(t0, ((Tprefereddesc320011) 0));
LOC21[1] = rope_178277_2381377266(LOC22);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_131), LOC21, 2);
LOC23 = (Ropeobj178006*)0;
LOC23 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC24 = (Ropeobj178006*)0;
LOC24 = HEX26_178418_2381377266(LOC23, result0);
LOC25 = (Ropeobj178006*)0;
LOC25 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC24, LOC25);
goto BeforeRet;
}
LA15: ;
switch ((*t0).kind) {
case ((Ttypekind292244) 3):
case ((Ttypekind292244) 62):
{
/* kinds with no runtime typeinfo: fixed fallback name (_132) */
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
}
break;
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 23):
{
/* simple scalar-ish kinds: base typeinfo with the fallback as base */
Ropeobj178006* LOC28;
LOC28 = (Ropeobj178006*)0;
LOC28 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
gentypeinfoauxbase_535960_839829468(m0, t0, t0, result0, LOC28);
}
break;
case ((Ttypekind292244) 59):
{
/* kind 59: resolved -> recurse on last son; unresolved -> ICE */
{
Ttype292840* LOC34;
if (!!(((*t0).n == NIM_NIL))) goto LA32;
LOC34 = (Ttype292840*)0;
LOC34 = lastson_295377_850551059(t0);
result0 = gentypeinfo_535941_839829468(m0, LOC34);
}
goto LA30;
LA32: ;
{
NimStringDesc* LOC36;
LOC36 = (NimStringDesc*)0;
LOC36 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC36, ((NimStringDesc*) &T839829468_137));
appendString(LOC36, reprEnum((NI)(*t0).kind, (&NTI292244)));
appendChar(LOC36, 41);
internalerror_196113_155036129(LOC36);
}
LA30: ;
}
break;
case ((Ttypekind292244) 25):
{
/* proc type: non-closure convention -> plain base info; calling
 * convention 8 (closure — confirm) -> tuple-info of the fake
 * (proc, env) closure type */
{
Ropeobj178006* LOC42;
if (!!(((*t0).callconv == ((Tcallingconvention292002) 8)))) goto LA40;
LOC42 = (Ropeobj178006*)0;
LOC42 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
gentypeinfoauxbase_535960_839829468(m0, t0, t0, result0, LOC42);
}
goto LA38;
LA40: ;
{
Ttype292840* LOC44;
LOC44 = (Ttype292840*)0;
LOC44 = fakeclosuretype_537010_839829468((*t0).owner);
gentupleinfo_536551_839829468(m0, LOC44, result0);
}
LA38: ;
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 22):
{
/* seq/ref-like: common info; for GC modes >= 4 also generate and
 * register a traversal (marker) proc (format _192) */
gentypeinfoaux_536027_839829468(m0, t0, t0, result0);
{
Ropeobj178006* markerproc0;
TY532811 LOC50;
if (!(((Tgcmode169080) 4) <= gselectedgc_169133_2607990831)) goto LA48;
markerproc0 = gentraverseproc_537632_839829468(m0, t0, ((Ttypeinforeason537016) 0));
memset((void*)LOC50, 0, sizeof(LOC50));
LOC50[0] = result0;
LOC50[1] = markerproc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_192), LOC50, 2);
}
LA48: ;
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 20):
{
gentypeinfoaux_536027_839829468(m0, t0, t0, result0);
}
break;
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
genarrayinfo_537005_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 19):
{
gensetinfo_536867_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 14):
{
genenuminfo_536599_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 17):
{
genobjectinfo_536508_839829468(m0, t0, origtype0, result0);
}
break;
case ((Ttypekind292244) 18):
{
gentupleinfo_536551_839829468(m0, t0, result0);
}
break;
default:
{
/* no RTTI possible for this kind: internal compiler error */
NimStringDesc* LOC58;
LOC58 = (NimStringDesc*)0;
LOC58 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC58, ((NimStringDesc*) &T839829468_137));
appendString(LOC58, reprEnum((NI)(*t0).kind, (&NTI292244)));
appendChar(LOC58, 41);
internalerror_196113_155036129(LOC58);
}
break;
}
{
/* prefer the unique type's deepCopy, fall back to the original's */
if (!!(((*t0).deepcopy == NIM_NIL))) goto LA61;
gendeepcopyproc_538066_839829468(m0, (*t0).deepcopy, result0);
}
goto LA59;
LA61: ;
{
if (!!(((*origtype0).deepcopy == NIM_NIL))) goto LA64;
gendeepcopyproc_538066_839829468(m0, (*origtype0).deepcopy, result0);
}
goto LA59;
LA64: ;
LA59: ;
/* wrap the bare name in the surrounding text (_128 ... _117) */
LOC66 = (Ropeobj178006*)0;
LOC66 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC67 = (Ropeobj178006*)0;
LOC67 = HEX26_178418_2381377266(LOC66, result0);
LOC68 = (Ropeobj178006*)0;
LOC68 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC67, LOC68);
}BeforeRet: ;
return result0;
}
/* Machine-generated C (Nim compiler backend).  Emits a frame-variable
 * registration line for local symbol `s0` (embedded debugger / stack-trace
 * support — confirm).  Skips entirely unless both option bits in 163840
 * are set, and skips symbols whose skipped type kind falls in the
 * 281475110928384 bitmask (presumably non-traceable kinds — confirm).
 * Side effects: bumps the proc's maxframelen and the innermost block's
 * framelen counters. */
N_NIMCALL(void, localdebuginfo_538449_839829468)(Tcproc529021* p0, Tsym292834* s0) {
Ropeobj178006* a0;
TY535235 LOC16;
NimStringDesc* LOC17;
{ {
if (!!(((163840 & (*p0).options) == 163840))) goto LA3;
goto BeforeRet;
}
LA3: ;
{
Ttype292840* LOC7;
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*s0).typ, IL64(211106240964864));
if (!((IL64(281475110928384) &((NU64)1<<((NU)((*LOC7).kind)&63U)))!=0)) goto LA8;
goto BeforeRet;
}
LA8: ;
/* default accessor takes the address of the loc (prefix T839829468_52);
 * by-reference parameters (kind 3 + ccgintroducedptr) use it directly */
a0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), (*s0).loc.r);
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*s0).kind == ((Tsymkind292435) 3));
if (!(LOC12)) goto LA13;
LOC12 = ccgintroducedptr_533611_839829468(s0);
LA13: ;
if (!LOC12) goto LA14;
a0 = (*s0).loc.r;
}
LA14: ;
/* registration line: slot index, normalized name, accessor, typeinfo */
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rope_178401_2381377266(((NI64) ((*p0).maxframelen)));
LOC17 = (NimStringDesc*)0;
LOC17 = nsuNormalize((*(*s0).name).s);
LOC16[1] = makecstring_191638_155036129(LOC17);
LOC16[2] = a0;
LOC16[3] = gentypeinfo_535941_839829468((*p0).module, (*s0).loc.t);
linef_532700_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_126), LOC16, 4);
(*p0).maxframelen += ((NI) 1);
(*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].framelen += ((NI) 1);
}BeforeRet: ;
}
/* Emits the C declaration line for local symbol s0 into proc p0's
 * declaration section, then records its debug info.
 * NOTE(review): generated-C restyle; T839829468_125 is presumably the
 * ";" terminator string and tnl the target newline — confirm in string table. */
N_NIMCALL(void, assignlocalvar_538614_839829468)(Tcproc529021* p0, Tsym292834* s0) {
	Ropeobj178006* varDecl0;
	Ropeobj178006* withTerm0;
	Ropeobj178006* fullLine0;
	/* build "<decl><terminator><newline>" as one rope */
	varDecl0 = localvardecl_538532_839829468(p0, s0);
	withTerm0 = HEX26_178447_2381377266(varDecl0, ((NimStringDesc*) &T839829468_125));
	fullLine0 = HEX26_178447_2381377266(withTerm0, tnl_176644_4151366050);
	/* section 0 holds the proc's local declarations */
	line_532690_839829468(p0, ((Tcprocsection529011) 0), fullLine0);
	localdebuginfo_538449_839829468(p0, s0);
}
/* Default-initializes local variable v0's location unless either the symbol
 * carries flag bit 12 (presumably "noinit" — confirm against Tsymflag enum)
 * or an immediate assignment will follow anyway.
 * NOTE(review): behavior-identical restyle of generated goto chains. */
N_NIMCALL(void, initlocalvar_538398_839829468)(Tcproc529021* p0, Tsym292834* v0, NIM_BOOL immediateasgn0) {
	NIM_BOOL skipInitFlag0;
	skipInitFlag0 = (((*v0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
	if (!skipInitFlag0 && !immediateasgn0) {
		constructloc_538388_839829468(p0, (&(*v0).loc), NIM_FALSE);
	}
}
/* Fills in the location of the hidden "result" parameter. If the mapped
 * return type is not C-type kind 17 but the Nim return type is invalid as a
 * plain C return value, the result is flagged (bit 0) and its storage class
 * reset to 0 — presumably "pass result by hidden pointer"; confirm against
 * the cgen location-flag enum.
 * NOTE(review): behavior-identical restyle of generated C. */
N_NIMCALL(void, fillresult_533865_839829468)(Tsym292834* param0) {
	TY533289 emptyArgs0;
	Ropeobj178006* resultRope0;
	/* the format string takes no arguments; pass a zeroed array */
	memset((void*)emptyArgs0, 0, sizeof(emptyArgs0));
	resultRope0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_210), emptyArgs0, 0);
	fillloc_532282_839829468((&(*param0).loc), ((Tlockind292808) 4), (*param0).typ, resultRope0, ((Tstorageloc292812) 2));
	/* short-circuit: isinvalidreturntype is only consulted when the mapped
	 * kind differs from 17, matching the original goto-encoded AND */
	if (!(mapreturntype_533447_839829468((*param0).typ) == ((Tctypekind529007) 17))
		&& isinvalidreturntype_533550_839829468((*param0).typ)) {
		(*param0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
		(*param0).loc.s = ((Tstorageloc292812) 0);
	}
}
/* Parameters already have storage; only their debug info needs emitting. */
N_NIMCALL(void, assignparam_538994_839829468)(Tcproc529021* p0, Tsym292834* s0) {
localdebuginfo_538449_839829468(p0, s0);
}
/* For a closure proc (type flag bit 11 set), declares the environment local
 * and emits the init-section line binding it to the closure's env pointer.
 * NOTE(review): generated C; internalerror fires when the expected symbol
 * node (node kind 3) is missing from the proc AST. */
N_NIMCALL(void, closuresetup_560158_839829468)(Tcproc529021* p0, Tsym292834* prc0) {
Tnode292802* ls0;
Tnode292802* LOC5;
Tsym292834* env0;
TY532811 LOC10;
{ {
/* not a closure: nothing to set up */
if (!!((((*(*prc0).typ).flags &(1U<<((NU)(((Ttypeflag292431) 11))&31U)))!=0))) goto LA3;
goto BeforeRet;
}
LA3: ;
/* last son of ast[3] must be the env symbol node */
LOC5 = (Tnode292802*)0;
LOC5 = HEX5BHEX5D_293238_850551059((*prc0).ast, ((NI) 3));
ls0 = lastson_295364_850551059(LOC5);
{
if (!!(((*ls0).kind == ((Tnodekind292020) 3)))) goto LA8;
internalerror_196100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_211));
}
LA8: ;
env0 = (*ls0).kindU.S4.sym;
assignlocalvar_538614_839829468(p0, env0);
/* init-section line: env location + its type descriptor */
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468((&(*env0).loc));
LOC10[1] = gettypedesc_535673_839829468((*p0).module, (*env0).typ);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_212), LOC10, 2);
}BeforeRet: ;
}
/* Returns the GC-frame initialization snippet for proc p0, or NIM_NIL when
 * the proc registered no GC-visible slots (gcframeid == 0).
 * NOTE(review): behavior-identical restyle of generated C. */
N_NIMCALL(Ropeobj178006*, initgcframe_538435_839829468)(Tcproc529021* p0) {
	Ropeobj178006* frameInit0;
	frameInit0 = (Ropeobj178006*)0;
	if (((NI) 0) < ((NI) ((*p0).gcframeid))) {
		TY178507 fmtArgs0;
		memset((void*)fmtArgs0, 0, sizeof(fmtArgs0));
		fmtArgs0[0] = (*p0).gcframetype;
		frameInit0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_217), fmtArgs0, 1);
	}
	return frameInit0;
}
/* Builds the stack-trace frame setup snippet for a proc: the "with locals"
 * variant (T_220) when the proc recorded frame variables, otherwise the
 * plain variant (T_221). cgsym pulls in the required runtime symbols. */
N_NIMCALL(Ropeobj178006*, initframe_560140_839829468)(Tcproc529021* p0, Ropeobj178006* procname0, Ropeobj178006* filename0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
/* ensure the frame runtime symbol is generated (return value unused) */
LOC1 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_218));
{
Ropeobj178006* LOC6;
TY535235 LOC7;
if (!(((NI) 0) < (*p0).maxframelen)) goto LA4;
LOC6 = (Ropeobj178006*)0;
LOC6 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_219));
/* args: proc name, file name, total frame len, outermost block's len */
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = procname0;
LOC7[1] = filename0;
LOC7[2] = rope_178401_2381377266(((NI64) ((*p0).maxframelen)));
LOC7[3] = rope_178401_2381377266(((NI64) ((*p0).blocks->data[((NI) 0)].framelen)));
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_220), LOC7, 4);
}
goto LA2;
LA4: ;
{
/* no locals recorded: simple frame push with name and file only */
TY532811 LOC9;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = procname0;
LOC9[1] = filename0;
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_221), LOC9, 2);
}
LA2: ;
return result0;
}
/* Formats frmt0 with args0 via the cgen format interpreter (ropecg) and
 * appends the result to section s0 of proc p0.
 * NOTE(review): behavior-identical restyle of generated C. */
N_NIMCALL(void, appcg_532648_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
	Ropeobj178006** targetSection0;
	Ropeobj178006* formatted0;
	targetSection0 = s_529179_3723162438(p0, s0);
	formatted0 = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
	add_178482_2381377266(targetSection0, formatted0);
}
/* Returns the GC-frame teardown snippet for proc p0, or NIM_NIL when the
 * proc registered no GC-visible slots. Mirror of initgcframe.
 * NOTE(review): behavior-identical restyle of generated C. */
N_NIMCALL(Ropeobj178006*, deinitgcframe_538441_839829468)(Tcproc529021* p0) {
	Ropeobj178006* frameDeinit0;
	frameDeinit0 = (Ropeobj178006*)0;
	if (((NI) 0) < ((NI) ((*p0).gcframeid))) {
		TY533289 noArgs0;
		/* format string takes no arguments */
		memset((void*)noArgs0, 0, sizeof(noArgs0));
		frameDeinit0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_225), noArgs0, 0);
	}
	return frameDeinit0;
}
/* Returns the stack-trace frame pop snippet (T_226); the format string
 * takes no arguments. NOTE(review): behavior-identical restyle. */
N_NIMCALL(Ropeobj178006*, deinitframe_560150_839829468)(Tcproc529021* p0) {
	TY533289 noArgs0;
	memset((void*)noArgs0, 0, sizeof(noArgs0));
	return ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_226), noArgs0, 0);
}
/* Generates the full C body for proc prc0 into module m0's proc section:
 * sets up the hidden result variable, emits parameter debug info, closure
 * env, the statement body, and assembles header + decl/init/stmt sections
 * with optional GC frame and stack-trace frame bracketing.
 * NOTE(review): machine-generated C; the LAnn labels/gotos encode if/else
 * chains and short-circuit booleans — do not reorder statements. */
N_NIMCALL(void, genprocaux_560284_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
Tcproc529021* p0;
Ropeobj178006* header0;
Ropeobj178006* returnstmt0;
Tnode292802* LOC51;
Ropeobj178006* generatedproc0;
p0 = newproc_529206_3723162438(prc0, m0);
header0 = genprocheader_535867_839829468(m0, prc0);
returnstmt0 = NIM_NIL;
{
/* proc has a non-nil return type and is not flagged bit 9 (presumably
 * "no return value" / imported — confirm against Tsymflag enum) */
NIM_BOOL LOC3;
Tsym292834* res0;
LOC3 = (NIM_BOOL)0;
LOC3 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
if (!(LOC3)) goto LA4;
LOC3 = !(((*(*prc0).typ).sons->data[((NI) 0)] == NIM_NIL));
LA4: ;
if (!LOC3) goto LA5;
{
/* the result symbol lives at ast[7]; its absence is a compiler bug */
NI LOC9;
LOC9 = (NI)0;
LOC9 = len_293081_850551059((*prc0).ast);
if (!(LOC9 <= ((NI) 7))) goto LA10;
internalerror_196100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_120));
}
LA10: ;
res0 = (*(*(*prc0).ast).kindU.S6.sons->data[((NI) 7)]).kindU.S4.sym;
{
NIM_BOOL LOC14;
TY178507 LOC34;
LOC14 = (NIM_BOOL)0;
LOC14 = isinvalidreturntype_533550_839829468((*(*prc0).typ).sons->data[((NI) 0)]);
if (!!(LOC14)) goto LA15;
/* return type is valid as a plain C return value */
{
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA19;
(*res0).flags |= ((NU32)1)<<((((Tsymflag292184) 12))%(sizeof(NU32)*8));
}
LA19: ;
{
/* "easy result assignment" optimization: when flag 12 is set and we
 * target C++ (command 2) or a C++-flagged module, try to fold the body's
 * trailing result assignment into the declaration */
NIM_BOOL LOC23;
NIM_BOOL LOC24;
NIM_BOOL LOC26;
Tnode292802* val0;
Tnode292802* LOC29;
Ropeobj178006* decl0;
Tloc292816 a0;
TY532811 LOC32;
LOC23 = (NIM_BOOL)0;
LOC24 = (NIM_BOOL)0;
LOC24 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
if (!(LOC24)) goto LA25;
LOC26 = (NIM_BOOL)0;
LOC26 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC26) goto LA27;
LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA27: ;
LOC24 = LOC26;
LA25: ;
LOC23 = LOC24;
if (!(LOC23)) goto LA28;
LOC29 = (Tnode292802*)0;
LOC29 = getbody_335226_1724185294(prc0);
val0 = easyresultasgn_560191_839829468(LOC29);
LOC23 = !((val0 == NIM_NIL));
LA28: ;
if (!LOC23) goto LA30;
/* emit "decl = value;" directly in the init section */
decl0 = localvardecl_538532_839829468(p0, res0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexprsingleuse_539289_839829468(p0, val0, (&a0));
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = decl0;
LOC32[1] = rdloc_538188_839829468((&a0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC32, 2);
}
goto LA21;
LA30: ;
{
/* fallback: declare and default-init the result variable */
assignlocalvar_538614_839829468(p0, res0);
initlocalvar_538398_839829468(p0, res0, NIM_FALSE);
}
LA21: ;
/* "return result;" trailer appended after the body later */
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = rdloc_538188_839829468((&(*res0).loc));
returnstmt0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_209), LOC34, 1);
}
goto LA12;
LA15: ;
{
/* invalid C return type: result becomes a hidden out-parameter */
fillresult_533865_839829468(res0);
assignparam_538994_839829468(p0, res0);
{
Ttype292840* LOC38;
LOC38 = (Ttype292840*)0;
LOC38 = skiptypes_296099_850551059((*res0).typ, IL64(211106232576256));
if (!((*LOC38).kind == ((Ttypekind292244) 16))) goto LA39;
(*res0).loc.s = ((Tstorageloc292812) 0);
}
LA39: ;
}
LA12: ;
}
LA5: ;
{
/* emit debug info for each runtime parameter (1..sonslen-1), skipping
 * compile-time-only ones */
NI i_560627_839829468;
NI HEX3Atmp_560743_839829468;
NI LOC42;
NI res_560746_839829468;
i_560627_839829468 = (NI)0;
HEX3Atmp_560743_839829468 = (NI)0;
LOC42 = (NI)0;
LOC42 = sonslen_295351_850551059((*(*prc0).typ).n);
HEX3Atmp_560743_839829468 = (NI)(LOC42 - ((NI) 1));
res_560746_839829468 = ((NI) 1);
{
while (1) {
if (!(res_560746_839829468 <= HEX3Atmp_560743_839829468)) goto LA44;
i_560627_839829468 = res_560746_839829468;
{
Tsym292834* param0;
param0 = (*(*(*(*prc0).typ).n).kindU.S6.sons->data[i_560627_839829468]).kindU.S4.sym;
{
NIM_BOOL LOC48;
LOC48 = (NIM_BOOL)0;
LOC48 = iscompiletimeonly_328706_3876443242((*param0).typ);
if (!LOC48) goto LA49;
goto LA45;
}
LA49: ;
assignparam_538994_839829468(p0, param0);
} LA45: ;
res_560746_839829468 += ((NI) 1);
} LA44: ;
}
}
closuresetup_560158_839829468(p0, prc0);
/* generate the proc body statements into p0's sections */
LOC51 = (Tnode292802*)0;
LOC51 = getbody_335226_1724185294(prc0);
genstmts_539244_839829468(p0, LOC51);
generatedproc0 = (Ropeobj178006*)0;
{
/* compiler-specific prefix for flag-14 procs, if the C compiler supports
 * the relevant property (bit 6 of its capability field) */
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0)) goto LA54;
{
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0)) goto LA58;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_213), header0);
}
LA58: ;
}
LA54: ;
{
/* flag-9 procs: compact assembly of header + three sections (T_215) */
TY535235 LOC68;
Ropeobj178006** LOC69;
Ropeobj178006** LOC70;
Ropeobj178006** LOC71;
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)) goto LA62;
{
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0)) goto LA66;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_214), header0);
}
LA66: ;
memset((void*)LOC68, 0, sizeof(LOC68));
LOC68[0] = header0;
LOC69 = (Ropeobj178006**)0;
LOC69 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC68[1] = (*LOC69);
LOC70 = (Ropeobj178006**)0;
LOC70 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC68[2] = (*LOC70);
LOC71 = (Ropeobj178006**)0;
LOC71 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC68[3] = (*LOC71);
generatedproc0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_215), LOC68, 4);
}
goto LA60;
LA62: ;
{
/* regular procs: header, GC frame, optional stack-trace frame, the three
 * sections, BeforeRet label if needed, teardown, return statement */
TY178507 LOC73;
Ropeobj178006* LOC74;
Ropeobj178006** LOC93;
Ropeobj178006** LOC94;
Ropeobj178006* LOC101;
TY533289 LOC107;
Ropeobj178006* LOC108;
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = header0;
generatedproc0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_216), LOC73, 1);
LOC74 = (Ropeobj178006*)0;
LOC74 = initgcframe_538435_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC74);
{
/* option bit 15 (presumably stack-trace enabled — confirm): emit frame
 * push before the declarations */
Ropeobj178006** LOC79;
Ropeobj178006* procname0;
Ropeobj178006* LOC80;
Ropeobj178006* LOC81;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA77;
LOC79 = (Ropeobj178006**)0;
LOC79 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&generatedproc0, (*LOC79));
procname0 = makecstring_191638_155036129((*(*prc0).name).s);
LOC80 = (Ropeobj178006*)0;
LOC80 = quotedfilename_196818_155036129((*prc0).info);
LOC81 = (Ropeobj178006*)0;
LOC81 = initframe_560140_839829468(p0, procname0, LOC80);
add_178482_2381377266(&generatedproc0, LOC81);
}
goto LA75;
LA77: ;
{
Ropeobj178006** LOC83;
LOC83 = (Ropeobj178006**)0;
LOC83 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&generatedproc0, (*LOC83));
}
LA75: ;
{
/* option bit 19: extra prologue snippet (T_222) */
TY533289 LOC88;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 19))&31U)))!=0)) goto LA86;
memset((void*)LOC88, 0, sizeof(LOC88));
appcg_532648_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_222), LOC88, 0);
}
LA86: ;
{
if (!(*p0).beforeretneeded) goto LA91;
add_178487_2381377266(&generatedproc0, ((NimStringDesc*) &T839829468_223));
}
LA91: ;
LOC93 = (Ropeobj178006**)0;
LOC93 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
add_178482_2381377266(&generatedproc0, (*LOC93));
LOC94 = (Ropeobj178006**)0;
LOC94 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(&generatedproc0, (*LOC94));
{
/* close the block opened for BeforeRet and place the label */
TY533289 LOC99;
Ropeobj178006* LOC100;
if (!(*p0).beforeretneeded) goto LA97;
memset((void*)LOC99, 0, sizeof(LOC99));
LOC100 = (Ropeobj178006*)0;
LOC100 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_224), LOC99, 0);
add_178482_2381377266(&generatedproc0, LOC100);
}
LA97: ;
LOC101 = (Ropeobj178006*)0;
LOC101 = deinitgcframe_538441_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC101);
{
Ropeobj178006* LOC106;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA104;
LOC106 = (Ropeobj178006*)0;
LOC106 = deinitframe_560150_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC106);
}
LA104: ;
add_178482_2381377266(&generatedproc0, returnstmt0);
memset((void*)LOC107, 0, sizeof(LOC107));
LOC108 = (Ropeobj178006*)0;
LOC108 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_227), LOC107, 0);
add_178482_2381377266(&generatedproc0, LOC108);
}
LA60: ;
/* append the finished proc to file section 10 (proc bodies) */
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
}
/* Looks up the code-gen module object that owns symbol s0 via the global
 * module table, indexed by the owning module's position. The m0 parameter
 * is unused but kept for interface compatibility.
 * NOTE(review): behavior-identical restyle of generated C. */
N_NIMCALL(Tcgen529027*, findpendingmodule_532241_839829468)(Tcgen529027* m0, Tsym292834* s0) {
	Tsym292834* owningModule0;
	owningModule0 = getmodule_299123_2984716966(s0);
	return gmodules_529170_3723162438->data[(*owningModule0).position];
}
/* True when lib0's path is a typed call-like node (node kinds 26..32)
 * whose result type kind is inside bitmask 100663296 — i.e. the dynlib
 * path is an expression like getProcAddr(...) rather than a literal.
 * NOTE(review): restyle of generated short-circuit goto chains; the &&
 * ordering below preserves the original evaluation order (the type is
 * only dereferenced after the nil check). */
N_NIMCALL(NIM_BOOL, isgetprocaddr_559443_839829468)(Tlib292820* lib0) {
	Tnode292802* pathNode0;
	NIM_BOOL isCallLike0;
	pathNode0 = (*lib0).path;
	isCallLike0 = ((*pathNode0).kind == ((Tnodekind292020) 27) || (*pathNode0).kind == ((Tnodekind292020) 29) || (*pathNode0).kind == ((Tnodekind292020) 30) || (*pathNode0).kind == ((Tnodekind292020) 31) || (*pathNode0).kind == ((Tnodekind292020) 26) || (*pathNode0).kind == ((Tnodekind292020) 28) || (*pathNode0).kind == ((Tnodekind292020) 32));
	return (isCallLike0
		&& !((*pathNode0).typ == NIM_NIL)
		&& ((100663296 &((NU64)1<<((NU)((*(*pathNode0).typ).kind)&63U)))!=0));
}
/* Initializes result0 as a fresh expression location for node e0's type,
 * then generates code for e0 into it. */
N_NIMCALL(void, initlocexpr_539283_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0) {
initloc_532273_839829468(result0, ((Tlockind292808) 0), (*e0).typ, ((Tstorageloc292812) 0));
expr_539248_839829468(p0, e0, result0);
}
/* Emits the once-per-lib code that loads a dynamic library: a handle
 * variable plus either a candidate-name load chain (string-literal path)
 * or an expression-evaluated load. internalerror if no name was produced.
 * NOTE(review): machine-generated C; goto/label pairs encode if/else. */
N_NIMCALL(void, loaddynamiclib_559481_839829468)(Tcgen529027* m0, Tlib292820* lib0) {
{
Ropeobj178006* tmp0;
TY178507 LOC5;
/* only generate the load code once per lib */
if (!!((*lib0).generated)) goto LA3;
(*lib0).generated = NIM_TRUE;
tmp0 = gettempname_533598_839829468(m0);
asgnRefNoCycle((void**) (&(*lib0).name), tmp0);
/* declare the library handle variable (file section 9) */
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_228), LOC5, 1);
{
TY135002* s0;
Ropeobj178006* loadlib0;
TY532811 LOC18;
/* string-literal path (node kinds 20..22): expand to OS-specific
 * candidate names and try each in turn */
if (!((*(*lib0).path).kind >= ((Tnodekind292020) 20) && (*(*lib0).path).kind <= ((Tnodekind292020) 22))) goto LA8;
s0 = (TY135002*) newSeq((&NTI135002), 0);
libcandidates_170605_2607990831((*(*lib0).path).kindU.S3.strval, (&s0));
rawmessage_194612_155036129(((Tmsgkind191002) 286), (*(*lib0).path).kindU.S3.strval);
loadlib0 = NIM_NIL;
{
NI i_559847_839829468;
NI HEX3Atmp_559902_839829468;
NI res_559905_839829468;
i_559847_839829468 = (NI)0;
HEX3Atmp_559902_839829468 = (NI)0;
HEX3Atmp_559902_839829468 = (s0 ? (s0->Sup.len-1) : -1);
res_559905_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC17;
if (!(res_559905_839829468 <= HEX3Atmp_559902_839829468)) goto LA12;
i_559847_839829468 = res_559905_839829468;
(*m0).labels += ((NI) 1);
{
/* join candidates with the T_229 separator (presumably "||") */
if (!(((NI) 0) < i_559847_839829468)) goto LA15;
add_178487_2381377266(&loadlib0, ((NimStringDesc*) &T839829468_229));
}
LA15: ;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = tmp0;
LOC17[1] = getstrlit_549468_839829468(m0, s0->data[i_559847_839829468]);
appcg_532632_839829468(m0, &loadlib0, ((NimStringDesc*) &T839829468_230), LOC17, 2);
res_559905_839829468 += ((NI) 1);
} LA12: ;
}
}
/* emit the combined load-or-fail statement into file section 16 */
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = loadlib0;
LOC18[1] = getstrlit_549468_839829468(m0, (*(*lib0).path).kindU.S3.strval);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_231), LOC18, 2);
}
goto LA6;
LA8: ;
{
/* non-literal path: evaluate the path expression in a scratch proc and
 * load from its runtime value */
Tcproc529021* p0;
Tloc292816 dest0;
Ropeobj178006** LOC20;
Ropeobj178006** LOC21;
Ropeobj178006** LOC22;
TY532811 LOC23;
p0 = newproc_529206_3723162438(NIM_NIL, m0);
/* strip debug/trace options (mask 163840) for the helper code */
(*p0).options = ((*p0).options & ~ 163840);
memset((void*)(&dest0), 0, sizeof(dest0));
initlocexpr_539283_839829468(p0, (*lib0).path, (&dest0));
LOC20 = (Ropeobj178006**)0;
LOC20 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], (*LOC20));
LOC21 = (Ropeobj178006**)0;
LOC21 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 16))- 0], (*LOC21));
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 16))- 0], (*LOC22));
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = tmp0;
LOC23[1] = rdloc_538188_839829468((&dest0));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_232), LOC23, 2);
}
LA6: ;
}
LA3: ;
{
/* a generated lib must always end up with a handle name */
if (!((*lib0).name == NIM_NIL)) goto LA26;
internalerror_196113_155036129(((NimStringDesc*) &T839829468_233));
}
LA26: ;
}
/* Produces the C identifier used for a dynlib-imported symbol: the raw Nim
 * name when flag bit 16 is set (presumably "exported as-is" — confirm
 * against Tsymflag enum), otherwise a synthetic name built from the
 * symbol's unique id via format string T_234.
 * NOTE(review): behavior-identical restyle of generated goto-based if/else. */
N_NIMCALL(Ropeobj178006*, mangledynlibproc_538816_839829468)(Tsym292834* sym0) {
	if ((((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 16))&31U)))!=0)) {
		return rope_178277_2381377266((*(*sym0).name).s);
	} else {
		TY178507 fmtArgs0;
		memset((void*)fmtArgs0, 0, sizeof(fmtArgs0));
		fmtArgs0[0] = rope_178401_2381377266(((NI64) ((*sym0).Sup.id)));
		return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_234), fmtArgs0, 1);
	}
}
/* Binds symbol sym0 to its dynamic-library slot: renames its location to a
 * mangled dynlib proc name, declares the pointer variable, and emits the
 * load — either via a user getProcAddr-style call expression (iscall0) or
 * via the standard lib-handle lookup.
 * FIX: lines formerly read "¶ms0" — HTML-entity corruption of "&params0"
 * ("&para" was decoded to the pilcrow sign); restored so the file compiles.
 * NOTE(review): otherwise byte-identical machine-generated C. */
N_NIMCALL(void, symindynamiclib_559929_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Tlib292820* lib0;
NIM_BOOL iscall0;
Ropeobj178006* extname0;
Ropeobj178006* tmp0;
TY532811 LOC43;
lib0 = (*sym0).annex;
iscall0 = isgetprocaddr_559443_839829468(lib0);
extname0 = (*sym0).loc.r;
{
/* plain dynlib import: make sure the library itself is loaded first */
if (!!(iscall0)) goto LA3;
loaddynamiclib_559481_839829468(m0, lib0);
}
LA3: ;
tmp0 = mangledynlibproc_538816_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0);
asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL);
(*m0).labels += ((NI) 2);
{
Tnode292802* n0;
Tloc292816 a0;
Tnode292802* LOC9;
Ropeobj178006* params0;
Ropeobj178006* LOC10;
Ropeobj178006* load0;
TY535235 LOC17;
NimStringDesc* LOC18;
Tnode292802* last0;
NimStringDesc* idx0;
if (!iscall0) goto LA7;
/* getProcAddr-style: evaluate the callee and every argument except the
 * trailing name literal, building the C argument list */
n0 = (*lib0).path;
memset((void*)(&a0), 0, sizeof(a0));
LOC9 = (Tnode292802*)0;
LOC9 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
initlocexpr_539283_839829468((*m0).initproc, LOC9, (&a0));
LOC10 = (Ropeobj178006*)0;
LOC10 = rdloc_538188_839829468((&a0));
params0 = HEX26_178447_2381377266(LOC10, ((NimStringDesc*) &T839829468_118));
{
NI i_559964_839829468;
NI HEX3Atmp_560025_839829468;
NI LOC12;
NI res_560028_839829468;
i_559964_839829468 = (NI)0;
HEX3Atmp_560025_839829468 = (NI)0;
LOC12 = (NI)0;
LOC12 = len_293081_850551059(n0);
HEX3Atmp_560025_839829468 = (NI)(LOC12 - ((NI) 2));
res_560028_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* LOC15;
Ropeobj178006* LOC16;
if (!(res_560028_839829468 <= HEX3Atmp_560025_839829468)) goto LA14;
i_559964_839829468 = res_560028_839829468;
LOC15 = (Tnode292802*)0;
LOC15 = HEX5BHEX5D_293238_850551059(n0, i_559964_839829468);
initlocexpr_539283_839829468((*m0).initproc, LOC15, (&a0));
LOC16 = (Ropeobj178006*)0;
LOC16 = rdloc_538188_839829468((&a0));
add_178482_2381377266(&params0, LOC16);
add_178487_2381377266(&params0, ((NimStringDesc*) &T839829468_110));
res_560028_839829468 += ((NI) 1);
} LA14: ;
}
}
/* build the "tmp = (type) call(params, "extname")" load statement */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = tmp0;
LOC17[1] = gettypedesc_535673_839829468(m0, (*sym0).typ);
LOC17[2] = params0;
LOC18 = (NimStringDesc*)0;
LOC18 = HEX24_178856_2381377266(extname0);
LOC17[3] = makecstring_191638_155036129(LOC18);
load0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_235), LOC17, 4);
/* the last son carries the extension-loader index string literal */
last0 = lastson_295364_850551059(n0);
{
if (!((*last0).kind == ((Tnodekind292020) 58))) goto LA21;
last0 = (*last0).kindU.S6.sons->data[((NI) 1)];
}
LA21: ;
{
NimStringDesc* LOC27;
if (!!(((*last0).kind == ((Tnodekind292020) 20)))) goto LA25;
LOC27 = (NimStringDesc*)0;
LOC27 = HEX24_196185_1689653243(T839829468_236);
internalerror_196113_155036129(LOC27);
}
LA25: ;
idx0 = (*last0).kindU.S3.strval;
{
Ropeobj178006** LOC32;
/* empty index: load in the module init proc */
if (!((idx0 ? idx0->Sup.len : 0) == ((NI) 0))) goto LA30;
LOC32 = (Ropeobj178006**)0;
LOC32 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC32, load0);
}
goto LA28;
LA30: ;
{
NIM_BOOL LOC34;
/* single digit '0'..'9': defer to the numbered extension loader */
LOC34 = (NIM_BOOL)0;
LOC34 = ((idx0 ? idx0->Sup.len : 0) == ((NI) 1));
if (!(LOC34)) goto LA35;
LOC34 = (((NU8)(idx0->data[((NI) 0)])) >= ((NU8)(48)) && ((NU8)(idx0->data[((NI) 0)])) <= ((NU8)(57)));
LA35: ;
if (!LOC34) goto LA36;
add_178482_2381377266(&(*m0).extensionloaders[(((NU8)(idx0->data[((NI) 0)])))- 48], load0);
}
goto LA28;
LA36: ;
{
NimStringDesc* LOC39;
LOC39 = (NimStringDesc*)0;
LOC39 = rawNewString(idx0->Sup.len + 13);
appendString(LOC39, ((NimStringDesc*) &T839829468_237));
appendString(LOC39, idx0);
internalerror_196100_155036129((*sym0).info, LOC39);
}
LA28: ;
}
goto LA5;
LA7: ;
{
/* standard path: look the symbol up in the loaded lib handle */
TY535235 LOC41;
NimStringDesc* LOC42;
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = tmp0;
LOC41[1] = gettypedesc_535673_839829468(m0, (*sym0).typ);
LOC41[2] = (*lib0).name;
LOC42 = (NimStringDesc*)0;
LOC42 = HEX24_178856_2381377266(extname0);
LOC41[3] = makecstring_191638_155036129(LOC42);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_238), LOC41, 4);
}
LA5: ;
/* declare the proc-pointer variable in file section 9 */
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = (*sym0).loc.r;
LOC43[1] = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_239), LOC43, 2);
}
/* Partial binding: rename the symbol's location to its mangled dynlib name
 * without emitting any load code (the owning module generates the load). */
N_NIMCALL(void, symindynamiclibpartial_560071_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
asgnRefNoCycle((void**) (&(*sym0).loc.r), mangledynlibproc_538816_839829468(sym0));
asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL);
}
/* Generates (or routes) code for proc prc0, ignoring forward declarations:
 * header-only symbols, inline procs, dynlib imports, and ordinary procs
 * each take a different branch.
 * NOTE(review): machine-generated C; LAnn gotos encode if/elif chains. */
N_NIMCALL(void, genprocnoforward_560906_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
{ fillprocloc_539201_839829468(prc0);
useheader_532369_839829468(m0, prc0);
{
Ropeobj178006* LOC5;
/* location flag bit 7: proc comes from a C header — only pull in the
 * compiler-proc symbol, generate nothing */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 7))&15U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = cgsym_532403_839829468(m0, (*(*prc0).name).s);
goto BeforeRet;
}
LA3: ;
genprocprototype_539254_839829468(m0, prc0);
{
/* location flag bit 3 (presumably "no decl") set: prototype is enough */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA8;
}
goto LA6;
LA8: ;
{
/* calling convention 5 (presumably inline — confirm against the
 * Tcallingconvention enum): emit the body into *this* module once */
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA11;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*prc0).Sup.id);
if (!!(LOC15)) goto LA16;
genprocaux_560284_839829468(m0, prc0);
}
LA16: ;
}
goto LA6;
LA11: ;
{
/* location flag bit 4: dynlib import — bind in the owning module, or
 * partially here if it was already declared there */
Tcgen529027* q0;
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA19;
q0 = findpendingmodule_532241_839829468(m0, prc0);
{
NIM_BOOL LOC23;
NIM_BOOL LOC25;
LOC23 = (NIM_BOOL)0;
LOC23 = !((q0 == NIM_NIL));
if (!(LOC23)) goto LA24;
LOC25 = (NIM_BOOL)0;
LOC25 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id);
LOC23 = !(LOC25);
LA24: ;
if (!LOC23) goto LA26;
symindynamiclib_559929_839829468(q0, prc0);
}
goto LA21;
LA26: ;
{
symindynamiclibpartial_560071_839829468(m0, prc0);
}
LA21: ;
}
goto LA6;
LA19: ;
{
/* ordinary proc without flag bit 5: generate its body once, in the
 * module that owns it */
Tcgen529027* q0;
if (!!((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0))) goto LA30;
q0 = findpendingmodule_532241_839829468(m0, prc0);
{
NIM_BOOL LOC34;
NIM_BOOL LOC36;
LOC34 = (NIM_BOOL)0;
LOC34 = !((q0 == NIM_NIL));
if (!(LOC34)) goto LA35;
LOC36 = (NIM_BOOL)0;
LOC36 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id);
LOC34 = !(LOC36);
LA35: ;
if (!LOC34) goto LA37;
genprocaux_560284_839829468(q0, prc0);
}
LA37: ;
}
goto LA6;
LA30: ;
LA6: ;
}BeforeRet: ;
}
/* Entry point for proc generation: skips dead/unactivated procs, queues
 * forward declarations, otherwise generates now; additionally mirrors
 * exported procs into the generated C header when one is being produced.
 * NOTE(review): machine-generated C; LAnn gotos encode if/else chains. */
N_NIMCALL(void, genproc_532951_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
{ {
NIM_BOOL LOC3;
NIM_BOOL LOC5;
LOC3 = (NIM_BOOL)0;
/* skip if flag bit 26 set (presumably "borrowed/dead code" — confirm)
 * or the proc was never activated */
LOC3 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 26))&31U)))!=0);
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = isactivated_561431_839829468(prc0);
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
goto BeforeRet;
}
LA6: ;
fillprocloc_539201_839829468(prc0);
{
/* flag bit 4 (presumably forward-declared): defer generation */
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 4))&31U)))!=0)) goto LA10;
addforwardedproc_532203_839829468(m0, prc0);
}
goto LA8;
LA10: ;
{
genprocnoforward_560906_839829468(m0, prc0);
{
NIM_BOOL LOC15;
NIM_BOOL LOC16;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
/* exported (mask test (flags & 65600) == 64) and a header is being
 * generated and the location is not flagged bit 3 */
LOC16 = ((65600 & (*prc0).flags) == 64);
if (!(LOC16)) goto LA17;
LOC16 = !((generatedheader_532201_839829468 == NIM_NIL));
LA17: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA18;
LOC15 = !((((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0));
LA18: ;
if (!LOC15) goto LA19;
genprocprototype_539254_839829468(generatedheader_532201_839829468, prc0);
{
/* inline procs additionally get their body into the header module */
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA23;
{
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
LOC27 = containsorincl_268862_2627731572((&(*generatedheader_532201_839829468).declaredthings), (*prc0).Sup.id);
if (!!(LOC27)) goto LA28;
genprocaux_560284_839829468(generatedheader_532201_839829468, prc0);
}
LA28: ;
}
LA23: ;
}
LA19: ;
}
LA8: ;
}BeforeRet: ;
}
static N_INLINE(NIM_BOOL, emulatedthreadvars_532949_839829468)(void) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = ((71303168 & ~ gglobaloptions_169130_2607990831)==0);
return result0;
}
/* Declares thread-local variable s0: under emulated TLS it is appended to
 * the global "nimtv" record (once per symbol id, with its type recorded
 * as a dependency); otherwise a real C variable is emitted, with extern
 * and the platform TLS keyword as required.
 * NOTE(review): machine-generated C; LAnn gotos encode if/else chains. */
N_NIMCALL(void, declarethreadvar_538676_839829468)(Tcgen529027* m0, Tsym292834* s0, NIM_BOOL isextern0) {
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = emulatedthreadvars_532949_839829468();
if (!LOC3) goto LA4;
{
NIM_BOOL LOC8;
TY532811 LOC11;
LOC8 = (NIM_BOOL)0;
/* only declare each thread var once (set insertion doubles as test) */
LOC8 = containsorincl_268862_2627731572((&nimtvdeclared_538675_839829468), (*s0).Sup.id);
if (!!(LOC8)) goto LA9;
/* record the type so the nimtv record's type info can be emitted */
nimtvdeps_538674_839829468 = (Ttypeseq292836*) incrSeqV2(&(nimtvdeps_538674_839829468)->Sup, sizeof(Ttype292840*));
asgnRefNoCycle((void**) (&nimtvdeps_538674_839829468->data[nimtvdeps_538674_839829468->Sup.len]), (*s0).loc.t);
++nimtvdeps_538674_839829468->Sup.len;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535673_839829468(m0, (*s0).loc.t);
LOC11[1] = (*s0).loc.r;
addf_179205_2381377266(&nimtv_538656_839829468, ((NimStringDesc*) &T839829468_54), LOC11, 2);
}
LA9: ;
}
goto LA1;
LA4: ;
{
Ropeobj178006* LOC21;
TY178507 LOC22;
{
/* T_240 is presumably the "extern " prefix — confirm in string table */
if (!isextern0) goto LA15;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_240));
}
LA15: ;
{
/* global option bit 22: prepend the native TLS storage keyword */
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 22))&63U)))!=0)) goto LA19;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_241));
}
LA19: ;
LOC21 = (Ropeobj178006*)0;
LOC21 = gettypedesc_535673_839829468(m0, (*s0).loc.t);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC21);
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = (*s0).loc.r;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC22, 1);
}
LA1: ;
}
/* Emits an extern declaration for global variable sym0 when it is owned by
 * another module and not yet declared here; thread vars are routed through
 * declarethreadvar, dynlib imports get a pointer-typed declaration.
 * NOTE(review): machine-generated C; LAnn gotos encode if/else chains. */
N_NIMCALL(void, genvarprototypeaux_544254_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Ropeobj178006* LOC1;
{ useheader_532369_839829468(m0, sym0);
LOC1 = (Ropeobj178006*)0;
LOC1 = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 3), (*sym0).typ, LOC1, ((Tstorageloc292812) 3));
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
/* nothing to do when location flag bit 3 is set, or when this module
 * has already declared the symbol (set insertion doubles as test) */
LOC4 = (((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0);
if (LOC4) goto LA5;
LOC4 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LA5: ;
if (!LOC4) goto LA6;
goto BeforeRet;
}
LA6: ;
{
/* only extern-declare variables owned by a *different* module */
if (!!(((*(*sym0).owner).Sup.id == (*(*m0).module).Sup.id))) goto LA10;
{
/* flag bit 22: thread-local variable */
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA14;
declarethreadvar_538676_839829468(m0, sym0, NIM_TRUE);
}
goto LA12;
LA14: ;
{
Ropeobj178006* LOC17;
TY178507 LOC30;
/* "extern <type>[*][ modifiers] name;" assembled piecewise */
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_240));
LOC17 = (Ropeobj178006*)0;
LOC17 = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC17);
{
/* location flag bit 4: dynlib import — declared as a pointer */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA20;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_53));
}
LA20: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA24;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_121));
}
LA24: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA28;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_122));
}
LA28: ;
memset((void*)LOC30, 0, sizeof(LOC30));
LOC30[0] = (*sym0).loc.r;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC30, 1);
}
LA12: ;
}
LA10: ;
}BeforeRet: ;
}
/* Public wrapper around genvarprototypeaux (kept for pointer identity /
 * forward declaration ordering in the generated code). */
N_NIMCALL(void, genvarprototype_539236_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
genvarprototypeaux_544254_839829468(m0, sym0);
}
/* Ensures the compiler-proc named name0 is generated in module m0 and
 * returns its location rope. Dispatches on symbol kind: procs get full
 * generation, vars a prototype, types a type descriptor; unknown kinds
 * are an internal error, a missing compilerproc is reported via
 * rawmessage (message 68).
 * NOTE(review): if the symbol is missing, result0 is read from a nil
 * sym0 at the end — the rawmessage call presumably aborts before that;
 * confirm against the message handler. */
N_NIMCALL(Ropeobj178006*, cgsym_532403_839829468)(Tcgen529027* m0, NimStringDesc* name0) {
Ropeobj178006* result0;
Tsym292834* sym0;
result0 = (Ropeobj178006*)0;
sym0 = getcompilerproc_338748_3937434831(name0);
{
if (!!((sym0 == NIM_NIL))) goto LA3;
switch ((*sym0).kind) {
case ((Tsymkind292435) 12):
case ((Tsymkind292435) 13):
case ((Tsymkind292435) 15):
case ((Tsymkind292435) 14):
{
/* proc-like kinds */
genproc_532951_839829468(m0, sym0);
}
break;
case ((Tsymkind292435) 8):
case ((Tsymkind292435) 11):
case ((Tsymkind292435) 9):
{
/* var-like kinds */
genvarprototype_539236_839829468(m0, sym0);
}
break;
case ((Tsymkind292435) 7):
{
/* type: force descriptor generation; the rope itself is unused */
Ropeobj178006* LOC8;
LOC8 = (Ropeobj178006*)0;
LOC8 = gettypedesc_535673_839829468(m0, (*sym0).typ);
}
break;
default:
{
NimStringDesc* LOC10;
LOC10 = (NimStringDesc*)0;
LOC10 = rawNewString(name0->Sup.len + reprEnum((NI)(*sym0).kind, (&NTI292435))->Sup.len + 9);
appendString(LOC10, ((NimStringDesc*) &T839829468_243));
appendString(LOC10, name0);
appendString(LOC10, ((NimStringDesc*) &T839829468_244));
appendString(LOC10, reprEnum((NI)(*sym0).kind, (&NTI292435)));
internalerror_196113_155036129(LOC10);
}
break;
}
}
goto LA1;
LA3: ;
{
rawmessage_194612_155036129(((Tmsgkind191002) 68), name0);
}
LA1: ;
result0 = (*sym0).loc.r;
return result0;
}
/* The cgen format-string interpreter. Scans frmt0 and expands:
 *   $$   -> literal '$'
 *   $#   -> next positional argument
 *   $1.. -> explicit 1-based argument index (error if out of range)
 *   $n   -> newline unless line-info option bit 10 is set; $N -> always
 *   #id  -> cgsym(id): pull in + reference a compilerproc by name
 *   #$k  -> cgsym of the stringified k-th argument
 * Everything between directives is copied verbatim.
 * NOTE(review): machine-generated C; the LAnn labels encode the scanner's
 * if/else chain — the index arithmetic is order-sensitive. */
N_NIMCALL(Ropeobj178006*, ropecg_532407_839829468)(Tcgen529027* m0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006* result0;
NI i0;
NI length0;
NI num0;
result0 = (Ropeobj178006*)0;
i0 = ((NI) 0);
length0 = (frmt0 ? frmt0->Sup.len : 0);
result0 = NIM_NIL;
num0 = ((NI) 0);
{
while (1) {
NI start0;
if (!(i0 < length0)) goto LA2;
{
/* '$' directive */
if (!((NU8)(frmt0->data[i0]) == (NU8)(36))) goto LA5;
i0 += ((NI) 1);
switch (((NU8)(frmt0->data[i0]))) {
case 36:
{
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_19));
i0 += ((NI) 1);
}
break;
case 35:
{
/* '$#': consume the next positional argument */
i0 += ((NI) 1);
add_178482_2381377266(&result0, args0[num0]);
num0 += ((NI) 1);
}
break;
case 48 ... 57:
{
/* '$<digits>': parse a 1-based argument index */
NI j0;
j0 = ((NI) 0);
{
while (1) {
j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48));
i0 += ((NI) 1);
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = (length0 <= i0);
if (LOC14) goto LA15;
LOC14 = !((((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57))));
LA15: ;
if (!LOC14) goto LA16;
goto LA10;
}
LA16: ;
}
} LA10: ;
num0 = j0;
{
/* index out of range: internal error with the offending number */
NimStringDesc* LOC22;
NimStringDesc* LOC23;
if (!((NI)((args0Len0-1) + ((NI) 1)) < j0)) goto LA20;
LOC22 = (NimStringDesc*)0;
LOC23 = (NimStringDesc*)0;
LOC23 = nimIntToStr(j0);
LOC22 = rawNewString(LOC23->Sup.len + 30);
appendString(LOC22, ((NimStringDesc*) &T839829468_20));
appendString(LOC22, LOC23);
internalerror_196113_155036129(LOC22);
}
LA20: ;
add_178482_2381377266(&result0, args0[(NI)(j0 - ((NI) 1))]);
}
break;
case 110:
{
/* '$n': newline suppressed when option bit 10 (line info) is set */
{
if (!!(((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 10))&31U)))!=0))) goto LA27;
add_178482_2381377266(&result0, rnl_178903_2381377266);
}
LA27: ;
i0 += ((NI) 1);
}
break;
case 78:
{
/* '$N': unconditional newline */
add_178482_2381377266(&result0, rnl_178903_2381377266);
i0 += ((NI) 1);
}
break;
default:
{
NimStringDesc* LOC31;
LOC31 = (NimStringDesc*)0;
LOC31 = rawNewString(31);
appendString(LOC31, ((NimStringDesc*) &T839829468_20));
appendChar(LOC31, frmt0->data[i0]);
internalerror_196113_155036129(LOC31);
}
break;
}
}
goto LA3;
LA5: ;
{
/* '#ident': scan an identifier and resolve it through cgsym */
NIM_BOOL LOC33;
NI j0;
NimStringDesc* ident0;
Ropeobj178006* LOC39;
LOC33 = (NIM_BOOL)0;
LOC33 = ((NU8)(frmt0->data[i0]) == (NU8)(35));
if (!(LOC33)) goto LA34;
LOC33 = (((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(97)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(122)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(65)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(90)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(95)));
LA34: ;
if (!LOC33) goto LA35;
i0 += ((NI) 1);
j0 = i0;
{
while (1) {
if (!(((NU8)(frmt0->data[j0])) >= ((NU8)(97)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(122)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(65)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(90)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(57)) || ((NU8)(frmt0->data[j0])) == ((NU8)(95)))) goto LA38;
j0 += ((NI) 1);
} LA38: ;
}
ident0 = copyStrLast(frmt0, i0, (NI)(j0 - ((NI) 1)));
i0 = j0;
LOC39 = (Ropeobj178006*)0;
LOC39 = cgsym_532403_839829468(m0, ident0);
add_178482_2381377266(&result0, LOC39);
}
goto LA3;
LA35: ;
{
/* '#$<digits>': cgsym of the stringified j-th argument */
NIM_BOOL LOC41;
NI j0;
NimStringDesc* LOC47;
Ropeobj178006* LOC48;
LOC41 = (NIM_BOOL)0;
LOC41 = ((NU8)(frmt0->data[i0]) == (NU8)(35));
if (!(LOC41)) goto LA42;
LOC41 = ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(36));
LA42: ;
if (!LOC41) goto LA43;
i0 += ((NI) 2);
j0 = ((NI) 0);
{
while (1) {
if (!(((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))) goto LA46;
j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48));
i0 += ((NI) 1);
} LA46: ;
}
LOC47 = (NimStringDesc*)0;
LOC47 = HEX24_178856_2381377266(args0[(NI)(j0 - ((NI) 1))]);
LOC48 = (Ropeobj178006*)0;
LOC48 = cgsym_532403_839829468(m0, LOC47);
add_178482_2381377266(&result0, LOC48);
}
goto LA3;
LA43: ;
LA3: ;
/* copy the literal run up to the next '$'/'#' (or end of format) */
start0 = i0;
{
while (1) {
if (!(i0 < length0)) goto LA50;
{
NIM_BOOL LOC53;
LOC53 = (NIM_BOOL)0;
LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(36)));
if (!(LOC53)) goto LA54;
LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(35)));
LA54: ;
if (!LOC53) goto LA55;
i0 += ((NI) 1);
}
goto LA51;
LA55: ;
{
goto LA49;
}
LA51: ;
} LA50: ;
} LA49: ;
{
NimStringDesc* LOC62;
if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA60;
LOC62 = (NimStringDesc*)0;
LOC62 = copyStrLast(frmt0, start0, (NI)(i0 - ((NI) 1)));
add_178487_2381377266(&result0, LOC62);
}
LA60: ;
} LA2: ;
}
return result0;
}
/* True when the current module has symbol-flag 27 set while the module owning
   sym0 does not (i.e. the call crosses that module boundary -- presumably the
   C++ compilation boundary), and the global command mode is not command 2.
   getmodule is only consulted when the current module carries the flag,
   matching the original short-circuit order. */
static N_INLINE(NIM_BOOL, crossescppboundary_560754_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
  NIM_BOOL boundary0;
  boundary0 = NIM_FALSE;
  if ((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0)) {
    Tsym292834* owner0;
    owner0 = getmodule_299123_2984716966(sym0);
    boundary0 = !((((*owner0).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0));
  }
  return (NIM_BOOL)(boundary0 && !(gcmd_169132_2607990831 == ((Tcommands169076) 2)));
}
/* Emit a C prototype for proc `sym0` into module m0's generated-code sections.
   This is machine-generated output of the Nim compiler's C code generator
   (goto-based control flow, numbered temporaries); only comments added here. */
N_NIMCALL(void, genprocprototype_539254_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
{ useheader_532369_839829468(m0, sym0);
{
/* loc-flag 3 set: nothing to declare for this symbol -- early return. */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA3;
goto BeforeRet;
}
LA3: ;
{
/* loc-flag 4 set: dynlib-style symbol. Declare a proc-pointer variable
   (format T839829468_245) in section 9, but only when the symbol's owning
   module differs from m0 and it was not already declared in m0. */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA7;
{
NIM_BOOL LOC11;
Tsym292834* LOC12;
NIM_BOOL LOC14;
TY532811 LOC17;
Ropeobj178006* LOC18;
LOC11 = (NIM_BOOL)0;
LOC12 = (Tsym292834*)0;
LOC12 = getmodule_299123_2984716966(sym0);
LOC11 = !(((*LOC12).Sup.id == (*(*m0).module).Sup.id));
if (!(LOC11)) goto LA13;
LOC14 = (NIM_BOOL)0;
/* containsorincl both tests membership and records the id, so the
   declaration is emitted at most once per module. */
LOC14 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LOC11 = !(LOC14);
LA13: ;
if (!LOC11) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
LOC17[1] = mangledynlibproc_538816_839829468(sym0);
LOC18 = (Ropeobj178006*)0;
LOC18 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_245), LOC17, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC18);
}
LA15: ;
}
goto LA5;
LA7: ;
{
/* Ordinary proc: build the header once (guarded by declaredprotos),
   decorate it with compiler-specific prefixes/suffixes, and append the
   finished prototype (format T839829468_191) to section 7. */
NIM_BOOL LOC20;
Ropeobj178006* header0;
TY178507 LOC47;
Ropeobj178006* LOC48;
LOC20 = (NIM_BOOL)0;
LOC20 = containsorincl_268862_2627731572((&(*m0).declaredprotos), (*sym0).Sup.id);
if (!!(LOC20)) goto LA21;
header0 = genprocheader_535867_839829468(m0, sym0);
{
/* sym-flag 14 plus C-compiler property 6: prepend prefix T839829468_213. */
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0);
if (!(LOC25)) goto LA26;
LOC25 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0);
LA26: ;
if (!LOC25) goto LA27;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_213), header0);
}
LA27: ;
{
/* Calling convention != 5 and the proc crosses the C++ boundary:
   prepend T839829468_246 (presumably an extern "C"-style prefix). */
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = !(((*(*sym0).typ).callconv == ((Tcallingconvention292002) 5)));
if (!(LOC31)) goto LA32;
LOC31 = crossescppboundary_560754_839829468(m0, sym0);
LA32: ;
if (!LOC31) goto LA33;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_246), header0);
}
LA33: ;
{
/* sym-flag 9 plus C-compiler property 7: append suffix T839829468_247. */
NIM_BOOL LOC37;
LOC37 = (NIM_BOOL)0;
LOC37 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0);
if (!(LOC37)) goto LA38;
LOC37 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 7))&7U)))!=0);
LA38: ;
if (!LOC37) goto LA39;
add_178487_2381377266(&header0, ((NimStringDesc*) &T839829468_247));
}
LA39: ;
{
/* sym-flag 14 plus C-compiler property 7: append suffix T839829468_248. */
NIM_BOOL LOC43;
LOC43 = (NIM_BOOL)0;
LOC43 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0);
if (!(LOC43)) goto LA44;
LOC43 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 7))&7U)))!=0);
LA44: ;
if (!LOC43) goto LA45;
add_178487_2381377266(&header0, ((NimStringDesc*) &T839829468_248));
}
LA45: ;
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = header0;
LOC48 = (Ropeobj178006*)0;
LOC48 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_191), LOC47, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], LOC48);
}
goto LA5;
LA21: ;
LA5: ;
}BeforeRet: ;
}
static N_INLINE(NIM_BOOL, usesnativegc_169177_2607990831)(void) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = (((Tgcmode169080) 5) <= gselectedgc_169133_2607990831);
return result0;
}
/* Emit the C code for assigning one GC'ed reference location to another,
   choosing the write-barrier variant from the destination's storage class.
   Storage class 2 appears to be "on stack" and 3 "on heap" -- TODO confirm
   against the Nim compiler's TStorageLoc enum. Generated code; comments only. */
N_NIMCALL(void, genrefassign_538311_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
{
/* Destination on the stack, or no native GC in use: a plain C assignment
   (format T839829468_123) suffices -- no write barrier needed. */
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY532811 LOC8;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*dest0).s == ((Tstorageloc292812) 2));
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = usesnativegc_169177_2607990831();
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(dest0);
LOC8[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC8, 2);
}
goto LA1;
LA6: ;
{
/* Destination with storage class 3: pick the barrier call by whether the
   destination's type can participate in a reference cycle
   (T839829468_249 = cycle-aware variant, T839829468_250 = no-cycle variant). */
if (!((*dest0).s == ((Tstorageloc292812) 3))) goto LA10;
{
NIM_BOOL LOC14;
TY532811 LOC17;
LOC14 = (NIM_BOOL)0;
LOC14 = canformacycle_320123_3876443242((*dest0).t);
if (!LOC14) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_249), LOC17, 2);
}
goto LA12;
LA15: ;
{
TY532811 LOC19;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(dest0);
LOC19[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_250), LOC19, 2);
}
LA12: ;
}
goto LA1;
LA10: ;
{
/* Unknown storage class: fall back to the conservative barrier
   (format T839829468_251, presumably unsureAsgnRef -- verify). */
TY532811 LOC21;
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = addrloc_538204_839829468(dest0);
LOC21[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_251), LOC21, 2);
}
LA1: ;
}
/* Derive from location a0 a new location Result that denotes one of its
   fields: kind 5, same storage class as a0, type t0, and an rvalue rope of
   the form <rendered a0><T839829468_257><field0> (T839829468_257 is presumably
   the "." separator -- verify). unsureAsgnRef is the GC-safe ref store. */
N_NIMCALL(void, optasgnloc_549789_839829468)(Tloc292816* a0, Ttype292840* t0, Ropeobj178006* field0, Tloc292816* Result) {
  Ropeobj178006* base0;
  (*Result).k = ((Tlockind292808) 5);
  (*Result).s = (*a0).s;
  unsureAsgnRef((void**) (&(*Result).t), t0);
  base0 = HEX26_178447_2381377266(rdloc_538188_839829468(a0), ((NimStringDesc*) &T839829468_257));
  unsureAsgnRef((void**) (&(*Result).r), HEX26_178418_2381377266(base0, field0));
}
/* Optimized tuple assignment: instead of one generic copy, emit a separate
   assignment per tuple field (field ropes built with format T839829468_260).
   Generated Nim cgen output; only comments added. */
N_NIMCALL(void, genoptasgntuple_550001_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
Tassignmentflag538302Set newflags0;
Ttype292840* t_550053_839829468;
Ttype292840* LOC9;
{
/* Adjust flag bit 0 for the recursive per-field assignments: set it when
   the source has storage class 1, clear it when the destination's type
   carries type-flag 6, otherwise pass flags through unchanged. */
if (!((*src0).s == ((Tstorageloc292812) 1))) goto LA3;
newflags0 = (flags0 | 1);
}
goto LA1;
LA3: ;
{
if (!(((*(*dest0).t).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0)) goto LA6;
newflags0 = (flags0 & ~ 1);
}
goto LA1;
LA6: ;
{
newflags0 = flags0;
}
LA1: ;
/* Resolve the destination's canonical tuple type (skip wrapper kinds). */
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059((*dest0).t, IL64(211106232576256));
t_550053_839829468 = getuniquetype_528640_2036603609(LOC9);
{
/* For each tuple son i, build the "Field<i>" rope, derive matching
   dest/src sub-locations and recurse into genassignment. */
NI i_550071_839829468;
NI HEX3Atmp_550077_839829468;
NI LOC11;
NI res_550080_839829468;
i_550071_839829468 = (NI)0;
HEX3Atmp_550077_839829468 = (NI)0;
LOC11 = (NI)0;
LOC11 = len_295339_850551059(t_550053_839829468);
HEX3Atmp_550077_839829468 = (LOC11 - 1);
res_550080_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* t0;
Ropeobj178006* field0;
TY178507 LOC14;
Tloc292816 LOC15;
Tloc292816 LOC16;
if (!(res_550080_839829468 <= HEX3Atmp_550077_839829468)) goto LA13;
i_550071_839829468 = res_550080_839829468;
t0 = (*t_550053_839829468).sons->data[i_550071_839829468];
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_178401_2381377266(((NI64) (i_550071_839829468)));
field0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_260), LOC14, 1);
memset((void*)(&LOC15), 0, sizeof(LOC15));
optasgnloc_549789_839829468(dest0, t0, field0, (&LOC15));
memset((void*)(&LOC16), 0, sizeof(LOC16));
optasgnloc_549789_839829468(src0, t0, field0, (&LOC16));
genassignment_539264_839829468(p0, (&LOC15), (&LOC16), newflags0);
res_550080_839829468 += ((NI) 1);
} LA13: ;
}
}
}
/* Emit a generic (runtime-assisted) assignment for complex values.
   Three emitted shapes: a memcpy-style copy (T839829468_261) when a shallow
   copy is permitted and safe, a runtime call with type info (T839829468_262)
   for the remaining shallow case, and the full deep-copy runtime call
   (T839829468_263) otherwise. Generated code; comments only. */
N_NIMCALL(void, gengenericasgn_550167_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
{
/* Shallow path is taken when assignment-flag 0 is NOT set, or the
   destination's (skipped) type carries type-flag 6 (shallow marker). */
NIM_BOOL LOC3;
Ttype292840* LOC5;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (LOC3) goto LA4;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059((*dest0).t, IL64(211106242013440));
LOC3 = (((*LOC5).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA6;
{
/* Dest on the stack (storage class 2) or no native GC: raw memcpy of the
   value (needs <string.h>, hence usestringh). */
NIM_BOOL LOC10;
NIM_BOOL LOC12;
TY535238 LOC15;
LOC10 = (NIM_BOOL)0;
LOC10 = ((*dest0).s == ((Tstorageloc292812) 2));
if (LOC10) goto LA11;
LOC12 = (NIM_BOOL)0;
LOC12 = usesnativegc_169177_2607990831();
LOC10 = !(LOC12);
LA11: ;
if (!LOC10) goto LA13;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = addrloc_538204_839829468(dest0);
LOC15[1] = addrloc_538204_839829468(src0);
LOC15[2] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_261), LOC15, 3);
}
goto LA8;
LA13: ;
{
/* Shallow copy under a native GC: runtime call with the type descriptor. */
TY535238 LOC17;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = addrloc_538204_839829468(src0);
LOC17[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_262), LOC17, 3);
}
LA8: ;
}
goto LA1;
LA6: ;
{
/* Deep-copy runtime call with the type descriptor. */
TY535238 LOC19;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(dest0);
LOC19[1] = addrloc_538204_839829468(src0);
LOC19[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_263), LOC19, 3);
}
LA1: ;
}
/* Recursively score how "complex" assigning AST node n0 would be:
   nil contributes 0, node kind 3 contributes 1, kind 139 contributes 100,
   kind 138 sums the scores of all its sons, anything else contributes 0.
   Callers compare the score against a small threshold to decide between a
   field-by-field copy and a generic runtime copy. */
N_NIMCALL(NI, asgncomplexity_549751_839829468)(Tnode292802* n0) {
  NI total0;
  total0 = (NI)0;
  if (!(n0 == NIM_NIL)) {
    switch ((*n0).kind) {
    case ((Tnodekind292020) 3):
      total0 = ((NI) 1);
      break;
    case ((Tnodekind292020) 139):
      total0 = ((NI) 100);
      break;
    case ((Tnodekind292020) 138):
      {
        NI idx0;
        NI count0;
        count0 = len_293081_850551059(n0);
        for (idx0 = ((NI) 0); idx0 < count0; idx0 += ((NI) 1)) {
          total0 += asgncomplexity_549751_839829468((*n0).kindU.S6.sons->data[idx0]);
        }
      }
      break;
    default:
      break;
    }
  }
  return total0;
}
/* Optimized object assignment: walk the object's field tree t0 and emit one
   assignment per leaf field (kind 3 = a field symbol; kind 138 = a branch
   node whose sons are recursed into). Generated code; comments only. */
N_NIMCALL(void, genoptasgnobject_550084_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0, Tnode292802* t0) {
Tassignmentflag538302Set newflags0;
{ {
/* Nothing to do for an absent field tree. */
if (!(t0 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
{
/* Same flag-bit-0 adjustment as genoptasgntuple: set it for a source with
   storage class 1, clear it when the dest type has type-flag 6. */
if (!((*src0).s == ((Tstorageloc292812) 1))) goto LA7;
newflags0 = (flags0 | 1);
}
goto LA5;
LA7: ;
{
if (!(((*(*dest0).t).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0)) goto LA10;
newflags0 = (flags0 & ~ 1);
}
goto LA5;
LA10: ;
{
newflags0 = flags0;
}
LA5: ;
switch ((*t0).kind) {
case ((Tnodekind292020) 3):
{
/* Leaf field symbol: derive matching dest/src sub-locations using the
   field's mangled name (loc.r) and emit the field assignment. */
Tsym292834* field0;
Tloc292816 LOC14;
Tloc292816 LOC15;
field0 = (*t0).kindU.S4.sym;
memset((void*)(&LOC14), 0, sizeof(LOC14));
optasgnloc_549789_839829468(dest0, (*field0).typ, (*field0).loc.r, (&LOC14));
memset((void*)(&LOC15), 0, sizeof(LOC15));
optasgnloc_549789_839829468(src0, (*field0).typ, (*field0).loc.r, (&LOC15));
genassignment_539264_839829468(p0, (&LOC14), (&LOC15), newflags0);
}
break;
case ((Tnodekind292020) 138):
{
/* Branch node: recurse into every child with the adjusted flags. */
{
Tnode292802* child_550155_839829468;
child_550155_839829468 = (Tnode292802*)0;
{
NI i_550160_839829468;
NI HEX3Atmp_550162_839829468;
NI LOC19;
NI res_550164_839829468;
i_550160_839829468 = (NI)0;
HEX3Atmp_550162_839829468 = (NI)0;
LOC19 = (NI)0;
LOC19 = len_293081_850551059(t0);
HEX3Atmp_550162_839829468 = (LOC19 - 1);
res_550164_839829468 = ((NI) 0);
{
while (1) {
if (!(res_550164_839829468 <= HEX3Atmp_550162_839829468)) goto LA21;
i_550160_839829468 = res_550164_839829468;
child_550155_839829468 = (*t0).kindU.S6.sons->data[i_550160_839829468];
genoptasgnobject_550084_839829468(p0, dest0, src0, newflags0, child_550155_839829468);
res_550164_839829468 += ((NI) 1);
} LA21: ;
}
}
}
}
break;
default:
{
}
break;
}
}BeforeRet: ;
}
/* Central assignment code generator: emits C for `dest := src`, dispatching
   on the destination's (skipped) type kind. The numeric kinds correspond to
   the Nim compiler's TTypeKind enum; annotations below are best-effort
   readings (22 ~ ref, 24 ~ seq, 28 ~ string, 25 ~ proc, 18 ~ tuple,
   17 ~ object, 16/4 ~ array kinds, 19 ~ set) -- TODO confirm against the
   matching compiler version. Generated code; only comments added. */
N_NIMCALL(void, genassignment_539264_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
Ttype292840* ty0;
{ {
/* Early out: a source whose type kind is 21 (ptr-like) is assigned with a
   plain C assignment (format T839829468_123), regardless of dest's type. */
NIM_BOOL LOC3;
TY532811 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((*src0).t == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = ((*(*src0).t).kind == ((Ttypekind292244) 21));
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468(dest0);
LOC7[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC7, 2);
goto BeforeRet;
}
LA5: ;
ty0 = skiptypes_296099_850551059((*dest0).t, IL64(211106233624832));
switch ((*ty0).kind) {
case ((Ttypekind292244) 22):
{
/* kind 22 (ref-like): delegate to the write-barrier-aware ref assign. */
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
break;
case ((Ttypekind292244) 24):
{
/* kind 24 (seq-like): shallow ref assign when flag 0 is clear and the
   source is not storage class 1; otherwise a runtime seq copy with type
   info (format T839829468_252). */
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (!(LOC12)) goto LA13;
LOC12 = !(((*src0).s == ((Tstorageloc292812) 1)));
LA13: ;
if (!LOC12) goto LA14;
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
goto LA10;
LA14: ;
{
TY535238 LOC17;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = rdloc_538188_839829468(src0);
LOC17[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_252), LOC17, 3);
}
LA10: ;
}
break;
case ((Ttypekind292244) 28):
{
/* kind 28 (string-like): shallow ref assign when permitted; else pick a
   copy strategy by the destination's storage class. */
{
NIM_BOOL LOC21;
LOC21 = (NIM_BOOL)0;
LOC21 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (!(LOC21)) goto LA22;
LOC21 = !(((*src0).s == ((Tstorageloc292812) 1)));
LA22: ;
if (!LOC21) goto LA23;
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
goto LA19;
LA23: ;
{
{
/* Dest on stack / no native GC: direct copy call (T839829468_253). */
NIM_BOOL LOC28;
NIM_BOOL LOC30;
TY532811 LOC33;
LOC28 = (NIM_BOOL)0;
LOC28 = ((*dest0).s == ((Tstorageloc292812) 2));
if (LOC28) goto LA29;
LOC30 = (NIM_BOOL)0;
LOC30 = usesnativegc_169177_2607990831();
LOC28 = !(LOC30);
LA29: ;
if (!LOC28) goto LA31;
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rdloc_538188_839829468(dest0);
LOC33[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_253), LOC33, 2);
}
goto LA26;
LA31: ;
{
/* Dest on heap (storage class 3): copy into a fresh temporary first
   (T839829468_254), then store the temp with a barrier (T839829468_255). */
Tloc292816 tmp0;
TY535238 LOC37;
TY178507 LOC38;
if (!((*dest0).s == ((Tstorageloc292812) 3))) goto LA35;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, ty0, (&tmp0), NIM_FALSE);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = rdloc_538188_839829468(dest0);
LOC37[1] = rdloc_538188_839829468(src0);
LOC37[2] = rdloc_538188_839829468((&tmp0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_254), LOC37, 3);
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = rdloc_538188_839829468((&tmp0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC38, 1);
}
goto LA26;
LA35: ;
{
/* Unknown storage class: conservative assign-by-address (T839829468_256). */
TY532811 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = addrloc_538204_839829468(dest0);
LOC40[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_256), LOC40, 2);
}
LA26: ;
}
LA19: ;
}
break;
case ((Ttypekind292244) 25):
{
/* kind 25 (proc-like): a "complex" proc value (closure) copies its
   GC'ed environment field (name rope T839829468_258) via genrefassign and
   the remaining raw part with format T839829468_259; a plain proc pointer
   is a simple C assignment. */
{
NIM_BOOL LOC44;
Tloc292816 a0;
Ropeobj178006* LOC47;
Tloc292816 LOC48;
Tloc292816 b0;
Ropeobj178006* LOC49;
Tloc292816 LOC50;
TY532811 LOC51;
LOC44 = (NIM_BOOL)0;
LOC44 = needscomplexassignment_533511_839829468((*dest0).t);
if (!LOC44) goto LA45;
memset((void*)(&a0), 0, sizeof(a0));
LOC47 = (Ropeobj178006*)0;
LOC47 = rope_178277_2381377266(((NimStringDesc*) &T839829468_258));
memset((void*)(&LOC48), 0, sizeof(LOC48));
optasgnloc_549789_839829468(dest0, (*dest0).t, LOC47, (&LOC48));
memcpy((void*)(&a0), (NIM_CONST void*)(&LOC48), sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
LOC49 = (Ropeobj178006*)0;
LOC49 = rope_178277_2381377266(((NimStringDesc*) &T839829468_258));
memset((void*)(&LOC50), 0, sizeof(LOC50));
optasgnloc_549789_839829468(src0, (*dest0).t, LOC49, (&LOC50));
memcpy((void*)(&b0), (NIM_CONST void*)(&LOC50), sizeof(b0));
genrefassign_538311_839829468(p0, (&a0), (&b0), flags0);
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = rdloc_538188_839829468(dest0);
LOC51[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_259), LOC51, 2);
}
goto LA42;
LA45: ;
{
TY532811 LOC53;
memset((void*)LOC53, 0, sizeof(LOC53));
LOC53[0] = rdloc_538188_839829468(dest0);
LOC53[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC53, 2);
}
LA42: ;
}
break;
case ((Ttypekind292244) 18):
{
/* kind 18 (tuple-like): if a complex copy is needed, small tuples
   (<= 4 elements) get the field-by-field fast path, larger ones the
   generic runtime copy; otherwise a plain C assignment. */
{
NIM_BOOL LOC57;
LOC57 = (NIM_BOOL)0;
LOC57 = needscomplexassignment_533511_839829468((*dest0).t);
if (!LOC57) goto LA58;
{
NI LOC62;
LOC62 = (NI)0;
LOC62 = len_295339_850551059((*dest0).t);
if (!(LOC62 <= ((NI) 4))) goto LA63;
genoptasgntuple_550001_839829468(p0, dest0, src0, flags0);
}
goto LA60;
LA63: ;
{
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
LA60: ;
}
goto LA55;
LA58: ;
{
TY532811 LOC67;
memset((void*)LOC67, 0, sizeof(LOC67));
LOC67[0] = rdloc_538188_839829468(dest0);
LOC67[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC67, 2);
}
LA55: ;
}
break;
case ((Ttypekind292244) 17):
{
/* kind 17 (object-like): imported C++ types copy with plain assignment;
   objects with a type field always use the generic runtime copy; otherwise
   a cheap-enough object (first son empty and asgncomplexity <= 4) gets the
   field-by-field fast path, falling back to the generic runtime copy. */
{
NIM_BOOL LOC71;
TY532811 LOC74;
LOC71 = (NIM_BOOL)0;
LOC71 = isimportedcpptype_533478_839829468(ty0);
if (!LOC71) goto LA72;
memset((void*)LOC74, 0, sizeof(LOC74));
LOC74[0] = rdloc_538188_839829468(dest0);
LOC74[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC74, 2);
}
goto LA69;
LA72: ;
{
NIM_BOOL LOC76;
LOC76 = (NIM_BOOL)0;
LOC76 = isobjlackingtypefield_533515_839829468(ty0);
if (!!(LOC76)) goto LA77;
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
goto LA69;
LA77: ;
{
NIM_BOOL LOC80;
LOC80 = (NIM_BOOL)0;
LOC80 = needscomplexassignment_533511_839829468(ty0);
if (!LOC80) goto LA81;
{
NIM_BOOL LOC85;
NI LOC87;
Ropeobj178006* LOC90;
LOC85 = (NIM_BOOL)0;
LOC85 = (*ty0).sons->data[((NI) 0)] == 0;
if (!(LOC85)) goto LA86;
LOC87 = (NI)0;
LOC87 = asgncomplexity_549751_839829468((*ty0).n);
LOC85 = (LOC87 <= ((NI) 4));
LA86: ;
if (!LOC85) goto LA88;
LOC90 = (Ropeobj178006*)0;
LOC90 = gettypedesc_535673_839829468((*p0).module, ty0);
ty0 = getuniquetype_528640_2036603609(ty0);
{
/* Internal invariant: the unique object type must carry a field tree. */
NimStringDesc* LOC95;
if (!!(!(((*ty0).n == NIM_NIL)))) goto LA93;
LOC95 = (NimStringDesc*)0;
LOC95 = HEX24_196185_1689653243(T839829468_264);
internalerror_196113_155036129(LOC95);
}
LA93: ;
genoptasgnobject_550084_839829468(p0, dest0, src0, flags0, (*ty0).n);
}
goto LA83;
LA88: ;
{
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
LA83: ;
}
goto LA69;
LA81: ;
{
TY532811 LOC98;
memset((void*)LOC98, 0, sizeof(LOC98));
LOC98[0] = rdloc_538188_839829468(dest0);
LOC98[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC98, 2);
}
LA69: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
/* kinds 16/4 (array-like): generic runtime copy when complex, otherwise a
   memcpy-style copy with the array's type descriptor (T839829468_261). */
{
NIM_BOOL LOC102;
LOC102 = (NIM_BOOL)0;
LOC102 = needscomplexassignment_533511_839829468((*dest0).t);
if (!LOC102) goto LA103;
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
goto LA100;
LA103: ;
{
TY535238 LOC106;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC106, 0, sizeof(LOC106));
LOC106[0] = rdloc_538188_839829468(dest0);
LOC106[1] = rdloc_538188_839829468(src0);
LOC106[2] = gettypedesc_535673_839829468((*p0).module, ty0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_261), LOC106, 3);
}
LA100: ;
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
/* kinds 27/48 (open-array-like): runtime copy with type info when complex
   (T839829468_266), else a memcpy-style element copy (T839829468_267). */
{
NIM_BOOL LOC110;
TY535238 LOC113;
LOC110 = (NIM_BOOL)0;
LOC110 = needscomplexassignment_533511_839829468((*dest0).t);
if (!LOC110) goto LA111;
memset((void*)LOC113, 0, sizeof(LOC113));
LOC113[0] = addrloc_538204_839829468(dest0);
LOC113[1] = addrloc_538204_839829468(src0);
LOC113[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_266), LOC113, 3);
}
goto LA108;
LA111: ;
{
TY532811 LOC115;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC115, 0, sizeof(LOC115));
LOC115[0] = rdloc_538188_839829468(dest0);
LOC115[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_267), LOC115, 2);
}
LA108: ;
}
break;
case ((Ttypekind292244) 19):
{
/* kind 19 (set-like): big sets mapped to C arrays (ctype kind 17) are
   copied with memcpy and the set's byte size (T839829468_268); small sets
   fit a machine word and use plain assignment. */
{
Tctypekind529007 LOC119;
TY535238 LOC122;
NI64 LOC123;
LOC119 = (Tctypekind529007)0;
LOC119 = maptype_533394_839829468(ty0);
if (!(LOC119 == ((Tctypekind529007) 17))) goto LA120;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC122, 0, sizeof(LOC122));
LOC122[0] = rdloc_538188_839829468(dest0);
LOC122[1] = rdloc_538188_839829468(src0);
LOC123 = (NI64)0;
LOC123 = getsize_320135_3876443242((*dest0).t);
LOC122[2] = rope_178401_2381377266(LOC123);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_268), LOC122, 3);
}
goto LA117;
LA120: ;
{
TY532811 LOC125;
memset((void*)LOC125, 0, sizeof(LOC125));
LOC125[0] = rdloc_538188_839829468(dest0);
LOC125[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC125, 2);
}
LA117: ;
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 23):
{
/* Scalar / pointer-like kinds: plain C assignment. */
TY532811 LOC127;
memset((void*)LOC127, 0, sizeof(LOC127));
LOC127[0] = rdloc_538188_839829468(dest0);
LOC127[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC127, 2);
}
break;
default:
{
/* Unsupported destination kind: report an internal compiler error naming
   the type kind. */
NimStringDesc* LOC129;
LOC129 = (NimStringDesc*)0;
LOC129 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 15);
appendString(LOC129, ((NimStringDesc*) &T839829468_269));
appendString(LOC129, reprEnum((NI)(*ty0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC129);
}
break;
}
}BeforeRet: ;
}
/* Move location s0 into destination d0. An initialized destination
   (kind != 0) is handled by the code generator's assignment machinery; the
   assignment flag is 0 when d0 carries loc-flag 2, else 1. An uninitialized
   destination simply takes over s0's Tloc record via genericAssign. */
N_NIMCALL(void, putlocintodest_539258_839829468)(Tcproc529021* p0, Tloc292816* d0, Tloc292816* s0) {
  if (!((*d0).k == ((Tlockind292808) 0))) {
    Tassignmentflag538302Set fl0;
    fl0 = ((((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0) ? 0 : 1);
    genassignment_539264_839829468(p0, d0, s0, fl0);
  } else {
    genericAssign((void*)d0, (void*)s0, (&NTI292816));
  }
}
/* A constant is "simple" (can be emitted as a plain C constant) when its
   skipped type kind is outside the bitmask 17760272 and it is not a proc
   type (kind 25) with calling convention 8 (presumably a closure).
   The callconv field is only read when kind == 25, as in the original. */
N_NIMCALL(NIM_BOOL, issimpleconst_532311_839829468)(Ttype292840* typ0) {
  Ttype292840* t0;
  NIM_BOOL complexKind0;
  NIM_BOOL closureProc0;
  t0 = skiptypes_296099_850551059(typ0, IL64(211106240964864));
  complexKind0 = ((17760272 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0);
  closureProc0 = (((*t0).kind == ((Ttypekind292244) 25)) && ((*t0).callconv == ((Tcallingconvention292002) 8)));
  return (NIM_BOOL)(!complexKind0 && !closureProc0);
}
/* Store an expression rope r0 of type t0 (storage class s0) into destination
   d0. An initialized destination gets a generated assignment from a
   temporary kind-6 Tloc wrapping r0 (flag 0 when d0 has loc-flag 2, else 1);
   an uninitialized one is filled in place, using GC-safe ref stores for the
   type and rope pointers. */
N_NIMCALL(void, putintodest_550468_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0, Tstorageloc292812 s0) {
  if (!((*d0).k == ((Tlockind292808) 0))) {
    Tloc292816 tmpLoc0;
    memset((void*)(&tmpLoc0), 0, sizeof(tmpLoc0));
    initloc_532273_839829468((&tmpLoc0), ((Tlockind292808) 6), t0, s0);
    tmpLoc0.r = r0;
    genassignment_539264_839829468(p0, d0, (&tmpLoc0),
        ((((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0) ? 0 : 1));
  } else {
    (*d0).k = ((Tlockind292808) 6);
    unsureAsgnRef((void**) (&(*d0).t), t0);
    unsureAsgnRef((void**) (&(*d0).r), r0);
  }
}
/* Pack the first size0 bytes of bitset s0 into a single 64-bit word,
   byte j landing at bit position j*8. Indices past s0's actual length
   (or a nil s0) contribute nothing, so missing bytes read as zero. */
N_NIMCALL(NI64, bitsettoword_549578_839829468)(Tbitset339004* s0, NI size0) {
  NI64 word0;
  NI avail0;
  NI j0;
  word0 = IL64(0);
  avail0 = (s0 ? s0->Sup.len : 0);
  for (j0 = ((NI) 0); j0 < size0; j0 += ((NI) 1)) {
    if (j0 < avail0) {
      word0 = (NI64)(word0 | (NI64)((NU64)(((NI64)(NU64)(NU8)(s0->data[j0]))) << (NU64)(((NI64) ((NI)(j0 * ((NI) 8)))))));
    }
  }
  return word0;
}
/* Render the raw data of set literal cs0 (size0 bytes) as a C initializer
   rope. Sets wider than 8 bytes become a braced byte list (hex bytes, line
   break every 8 elements via format T839829468_274, separator T839829468_275,
   terminator T839829468_276); smaller sets collapse into a single integer
   literal via bitsettoword. Generated code; only comments added. */
N_NIMCALL(Ropeobj178006*, genrawsetdata_549629_839829468)(Tbitset339004* cs0, NI size0) {
Ropeobj178006* result0;
NimStringDesc* frmt0;
result0 = (Ropeobj178006*)0;
frmt0 = (NimStringDesc*)0;
{
TY533289 LOC5;
if (!(((NI) 8) < size0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_273), LOC5, 0);
{
NI i_549649_839829468;
NI HEX3Atmp_549657_839829468;
NI res_549660_839829468;
i_549649_839829468 = (NI)0;
HEX3Atmp_549657_839829468 = (NI)0;
HEX3Atmp_549657_839829468 = (NI)(size0 - ((NI) 1));
res_549660_839829468 = ((NI) 0);
{
while (1) {
TY178507 LOC19;
NimStringDesc* LOC20;
if (!(res_549660_839829468 <= HEX3Atmp_549657_839829468)) goto LA8;
i_549649_839829468 = res_549660_839829468;
{
/* Choose the per-byte format: every 8th non-final byte uses the
   line-breaking format, other non-final bytes the plain separator,
   and the last byte the closing format. */
if (!(i_549649_839829468 < (NI)(size0 - ((NI) 1)))) goto LA11;
{
if (!(((NI) ((NI)((NI)(i_549649_839829468 + ((NI) 1)) % ((NI) 8)))) == ((NI) 0))) goto LA15;
frmt0 = copyString(((NimStringDesc*) &T839829468_274));
}
goto LA13;
LA15: ;
{
frmt0 = copyString(((NimStringDesc*) &T839829468_275));
}
LA13: ;
}
goto LA9;
LA11: ;
{
frmt0 = copyString(((NimStringDesc*) &T839829468_276));
}
LA9: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (NimStringDesc*)0;
/* Each byte is rendered as a 2-digit hex number. */
LOC20 = nsuToHex(((NI64)(NU64)(NU8)(cs0->data[i_549649_839829468])), ((NI) 2));
LOC19[0] = rope_178277_2381377266(LOC20);
addf_179205_2381377266(&result0, frmt0, LOC19, 1);
res_549660_839829468 += ((NI) 1);
} LA8: ;
}
}
}
goto LA1;
LA3: ;
{
/* Small set: emit the packed word as one integer literal. */
NI64 LOC22;
LOC22 = (NI64)0;
LOC22 = bitsettoword_549578_839829468(cs0, size0);
result0 = intliteral_539270_839829468(LOC22);
}
LA1: ;
return result0;
}
/* Format frmt0 with args0 through the cgen rope formatter and append the
   result to section s0 of module m0. */
N_NIMCALL(void, appcg_532640_839829468)(Tcgen529027* m0, Tcfilesection529005 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
  add_178482_2381377266(&(*m0).s[(s0)- 0],
      ropecg_532407_839829468(m0, frmt0, args0, args0Len0));
}
/* Emit a constant sequence literal: builds an initializer rope holding the
   length header (format T839829468_277) and comma-separated element constants,
   declares a named static (format T839829468_281) in file section 8, and
   returns a cast of that name to the seq type (format T839829468_282).
   Generated code; only comments added. */
N_NIMCALL(Ropeobj178006*, genconstseq_559371_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* t0) {
Ropeobj178006* result0;
Ropeobj178006* data0;
TY178507 LOC1;
NI LOC2;
TY535235 LOC18;
NI LOC19;
TY532811 LOC20;
result0 = (Ropeobj178006*)0;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (NI)0;
LOC2 = len_293081_850551059(n0);
LOC1[0] = rope_178401_2381377266(((NI64) (LOC2)));
data0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_277), LOC1, 1);
{
/* Non-empty literal: open the element list (T839829468_278) and append
   each element's constant rope, separated by T839829468_279. */
NI LOC5;
LOC5 = (NI)0;
LOC5 = len_293081_850551059(n0);
if (!(((NI) 0) < LOC5)) goto LA6;
add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_278));
{
NI i_559395_839829468;
NI HEX3Atmp_559411_839829468;
NI LOC9;
NI res_559414_839829468;
i_559395_839829468 = (NI)0;
HEX3Atmp_559411_839829468 = (NI)0;
LOC9 = (NI)0;
LOC9 = len_293081_850551059(n0);
HEX3Atmp_559411_839829468 = (NI)(LOC9 - ((NI) 1));
res_559414_839829468 = ((NI) 0);
{
while (1) {
Ropeobj178006* LOC17;
if (!(res_559414_839829468 <= HEX3Atmp_559411_839829468)) goto LA11;
i_559395_839829468 = res_559414_839829468;
{
TY533289 LOC16;
if (!(((NI) 0) < i_559395_839829468)) goto LA14;
memset((void*)LOC16, 0, sizeof(LOC16));
addf_179205_2381377266(&data0, ((NimStringDesc*) &T839829468_279), LOC16, 0);
}
LA14: ;
LOC17 = (Ropeobj178006*)0;
LOC17 = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[i_559395_839829468]);
add_178482_2381377266(&data0, LOC17);
res_559414_839829468 += ((NI) 1);
} LA11: ;
}
}
add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_280));
}
LA6: ;
add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_280));
/* Declare the backing static under a fresh temp name, then return the
   name cast to the sequence type. */
result0 = gettempname_533598_839829468((*p0).module);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = gettypedesc_535673_839829468((*p0).module, (*t0).sons->data[((NI) 0)]);
LOC19 = (NI)0;
LOC19 = len_293081_850551059(n0);
LOC18[1] = rope_178401_2381377266(((NI64) (LOC19)));
LOC18[2] = result0;
LOC18[3] = data0;
appcg_532640_839829468((*p0).module, ((Tcfilesection529005) 8), ((NimStringDesc*) &T839829468_281), LOC18, 4);
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC20[1] = result0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_282), LOC20, 2);
return result0;
}
/* Generate the constant expression for n0; a node of kind 34 (presumably an
   expr-colon-expr, i.e. "name: value") is unwrapped to its value child
   (son 1) first. */
N_NIMCALL(Ropeobj178006*, gennamedconstexpr_559284_839829468)(Tcproc529021* p0, Tnode292802* n0) {
  Tnode292802* value0;
  value0 = (((*n0).kind == ((Tnodekind292020) 34))
      ? (*n0).kindU.S6.sons->data[((NI) 1)]
      : n0);
  return genconstexpr_554849_839829468(p0, value0);
}
/* Render a simple constant aggregate (array/object/tuple constructor) as a
   braced C initializer list. For node kind 38 the first son is skipped
   (start index is the bool (kind==38) coerced to 0/1 -- presumably the type
   child of an object constructor). Elements up to the second-to-last get
   a trailing separator (format T839829468_283); the last element is appended
   bare, and T839829468_160 closes the brace. Generated code; comments only. */
N_NIMCALL(Ropeobj178006*, genconstsimplelist_559299_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
NI length0;
TY533289 LOC10;
result0 = (Ropeobj178006*)0;
length0 = sonslen_295351_850551059(n0);
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_223));
{
NI i_559333_839829468;
NI HEX3Atmp_559362_839829468;
NI HEX3Atmp_559363_839829468;
NI res_559366_839829468;
i_559333_839829468 = (NI)0;
HEX3Atmp_559362_839829468 = (NI)0;
HEX3Atmp_559363_839829468 = (NI)0;
/* Loop bounds: start at 1 for kind 38 (skip son 0), else 0; stop at
   length-2 so the final element can be emitted without a separator. */
HEX3Atmp_559362_839829468 = ((*n0).kind == ((Tnodekind292020) 38));
HEX3Atmp_559363_839829468 = (NI)(length0 - ((NI) 2));
res_559366_839829468 = ((NI) (HEX3Atmp_559362_839829468));
{
while (1) {
TY178507 LOC4;
if (!(res_559366_839829468 <= HEX3Atmp_559363_839829468)) goto LA3;
i_559333_839829468 = res_559366_839829468;
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = gennamedconstexpr_559284_839829468(p0, (*n0).kindU.S6.sons->data[i_559333_839829468]);
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_283), LOC4, 1);
res_559366_839829468 += ((NI) 1);
} LA3: ;
}
}
{
/* Emit the final element (if any beyond the skipped prefix). */
Ropeobj178006* LOC9;
if (!(((NI) (((*n0).kind == ((Tnodekind292020) 38)))) < length0)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = gennamedconstexpr_559284_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))]);
add_178482_2381377266(&result0, LOC9);
}
LA7: ;
memset((void*)LOC10, 0, sizeof(LOC10));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_160), LOC10, 0);
return result0;
}
/* Generate a C rope for constant expression n0.
   Kinds 58/59 (conversion-like wrappers) recurse into son 1; kind 39 (set
   literal) goes through the bitset renderer; constructor kinds 41/37/155/38
   dispatch on the value's type (seq constants via genconstseq, everything
   else via genconstsimplelist); any other node falls back to the general
   expression generator and reads back the produced location.
   Generated code; only comments added. */
N_NIMCALL(Ropeobj178006*, genconstexpr_554849_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
switch ((*n0).kind) {
case ((Tnodekind292020) 58):
case ((Tnodekind292020) 59):
{
result0 = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]);
}
break;
case ((Tnodekind292020) 39):
{
/* Set literal: convert to a bitset sized by the type, then render it. */
Tbitset339004* cs0;
NI64 LOC3;
cs0 = (Tbitset339004*)0;
tobitset_340001_452470228(n0, (&cs0));
LOC3 = (NI64)0;
LOC3 = getsize_320135_3876443242((*n0).typ);
result0 = genrawsetdata_549629_839829468(cs0, ((NI) (LOC3)));
}
break;
case ((Tnodekind292020) 41):
case ((Tnodekind292020) 37):
case ((Tnodekind292020) 155):
case ((Tnodekind292020) 38):
{
/* Aggregate constructor: seq-typed (kind 24) literals need the named
   static emitted by genconstseq; others become an initializer list. */
Ttype292840* t0;
t0 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
{
if (!((*t0).kind == ((Ttypekind292244) 24))) goto LA7;
result0 = genconstseq_559371_839829468(p0, n0, t0);
}
goto LA5;
LA7: ;
{
result0 = genconstsimplelist_559299_839829468(p0, n0);
}
LA5: ;
}
break;
default:
{
/* General case: evaluate through the normal expression generator. */
Tloc292816 d0;
memset((void*)(&d0), 0, sizeof(d0));
initlocexpr_539283_839829468(p0, n0, (&d0));
result0 = rdloc_538188_839829468((&d0));
}
break;
}
return result0;
}
/* Ensure the C definition of constant `sym0` exists: fill its location on
   first use, emit the definition (format T839829468_272) into the module that
   owns the symbol, and emit an extern-style declaration (T839829468_284) into
   the requesting module (and the generated header when applicable).
   Generated code; only comments added. */
N_NIMCALL(void, requestconstimpl_539240_839829468)(Tcproc529021* p0, Tsym292834* sym0) {
Tcgen529027* m0;
Tcgen529027* q0;
{ m0 = (*p0).module;
useheader_532369_839829468(m0, sym0);
{
/* First encounter (loc kind 0): assign the mangled name and mark the
   location as a kind-8 static with storage class 1. */
Ropeobj178006* LOC5;
if (!((*sym0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 8), (*sym0).typ, LOC5, ((Tstorageloc292812) 1));
}
LA3: ;
{
/* loc-flag 3 set: nothing to emit for this symbol -- early return. */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA8;
goto BeforeRet;
}
LA8: ;
q0 = findpendingmodule_532241_839829468(m0, sym0);
{
/* Emit the constant's definition in its owning module q0 (once). */
NIM_BOOL LOC12;
NIM_BOOL LOC14;
TY535238 LOC17;
LOC12 = (NIM_BOOL)0;
LOC12 = !((q0 == NIM_NIL));
if (!(LOC12)) goto LA13;
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*sym0).Sup.id);
LOC12 = !(LOC14);
LA13: ;
if (!LOC12) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535673_839829468(q0, (*sym0).typ);
LOC17[1] = (*sym0).loc.r;
LOC17[2] = genconstexpr_554849_839829468((*q0).initproc, (*sym0).ast);
addf_179205_2381377266(&(*q0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3);
}
LA15: ;
{
/* Requesting module differs from the owner: declare the constant here
   (and mirror into the generated header if sym-flag 6 demands export). */
NIM_BOOL LOC20;
NIM_BOOL LOC22;
Ropeobj178006* headerdecl0;
TY532811 LOC25;
LOC20 = (NIM_BOOL)0;
LOC20 = !((q0 == m0));
if (!(LOC20)) goto LA21;
LOC22 = (NIM_BOOL)0;
LOC22 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LOC20 = !(LOC22);
LA21: ;
if (!LOC20) goto LA23;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
LOC25[1] = (*sym0).loc.r;
headerdecl0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_284), LOC25, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], headerdecl0);
{
NIM_BOOL LOC28;
LOC28 = (NIM_BOOL)0;
LOC28 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
if (!(LOC28)) goto LA29;
LOC28 = !((generatedheader_532201_839829468 == NIM_NIL));
LA29: ;
if (!LOC28) goto LA30;
add_178482_2381377266(&(*generatedheader_532201_839829468).s[(((Tcfilesection529005) 8))- 0], headerdecl0);
}
LA30: ;
}
LA23: ;
}BeforeRet: ;
}
/* Constant too complex for inline emission: make sure its C definition was
   requested/emitted, then load the constant's location into destination d0. */
N_NIMCALL(void, gencomplexconst_558249_839829468)(Tcproc529021* p0, Tsym292834* sym0, Tloc292816* d0) {
requestconstimpl_539240_839829468(p0, sym0);
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
/* Returns the address of section `s0` of the proc's outermost block
 * (slot 0 of `blocks`); callers append generated C code through it. */
static N_INLINE(Ropeobj178006**, procsec_529194_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0) {
  /* the top-level block always lives at index 0 of the blocks seq */
  return &(*p0).blocks->data[((NI) 0)].sections[s0];
}
/* First access to a thread-local variable inside proc `p0` when thread
 * vars are emulated: marks the proc and its module, then emits a
 * declaration (format T..._286, section 0) and an init snippet
 * (format T..._287, section 1).  Later accesses are no-ops thanks to
 * the `threadvaraccessed` guard. */
N_NIMCALL(void, accessthreadlocalvar_532945_839829468)(Tcproc529021* p0, Tsym292834* s0) {
  NIM_BOOL firstaccess;
  firstaccess = emulatedthreadvars_532949_839829468();
  if (firstaccess) firstaccess = !((*p0).threadvaraccessed);
  if (firstaccess) {
    Ropeobj178006** declsec;
    TY533289 noargs1;
    Ropeobj178006** initsec;
    TY533289 noargs2;
    Ropeobj178006* initcode;
    (*p0).threadvaraccessed = NIM_TRUE;
    /* record at module level that thread vars are used here */
    (*(*p0).module).flags |= ((NU8)1)<<((((Codegenflag529025) 1))%(sizeof(NU8)*8));
    declsec = procsec_529194_3723162438(p0, ((Tcprocsection529011) 0));
    memset((void*)noargs1, 0, sizeof(noargs1));
    addf_179205_2381377266(declsec, ((NimStringDesc*) &T839829468_286), noargs1, 0);
    initsec = procsec_529194_3723162438(p0, ((Tcprocsection529011) 1));
    memset((void*)noargs2, 0, sizeof(noargs2));
    initcode = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_287), noargs2, 0);
    add_178482_2381377266(initsec, initcode);
  }
}
/* True when `t0` is nil or its kind is a member of the "empty type"
 * kind-set encoded in the 64-bit mask below (short-circuit OR, exactly
 * as the original label-based code evaluated it). */
static N_INLINE(NIM_BOOL, isemptytype_297441_850551059)(Ttype292840* t0) {
  if (t0 == NIM_NIL) return NIM_TRUE;
  return ((IL64(4611686018427388032) &((NU64)1<<((NU)((*t0).kind)&63U)))!=0);
}
/* Stores the ready-made C expression `r0` (of Nim type `t0`) into
 * destination `d0`.  If `d0` already names a concrete location, a real
 * assignment is generated (flavour chosen by flag bit 2 of d0->flags);
 * otherwise `d0` simply adopts the data in place. */
N_NIMCALL(void, putdataintodest_550436_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0) {
  if ((*d0).k != ((Tlockind292808) 0)) {
    /* destination exists: wrap r0 in a temporary loc and assign */
    Tloc292816 src;
    memset((void*)(&src), 0, sizeof(src));
    initloc_532273_839829468((&src), ((Tlockind292808) 8), t0, ((Tstorageloc292812) 1));
    src.r = r0;
    if (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0) {
      genassignment_539264_839829468(p0, d0, (&src), 0);
    } else {
      genassignment_539264_839829468(p0, d0, (&src), 1);
    }
  } else {
    /* no destination yet: let d0 take over the data directly */
    (*d0).k = ((Tlockind292808) 8);
    unsureAsgnRef((void**) (&(*d0).t), t0);
    unsureAsgnRef((void**) (&(*d0).r), r0);
  }
}
/* Refreshes the proc's cached source position.  Returns NIM_TRUE (and
 * updates the cache) only when either the line or the file index
 * differs from the last recorded position; used to suppress redundant
 * line-info emission. */
N_NIMCALL(NIM_BOOL, freshlineinfo_532818_839829468)(Tcproc529021* p0, Tlineinfo191336 info0) {
  NIM_BOOL changed;
  changed = !((*p0).lastlineinfo.line == info0.line);
  if (!changed) changed = !((*p0).lastlineinfo.fileindex == info0.fileindex);
  if (changed) {
    (*p0).lastlineinfo.line = info0.line;
    (*p0).lastlineinfo.fileindex = info0.fileindex;
  }
  return changed;
}
/* Emits source-line bookkeeping for node `t0` into proc `p0`'s
 * statement section (section 2):
 *   1. when global option bit 28 is set, the original source line is
 *      appended as text (format T..._293 + sourceline + newline);
 *   2. a C line directive via genclinedir with the full path;
 *   3. when option mask 163840 (two bits) is fully set and the proc is
 *      not flagged with symflag 9, a statement built from format
 *      T..._294 with line + file name; otherwise, when mask 98304 is
 *      fully set (same proc-flag guard) and the file index is valid
 *      (>= 0), a statement from format T..._295 with line + quoted
 *      file name.  Both variants are gated on freshlineinfo so the
 *      same position is not emitted twice in a row. */
N_NIMCALL(void, genlinedir_532823_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NI line0;
Ropeobj178006** LOC11;
NimStringDesc* LOC12;
line0 = safelinenm_532721_839829468((*t0).info);
{
Ropeobj178006** LOC5;
TY533289 LOC6;
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
Ropeobj178006* LOC10;
/* optional: embed the original source line as text */
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 28))&63U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006**)0;
LOC5 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
memset((void*)LOC6, 0, sizeof(LOC6));
LOC7 = (Ropeobj178006*)0;
LOC7 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_293), LOC6, 0);
LOC8 = (Ropeobj178006*)0;
LOC8 = sourceline_192065_155036129((*t0).info);
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX26_178418_2381377266(LOC7, LOC8);
LOC10 = (Ropeobj178006*)0;
LOC10 = HEX26_178418_2381377266(LOC9, rnl_178903_2381377266);
add_178482_2381377266(LOC5, LOC10);
}
LA3: ;
/* unconditional C line directive with the file's full path */
LOC11 = (Ropeobj178006**)0;
LOC11 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC12 = (NimStringDesc*)0;
LOC12 = tofullpath_192261_155036129((*t0).info.fileindex);
genclinedir_532725_839829468(LOC11, LOC12, line0);
{
NIM_BOOL LOC15;
NIM_BOOL LOC17;
LOC15 = (NIM_BOOL)0;
/* both option bits of mask 163840 must be set ... */
LOC15 = ((163840 & (*p0).options) == 163840);
if (!(LOC15)) goto LA16;
LOC17 = (NIM_BOOL)0;
/* ... and the enclosing proc (if any) must not carry symflag 9 */
LOC17 = ((*p0).prc == NIM_NIL);
if (LOC17) goto LA18;
LOC17 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
LA18: ;
LOC15 = LOC17;
LA16: ;
if (!LOC15) goto LA19;
{
NIM_BOOL LOC23;
TY532811 LOC26;
NimStringDesc* LOC27;
LOC23 = (NIM_BOOL)0;
LOC23 = freshlineinfo_532818_839829468(p0, (*t0).info);
if (!LOC23) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rope_178401_2381377266(((NI64) (line0)));
LOC27 = (NimStringDesc*)0;
LOC27 = tofilename_192257_155036129((*t0).info.fileindex);
LOC26[1] = makecstring_191638_155036129(LOC27);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_294), LOC26, 2);
}
LA24: ;
}
goto LA13;
LA19: ;
{
NIM_BOOL LOC29;
NIM_BOOL LOC30;
NIM_BOOL LOC32;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
/* weaker option mask 98304, same proc-flag guard, plus a valid file */
LOC30 = ((98304 & (*p0).options) == 98304);
if (!(LOC30)) goto LA31;
LOC32 = (NIM_BOOL)0;
LOC32 = ((*p0).prc == NIM_NIL);
if (LOC32) goto LA33;
LOC32 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
LA33: ;
LOC30 = LOC32;
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA34;
LOC29 = (((NI32) 0) <= (*t0).info.fileindex);
LA34: ;
if (!LOC29) goto LA35;
{
NIM_BOOL LOC39;
TY532811 LOC42;
LOC39 = (NIM_BOOL)0;
LOC39 = freshlineinfo_532818_839829468(p0, (*t0).info);
if (!LOC39) goto LA40;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = rope_178401_2381377266(((NI64) (line0)));
LOC42[1] = quotedfilename_196818_155036129((*t0).info);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_295), LOC42, 2);
}
LA40: ;
}
goto LA13;
LA35: ;
LA13: ;
}
/* Produces a fresh, unique label rope for proc `p0` by bumping the
 * proc's label counter and prefixing it with the constant T..._296. */
N_NIMCALL(Ropeobj178006*, getlabel_539217_839829468)(Tcproc529021* p0) {
  Ropeobj178006* numpart;
  (*p0).labels += ((NI) 1);
  numpart = rope_178401_2381377266(((NI64) ((*p0).labels)));
  return HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), numpart);
}
/* Emits the definition site of label `labl0` into the proc's statement
 * section using format string T..._299. */
N_NIMCALL(void, fixlabel_539230_839829468)(Tcproc529021* p0, Ropeobj178006* labl0) {
  TY178507 fmtargs;
  memset((void*)fmtargs, 0, sizeof(fmtargs));
  fmtargs[0] = labl0;
  linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_299), fmtargs, 1);
}
/* Generates short-circuit `and`/`or` (selected by magic `m0`): the
 * first operand is evaluated into a temp, a conditional jump to label
 * L0 is emitted (format T..._297 when m0 == 127, else T..._298 — one
 * is the "or" jump-if-true, the other the "and" jump-if-false;
 * which is which depends on the format constants), then the second
 * operand is evaluated into the same temp and the label is placed.
 * Finally the temp is moved/assigned into `d0`.  `splitdecls` is
 * incremented around the operand evaluation. */
N_NIMCALL(void, genandor_554311_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
Ropeobj178006* L0;
Tloc292816 tmp0;
L0 = (Ropeobj178006*)0;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE);
(*p0).splitdecls += ((NI) 1);
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
L0 = getlabel_539217_839829468(p0);
{
TY532811 LOC5;
if (!(m0 == ((Tmagic292524) 127))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&tmp0));
LOC5[1] = L0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_297), LOC5, 2);
}
goto LA1;
LA3: ;
{
TY532811 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468((&tmp0));
LOC7[1] = L0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_298), LOC7, 2);
}
LA1: ;
/* second operand only runs when the jump above was not taken */
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&tmp0));
fixlabel_539230_839829468(p0, L0);
{
/* empty destination adopts the temp wholesale ... */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA10;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA8;
LA10: ;
{
/* ... otherwise a normal assignment is generated */
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA8: ;
(*p0).splitdecls -= ((NI) 1);
}
/* Generates a unary arithmetic operation `op0` on e0[1].  The format
 * template from unarithtab receives: $1 the operand, $2 the bit width
 * of the (type-skipped) operand type, $3 the C type descriptor of the
 * expression's type. */
N_NIMCALL(void, unaryarith_552646_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
  Tloc292816 operand;
  Ttype292840* skipped;
  TY535238 fmtargs;
  NI64 bytesize;
  Ropeobj178006* code;
  memset((void*)(&operand), 0, sizeof(operand));
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand));
  skipped = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
  memset((void*)fmtargs, 0, sizeof(fmtargs));
  fmtargs[0] = rdloc_538188_839829468((&operand));
  bytesize = getsize_320135_3876443242(skipped);
  /* size is in bytes; the template wants bits */
  fmtargs[1] = rope_178401_2381377266((NI64)(bytesize * IL64(8)));
  fmtargs[2] = getsimpletypedesc_533936_839829468((*p0).module, (*e0).typ);
  code = HEX25_178905_2381377266(unarithtab_552653_839829468[(op0)- 99], fmtargs, 3);
  putintodest_550468_839829468(p0, d0, (*e0).typ, code, ((Tstorageloc292812) 0));
}
/* Generates an overflow-aware unary operation `m0` on e0[1].  When
 * option bit 5 (overflow checks) is active, a pre-check built from
 * format T..._317 with the operand and the type's lowest ordinal value
 * is emitted first; then the operation itself is produced from the
 * `opr` template table ($1 operand, $2 bit width). */
N_NIMCALL(void, unaryarithoverflow_551633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
  Tloc292816 operand;
  Ttype292840* skipped;
  TY532811 fmtargs;
  NI64 bits;
  Ropeobj178006* code;
  memset((void*)(&operand), 0, sizeof(operand));
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand));
  skipped = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
  if (((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0) {
    /* overflow checking enabled: emit the range pre-check */
    TY532811 chkargs;
    NI64 lowest;
    memset((void*)chkargs, 0, sizeof(chkargs));
    chkargs[0] = rdloc_538188_839829468((&operand));
    lowest = firstord_320001_3876443242(skipped);
    chkargs[1] = intliteral_539270_839829468(lowest);
    linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_317), chkargs, 2);
  }
  memset((void*)fmtargs, 0, sizeof(fmtargs));
  fmtargs[0] = rdloc_538188_839829468((&operand));
  bits = getsize_320135_3876443242(skipped);
  fmtargs[1] = rope_178401_2381377266((NI64)(bits * IL64(8)));
  code = HEX25_178905_2381377266(opr_551640_839829468[(m0)- 96], fmtargs, 2);
  putintodest_550468_839829468(p0, d0, (*e0).typ, code, ((Tstorageloc292812) 0));
}
/* Generates a plain (unchecked) binary arithmetic operation `op0` on
 * e0[1] and e0[2].  The template from binarithtab receives: $1 left
 * operand, $2 right operand, $3 the bit width of the wider operand,
 * $4 the C type descriptor of the expression's type. */
N_NIMCALL(void, binaryarith_551819_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
  Tloc292816 lhs;
  Tloc292816 rhs;
  NI64 lsize;
  NI64 rsize;
  NI64 bits;
  TY535235 fmtargs;
  Ropeobj178006* code;
  memset((void*)(&lhs), 0, sizeof(lhs));
  memset((void*)(&rhs), 0, sizeof(rhs));
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
  lsize = getsize_320135_3876443242(lhs.t);
  rsize = getsize_320135_3876443242(rhs.t);
  /* operate at the width of the larger operand, converted to bits */
  bits = (NI64)(((lsize >= rsize) ? lsize : rsize) * IL64(8));
  memset((void*)fmtargs, 0, sizeof(fmtargs));
  fmtargs[0] = rdloc_538188_839829468((&lhs));
  fmtargs[1] = rdloc_538188_839829468((&rhs));
  fmtargs[2] = rope_178401_2381377266(bits);
  fmtargs[3] = getsimpletypedesc_533936_839829468((*p0).module, (*e0).typ);
  code = HEX25_178905_2381377266(binarithtab_551826_839829468[(op0)- 52], fmtargs, 4);
  putintodest_550468_839829468(p0, d0, (*e0).typ, code, ((Tstorageloc292812) 0));
}
/* Generates a binary floating-point operation `m0`.  When either of
 * the two option bits in mask 384 is set (float-check options — the
 * exact pair is defined elsewhere; presumably NaN/Inf checks), a
 * checked form is produced via template T..._319 and, per option bits
 * 7 and 8, post-checks T..._323 / T..._324 are emitted on the result.
 * Otherwise the work is delegated to the generic binaryarith. */
N_NIMCALL(void, binaryfloatarith_556729_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
{
Tloc292816 a0;
Tloc292816 b0;
TY535235 LOC5;
Tnode292802* LOC6;
Ropeobj178006* LOC7;
/* condition: at least one bit of mask 384 set in p0->options */
if (!!(((384 & (*p0).options) == 0))) goto LA3;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC5, 0, sizeof(LOC5));
/* $1 = operator name from opr table, $2/$3 = operands, $4 = C type */
LOC5[0] = rope_178277_2381377266(opr_556763_839829468[(m0)- 52]);
LOC5[1] = rdloc_538188_839829468((&a0));
LOC5[2] = rdloc_538188_839829468((&b0));
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
LOC5[3] = getsimpletypedesc_533936_839829468((*p0).module, (*LOC6).typ);
LOC7 = (Ropeobj178006*)0;
LOC7 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_319), LOC5, 4);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc292812) 0));
{
TY178507 LOC12;
/* option bit 7: emit post-check T..._323 on the destination */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 7))&31U)))!=0)) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468((&(*d0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_323), LOC12, 1);
}
LA10: ;
{
TY178507 LOC17;
/* option bit 8: emit post-check T..._324 on the destination */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 8))&31U)))!=0)) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468((&(*d0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_324), LOC17, 1);
}
LA15: ;
}
goto LA1;
LA3: ;
{
/* no float-check options: plain binary arithmetic */
binaryarith_551819_839829468(p0, e0, d0, m0);
}
LA1: ;
}
/* Generates equality comparison for proc values: chooses the format
 * string by the left operand's calling convention (convention 8 —
 * closures, which need a two-field compare via T..._352 — versus a
 * plain pointer compare via T..._341), then puts the result into `d0`. */
N_NIMCALL(void, geneqproc_552214_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
  Tloc292816 lhs;
  Tloc292816 rhs;
  Ttype292840* skipped;
  NimStringDesc* fmt;
  TY532811 fmtargs;
  Ropeobj178006* code;
  memset((void*)(&lhs), 0, sizeof(lhs));
  memset((void*)(&rhs), 0, sizeof(rhs));
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
  skipped = skiptypes_296099_850551059(lhs.t, IL64(211106232576256));
  if ((*skipped).callconv == ((Tcallingconvention292002) 8)) {
    fmt = ((NimStringDesc*) &T839829468_352);
  } else {
    fmt = ((NimStringDesc*) &T839829468_341);
  }
  memset((void*)fmtargs, 0, sizeof(fmtargs));
  fmtargs[0] = rdloc_538188_839829468((&lhs));
  fmtargs[1] = rdloc_538188_839829468((&rhs));
  code = HEX25_178905_2381377266(fmt, fmtargs, 2);
  putintodest_550468_839829468(p0, d0, (*e0).typ, code, ((Tstorageloc292812) 0));
}
/* Renders loc `a0` like rdloc, but when the (type-skipped) type's kind
 * is 2 (char) the rendered expression is additionally wrapped by the
 * T..._358 template. */
N_NIMCALL(Ropeobj178006*, rdcharloc_538227_839829468)(Tloc292816* a0) {
  Ropeobj178006* rendered;
  Ttype292840* skipped;
  rendered = rdloc_538188_839829468(a0);
  skipped = skiptypes_296099_850551059((*a0).t, IL64(211106233624832));
  if ((*skipped).kind == ((Ttypekind292244) 2)) {
    TY178507 fmtargs;
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fmtargs[0] = rendered;
    rendered = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_358), fmtargs, 1);
  }
  return rendered;
}
/* Emits the raw code for an overflow-checked binary operation:
 * declares a fresh temporary (typed `NI`-like via T..._36 when the
 * operand type is narrower than the platform int, else with the real
 * type descriptor), applies format `frmt0` with the temp and the two
 * operands, and — for narrow types or the kinds in mask 1064960 —
 * appends a range check (T..._359) against the type's first/last
 * ordinal.  Returns the rope naming the temporary. */
N_NIMCALL(Ropeobj178006*, binaryarithoverflowraw_551235_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* a0, Tloc292816* b0, NimStringDesc* frmt0) {
Ropeobj178006* result0;
NI64 size0;
Ropeobj178006* storage0;
TY532811 LOC6;
TY535238 LOC7;
result0 = (Ropeobj178006*)0;
size0 = getsize_320135_3876443242(t0);
{
/* narrow types compute in the wider platform-int storage */
if (!(size0 < ((NI64) (intsize_176641_4151366050)))) goto LA3;
storage0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_36));
}
goto LA1;
LA3: ;
{
storage0 = gettypedesc_535673_839829468((*p0).module, t0);
}
LA1: ;
result0 = gettempname_533598_839829468((*p0).module);
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = storage0;
LOC6[1] = result0;
/* declaration of the temp goes into section 0 (locals) */
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_54), LOC6, 2);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = result0;
LOC7[1] = rdcharloc_538227_839829468(a0);
LOC7[2] = rdcharloc_538227_839829468(b0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC7, 3);
{
NIM_BOOL LOC10;
TY535238 LOC14;
NI64 LOC15;
NI64 LOC16;
LOC10 = (NIM_BOOL)0;
LOC10 = (size0 < ((NI64) (intsize_176641_4151366050)));
if (LOC10) goto LA11;
LOC10 = ((1064960 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0);
LA11: ;
if (!LOC10) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = result0;
LOC15 = (NI64)0;
LOC15 = firstord_320001_3876443242(t0);
LOC14[1] = intliteral_539270_839829468(LOC15);
LOC16 = (NI64)0;
LOC16 = lastord_320004_3876443242(t0);
LOC14[2] = intliteral_539270_839829468(LOC16);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_359), LOC14, 3);
}
LA12: ;
return result0;
}
/* Generates an overflow-checked binary operation `m0` on e0[1]/e0[2].
 * With option bit 5 (overflow checks) OFF, a direct template from the
 * opr table is used.  With checks ON, a runtime helper is selected:
 * the 64-bit variant (prc64 table) for type kind 35, otherwise the
 * default-width variant (prc table); the raw emission is delegated to
 * binaryarithoverflowraw and the resulting temp is cast back to the
 * expression's type via T..._370. */
N_NIMCALL(void, binaryarithoverflow_551262_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* t0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
{
Ropeobj178006* res0;
TY535238 LOC5;
/* fast path: overflow checking disabled */
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC5[1] = rdloc_538188_839829468((&a0));
LOC5[2] = rdloc_538188_839829468((&b0));
res0 = HEX25_178905_2381377266(opr_551279_839829468[(m0)- 45], LOC5, 3);
putintodest_550468_839829468(p0, d0, (*e0).typ, res0, ((Tstorageloc292812) 0));
}
goto LA1;
LA3: ;
{
Ropeobj178006* res0;
NimStringDesc* LOC7;
TY532811 LOC13;
Ropeobj178006* LOC14;
LOC7 = (NimStringDesc*)0;
{
/* kind 35 selects the 64-bit helper table */
if (!((*t0).kind == ((Ttypekind292244) 35))) goto LA10;
LOC7 = copyString(prc64_551274_839829468[(m0)- 45]);
}
goto LA8;
LA10: ;
{
LOC7 = copyString(prc_551269_839829468[(m0)- 45]);
}
LA8: ;
res0 = binaryarithoverflowraw_551235_839829468(p0, t0, (&a0), (&b0), LOC7);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC13[1] = res0;
LOC14 = (Ropeobj178006*)0;
LOC14 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_370), LOC13, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC14, ((Tstorageloc292812) 0));
}
LA1: ;
}
/* Returns the rope naming the length field of seq/string headers.  The
 * name differs (T..._157 vs T..._158) depending on whether the active
 * command is kind 2 or the module's symbol carries flag 27 —
 * presumably a compatibility/"old binary format" toggle. */
N_NIMCALL(Ropeobj178006*, lenfield_539305_839829468)(Tcproc529021* p0) {
  NimStringDesc* fieldname;
  NIM_BOOL altmode;
  altmode = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
  if (!altmode) altmode = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
  if (altmode) {
    fieldname = copyString(((NimStringDesc*) &T839829468_157));
  } else {
    fieldname = copyString(((NimStringDesc*) &T839829468_158));
  }
  return rope_178277_2381377266(fieldname);
}
/* When the selected GC mode is 0, reports message kind 263 at the
 * node's position with the rendered source of `n0` — a diagnostic for
 * constructs that require the garbage collector. */
N_NIMCALL(void, gcusage_554439_839829468)(Tnode292802* n0) {
  if (gselectedgc_169133_2607990831 == ((Tgcmode169080) 0)) {
    NimStringDesc* rendered;
    rendered = rendertree_311044_382274130(n0, 0);
    message_196095_155036129((*n0).info, ((Tmsgkind191002) 263), rendered);
  }
}
/* Generates a call to the runtime `repr` implementation for e0[1].
 * Dispatches on the (type-skipped) kind of the argument: each case
 * wraps the argument loc in a different runtime-call template
 * (T..._371 .. T..._383).  The open-array/sequence-ish case (kinds
 * 27/48) first normalises the argument into a (data, length) pair
 * `b0` based on the *concrete* loc type, then calls the element-wise
 * repr with the element's type info.  Kinds 3 and 62 are rejected
 * with a localised error.  Finally gcusage flags the GC dependency. */
N_NIMCALL(void, genrepr_555339_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* t0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
switch ((*t0).kind) {
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 35):
case ((Ttypekind292244) 40) ... ((Ttypekind292244) 44):
{
TY178507 LOC2;
Ropeobj178006* LOC3;
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rdloc_538188_839829468((&a0));
LOC3 = (Ropeobj178006*)0;
LOC3 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_371), LOC2, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC3, a0.s);
}
break;
case ((Ttypekind292244) 36) ... ((Ttypekind292244) 39):
{
TY178507 LOC5;
Ropeobj178006* LOC6;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&a0));
LOC6 = (Ropeobj178006*)0;
LOC6 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_372), LOC5, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC6, a0.s);
}
break;
case ((Ttypekind292244) 1):
{
TY178507 LOC8;
Ropeobj178006* LOC9;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468((&a0));
LOC9 = (Ropeobj178006*)0;
LOC9 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_373), LOC8, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC9, a0.s);
}
break;
case ((Ttypekind292244) 2):
{
TY178507 LOC11;
Ropeobj178006* LOC12;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdloc_538188_839829468((&a0));
LOC12 = (Ropeobj178006*)0;
LOC12 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_374), LOC11, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC12, a0.s);
}
break;
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 15):
{
/* these kinds additionally pass the run-time type info */
TY532811 LOC14;
Ropeobj178006* LOC15;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468((&a0));
LOC14[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC15 = (Ropeobj178006*)0;
LOC15 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_375), LOC14, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC15, a0.s);
}
break;
case ((Ttypekind292244) 28):
{
TY178507 LOC17;
Ropeobj178006* LOC18;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468((&a0));
LOC18 = (Ropeobj178006*)0;
LOC18 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_376), LOC17, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC18, a0.s);
}
break;
case ((Ttypekind292244) 19):
{
/* passes the address of the loc rather than its value */
TY532811 LOC20;
Ropeobj178006* LOC21;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = addrloc_538204_839829468((&a0));
LOC20[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC21 = (Ropeobj178006*)0;
LOC21 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_377), LOC20, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC21, a0.s);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
Tloc292816 b0;
TY532811 LOC34;
Ttype292840* LOC35;
Ropeobj178006* LOC36;
memset((void*)(&b0), 0, sizeof(b0));
/* normalise to (data, length) depending on the concrete loc type */
switch ((*a0.t).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
TY178507 LOC24;
Ropeobj178006* LOC25;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = rdloc_538188_839829468((&a0));
LOC25 = (Ropeobj178006*)0;
LOC25 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_378), LOC24, 1);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC25, a0.s);
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
TY532811 LOC27;
Ropeobj178006* LOC28;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = rdloc_538188_839829468((&a0));
LOC27[1] = lenfield_539305_839829468(p0);
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_379), LOC27, 2);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC28, a0.s);
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
/* fixed-size: length is the type's static ordinal length */
TY532811 LOC30;
NI64 LOC31;
Ropeobj178006* LOC32;
memset((void*)LOC30, 0, sizeof(LOC30));
LOC30[0] = rdloc_538188_839829468((&a0));
LOC31 = (NI64)0;
LOC31 = lengthord_320007_3876443242(a0.t);
LOC30[1] = rope_178401_2381377266(LOC31);
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC30, 2);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC32, a0.s);
}
break;
default:
{
internalerror_196100_155036129((*(*e0).kindU.S6.sons->data[((NI) 0)]).info, ((NimStringDesc*) &T839829468_381));
}
break;
}
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = rdloc_538188_839829468((&b0));
LOC35 = (Ttype292840*)0;
LOC35 = elemtype_320394_3876443242(t0);
LOC34[1] = gentypeinfo_535941_839829468((*p0).module, LOC35);
LOC36 = (Ropeobj178006*)0;
LOC36 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_382), LOC34, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC36, a0.s);
}
break;
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 5):
case ((Ttypekind292244) 24):
{
TY532811 LOC38;
Ropeobj178006* LOC39;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = rdloc_538188_839829468((&a0));
LOC38[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC39 = (Ropeobj178006*)0;
LOC39 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC38, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC39, a0.s);
}
break;
case ((Ttypekind292244) 3):
case ((Ttypekind292244) 62):
{
/* repr is not available for these kinds: report an error */
localerror_196085_155036129((*e0).info, ((NimStringDesc*) &T839829468_384));
}
break;
default:
{
/* fallback: generic repr via address + type info */
TY532811 LOC42;
Ropeobj178006* LOC43;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = addrloc_538204_839829468((&a0));
LOC42[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC43 = (Ropeobj178006*)0;
LOC43 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC42, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC43, a0.s);
}
break;
}
gcusage_554439_839829468(e0);
}
/* Generates `getTypeInfo`-style code: resolves the run-time type info
 * rope for the (type-skipped) type of e0[1] and stores it in `d0`. */
N_NIMCALL(void, gengettypeinfo_555383_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
  Ttype292840* skipped;
  Ropeobj178006* tinfo;
  skipped = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
  tinfo = gentypeinfo_535941_839829468((*p0).module, skipped);
  putintodest_550468_839829468(p0, d0, (*e0).typ, tinfo, ((Tstorageloc292812) 0));
}
/* Generates `swap(a, b)` as three assignments through a fresh
 * temporary of the operands' (type-skipped) type. */
N_NIMCALL(void, genswap_555638_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
  Tloc292816 lhs;
  Tloc292816 rhs;
  Tloc292816 scratch;
  Ttype292840* elemtyp;
  memset((void*)(&lhs), 0, sizeof(lhs));
  memset((void*)(&rhs), 0, sizeof(rhs));
  memset((void*)(&scratch), 0, sizeof(scratch));
  elemtyp = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
  gettemp_537032_839829468(p0, elemtyp, (&scratch), NIM_FALSE);
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
  /* tmp = a; a = b; b = tmp */
  genassignment_539264_839829468(p0, (&scratch), (&lhs), 0);
  genassignment_539264_839829468(p0, (&lhs), (&rhs), 0);
  genassignment_539264_839829468(p0, (&rhs), (&scratch), 0);
}
/* Generic one-operand expression generator: evaluates e0[1] into a
 * loc, substitutes it as $1 of `frmt0`, and stores the result in d0. */
N_NIMCALL(void, unaryexpr_551209_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
  Tloc292816 operand;
  TY178507 fmtargs;
  Ropeobj178006* code;
  memset((void*)(&operand), 0, sizeof(operand));
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand));
  memset((void*)fmtargs, 0, sizeof(fmtargs));
  fmtargs[0] = rdloc_538188_839829468((&operand));
  code = ropecg_532407_839829468((*p0).module, frmt0, fmtargs, 1);
  putintodest_550468_839829468(p0, d0, (*e0).typ, code, ((Tstorageloc292812) 0));
}
/* Generic two-operand statement generator: evaluates e0[1] and e0[2]
 * and emits `frmt0` with them as $1/$2 into the statement section.
 * Statements produce no value, so a non-empty destination loc is an
 * internal error. */
N_NIMCALL(void, binarystmt_550501_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
  Tloc292816 lhs;
  Tloc292816 rhs;
  TY532811 fmtargs;
  memset((void*)(&lhs), 0, sizeof(lhs));
  memset((void*)(&rhs), 0, sizeof(rhs));
  if ((*d0).k != ((Tlockind292808) 0)) {
    internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_387));
  }
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
  memset((void*)fmtargs, 0, sizeof(fmtargs));
  fmtargs[0] = rdloc_538188_839829468((&lhs));
  fmtargs[1] = rdloc_538188_839829468((&rhs));
  linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, fmtargs, 2);
}
/* Generates string concatenation `a & b & ...` (operands are
 * e0[1..sonslen-1]): a temp string is allocated once with the total
 * length — the static contribution L0 (one per char operand, literal
 * length per string literal) plus the run-time lengths collected in
 * `lens0` — via format T..._393, then the per-operand append snippets
 * accumulated in `appends0` are flushed (char appends use T..._390,
 * string appends T..._392).  Finally the temp is moved or assigned
 * into `d0` and gcusage records the GC dependency. */
N_NIMCALL(void, genstrconcat_554452_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 tmp0;
NI L0;
Ropeobj178006* appends0;
Ropeobj178006* lens0;
TY535238 LOC21;
Ropeobj178006** LOC22;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE);
L0 = ((NI) 0);
appends0 = NIM_NIL;
lens0 = NIM_NIL;
{
NI i_554475_839829468;
NI HEX3Atmp_554547_839829468;
NI LOC2;
NI res_554550_839829468;
i_554475_839829468 = (NI)0;
HEX3Atmp_554547_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554547_839829468 = (NI)(LOC2 - ((NI) 2));
res_554550_839829468 = ((NI) 0);
{
/* iterate operands e0[1] .. e0[sonslen-1] */
while (1) {
if (!(res_554550_839829468 <= HEX3Atmp_554547_839829468)) goto LA4;
i_554475_839829468 = res_554550_839829468;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))], (&a0));
{
Ttype292840* LOC7;
TY532811 LOC10;
Ropeobj178006* LOC11;
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).typ, IL64(211106242013440));
/* kind 2 = char operand: fixed length contribution of 1 */
if (!((*LOC7).kind == ((Ttypekind292244) 2))) goto LA8;
L0 += ((NI) 1);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = tmp0.r;
LOC10[1] = rdloc_538188_839829468((&a0));
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2);
add_178482_2381377266(&appends0, LOC11);
}
goto LA5;
LA8: ;
{
TY532811 LOC19;
Ropeobj178006* LOC20;
{
/* node kinds 20..22 are literals: take their static length */
if (!((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kind >= ((Tnodekind292020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kind <= ((Tnodekind292020) 22))) goto LA15;
L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kindU.S3.strval->Sup.len : 0);
}
goto LA13;
LA15: ;
{
/* non-literal string: add its runtime length expression */
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468((&a0));
LOC18[1] = lenfield_539305_839829468(p0);
addf_179205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2);
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = tmp0.r;
LOC19[1] = rdloc_538188_839829468((&a0));
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2);
add_178482_2381377266(&appends0, LOC20);
}
LA5: ;
res_554550_839829468 += ((NI) 1);
} LA4: ;
}
}
/* allocate the temp with total length, then flush the appends */
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = tmp0.r;
LOC21[1] = lens0;
LOC21[2] = rope_178401_2381377266(((NI64) (L0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_393), LOC21, 3);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC22, appends0);
{
/* empty destination adopts the temp, else a real assignment */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA25;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA23;
LA25: ;
{
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA23: ;
gcusage_554439_839829468(e0);
}
/* Generates in-place string append `dest.add(...)` (dest is e0[1],
 * appended operands are e0[2..sonslen-1]).  Mirrors genstrconcat but
 * grows the destination instead of a temp: the total extra length —
 * static part L0 (chars and literals) plus runtime lengths in `lens0`
 * — is applied with format T..._395, then the per-operand append
 * snippets in `appends0` are flushed (char: T..._390, string:
 * T..._392).  No value is produced, so `d0` is untouched. */
N_NIMCALL(void, genstrappend_554554_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 dest0;
Ropeobj178006* appends0;
Ropeobj178006* lens0;
NI L0;
TY535238 LOC21;
Ropeobj178006** LOC22;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&dest0), 0, sizeof(dest0));
appends0 = (Ropeobj178006*)0;
lens0 = (Ropeobj178006*)0;
L0 = ((NI) 0);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&dest0));
{
NI i_554615_839829468;
NI HEX3Atmp_554676_839829468;
NI LOC2;
NI res_554679_839829468;
i_554615_839829468 = (NI)0;
HEX3Atmp_554676_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554676_839829468 = (NI)(LOC2 - ((NI) 3));
res_554679_839829468 = ((NI) 0);
{
/* iterate operands e0[2] .. e0[sonslen-1] */
while (1) {
if (!(res_554679_839829468 <= HEX3Atmp_554676_839829468)) goto LA4;
i_554615_839829468 = res_554679_839829468;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))], (&a0));
{
Ttype292840* LOC7;
TY532811 LOC10;
Ropeobj178006* LOC11;
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).typ, IL64(211106242013440));
/* kind 2 = char operand: fixed length contribution of 1 */
if (!((*LOC7).kind == ((Ttypekind292244) 2))) goto LA8;
L0 += ((NI) 1);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468((&dest0));
LOC10[1] = rdloc_538188_839829468((&a0));
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2);
add_178482_2381377266(&appends0, LOC11);
}
goto LA5;
LA8: ;
{
TY532811 LOC19;
Ropeobj178006* LOC20;
{
/* node kinds 20..22 are literals: take their static length */
if (!((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kind >= ((Tnodekind292020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kind <= ((Tnodekind292020) 22))) goto LA15;
L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kindU.S3.strval->Sup.len : 0);
}
goto LA13;
LA15: ;
{
/* non-literal string: add its runtime length expression */
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468((&a0));
LOC18[1] = lenfield_539305_839829468(p0);
addf_179205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2);
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((&dest0));
LOC19[1] = rdloc_538188_839829468((&a0));
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2);
add_178482_2381377266(&appends0, LOC20);
}
LA5: ;
res_554679_839829468 += ((NI) 1);
} LA4: ;
}
}
/* grow the destination by the total length, then flush the appends */
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdloc_538188_839829468((&dest0));
LOC21[1] = lens0;
LOC21[2] = rope_178401_2381377266(((NI64) (L0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_395), LOC21, 3);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC22, appends0);
gcusage_554439_839829468(e0);
}
/* Machine-generated C (Nim compiler C backend; mangled Nim symbols).
 * Appears to emit C code for appending a single element to a seq
 * (`add(seq, elem)`): grows the seq via one of two append patterns
 * (selected below by GC/compile mode), then assigns the element into
 * the new last slot and bumps the length field. Do not hand-edit. */
N_NIMCALL(void, genseqelemappend_554683_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
NimStringDesc* seqappendpattern0;
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 dest0;
Ttype292840* bt0;
TY535238 LOC8;
Ttype292840* LOC9;
TY532811 LOC10;
TY532811 LOC11;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
/* Mode test: gcmd == some command (2) OR a module flag bit 27 is set;
 * presumably "no GC header" / compatibility mode — confirm vs. Nim source. */
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_396));
}
goto LA1;
LA5: ;
{
seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_397));
}
LA1: ;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&dest0), 0, sizeof(dest0));
/* sons[1] = the seq location, sons[2] = the element to append. */
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
bt0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 2)]).typ, IL64(211106240964864));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468((&a0));
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC8[1] = gettypedesc_535673_839829468((*p0).module, LOC9);
LOC8[2] = gettypedesc_535673_839829468((*p0).module, bt0);
/* Emit the grow/append call with (seq, seq type, elem type). */
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), seqappendpattern0, LOC8, 3);
/* dest0 = expression-kind loc "seq->data[seq->len]" used as assignment target. */
initloc_532273_839829468((&dest0), ((Tlockind292808) 6), bt0, ((Tstorageloc292812) 3));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468((&a0));
LOC10[1] = lenfield_539305_839829468(p0);
dest0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_398), LOC10, 2);
genassignment_539264_839829468(p0, (&dest0), (&b0), 3);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdloc_538188_839829468((&a0));
LOC11[1] = lenfield_539305_839829468(p0);
/* Emit the length-increment statement. */
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_399), LOC11, 2);
gcusage_554439_839829468(e0);
}
/* Machine-generated (Nim compiler C backend). Generic helper: evaluates
 * both operand expressions (sons[1], sons[2]), substitutes their rvalues
 * into format string frmt0 via ropecg, and stores the resulting C
 * expression into destination d0. Do not hand-edit. */
N_NIMCALL(void, binaryexpr_550549_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC1;
Ropeobj178006* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&a0));
LOC1[1] = rdloc_538188_839829468((&b0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* Machine-generated (Nim compiler C backend). Emits C for string
 * equality `a == b` with three strategies:
 *   1. either side is a nil literal (node kind 23)  -> pointer compare,
 *   2. either side is an empty string literal       -> length-==-0 test,
 *   3. otherwise                                    -> runtime eqStrings call.
 * Kinds 20..22 look like the string-literal node kinds; confirm against
 * the Nim compiler's ast module. Do not hand-edit. */
N_NIMCALL(void, genstrequals_556667_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 x0;
Tnode292802* a0;
Tnode292802* b0;
memset((void*)(&x0), 0, sizeof(x0));
a0 = (*e0).kindU.S6.sons->data[((NI) 1)];
b0 = (*e0).kindU.S6.sons->data[((NI) 2)];
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*a0).kind == ((Tnodekind292020) 23));
if (LOC3) goto LA4;
LOC3 = ((*b0).kind == ((Tnodekind292020) 23));
LA4: ;
if (!LOC3) goto LA5;
/* nil on either side: plain binary comparison template. */
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341));
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC8;
TY532811 LOC12;
Ropeobj178006* LOC13;
LOC8 = (NIM_BOOL)0;
LOC8 = ((*a0).kind >= ((Tnodekind292020) 20) && (*a0).kind <= ((Tnodekind292020) 22));
if (!(LOC8)) goto LA9;
LOC8 = (((*a0).kindU.S3.strval) && ((*a0).kindU.S3.strval)->Sup.len == 0);
LA9: ;
if (!LOC8) goto LA10;
/* a is "" literal: compare only b's length against zero. */
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&x0));
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468((&x0));
LOC12[1] = lenfield_539305_839829468(p0);
LOC13 = (Ropeobj178006*)0;
LOC13 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC12, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC13, ((Tstorageloc292812) 0));
}
goto LA1;
LA10: ;
{
NIM_BOOL LOC15;
TY532811 LOC19;
Ropeobj178006* LOC20;
LOC15 = (NIM_BOOL)0;
LOC15 = ((*b0).kind >= ((Tnodekind292020) 20) && (*b0).kind <= ((Tnodekind292020) 22));
if (!(LOC15)) goto LA16;
LOC15 = (((*b0).kindU.S3.strval) && ((*b0).kindU.S3.strval)->Sup.len == 0);
LA16: ;
if (!LOC15) goto LA17;
/* b is "" literal: symmetric case, test a's length. */
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&x0));
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((&x0));
LOC19[1] = lenfield_539305_839829468(p0);
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC19, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC20, ((Tstorageloc292812) 0));
}
goto LA1;
LA17: ;
{
/* General case: full runtime string comparison template. */
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_401));
}
LA1: ;
}
/* Machine-generated (Nim compiler C backend). Emits `isNil(x)`:
 * closures (proc type, calling convention 8 == closure — confirm vs.
 * Nim's TCallingConvention) need a two-field test, everything else a
 * simple pointer-vs-nil template. Do not hand-edit. */
N_NIMCALL(void, genisnil_552620_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* t0;
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*t0).kind == ((Ttypekind292244) 25));
if (!(LOC3)) goto LA4;
LOC3 = ((*t0).callconv == ((Tcallingconvention292002) 8));
LA4: ;
if (!LOC3) goto LA5;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_404));
}
goto LA1;
LA5: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_405));
}
LA1: ;
}
/* Machine-generated (Nim compiler C backend). Emits the `$` (stringify)
 * magic: evaluates the argument, wraps it in the conversion routine
 * named by frmt0, allocates a temp destination if the caller supplied
 * none (d0->k == locNone, kind 0), then assigns. Do not hand-edit. */
N_NIMCALL(void, gendollar_555391_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
TY178507 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&a0));
a0.r = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA4;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA4: ;
genassignment_539264_839829468(p0, (&(*d0)), (&a0), 0);
gcusage_554439_839829468(n0);
}
/* Machine-generated (Nim compiler C backend). Builds the rope (C
 * expression text) for an `of` runtime type test of expression a0
 * against type dest0. Fast path: a direct type-info comparison
 * template. Slow path: emits a per-site cache variable (file section 9,
 * presumably the cache-vars section) and a cached isObj call.
 * Do not hand-edit. */
N_NIMCALL(Ropeobj178006*, genofhelper_555140_839829468)(Tcproc529021* p0, Ttype292840* dest0, Ropeobj178006* a0) {
Ropeobj178006* result0;
Ropeobj178006* ti0;
result0 = (Ropeobj178006*)0;
ti0 = gentypeinfo_535941_839829468((*p0).module, dest0);
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY532811 LOC9;
LOC3 = (NIM_BOOL)0;
/* Type flag 2 set, OR (codegen flag 5 AND not type flag 5) — looks
 * like a "final / no caching needed" condition; confirm vs. Nim source. */
LOC3 = (((*dest0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*(*p0).module).flags &(1U<<((NU)(((Codegenflag529025) 5))&7U)))!=0);
if (!(LOC5)) goto LA6;
LOC5 = !((((*dest0).flags &(1U<<((NU)(((Ttypeflag292431) 5))&31U)))!=0));
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = a0;
LOC9[1] = ti0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_414), LOC9, 2);
}
goto LA1;
LA7: ;
{
Ropeobj178006* LOC11;
Ropeobj178006* cache0;
Ropeobj178006* LOC12;
TY178507 LOC13;
TY535238 LOC14;
LOC11 = (Ropeobj178006*)0;
/* Pull in the runtime helper symbol (cgsym) before using it. */
LOC11 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_129));
(*(*p0).module).labels += ((NI) 1);
LOC12 = (Ropeobj178006*)0;
LOC12 = rope_178401_2381377266(((NI64) ((*(*p0).module).labels)));
cache0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_415), LOC12);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = cache0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_416), LOC13, 1);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = a0;
LOC14[1] = ti0;
LOC14[2] = cache0;
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_417), LOC14, 3);
}
LA1: ;
return result0;
}
/* Machine-generated (Nim compiler C backend). Emits `x of typ`:
 *  - walks ref/ptr layers of x's type (kind bitset 14680064), recording
 *    a nil-check expression and dereferencing r0 as needed;
 *  - in non-compat mode, also unwraps object inheritance headers
 *    (type kind 17 with a base in sons[0]) by appending a field access;
 *  - errors if the object has no type field (can't do a runtime test);
 *  - finally wraps genofhelper in a nil-guard when nilcheck0 was set,
 *    and stores a bool-typed result.
 * Control flow is lowered gotos; do not hand-edit. */
N_NIMCALL(void, genof_555201_839829468)(Tcproc529021* p0, Tnode292802* x0, Ttype292840* typ0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* dest0;
Ropeobj178006* r0;
Ropeobj178006* nilcheck0;
Ttype292840* t0;
Ttype292840* LOC41;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, x0, (&a0));
dest0 = skiptypes_296099_850551059(typ0, IL64(211106247256320));
r0 = rdloc_538188_839829468((&a0));
nilcheck0 = NIM_NIL;
t0 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
{
/* Loop: strip ref/ptr/var-like layers (kind in bitset 14680064). */
while (1) {
Ttype292840* LOC16;
if (!((14680064 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0)) goto LA2;
{
if (!!(((*t0).kind == ((Ttypekind292244) 23)))) goto LA5;
nilcheck0 = r0;
}
LA5: ;
{
NIM_BOOL LOC9;
NIM_BOOL LOC11;
TY178507 LOC15;
LOC9 = (NIM_BOOL)0;
LOC9 = !(((*t0).kind == ((Ttypekind292244) 23)));
if (LOC9) goto LA10;
LOC11 = (NIM_BOOL)0;
LOC11 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC11) goto LA12;
LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA12: ;
LOC9 = !(LOC11);
LA10: ;
if (!LOC9) goto LA13;
/* Wrap r0 in a dereference template. */
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = r0;
r0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC15, 1);
}
LA13: ;
LOC16 = (Ttype292840*)0;
LOC16 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC16, IL64(211106232576256));
} LA2: ;
}
{
NIM_BOOL LOC19;
LOC19 = (NIM_BOOL)0;
LOC19 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC19) goto LA20;
LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA20: ;
if (!!(LOC19)) goto LA21;
{
/* Unwrap object base headers: while t0 is an object (kind 17)
 * with a non-nil base type, append a superclass field access. */
while (1) {
NIM_BOOL LOC25;
TY533289 LOC27;
Ropeobj178006* LOC28;
LOC25 = (NIM_BOOL)0;
LOC25 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC25)) goto LA26;
LOC25 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
LA26: ;
if (!LOC25) goto LA24;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_153), LOC27, 0);
add_178482_2381377266(&r0, LOC28);
t0 = skiptypes_296099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360));
} LA24: ;
}
}
LA21: ;
{
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = isobjlackingtypefield_533515_839829468(t0);
if (!LOC31) goto LA32;
/* No m_type field => runtime `of` is impossible: hard error. */
globalerror_196071_155036129((*x0).info, ((Tmsgkind191002) 4), ((NimStringDesc*) &T839829468_412));
}
LA32: ;
{
TY532811 LOC38;
if (!!((nilcheck0 == NIM_NIL))) goto LA36;
/* Guard the type test behind the recorded nil check. */
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = nilcheck0;
LOC38[1] = genofhelper_555140_839829468(p0, dest0, r0);
r0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_413), LOC38, 2);
}
goto LA34;
LA36: ;
{
TY178507 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = genofhelper_555140_839829468(p0, dest0, r0);
r0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_418), LOC40, 1);
}
LA34: ;
/* Result type is system bool (type kind 1). */
LOC41 = (Ttype292840*)0;
LOC41 = getsystype_338150_3937434831(((Ttypekind292244) 1));
putintodest_550468_839829468(p0, d0, LOC41, r0, a0.s);
}
/* Machine-generated (Nim compiler C backend). Node-level wrapper for
 * `of`: forwards sons[1] (the value) and sons[2]'s type (the target
 * type) to the worker above. Do not hand-edit. */
N_NIMCALL(void, genof_555331_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
genof_555201_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (*(*n0).kindU.S6.sons->data[((NI) 2)]).typ, d0);
}
/* Machine-generated (Nim compiler C backend). Emits allocation of a
 * ref object (`new`): defaults the size expression to sizeof(elem type)
 * when none was supplied, then either emits an unref + asgnRef pair
 * (native GC + GC'd storage; chooses the cycle-aware unref when the
 * type can form a cycle) or a plain newObj + assignment, and finally
 * emits the object's type-field initialization. Do not hand-edit. */
N_NIMCALL(void, rawgennew_554741_839829468)(Tcproc529021* p0, Tloc292816* a0, Ropeobj178006* sizeexpr_554745_839829468) {
Ropeobj178006* sizeexpr0;
Ttype292840* reftype0;
Tloc292816 b0;
TY535238 args0;
Ttype292840* bt0;
sizeexpr0 = sizeexpr_554745_839829468;
reftype0 = skiptypes_296099_850551059((*a0).t, IL64(211106242013440));
memset((void*)(&b0), 0, sizeof(b0));
initloc_532273_839829468((&b0), ((Tlockind292808) 6), (*a0).t, ((Tstorageloc292812) 3));
{
TY178507 LOC5;
Ttype292840* LOC6;
/* NOTE(review): `!sizeexpr0 == 0` parses as `(!sizeexpr0) == 0`,
 * i.e. "skip when sizeexpr0 != NULL" — behaviorally correct (the
 * default-size block runs only for a NULL sizeexpr), just oddly
 * spelled by the generator. Leave as generated. */
if (!sizeexpr0 == 0) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832));
LOC5[0] = gettypedesc_535673_839829468((*p0).module, LOC6);
sizeexpr0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_419), LOC5, 1);
}
LA3: ;
memset((void*)args0, 0, sizeof(args0));
args0[0] = gettypedesc_535673_839829468((*p0).module, reftype0);
args0[1] = gentypeinfo_535941_839829468((*p0).module, reftype0);
args0[2] = sizeexpr0;
{
NIM_BOOL LOC9;
TY532811 LOC21;
LOC9 = (NIM_BOOL)0;
/* GC'd storage (storage loc 3) and a native GC => must unref the
 * old value before overwriting. */
LOC9 = ((*a0).s == ((Tstorageloc292812) 3));
if (!(LOC9)) goto LA10;
LOC9 = usesnativegc_169177_2607990831();
LA10: ;
if (!LOC9) goto LA11;
{
NIM_BOOL LOC15;
TY178507 LOC18;
LOC15 = (NIM_BOOL)0;
LOC15 = canformacycle_320123_3876443242((*a0).t);
if (!LOC15) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(a0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_420), LOC18, 1);
}
goto LA13;
LA16: ;
{
TY178507 LOC20;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rdloc_538188_839829468(a0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC20, 1);
}
LA13: ;
b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_421), args0, 3);
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdloc_538188_839829468(a0);
LOC21[1] = rdloc_538188_839829468((&b0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC21, 2);
}
goto LA7;
LA11: ;
{
b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_422), args0, 3);
genassignment_539264_839829468(p0, a0, (&b0), 0);
}
LA7: ;
bt0 = skiptypes_296099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832));
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), bt0, a0, NIM_FALSE);
}
/* Machine-generated (Nim compiler C backend). Emits the `new` magic:
 * with three sons the second argument is an explicit size expression
 * (presumably `unsafeNew`-style); otherwise delegates with a nil size
 * so rawgennew computes sizeof(elem). Do not hand-edit. */
N_NIMCALL(void, gennew_554782_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
{
NI LOC3;
Tloc292816 se0;
Ropeobj178006* LOC6;
LOC3 = (NI)0;
LOC3 = len_293081_850551059(e0);
if (!(LOC3 == ((NI) 3))) goto LA4;
memset((void*)(&se0), 0, sizeof(se0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&se0));
LOC6 = (Ropeobj178006*)0;
LOC6 = rdloc_538188_839829468((&se0));
rawgennew_554741_839829468(p0, (&a0), LOC6);
}
goto LA1;
LA4: ;
{
rawgennew_554741_839829468(p0, (&a0), NIM_NIL);
}
LA1: ;
gcusage_554439_839829468(e0);
}
/* Machine-generated (Nim compiler C backend). Emits `new(x, finalizer)`:
 * registers the finalizer against the type info (appended to file
 * section 14, presumably the type-init section), emits a newObj call
 * that carries the finalizer-aware type info, assigns it, and runs the
 * object's type-field init. Do not hand-edit. */
N_NIMCALL(void, gennewfinalize_555111_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 f0;
Ttype292840* reftype0;
Ttype292840* bt0;
Ropeobj178006* ti0;
TY532811 LOC1;
TY535238 LOC2;
Ttype292840* LOC3;
Ttype292840* LOC4;
Ttype292840* LOC5;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&f0), 0, sizeof(f0));
reftype0 = (Ttype292840*)0;
bt0 = (Ttype292840*)0;
ti0 = (Ropeobj178006*)0;
reftype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
/* sons[1] = destination ref, sons[2] = finalizer proc. */
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&f0));
initloc_532273_839829468((&b0), ((Tlockind292808) 6), a0.t, ((Tstorageloc292812) 3));
ti0 = gentypeinfo_535941_839829468((*p0).module, reftype0);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = ti0;
LOC1[1] = rdloc_538188_839829468((&f0));
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_423), LOC1, 2);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = gettypedesc_535673_839829468((*p0).module, reftype0);
LOC2[1] = ti0;
LOC3 = (Ttype292840*)0;
LOC3 = lastson_295377_850551059(reftype0);
LOC4 = (Ttype292840*)0;
LOC4 = skiptypes_296099_850551059(LOC3, IL64(211106233624832));
LOC2[2] = gettypedesc_535673_839829468((*p0).module, LOC4);
b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_424), LOC2, 3);
genassignment_539264_839829468(p0, (&a0), (&b0), 0);
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(reftype0);
bt0 = skiptypes_296099_850551059(LOC5, IL64(211106233624832));
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), bt0, (&a0), NIM_FALSE);
gcusage_554439_839829468(e0);
}
/* Machine-generated (Nim compiler C backend). Worker for `newSeq`:
 * builds a newSeq runtime call with (seq type, type info, length) and
 * stores it into dest — same GC'd-storage unref/asgnRef split as
 * rawgennew above. Do not hand-edit. */
N_NIMCALL(void, gennewseqaux_554795_839829468)(Tcproc529021* p0, Tloc292816* dest0, Ropeobj178006* length0) {
Ttype292840* seqtype0;
TY535238 args0;
Tloc292816 call0;
seqtype0 = skiptypes_296099_850551059((*dest0).t, IL64(211106242013440));
memset((void*)args0, 0, sizeof(args0));
args0[0] = gettypedesc_535673_839829468((*p0).module, seqtype0);
args0[1] = gentypeinfo_535941_839829468((*p0).module, seqtype0);
args0[2] = length0;
memset((void*)(&call0), 0, sizeof(call0));
initloc_532273_839829468((&call0), ((Tlockind292808) 6), (*dest0).t, ((Tstorageloc292812) 3));
{
NIM_BOOL LOC3;
TY532811 LOC15;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*dest0).s == ((Tstorageloc292812) 3));
if (!(LOC3)) goto LA4;
LOC3 = usesnativegc_169177_2607990831();
LA4: ;
if (!LOC3) goto LA5;
{
NIM_BOOL LOC9;
TY178507 LOC12;
LOC9 = (NIM_BOOL)0;
/* Cycle-capable element type => cycle-aware unref template. */
LOC9 = canformacycle_320123_3876443242((*dest0).t);
if (!LOC9) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_420), LOC12, 1);
}
goto LA7;
LA10: ;
{
TY178507 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC14, 1);
}
LA7: ;
call0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_425), args0, 3);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = rdloc_538188_839829468(dest0);
LOC15[1] = rdloc_538188_839829468((&call0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC15, 2);
}
goto LA1;
LA5: ;
{
call0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_426), args0, 3);
genassignment_539264_839829468(p0, dest0, (&call0), 0);
}
LA1: ;
}
/* Machine-generated (Nim compiler C backend). Emits `newSeq(s, len)`:
 * evaluates destination (sons[1]) and length (sons[2]) and delegates
 * to the worker above. Do not hand-edit. */
N_NIMCALL(void, gennewseq_554824_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
Tloc292816 b0;
Ropeobj178006* LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
LOC1 = (Ropeobj178006*)0;
LOC1 = rdloc_538188_839829468((&b0));
gennewseqaux_554795_839829468(p0, (&a0), LOC1);
gcusage_554439_839829468(e0);
}
/* Machine-generated (Nim compiler C backend). Emits `newSeqOfCap(cap)`:
 * a value-returning runtime call with (seq type, type info, capacity),
 * stored into the destination. Do not hand-edit. */
N_NIMCALL(void, gennewseqofcap_554836_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* seqtype0;
Tloc292816 a0;
TY535238 LOC1;
Ropeobj178006* LOC2;
seqtype0 = skiptypes_296099_850551059((*e0).typ, IL64(211106242013440));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = gettypedesc_535673_839829468((*p0).module, seqtype0);
LOC1[1] = gentypeinfo_535941_839829468((*p0).module, seqtype0);
LOC1[2] = rdloc_538188_839829468((&a0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_427), LOC1, 3);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
gcusage_554439_839829468(e0);
}
/* Machine-generated (Nim compiler C backend). Produces (and emits into
 * file section 3, presumably type declarations) a fresh C typedef for a
 * closure/proc type: either a function-pointer typedef with a calling
 * convention prefix, or a struct-style typedef for the full closure,
 * depending on callconv and the requested kind. Returns the generated
 * type name. Do not hand-edit. */
N_NIMCALL(Ropeobj178006*, getclosuretype_535685_839829468)(Tcgen529027* m0, Ttype292840* t0, Tclosuretypekind535681 kind0) {
Ropeobj178006* result0;
Intset268030 check0;
Ropeobj178006* rettype0;
Ropeobj178006* desc0;
result0 = (Ropeobj178006*)0;
/* NOTE(review): check0 is memset twice with a chckNil between — a
 * generator artifact; harmless but redundant. Left as generated. */
memset((void*)(&check0), 0, sizeof(check0));
chckNil((void*)(&check0));
memset((void*)(&check0), 0, sizeof(check0));
initintset_268885_2627731572((&check0));
result0 = gettempname_533598_839829468(m0);
rettype0 = (Ropeobj178006*)0;
desc0 = (Ropeobj178006*)0;
genprocparams_534115_839829468(m0, t0, &rettype0, &desc0, (&check0), !((kind0 == ((Tclosuretypekind535681) 0))), NIM_FALSE);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isimportedtype_533451_839829468(t0);
if (!!(LOC3)) goto LA4;
{
NIM_BOOL LOC8;
TY535235 LOC12;
LOC8 = (NIM_BOOL)0;
/* Non-closure callconv, or kind != 2: plain function pointer. */
LOC8 = !(((*t0).callconv == ((Tcallingconvention292002) 8)));
if (LOC8) goto LA9;
LOC8 = !((kind0 == ((Tclosuretypekind535681) 2)));
LA9: ;
if (!LOC8) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178277_2381377266(Callingconvtostr_533587_839829468[((*t0).callconv)- 0]);
LOC12[1] = rettype0;
LOC12[2] = result0;
LOC12[3] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC12, 4);
}
goto LA6;
LA10: ;
{
TY535238 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = result0;
LOC14[1] = rettype0;
LOC14[2] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC14, 3);
}
LA6: ;
}
LA4: ;
return result0;
}
/* Machine-generated (Nim compiler C backend). Emits a `cast[T](x)`:
 *  - for value types in a certain kind bitset whose source loc is
 *    addressable (flag 0 clear), reinterpret through a pointer cast of
 *    the address (bit-pattern cast);
 *  - for closure proc types, cast through the generated closure type;
 *  - otherwise, a plain C-style value cast.
 * Do not hand-edit. */
N_NIMCALL(void, gensomecast_556481_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* etyp0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
etyp0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
TY532811 LOC7;
Ropeobj178006* LOC8;
LOC3 = (NIM_BOOL)0;
LOC3 = ((IL64(281475111387152) &((NU64)1<<((NU)((*etyp0).kind)&63U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0));
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535673_839829468((*p0).module, (*e0).typ);
LOC7[1] = addrloc_538204_839829468((&a0));
LOC8 = (Ropeobj178006*)0;
LOC8 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_429), LOC7, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC8, a0.s);
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC10;
TY532811 LOC14;
Ropeobj178006* LOC15;
LOC10 = (NIM_BOOL)0;
LOC10 = ((*etyp0).kind == ((Ttypekind292244) 25));
if (!(LOC10)) goto LA11;
LOC10 = ((*etyp0).callconv == ((Tcallingconvention292002) 8));
LA11: ;
if (!LOC10) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = getclosuretype_535685_839829468((*p0).module, etyp0, ((Tclosuretypekind535681) 1));
LOC14[1] = rdcharloc_538227_839829468((&a0));
LOC15 = (Ropeobj178006*)0;
LOC15 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC14, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC15, a0.s);
}
goto LA1;
LA12: ;
{
TY532811 LOC17;
Ropeobj178006* LOC18;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535673_839829468((*p0).module, (*e0).typ);
LOC17[1] = rdcharloc_538227_839829468((&a0));
LOC18 = (Ropeobj178006*)0;
LOC18 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC17, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC18, a0.s);
}
LA1: ;
}
/* Machine-generated (Nim compiler C backend). Like binaryexpr but
 * unary, and reads the operand with rdcharloc (presumably applies a
 * char/NU8 cast where needed). Do not hand-edit. */
N_NIMCALL(void, unaryexprchar_551222_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
TY178507 LOC1;
Ropeobj178006* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdcharloc_538227_839829468((&a0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* Machine-generated (Nim compiler C backend). `ord(x)`: a fixed
 * unary-char template application. Do not hand-edit. */
N_NIMCALL(void, genord_556475_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_301));
}
/* Machine-generated (Nim compiler C backend). Emits `len`/`high` (op0
 * distinguishes them; magic 8 looks like `high`) for the argument's
 * type:
 *  - openarray-like kinds (27, 48): template on the hidden length param;
 *  - cstring (29): strlen via <string.h>;
 *  - string/seq (28, 24): nil-safe or plain ->len templates depending
 *    on compat mode;
 *  - array/range-like kinds (16, 4): compile-time constant from
 *    lastOrd/lengthOrd;
 *  - anything else: internal error.
 * An initial deref node (kind 64) is skipped first. Do not hand-edit. */
N_NIMCALL(void, genarraylen_555415_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tnode292802* a0;
Ttype292840* typ0;
a0 = (*e0).kindU.S6.sons->data[((NI) 1)];
{
if (!((*a0).kind == ((Tnodekind292020) 64))) goto LA3;
a0 = (*a0).kindU.S6.sons->data[((NI) 0)];
}
LA3: ;
typ0 = skiptypes_296099_850551059((*a0).typ, IL64(211106240964864));
switch ((*typ0).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
{
if (!(op0 == ((Tmagic292524) 8))) goto LA8;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_431));
}
goto LA6;
LA8: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_432));
}
LA6: ;
}
break;
case ((Ttypekind292244) 29):
{
/* cstring: pull in <string.h> for strlen. */
usestringh_532345_839829468((*p0).module);
{
if (!(op0 == ((Tmagic292524) 8))) goto LA14;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_433));
}
goto LA12;
LA14: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_434));
}
LA12: ;
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC20) goto LA21;
LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA21: ;
if (!!(LOC20)) goto LA22;
{
if (!(op0 == ((Tmagic292524) 8))) goto LA26;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_435));
}
goto LA24;
LA26: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_436));
}
LA24: ;
}
goto LA18;
LA22: ;
{
{
if (!(op0 == ((Tmagic292524) 8))) goto LA32;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_437));
}
goto LA30;
LA32: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_438));
}
LA30: ;
}
LA18: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
{
NI64 LOC40;
Ropeobj178006* LOC41;
if (!(op0 == ((Tmagic292524) 8))) goto LA38;
/* high(array): emit the constant lastOrd(typ). */
LOC40 = (NI64)0;
LOC40 = lastord_320004_3876443242(typ0);
LOC41 = (Ropeobj178006*)0;
LOC41 = rope_178401_2381377266(LOC40);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC41, ((Tstorageloc292812) 0));
}
goto LA36;
LA38: ;
{
NI64 LOC43;
Ropeobj178006* LOC44;
/* len(array): emit the constant lengthOrd(typ). */
LOC43 = (NI64)0;
LOC43 = lengthord_320007_3876443242(typ0);
LOC44 = (Ropeobj178006*)0;
LOC44 = rope_178401_2381377266(LOC43);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC44, ((Tstorageloc292812) 0));
}
LA36: ;
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_439));
}
break;
}
}
/* Machine-generated (Nim compiler C backend). Emits a statement-form
 * unary magic: asserts no destination was requested (d0 must be locNone,
 * kind 0 — internal error otherwise), then substitutes the operand into
 * frmt0 as a line in the proc body. Do not hand-edit. */
N_NIMCALL(void, unarystmt_550527_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
TY178507 LOC5;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_442));
}
LA3: ;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&a0));
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC5, 1);
}
/* Machine-generated (Nim compiler C backend). `setLen(string, n)`:
 * a fixed binary-statement template plus GC accounting. Do not hand-edit. */
N_NIMCALL(void, gensetlengthstr_555632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
binarystmt_550501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_445));
gcusage_554439_839829468(e0);
}
/* Machine-generated (Nim compiler C backend). `setLen(seq, n)`:
 * chooses one of two setLengthSeq templates by the same
 * compat/GC-mode test used elsewhere, then emits it with
 * (seq, new length, seq type, element type). Do not hand-edit. */
N_NIMCALL(void, gensetlengthseq_555500_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* t0;
NimStringDesc* setlenpattern0;
TY535235 LOC8;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
setlenpattern0 = copyString(((NimStringDesc*) &T839829468_446));
}
goto LA1;
LA5: ;
{
setlenpattern0 = copyString(((NimStringDesc*) &T839829468_447));
}
LA1: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468((&a0));
LOC8[1] = rdloc_538188_839829468((&b0));
LOC8[2] = gettypedesc_535673_839829468((*p0).module, t0);
LOC8[3] = gettypedesc_535673_839829468((*p0).module, (*t0).sons->data[((NI) 0)]);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), setlenpattern0, LOC8, 4);
gcusage_554439_839829468(e0);
}
/* Machine-generated (Nim compiler C backend). Reads a set-element
 * location, rebasing it by subtracting firstOrd(setType) when the set's
 * base range does not start at 0 (so bit indices stay within the C
 * bitset representation). Do not hand-edit. */
N_NIMCALL(Ropeobj178006*, rdsetelemloc_555662_839829468)(Tloc292816* a0, Ttype292840* settype0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = rdcharloc_538227_839829468(a0);
{
NI64 LOC3;
TY532811 LOC6;
NI64 LOC7;
LOC3 = (NI64)0;
LOC3 = firstord_320001_3876443242(settype0);
if (!!((LOC3 == IL64(0)))) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = result0;
LOC7 = (NI64)0;
LOC7 = firstord_320001_3876443242(settype0);
LOC6[1] = rope_178401_2381377266(LOC7);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_448), LOC6, 2);
}
LA4: ;
return result0;
}
/* Machine-generated (Nim compiler C backend). Statement-form set
 * incl/excl: substitutes (set, rebased element) into frmt0 as a body
 * line. Do not hand-edit. */
N_NIMCALL(void, binarystmtinexcl_555858_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&a0));
LOC1[1] = rdsetelemloc_555662_839829468((&b0), a0.t);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC1, 2);
}
/* Machine-generated (Nim compiler C backend). Binary expression helper
 * that reads both operands with rdcharloc (char-cast variant of
 * binaryexpr above). Do not hand-edit. */
N_NIMCALL(void, binaryexprchar_550809_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC1;
Ropeobj178006* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdcharloc_538227_839829468((&a0));
LOC1[1] = rdcharloc_538227_839829468((&b0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* Machine-generated (Nim compiler C backend). Decides whether an `in`
 * test on a set constructor (node kind 39, presumably nkCurly) should
 * be compiled as a chain of comparisons instead of a real bitset:
 *  - small (<= int size) constant set (flag 4): no, use the bitset;
 *  - large element types (kind in bitset 62277025792): yes, must use
 *    comparisons;
 *  - otherwise: yes if at most 8 elements. Do not hand-edit. */
N_NIMCALL(NIM_BOOL, fewcmps_555803_839829468)(Tnode292802* s0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
if (!!(((*s0).kind == ((Tnodekind292020) 39)))) goto LA3;
internalerror_196100_155036129((*s0).info, ((NimStringDesc*) &T839829468_463));
}
LA3: ;
{
NIM_BOOL LOC7;
NI64 LOC8;
LOC7 = (NIM_BOOL)0;
LOC8 = (NI64)0;
LOC8 = getsize_320135_3876443242((*s0).typ);
LOC7 = (LOC8 <= ((NI64) (intsize_176641_4151366050)));
if (!(LOC7)) goto LA9;
LOC7 = (((*s0).flags &(1U<<((NU)(((Tnodeflag292427) 4))&15U)))!=0);
LA9: ;
if (!LOC7) goto LA10;
result0 = NIM_FALSE;
}
goto LA5;
LA10: ;
{
Ttype292840* LOC13;
LOC13 = (Ttype292840*)0;
LOC13 = elemtype_320394_3876443242((*s0).typ);
if (!((IL64(62277025792) &((NU64)1<<((NU)((*LOC13).kind)&63U)))!=0)) goto LA14;
result0 = NIM_TRUE;
}
goto LA5;
LA14: ;
{
NI LOC17;
LOC17 = (NI)0;
LOC17 = sonslen_295351_850551059(s0);
result0 = (LOC17 <= ((NI) 8));
}
LA5: ;
return result0;
}
/* Machine-generated (Nim compiler C backend). Emits a set-membership
 * expression from already-evaluated locs: substitutes (set, rebased
 * element) into frmt0 and stores the bool result. Do not hand-edit. */
N_NIMCALL(void, binaryexprin_555837_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0, NimStringDesc* frmt0) {
TY532811 LOC1;
Ropeobj178006* LOC2;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&(*a0)));
LOC1[1] = rdsetelemloc_555662_839829468((&(*b0)), (*a0).t);
LOC2 = (Ropeobj178006*)0;
LOC2 = HEX25_178905_2381377266(frmt0, LOC1, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* Machine-generated (Nim compiler C backend). Dispatches `in` on the
 * set's byte size: sizes 1/2/4/8 use a direct bit-test template on an
 * integer-typed set; any other size falls back to the array-backed
 * bitset template. Do not hand-edit. */
N_NIMCALL(void, geninexpraux_553496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0) {
Ttype292840* LOC1;
NI64 LOC2;
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC2 = (NI64)0;
LOC2 = getsize_320135_3876443242(LOC1);
switch (((NI) (LOC2))) {
case ((NI) 1):
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_467));
}
break;
case ((NI) 2):
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_468));
}
break;
case ((NI) 4):
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_469));
}
break;
case ((NI) 8):
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_470));
}
break;
default:
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_471));
}
break;
}
}
/* Machine-generated C (Nim backend); code left byte-identical, comments added.
 * Generates code for the set `in` operator. If the right operand (son 1) is a
 * set literal (kind 39) that fewcmps approves, the membership test is emitted
 * as a chain of direct comparisons/range checks built into rope b0; otherwise
 * it falls back to the bitset path via geninexpraux. Presumably mirrors
 * `genInOp` in the Nim compiler — TODO confirm against the Nim sources. */
N_NIMCALL(void, geninop_556009_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 x0;
Tloc292816 y0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&x0), 0, sizeof(x0));
memset((void*)(&y0), 0, sizeof(y0));
{
NIM_BOOL LOC3;
Tnode292802* ea0;
NI length0;
/* Fast path only when son 1 is a set literal AND fewcmps says a comparison
 * chain is cheaper than a bitset lookup. */
LOC3 = (NIM_BOOL)0;
LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 39));
if (!(LOC3)) goto LA4;
LOC3 = fewcmps_555803_839829468((*e0).kindU.S6.sons->data[((NI) 1)]);
LA4: ;
if (!LOC3) goto LA5;
{
/* If the tested element (son 2) is wrapped in a conversion node
 * (kind 69/70), unwrap it so the raw expression is compared. */
if (!((*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 70) || (*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 69))) goto LA9;
ea0 = (*(*e0).kindU.S6.sons->data[((NI) 2)]).kindU.S6.sons->data[((NI) 0)];
}
goto LA7;
LA9: ;
{
ea0 = (*e0).kindU.S6.sons->data[((NI) 2)];
}
LA7: ;
initlocexpr_539283_839829468(p0, ea0, (&a0));
/* b0 becomes an expression-kind loc whose rope accumulates the comparison
 * chain, opened with T..._118 and closed with T..._117 below. */
initloc_532273_839829468((&b0), ((Tlockind292808) 6), (*e0).typ, ((Tstorageloc292812) 0));
b0.r = rope_178277_2381377266(((NimStringDesc*) &T839829468_118));
length0 = sonslen_295351_850551059((*e0).kindU.S6.sons->data[((NI) 1)]);
{
NI i_556061_839829468;
NI HEX3Atmp_556412_839829468;
NI res_556415_839829468;
i_556061_839829468 = (NI)0;
HEX3Atmp_556412_839829468 = (NI)0;
HEX3Atmp_556412_839829468 = (NI)(length0 - ((NI) 1));
res_556415_839829468 = ((NI) 0);
{
/* For each element of the set literal: emit a range check (format
 * T..._464) for range nodes (kind 44), a plain equality (T..._465)
 * otherwise, joined by the separator T..._466. */
while (1) {
if (!(res_556415_839829468 <= HEX3Atmp_556412_839829468)) goto LA14;
i_556061_839829468 = res_556415_839829468;
{
TY535238 LOC19;
if (!((*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kind == ((Tnodekind292020) 44))) goto LA17;
initlocexpr_539283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0));
initlocexpr_539283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0));
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdcharloc_538227_839829468((&a0));
LOC19[1] = rdcharloc_538227_839829468((&x0));
LOC19[2] = rdcharloc_538227_839829468((&y0));
addf_179205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_464), LOC19, 3);
}
goto LA15;
LA17: ;
{
TY532811 LOC21;
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468], (&x0));
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdcharloc_538227_839829468((&a0));
LOC21[1] = rdcharloc_538227_839829468((&x0));
addf_179205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_465), LOC21, 2);
}
LA15: ;
{
/* Separator between comparisons, skipped after the last element. */
if (!(i_556061_839829468 < (NI)(length0 - ((NI) 1)))) goto LA24;
add_178487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_466));
}
LA24: ;
res_556415_839829468 += ((NI) 1);
} LA14: ;
}
}
add_178487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_117));
putintodest_550468_839829468(p0, d0, (*e0).typ, b0.r, ((Tstorageloc292812) 0));
}
goto LA1;
LA5: ;
{
/* Slow path: evaluate both operands and emit the size-dispatched bit test. */
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
geninexpraux_553496_839829468(p0, e0, (&a0), (&b0), d0);
}
LA1: ;
}
/* Machine-generated C (Nim backend); code left byte-identical, comments added.
 * Generates code for a set operation selected by the magic `op0`. Dispatches
 * first on the set's byte size: sets of 1/2/4/8 bytes are represented as a
 * single machine word (bit-twiddling templates), anything larger as a byte
 * array (loops / memcmp-style templates). Presumably mirrors `genSetOp` in
 * the Nim compiler — TODO confirm against the Nim sources. */
N_NIMCALL(void, gensetop_556419_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 i0;
Ttype292840* settype0;
NI size0;
NI64 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&i0), 0, sizeof(i0));
settype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC1 = (NI64)0;
LOC1 = getsize_320135_3876443242(settype0);
size0 = ((NI) (LOC1));
switch (size0) {
/* --- word-sized sets: 1, 2, 4 or 8 bytes --- */
case ((NI) 1):
case ((NI) 2):
case ((NI) 4):
case ((NI) 8):
{
switch (op0) {
case ((Tmagic292524) 39):
{
/* incl: build "NU<bits>" type name, splice it twice into the
 * include-bit template (T..._449/450/451). */
NimStringDesc* ts0;
NimStringDesc* LOC4;
NimStringDesc* LOC5;
NimStringDesc* LOC6;
LOC4 = (NimStringDesc*)0;
LOC5 = (NimStringDesc*)0;
LOC5 = nimIntToStr((NI)(size0 * ((NI) 8)));
LOC4 = rawNewString(LOC5->Sup.len + 2);
appendString(LOC4, ((NimStringDesc*) &T839829468_45));
appendString(LOC4, LOC5);
ts0 = LOC4;
LOC6 = (NimStringDesc*)0;
LOC6 = rawNewString(ts0->Sup.len + ts0->Sup.len + 35);
appendString(LOC6, ((NimStringDesc*) &T839829468_449));
appendString(LOC6, ts0);
appendString(LOC6, ((NimStringDesc*) &T839829468_450));
appendString(LOC6, ts0);
appendString(LOC6, ((NimStringDesc*) &T839829468_451));
binarystmtinexcl_555858_839829468(p0, e0, d0, LOC6);
}
break;
case ((Tmagic292524) 40):
{
/* excl: same construction with the exclude-bit template (T..._452..454). */
NimStringDesc* ts0;
NimStringDesc* LOC8;
NimStringDesc* LOC9;
NimStringDesc* LOC10;
LOC8 = (NimStringDesc*)0;
LOC9 = (NimStringDesc*)0;
LOC9 = nimIntToStr((NI)(size0 * ((NI) 8)));
LOC8 = rawNewString(LOC9->Sup.len + 2);
appendString(LOC8, ((NimStringDesc*) &T839829468_45));
appendString(LOC8, LOC9);
ts0 = LOC8;
LOC10 = (NimStringDesc*)0;
LOC10 = rawNewString(ts0->Sup.len + ts0->Sup.len + 42);
appendString(LOC10, ((NimStringDesc*) &T839829468_452));
appendString(LOC10, ts0);
appendString(LOC10, ((NimStringDesc*) &T839829468_453));
appendString(LOC10, ts0);
appendString(LOC10, ((NimStringDesc*) &T839829468_454));
binarystmtinexcl_555858_839829468(p0, e0, d0, LOC10);
}
break;
case ((Tmagic292524) 41):
{
/* card: popcount template differs for <=4-byte vs 8-byte sets. */
{
if (!(size0 <= ((NI) 4))) goto LA14;
unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_455));
}
goto LA12;
LA14: ;
{
unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_456));
}
LA12: ;
}
break;
case ((Tmagic292524) 133):
{
/* subset-or-equal test */
binaryexprchar_550809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_457));
}
break;
case ((Tmagic292524) 132):
{
/* proper-subset test */
binaryexprchar_550809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_458));
}
break;
case ((Tmagic292524) 131):
{
/* set equality */
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341));
}
break;
case ((Tmagic292524) 134):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_459));
}
break;
case ((Tmagic292524) 135):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_460));
}
break;
case ((Tmagic292524) 136):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_461));
}
break;
case ((Tmagic292524) 137):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_462));
}
break;
case ((Tmagic292524) 148):
{
/* the `in` operator */
geninop_556009_839829468(p0, e0, d0);
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_472));
}
break;
}
}
break;
/* --- big sets: represented as byte arrays --- */
default:
{
switch (op0) {
case ((Tmagic292524) 39):
{
binarystmtinexcl_555858_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_473));
}
break;
case ((Tmagic292524) 40):
{
binarystmtinexcl_555858_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_474));
}
break;
case ((Tmagic292524) 41):
{
/* card of a big set: template parameterized by the byte count. */
NimStringDesc* LOC30;
NimStringDesc* LOC31;
LOC30 = (NimStringDesc*)0;
LOC31 = (NimStringDesc*)0;
LOC31 = nimIntToStr(size0);
LOC30 = rawNewString(LOC31->Sup.len + 14);
appendString(LOC30, ((NimStringDesc*) &T839829468_475));
appendString(LOC30, LOC31);
appendChar(LOC30, 41);
unaryexprchar_551222_839829468(p0, e0, d0, LOC30);
}
break;
case ((Tmagic292524) 133):
case ((Tmagic292524) 132):
{
/* (proper) subset over byte arrays: needs a loop counter temp (i0),
 * a bool result temp if d0 is empty, and the per-op loop template
 * looked up in lookupopr (indexed from magic 132). */
Ttype292840* LOC33;
TY536475 LOC39;
LOC33 = (Ttype292840*)0;
LOC33 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC33, (&i0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
Ttype292840* LOC38;
if (!((*d0).k == ((Tlockind292808) 0))) goto LA36;
LOC38 = (Ttype292840*)0;
LOC38 = getsystype_338150_3937434831(((Ttypekind292244) 1));
gettemp_537032_839829468(p0, LOC38, d0, NIM_FALSE);
}
LA36: ;
memset((void*)LOC39, 0, sizeof(LOC39));
LOC39[0] = rdloc_538188_839829468((&i0));
LOC39[1] = rope_178401_2381377266(((NI64) (size0)));
LOC39[2] = rdloc_538188_839829468((&(*d0)));
LOC39[3] = rdloc_538188_839829468((&a0));
LOC39[4] = rdloc_538188_839829468((&b0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), lookupopr_556426_839829468[(op0)- 132], LOC39, 5);
}
break;
case ((Tmagic292524) 131):
{
/* equality of big sets: memcmp over `size0` bytes (needs string.h). */
NimStringDesc* LOC41;
NimStringDesc* LOC42;
usestringh_532345_839829468((*p0).module);
LOC41 = (NimStringDesc*)0;
LOC42 = (NimStringDesc*)0;
LOC42 = nimIntToStr(size0);
LOC41 = rawNewString(LOC42->Sup.len + 21);
appendString(LOC41, ((NimStringDesc*) &T839829468_481));
appendString(LOC41, LOC42);
appendString(LOC41, ((NimStringDesc*) &T839829468_482));
binaryexprchar_550809_839829468(p0, e0, d0, LOC41);
}
break;
case ((Tmagic292524) 134):
case ((Tmagic292524) 135):
case ((Tmagic292524) 136):
case ((Tmagic292524) 137):
{
/* element-wise binary set ops (union/intersection/…): shared loop
 * template T..._483 with the per-op operator from lookupopr. */
Ttype292840* LOC44;
TY536847 LOC49;
LOC44 = (Ttype292840*)0;
LOC44 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC44, (&i0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA47;
gettemp_537032_839829468(p0, a0.t, d0, NIM_FALSE);
}
LA47: ;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468((&i0));
LOC49[1] = rope_178401_2381377266(((NI64) (size0)));
LOC49[2] = rdloc_538188_839829468((&(*d0)));
LOC49[3] = rdloc_538188_839829468((&a0));
LOC49[4] = rdloc_538188_839829468((&b0));
LOC49[5] = rope_178277_2381377266(lookupopr_556426_839829468[(op0)- 132]);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_483), LOC49, 6);
}
break;
case ((Tmagic292524) 148):
{
geninop_556009_839829468(p0, e0, d0);
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_484));
}
break;
}
}
break;
}
}
/* Machine-generated C (Nim backend). Renders the first son of `n0` and wraps
 * it in the string-to-cstring conversion template T..._485, returning the
 * resulting rope. */
static N_INLINE(Ropeobj178006*, genargstringtocstring_539776_839829468)(Tcproc529021* p0, Tnode292802* n0) {
	Tloc292816 strloc0;
	TY178507 fmtargs0;
	memset((void*)(&strloc0), 0, sizeof(strloc0));
	initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&strloc0));
	memset((void*)fmtargs0, 0, sizeof(fmtargs0));
	fmtargs0[0] = rdloc_538188_839829468((&strloc0));
	return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_485), fmtargs0, 1);
}
/* Machine-generated C (Nim backend); code left byte-identical, comments added.
 * Produces the "(data, length)" argument pair for passing an expression to an
 * openArray parameter. Two major paths: a slice call (magic 139) which
 * renders base/from/to into a 3-argument template, and the general case which
 * dispatches on the argument's type kind (string/seq/array/ptr…) to pick the
 * right pointer+length template. Presumably mirrors `openArrayLoc` in the
 * Nim compiler — TODO confirm against the Nim sources. */
N_NIMCALL(Ropeobj178006*, openarrayloc_539665_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tloc292816 a0;
Tnode292802* q0;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
q0 = skipconv_328882_3876443242(n0);
{
/* --- slice path: q0 is a call to the slice magic (139) --- */
Tmagic292524 LOC3;
Tloc292816 b0;
Tloc292816 c0;
Tnode292802* LOC6;
Tnode292802* LOC7;
Tnode292802* LOC8;
NimStringDesc* fmt0;
Ttype292840* LOC9;
TY535238 LOC25;
LOC3 = (Tmagic292524)0;
LOC3 = getmagic_318502_2616423590(q0);
if (!(LOC3 == ((Tmagic292524) 139))) goto LA4;
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&c0), 0, sizeof(c0));
/* a0 = sliced container, b0 = lower bound, c0 = upper bound */
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(q0, ((NI) 1));
initlocexpr_539283_839829468(p0, LOC6, (&a0));
LOC7 = (Tnode292802*)0;
LOC7 = HEX5BHEX5D_293238_850551059(q0, ((NI) 2));
initlocexpr_539283_839829468(p0, LOC7, (&b0));
LOC8 = (Tnode292802*)0;
LOC8 = HEX5BHEX5D_293238_850551059(q0, ((NI) 3));
initlocexpr_539283_839829468(p0, LOC8, (&c0));
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059(a0.t, IL64(211106243062016));
switch ((*LOC9).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
fmt0 = copyString(((NimStringDesc*) &T839829468_486));
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
/* seq/string slice: template choice depends on whether the target
 * openArray param and the GC mode require an extra dereference
 * (compileToCpp / sfCompileToCpp-style flag check — TODO confirm). */
NIM_BOOL LOC14;
Ttype292840* LOC15;
NIM_BOOL LOC17;
LOC14 = (NIM_BOOL)0;
LOC15 = (Ttype292840*)0;
LOC15 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC14 = ((*LOC15).kind == ((Ttypekind292244) 23));
if (!(LOC14)) goto LA16;
LOC17 = (NIM_BOOL)0;
LOC17 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC17) goto LA18;
LOC17 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA18: ;
LOC14 = !(LOC17);
LA16: ;
if (!LOC14) goto LA19;
fmt0 = copyString(((NimStringDesc*) &T839829468_487));
}
goto LA12;
LA19: ;
{
fmt0 = copyString(((NimStringDesc*) &T839829468_488));
}
LA12: ;
}
break;
default:
{
/* unsupported type for a slice argument: internal error */
NimStringDesc* LOC23;
NimStringDesc* LOC24;
LOC23 = (NimStringDesc*)0;
LOC24 = (NimStringDesc*)0;
LOC24 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC23 = rawNewString(LOC24->Sup.len + 14);
appendString(LOC23, ((NimStringDesc*) &T839829468_489));
appendString(LOC23, LOC24);
internalerror_196113_155036129(LOC23);
fmt0 = copyString(((NimStringDesc*) &T839829468_490));
}
break;
}
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = rdloc_538188_839829468((&a0));
LOC25[1] = rdloc_538188_839829468((&b0));
LOC25[2] = rdloc_538188_839829468((&c0));
result0 = HEX25_178905_2381377266(fmt0, LOC25, 3);
}
goto LA1;
LA4: ;
{
/* --- general path: pass the whole container --- */
Ttype292840* LOC27;
initlocexpr_539283_839829468(p0, n0, (&a0));
LOC27 = (Ttype292840*)0;
LOC27 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
switch ((*LOC27).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
/* already an openArray-like value: single-argument template */
TY178507 LOC29;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdloc_538188_839829468((&a0));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_378), LOC29, 1);
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
/* seq/string: same var-param/GC-mode distinction as above */
NIM_BOOL LOC33;
Ttype292840* LOC34;
NIM_BOOL LOC36;
TY532811 LOC40;
LOC33 = (NIM_BOOL)0;
LOC34 = (Ttype292840*)0;
LOC34 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC33 = ((*LOC34).kind == ((Ttypekind292244) 23));
if (!(LOC33)) goto LA35;
LOC36 = (NIM_BOOL)0;
LOC36 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC36) goto LA37;
LOC36 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA37: ;
LOC33 = !(LOC36);
LA35: ;
if (!LOC33) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = rdloc_538188_839829468((&a0));
LOC40[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_491), LOC40, 2);
}
goto LA31;
LA38: ;
{
TY532811 LOC42;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = rdloc_538188_839829468((&a0));
LOC42[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_379), LOC42, 2);
}
LA31: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
/* fixed-size array: length is known statically via lengthord */
TY532811 LOC44;
NI64 LOC45;
memset((void*)LOC44, 0, sizeof(LOC44));
LOC44[0] = rdloc_538188_839829468((&a0));
LOC45 = (NI64)0;
LOC45 = lengthord_320007_3876443242(a0.t);
LOC44[1] = rope_178401_2381377266(LOC45);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC44, 2);
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 22):
{
/* ptr/ref to container: dispatch again on the pointee's kind */
Ttype292840* LOC47;
LOC47 = (Ttype292840*)0;
LOC47 = lastson_295377_850551059(a0.t);
switch ((*LOC47).kind) {
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
TY532811 LOC49;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468((&a0));
LOC49[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_491), LOC49, 2);
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
TY532811 LOC51;
Ttype292840* LOC52;
NI64 LOC53;
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = rdloc_538188_839829468((&a0));
LOC52 = (Ttype292840*)0;
LOC52 = lastson_295377_850551059(a0.t);
LOC53 = (NI64)0;
LOC53 = lengthord_320007_3876443242(LOC52);
LOC51[1] = rope_178401_2381377266(LOC53);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC51, 2);
}
break;
default:
{
NimStringDesc* LOC55;
NimStringDesc* LOC56;
LOC55 = (NimStringDesc*)0;
LOC56 = (NimStringDesc*)0;
LOC56 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC55 = rawNewString(LOC56->Sup.len + 14);
appendString(LOC55, ((NimStringDesc*) &T839829468_489));
appendString(LOC55, LOC56);
internalerror_196113_155036129(LOC55);
}
break;
}
}
break;
default:
{
/* any other type cannot be passed to an openArray parameter */
NimStringDesc* LOC58;
NimStringDesc* LOC59;
LOC58 = (NimStringDesc*)0;
LOC59 = (NimStringDesc*)0;
LOC59 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC58 = rawNewString(LOC59->Sup.len + 14);
appendString(LOC58, ((NimStringDesc*) &T839829468_489));
appendString(LOC58, LOC59);
internalerror_196113_155036129(LOC58);
}
break;
}
}
LA1: ;
return result0;
}
/* Machine-generated C (Nim backend); code left byte-identical, comments added.
 * Renders one call argument `n_539790` for parameter `param0` of call `call0`
 * into a rope. Dispatch order: string-to-cstring conversions, openArray-like
 * parameters, parameters the backend passes by hidden pointer, a special
 * C++-interop var-parameter case, then the plain value case. Presumably
 * mirrors `genArg` in the Nim compiler — TODO confirm against the sources. */
N_NIMCALL(Ropeobj178006*, genarg_539787_839829468)(Tcproc529021* p0, Tnode292802* n_539790_839829468, Tsym292834* param0, Tnode292802* call0) {
Ropeobj178006* result0;
Tloc292816 a0;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
{
/* kind 71: string-to-cstring conversion node */
if (!((*n_539790_839829468).kind == ((Tnodekind292020) 71))) goto LA3;
result0 = genargstringtocstring_539776_839829468(p0, n_539790_839829468);
}
goto LA1;
LA3: ;
{
/* parameter type in the openArray-like mask: emit a (data, len) pair,
 * unwrapping a hidden-address node (kind 64) first if present */
Ttype292840* LOC6;
Tnode292802* n0;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059((*param0).typ, IL64(211106240964864));
if (!((IL64(281475110928384) &((NU64)1<<((NU)((*LOC6).kind)&63U)))!=0)) goto LA7;
{
if (!!(((*n_539790_839829468).kind == ((Tnodekind292020) 64)))) goto LA11;
n0 = n_539790_839829468;
}
goto LA9;
LA11: ;
{
n0 = (*n_539790_839829468).kindU.S6.sons->data[((NI) 0)];
}
LA9: ;
result0 = openarrayloc_539665_839829468(p0, n0);
}
goto LA1;
LA7: ;
{
/* backend passes this parameter by hidden pointer: take the address */
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = ccgintroducedptr_533611_839829468(param0);
if (!LOC15) goto LA16;
initlocexpr_539283_839829468(p0, n_539790_839829468, (&a0));
result0 = addrloc_538204_839829468((&a0));
}
goto LA1;
LA16: ;
{
/* var parameter (type kind 23) under C++-style codegen (command 2 or
 * module flag 27) with a hidden-addr argument: whether to pass the
 * address depends on the callee symbol's flags/loc flags. */
NIM_BOOL LOC19;
NIM_BOOL LOC20;
NIM_BOOL LOC21;
Tnode292802* callee0;
LOC19 = (NIM_BOOL)0;
LOC20 = (NIM_BOOL)0;
LOC21 = (NIM_BOOL)0;
LOC21 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC21) goto LA22;
LOC21 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA22: ;
LOC20 = LOC21;
if (!(LOC20)) goto LA23;
LOC20 = ((*(*param0).typ).kind == ((Ttypekind292244) 23));
LA23: ;
LOC19 = LOC20;
if (!(LOC19)) goto LA24;
LOC19 = ((*n_539790_839829468).kind == ((Tnodekind292020) 64));
LA24: ;
if (!LOC19) goto LA25;
initlocexprsingleuse_539289_839829468(p0, (*n_539790_839829468).kindU.S6.sons->data[((NI) 0)], (&a0));
callee0 = (*call0).kindU.S6.sons->data[((NI) 0)];
{
NIM_BOOL LOC29;
NIM_BOOL LOC30;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((*callee0).kind == ((Tnodekind292020) 3));
if (!(LOC30)) goto LA31;
LOC30 = ((134283296 & (*(*callee0).kindU.S4.sym).flags) == 32);
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA32;
LOC29 = !(((72 & (*(*callee0).kindU.S4.sym).loc.flags) == 0));
LA32: ;
if (!LOC29) goto LA33;
result0 = addrloc_538204_839829468((&a0));
}
goto LA27;
LA33: ;
{
result0 = rdloc_538188_839829468((&a0));
}
LA27: ;
}
goto LA1;
LA25: ;
{
/* default: render the argument as a plain value (single-use loc) */
initlocexprsingleuse_539289_839829468(p0, n_539790_839829468, (&a0));
result0 = rdloc_538188_839829468((&a0));
}
LA1: ;
return result0;
}
/* Machine-generated C (Nim backend). Renders a call argument that has no
 * matching formal parameter (e.g. varargs tail): string-to-cstring nodes
 * (kind 71) go through the conversion helper, everything else is read as a
 * plain single-use value. */
N_NIMCALL(Ropeobj178006*, genargnoparam_539938_839829468)(Tcproc529021* p0, Tnode292802* n0) {
	Ropeobj178006* result0 = (Ropeobj178006*)0;
	if ((*n0).kind == ((Tnodekind292020) 71)) {
		result0 = genargstringtocstring_539776_839829468(p0, n0);
	} else {
		Tloc292816 argloc0;
		memset((void*)(&argloc0), 0, sizeof(argloc0));
		initlocexprsingleuse_539289_839829468(p0, n0, (&argloc0));
		result0 = rdloc_538188_839829468((&argloc0));
	}
	return result0;
}
/* Machine-generated C (Nim backend). Thin wrapper: the raw (non-closure)
 * proc type name for `t0` is just the closure-type helper invoked with
 * kind 0. */
N_NIMCALL(Ropeobj178006*, getrawproctype_540459_839829468)(Tcproc529021* p0, Ttype292840* t0) {
	return getclosuretype_535685_839829468((*p0).module, t0, ((Tclosuretypekind535681) 0));
}
/* Machine-generated C (Nim backend). Returns true when the assignment target
 * `le0` aliases any actual argument of the call `ri0` (sons 1..len-1),
 * according to the ispartof alias analysis (any result other than 0 counts).
 * Structured rewrite of the original goto loop; behavior is identical. */
N_NIMCALL(NIM_BOOL, leftappearsonrightside_539329_839829468)(Tnode292802* le0, Tnode292802* ri0) {
	NIM_BOOL result0 = NIM_FALSE;
	if (!(le0 == NIM_NIL)) {
		NI last0 = (len_293081_850551059(ri0) - 1);
		NI idx0;
		for (idx0 = ((NI) 1); idx0 <= last0; idx0 += ((NI) 1)) {
			Tnode292802* arg0 = HEX5BHEX5D_293238_850551059(ri0, idx0);
			Tanalysisresult473003 rel0 = ispartof_473340_788060399(le0, arg0);
			if (!(rel0 == ((Tanalysisresult473003) 0))) {
				result0 = NIM_TRUE;
				break;
			}
		}
	}
	return result0;
}
/* Machine-generated C (Nim backend). True when the callee of `call0` is a
 * symbol node (kind 3) whose symbol carries flag 12 — presumably the
 * `sfNoInit` pragma flag (TODO confirm against the Nim sources). */
static N_INLINE(NIM_BOOL, hasnoinit_539383_839829468)(Tnode292802* call0) {
	Tnode292802* callee0 = (*call0).kindU.S6.sons->data[((NI) 0)];
	if (!((*callee0).kind == ((Tnodekind292020) 3))) {
		return NIM_FALSE;
	}
	return (((*(*callee0).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
}
/* Machine-generated C (Nim backend); code left byte-identical, comments added.
 * Emits code that resets the storage behind `loc0` to a zero/nil state.
 * Imported C++ types are skipped entirely; scalar-like values get a nil
 * ref-assign or a plain zero template; complex values get an optional GC
 * unregister, then either a genericReset-with-typeinfo or a memset, plus
 * object (type-field) re-initialization. Presumably mirrors `resetLoc` in
 * the Nim compiler — TODO confirm against the sources. */
N_NIMCALL(void, resetloc_538350_839829468)(Tcproc529021* p0, Tloc292816* loc0) {
NIM_BOOL containsgcref0;
Ttype292840* typ0;
{ containsgcref0 = containsgarbagecollectedref_320117_3876443242((*loc0).t);
typ0 = skiptypes_296099_850551059((*loc0).t, IL64(211106242013440));
{
/* imported C++ types manage their own state: nothing to emit */
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isimportedcpptype_533478_839829468(typ0);
if (!LOC3) goto LA4;
goto BeforeRet;
}
LA4: ;
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = iscomplexvaluetype_538317_839829468(typ0);
if (!!(LOC8)) goto LA9;
{
/* simple value holding a GC ref: assign nil through the write barrier */
Tloc292816 nilloc0;
if (!containsgcref0) goto LA13;
memset((void*)(&nilloc0), 0, sizeof(nilloc0));
initloc_532273_839829468((&nilloc0), ((Tlockind292808) 1), (*loc0).t, ((Tstorageloc292812) 2));
nilloc0.r = rope_178277_2381377266(((NimStringDesc*) &T839829468_174));
genrefassign_538311_839829468(p0, (&(*loc0)), (&nilloc0), 8);
}
goto LA11;
LA13: ;
{
/* simple non-GC value: plain zeroing template T..._494 */
TY178507 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&(*loc0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_494), LOC16, 1);
}
LA11: ;
}
goto LA6;
LA9: ;
{
{
/* complex value: when option bit 6 is on, first emit the
 * unregister/cleanup template T..._495 on the address */
TY178507 LOC22;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 6))&31U)))!=0)) goto LA20;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = addrloc_538204_839829468((&(*loc0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_495), LOC22, 1);
}
LA20: ;
{
/* storage other than 2 (on the heap? — TODO confirm): a typeinfo-driven
 * generic reset is needed because a raw memset is not safe here */
TY532811 LOC27;
if (!!(((*loc0).s == ((Tstorageloc292812) 2)))) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = addrloc_538204_839829468((&(*loc0)));
LOC27[1] = gentypeinfo_535941_839829468((*p0).module, (*loc0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_496), LOC27, 2);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, (&(*loc0)), NIM_TRUE);
}
goto LA23;
LA25: ;
{
/* otherwise a memset (template T..._152, needs string.h) suffices,
 * followed by re-writing the object's type field(s) */
TY532811 LOC29;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = addrloc_538204_839829468((&(*loc0)));
LOC29[1] = rdloc_538188_839829468((&(*loc0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_152), LOC29, 2);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, (&(*loc0)), NIM_TRUE);
}
LA23: ;
}
LA6: ;
}BeforeRet: ;
}
/* Machine-generated C (Nim backend). Returns `r0` unchanged when it is nil;
 * otherwise appends the comma/separator template T..._110 (rendered with no
 * arguments) and returns the concatenation. */
N_NIMCALL(Ropeobj178006*, addcomma_540464_839829468)(Ropeobj178006* r0) {
	Ropeobj178006* result0;
	if (r0 == NIM_NIL) {
		result0 = r0;
	} else {
		TY533289 noargs0;
		Ropeobj178006* sep0;
		memset((void*)noargs0, 0, sizeof(noargs0));
		sep0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), noargs0, 0);
		result0 = HEX26_178418_2381377266(r0, sep0);
	}
	return result0;
}
/* Machine-generated C (Nim backend); code left byte-identical, comments added.
 * Generates a call through a closure value: evaluates the callee (son 0 of
 * `ri0`), builds the comma-separated argument list `pl0` (matching formals
 * via genarg, trailing/varargs via genargnoparam), then emits one of several
 * call shapes depending on whether the proc type returns a value, whether
 * that return type is "invalid" for a plain C return (passed via hidden
 * out-pointer), and whether the destination `d0` may alias an argument.
 * Presumably mirrors `genClosureCall` — TODO confirm against the sources. */
N_NIMCALL(void, genclosurecall_540452_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* pl0;
Ttype292840* typ0;
NI length0;
Ropeobj178006* rawproc0;
NimStringDesc* callpattern0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
pl0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
{
/* --- build the argument list pl0 from sons 1..length-1 --- */
NI i_540613_839829468;
NI HEX3Atmp_541214_839829468;
NI res_541217_839829468;
i_540613_839829468 = (NI)0;
HEX3Atmp_541214_839829468 = (NI)0;
HEX3Atmp_541214_839829468 = (NI)(length0 - ((NI) 1));
res_541217_839829468 = ((NI) 1);
{
while (1) {
if (!(res_541217_839829468 <= HEX3Atmp_541214_839829468)) goto LA3;
i_540613_839829468 = res_541217_839829468;
{
NI LOC6;
Tnode292802* paramtype0;
LOC6 = (NI)0;
LOC6 = sonslen_295327_850551059(typ0);
if (!(i_540613_839829468 < LOC6)) goto LA7;
/* argument has a matching formal parameter */
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_540613_839829468];
{
NIM_BOOL LOC11;
Ropeobj178006* LOC20;
LOC11 = (NIM_BOOL)0;
LOC11 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!!(LOC11)) goto LA12;
{
/* separator before every argument except the first */
TY533289 LOC18;
Ropeobj178006* LOC19;
if (!!((pl0 == NIM_NIL))) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (Ropeobj178006*)0;
LOC19 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0);
add_178482_2381377266(&pl0, LOC19);
}
LA16: ;
LOC20 = (Ropeobj178006*)0;
LOC20 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_540613_839829468], (*paramtype0).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC20);
}
LA12: ;
}
goto LA4;
LA7: ;
{
/* argument beyond the formal parameter list (varargs tail) */
Ropeobj178006* LOC28;
{
TY533289 LOC26;
Ropeobj178006* LOC27;
if (!!((pl0 == NIM_NIL))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC27 = (Ropeobj178006*)0;
LOC27 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0);
add_178482_2381377266(&pl0, LOC27);
}
LA24: ;
LOC28 = (Ropeobj178006*)0;
LOC28 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i_540613_839829468]);
add_178482_2381377266(&pl0, LOC28);
}
LA4: ;
res_541217_839829468 += ((NI) 1);
} LA3: ;
}
}
rawproc0 = getrawproctype_540459_839829468(p0, typ0);
{
/* pick the call pattern: type-flag 14 selects the alternate template */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 14))&31U)))!=0)) goto LA31;
callpattern0 = copyString(((NimStringDesc*) &T839829468_492));
}
goto LA29;
LA31: ;
{
callpattern0 = copyString(((NimStringDesc*) &T839829468_493));
}
LA29: ;
{
/* --- proc type has a return type (sons[0] != nil) --- */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA36;
{
NIM_BOOL LOC40;
LOC40 = (NIM_BOOL)0;
LOC40 = isinvalidreturntype_533550_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC40) goto LA41;
/* return type is passed via a hidden out-pointer argument */
{
NI LOC45;
TY533289 LOC48;
Ropeobj178006* LOC49;
LOC45 = (NI)0;
LOC45 = sonslen_295351_850551059(ri0);
if (!(((NI) 1) < LOC45)) goto LA46;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC49 = (Ropeobj178006*)0;
LOC49 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC48, 0);
add_178482_2381377266(&pl0, LOC49);
}
LA46: ;
{
/* destination is a usable temp/expr OR provably does not alias any
 * argument: write straight into d0 */
NIM_BOOL LOC52;
NIM_BOOL LOC54;
Ropeobj178006* LOC67;
NimStringDesc* LOC68;
TY535235 LOC69;
LOC52 = (NIM_BOOL)0;
LOC52 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0);
if (LOC52) goto LA53;
LOC54 = (NIM_BOOL)0;
LOC54 = leftappearsonrightside_539329_839829468(le0, ri0);
LOC52 = !(LOC54);
LA53: ;
if (!LOC52) goto LA55;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA59;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
goto LA57;
LA59: ;
{
/* existing destination: reset it first unless the callee is
 * marked noinit */
NIM_BOOL LOC62;
NIM_BOOL LOC64;
LOC62 = (NIM_BOOL)0;
LOC62 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0));
if (!(LOC62)) goto LA63;
LOC64 = (NIM_BOOL)0;
LOC64 = hasnoinit_539383_839829468(ri0);
LOC62 = !(LOC64);
LA63: ;
if (!LOC62) goto LA65;
resetloc_538350_839829468(p0, d0);
}
goto LA57;
LA65: ;
LA57: ;
LOC67 = (Ropeobj178006*)0;
LOC67 = addrloc_538204_839829468((&(*d0)));
add_178482_2381377266(&pl0, LOC67);
LOC68 = (NimStringDesc*)0;
LOC68 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC68, callpattern0);
appendString(LOC68, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC69, 0, sizeof(LOC69));
LOC69[0] = op0.r;
LOC69[1] = pl0;
LOC69[2] = addcomma_540464_839829468(pl0);
LOC69[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC68, LOC69, 4);
}
goto LA50;
LA55: ;
{
/* possible aliasing: call into a fresh temp, then assign to d0 */
Tloc292816 tmp0;
Ropeobj178006* LOC71;
NimStringDesc* LOC72;
TY535235 LOC73;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC71 = (Ropeobj178006*)0;
LOC71 = addrloc_538204_839829468((&tmp0));
add_178482_2381377266(&pl0, LOC71);
LOC72 = (NimStringDesc*)0;
LOC72 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC72, callpattern0);
appendString(LOC72, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = op0.r;
LOC73[1] = pl0;
LOC73[2] = addcomma_540464_839829468(pl0);
LOC73[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC72, LOC73, 4);
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA50: ;
}
goto LA38;
LA41: ;
{
/* ordinary return value: render the call as an expression and assign */
Tloc292816 list0;
TY535235 LOC79;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA77;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA77: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
memset((void*)LOC79, 0, sizeof(LOC79));
LOC79[0] = op0.r;
LOC79[1] = pl0;
LOC79[2] = addcomma_540464_839829468(pl0);
LOC79[3] = rawproc0;
list0.r = HEX25_178905_2381377266(callpattern0, LOC79, 4);
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0);
}
LA38: ;
}
goto LA34;
LA36: ;
{
/* --- void proc: emit the call as a statement --- */
NimStringDesc* LOC81;
TY535235 LOC82;
LOC81 = (NimStringDesc*)0;
LOC81 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC81, callpattern0);
appendString(LOC81, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC82, 0, sizeof(LOC82));
LOC82[0] = op0.r;
LOC82[1] = pl0;
LOC82[2] = addcomma_540464_839829468(pl0);
LOC82[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC81, LOC82, 4);
}
LA34: ;
}
/* Machine-generated C (Nim backend). Renders the i-th argument of call `ri0`
 * against proc type `typ0`. Compile-time-only parameters render to nil; var
 * parameters (type kind 23) given a hidden-addr node (kind 64) are unwrapped
 * first; out-of-range indices are only legal for varargs-flagged proc types
 * (type-flag 0), otherwise a localerror is reported. Structured rewrite of
 * the original goto chains; behavior is identical. */
N_NIMCALL(Ropeobj178006*, genotherarg_539277_839829468)(Tcproc529021* p0, Tnode292802* ri0, NI i0, Ttype292840* typ0) {
	Ropeobj178006* result0 = (Ropeobj178006*)0;
	NI nparams0 = sonslen_295327_850551059(typ0);
	if (i0 < nparams0) {
		Tnode292802* paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i0];
		if (iscompiletimeonly_328706_3876443242((*paramtype0).typ)) {
			result0 = NIM_NIL;
		} else {
			NIM_BOOL unwrapaddr0 = ((*(*typ0).sons->data[i0]).kind == ((Ttypekind292244) 23));
			if (unwrapaddr0) {
				unwrapaddr0 = ((*(*ri0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 64));
			}
			if (unwrapaddr0) {
				Tnode292802* inner0 = HEX5BHEX5D_293238_850551059((*ri0).kindU.S6.sons->data[i0], ((NI) 0));
				result0 = genargnoparam_539938_839829468(p0, inner0);
			} else {
				result0 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]);
			}
		}
	} else {
		if (!((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0))) {
			localerror_196085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_501));
			result0 = NIM_NIL;
		} else {
			result0 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]);
		}
	}
	return result0;
}
/* Machine-generated C (Nim backend). Strips a matching addr/deref pair from
 * `node0`: an outer addr (kinds 63/64) or deref (kinds 47/65) is peeled, an
 * intervening conversion node (kind 66) is skipped, and if the inner node is
 * the complementary operation its operand is returned; otherwise the original
 * node is returned unchanged. Structured rewrite with early returns;
 * behavior is identical to the goto original. */
N_NIMCALL(Tnode292802*, skipaddrderef_541433_839829468)(Tnode292802* node0) {
	Tnode292802* n0 = node0;
	NIM_BOOL sawaddr0 = NIM_FALSE;
	switch ((*n0).kind) {
	case ((Tnodekind292020) 63):
	case ((Tnodekind292020) 64):
		n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
		sawaddr0 = NIM_TRUE;
		break;
	case ((Tnodekind292020) 47):
	case ((Tnodekind292020) 65):
		n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
		break;
	default:
		/* not an addr/deref at all: nothing to strip */
		return n0;
	}
	/* look through one conversion node between the two operations */
	if ((*n0).kind == ((Tnodekind292020) 66)) {
		n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
	}
	if (sawaddr0 && ((*n0).kind == ((Tnodekind292020) 47) || (*n0).kind == ((Tnodekind292020) 65))) {
		/* addr(deref(x)) -> x */
		return (*n0).kindU.S6.sons->data[((NI) 0)];
	}
	if ((*n0).kind == ((Tnodekind292020) 63) || (*n0).kind == ((Tnodekind292020) 64)) {
		/* deref(addr(x)) -> x */
		return (*n0).kindU.S6.sons->data[((NI) 0)];
	}
	return node0;
}
/* Generates the receiver ("this") argument for a method-style pattern call.
 * Machine-generated from a Nim compiler source (assumption based on naming
 * conventions -- TODO confirm).  Emits the argument text followed by one of
 * two member-access separator constants (T839829468_504 vs T839829468_257,
 * presumably "->"/"." or similar -- cannot be confirmed from this chunk),
 * chosen by whether the receiver is pointer-like. */
N_NIMCALL(Ropeobj178006*, genthisarg_541475_839829468)(Tcproc529021* p0, Tnode292802* ri_541478_839829468, NI i0, Ttype292840* typ0) {
Ropeobj178006* result0;
Tnode292802* ri0;
Ttype292840* t0;
result0 = (Ropeobj178006*)0;
/* Internal error when the formal-parameter list has no slot at index i0. */
{
NI LOC3;
NimStringDesc* LOC6;
LOC3 = (NI)0;
LOC3 = sonslen_295327_850551059(typ0);
if (!!((i0 < LOC3))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_503);
internalerror_196113_155036129(LOC6);
}
LA4: ;
ri0 = HEX5BHEX5D_293238_850551059(ri_541478_839829468, i0);
/* Strip nested kind-66 wrapper nodes (presumably hidden conversions --
 * TODO confirm enum value against the generating sources). */
{
while (1) {
if (!((*ri0).kind == ((Tnodekind292020) 66))) goto LA8;
ri0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
} LA8: ;
}
t0 = skiptypes_296099_850551059((*typ0).sons->data[i0], 2048);
{
Tnode292802* x0;
/* Branch 1: the formal's type has kind 23 (looks like a by-reference
 * formal -- verify against the type-kind enum). */
if (!((*t0).kind == ((Ttypekind292244) 23))) goto LA11;
{
/* Unwrap one kind-64 wrapper around the actual argument, if present. */
if (!((*ri0).kind == ((Tnodekind292020) 64))) goto LA15;
x0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
}
goto LA13;
LA15: ;
{
x0 = ri0;
}
LA13: ;
{
/* Receiver's own type is kind 21 (pointer-like, presumably): emit it
 * and append separator T839829468_504. */
if (!((*(*x0).typ).kind == ((Ttypekind292244) 21))) goto LA20;
result0 = genargnoparam_539938_839829468(p0, x0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
goto LA18;
LA20: ;
{
NIM_BOOL LOC23;
Tnode292802* LOC25;
Tnode292802* LOC28;
LOC23 = (NIM_BOOL)0;
/* Receiver is a deref-like node (kind 65/47) whose operand is already
 * kind-21: emit the operand directly with separator T839829468_504. */
LOC23 = ((*x0).kind == ((Tnodekind292020) 65) || (*x0).kind == ((Tnodekind292020) 47));
if (!(LOC23)) goto LA24;
LOC25 = (Tnode292802*)0;
LOC25 = HEX5BHEX5D_293238_850551059(x0, ((NI) 0));
LOC23 = ((*(*LOC25).typ).kind == ((Ttypekind292244) 21));
LA24: ;
if (!LOC23) goto LA26;
LOC28 = (Tnode292802*)0;
LOC28 = HEX5BHEX5D_293238_850551059(x0, ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC28);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
goto LA18;
LA26: ;
{
/* Plain value receiver: emit with separator T839829468_257. */
result0 = genargnoparam_539938_839829468(p0, x0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
LA18: ;
}
goto LA9;
LA11: ;
{
/* Branch 2: the formal's type itself has kind 21 (pointer-like). */
if (!((*t0).kind == ((Ttypekind292244) 21))) goto LA31;
{
Tnode292802* LOC37;
/* Argument is addr-like (kind 63/64): drop the wrapper, value access. */
if (!((*ri0).kind == ((Tnodekind292020) 63) || (*ri0).kind == ((Tnodekind292020) 64))) goto LA35;
LOC37 = (Tnode292802*)0;
LOC37 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC37);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
goto LA33;
LA35: ;
{
result0 = genargnoparam_539938_839829468(p0, ri0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
LA33: ;
}
goto LA9;
LA31: ;
{
/* Branch 3: any other formal type -- normalize away addr/deref wrappers
 * plus one more addr layer, then emit with separator T839829468_257. */
ri0 = skipaddrderef_541433_839829468(ri0);
{
if (!((*ri0).kind == ((Tnodekind292020) 63) || (*ri0).kind == ((Tnodekind292020) 64))) goto LA42;
ri0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
}
LA42: ;
result0 = genargnoparam_539938_839829468(p0, ri0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
LA9: ;
return result0;
}
/* Interprets a call-pattern string pat0 and builds the resulting C code rope.
 * i0 walks the pattern characters; j0 walks the actual arguments of call node
 * ri_541702_839829468.  The metacharacters dispatched below are the ASCII
 * codes '@' (64), '#' (35) and '\'' (39); everything else is copied through
 * literally.  This mirrors an importcpp-style pattern language (assumption --
 * TODO confirm against the generating Nim sources). */
N_NIMCALL(Ropeobj178006*, genpatterncall_541699_839829468)(Tcproc529021* p0, Tnode292802* ri_541702_839829468, NimStringDesc* pat0, Ttype292840* typ_541704_839829468) {
Ropeobj178006* result0;
NI i0;
NI j0;
result0 = (Ropeobj178006*)0;
i0 = ((NI) 0);
j0 = ((NI) 1);
{
while (1) {
if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA2;
switch (((NU8)(pat0->data[i0]))) {
case 64:
/* '@': splice argument j0 and then every remaining argument, separated
 * by the T839829468_110 constant (presumably ", "). */
{
{
NI LOC6;
Ropeobj178006* LOC9;
LOC6 = (NI)0;
LOC6 = len_293081_850551059(ri_541702_839829468);
if (!(j0 < LOC6)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = genotherarg_539277_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC9);
/* Counted loop over the arguments after j0. */
{
NI k_541728_839829468;
NI HEX3Atmp_541904_839829468;
NI HEX3Atmp_541905_839829468;
NI LOC11;
NI res_541908_839829468;
k_541728_839829468 = (NI)0;
HEX3Atmp_541904_839829468 = (NI)0;
HEX3Atmp_541905_839829468 = (NI)0;
HEX3Atmp_541904_839829468 = (NI)(j0 + ((NI) 1));
LOC11 = (NI)0;
LOC11 = len_293081_850551059(ri_541702_839829468);
HEX3Atmp_541905_839829468 = (LOC11 - 1);
res_541908_839829468 = HEX3Atmp_541904_839829468;
{
while (1) {
TY533289 LOC14;
Ropeobj178006* LOC15;
Ropeobj178006* LOC16;
if (!(res_541908_839829468 <= HEX3Atmp_541905_839829468)) goto LA13;
k_541728_839829468 = res_541908_839829468;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC15 = (Ropeobj178006*)0;
LOC15 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC14, 0);
add_178482_2381377266(&result0, LOC15);
LOC16 = (Ropeobj178006*)0;
LOC16 = genotherarg_539277_839829468(p0, ri_541702_839829468, k_541793_839829468 == k_541793_839829468 ? k_541728_839829468 : k_541728_839829468, typ_541704_839829468);
add_178482_2381377266(&result0, LOC16);
res_541908_839829468 += ((NI) 1);
} LA13: ;
}
}
}
LA7: ;
i0 += ((NI) 1);
}
break;
case 35:
/* '#': consume one argument; the following pattern character refines
 * the behavior ('+' (43) / '@' (64): nested call expansion,
 * '.' (46): receiver argument, '[' (91): unwrapped array argument). */
{
{
Tnode292802* ri0;
if (!(((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(43)) || ((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(64)))) goto LA20;
ri0 = HEX5BHEX5D_293238_850551059(ri_541702_839829468, j0);
{
Ttype292840* typ0;
TY533289 LOC31;
Ropeobj178006* LOC32;
TY533289 LOC46;
Ropeobj178006* LOC47;
/* The argument must itself be a call-like node (kinds 26..32). */
if (!((*ri0).kind == ((Tnodekind292020) 27) || (*ri0).kind == ((Tnodekind292020) 29) || (*ri0).kind == ((Tnodekind292020) 30) || (*ri0).kind == ((Tnodekind292020) 31) || (*ri0).kind == ((Tnodekind292020) 26) || (*ri0).kind == ((Tnodekind292020) 28) || (*ri0).kind == ((Tnodekind292020) 32))) goto LA24;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
Ropeobj178006* LOC30;
/* '+' additionally emits the nested callee itself. */
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(43))) goto LA28;
LOC30 = (Ropeobj178006*)0;
LOC30 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)]);
add_178482_2381377266(&result0, LOC30);
}
LA28: ;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_118), LOC31, 0);
add_178482_2381377266(&result0, LOC32);
{
NI LOC35;
Ropeobj178006* LOC38;
LOC35 = (NI)0;
LOC35 = len_293081_850551059(ri0);
if (!(((NI) 1) < LOC35)) goto LA36;
LOC38 = (Ropeobj178006*)0;
LOC38 = genotherarg_539277_839829468(p0, ri0, ((NI) 1), typ0);
add_178482_2381377266(&result0, LOC38);
}
LA36: ;
/* Remaining nested-call arguments, comma separated. */
{
NI k_541793_839829468;
NI HEX3Atmp_541915_839829468;
NI HEX3Atmp_541916_839829468;
NI LOC40;
NI res_541919_839829468;
k_541793_839829468 = (NI)0;
HEX3Atmp_541915_839829468 = (NI)0;
HEX3Atmp_541916_839829468 = (NI)0;
HEX3Atmp_541915_839829468 = (NI)(j0 + ((NI) 1));
LOC40 = (NI)0;
LOC40 = len_293081_850551059(ri0);
HEX3Atmp_541916_839829468 = (LOC40 - 1);
res_541919_839829468 = HEX3Atmp_541915_839829468;
{
while (1) {
TY533289 LOC43;
Ropeobj178006* LOC44;
Ropeobj178006* LOC45;
if (!(res_541919_839829468 <= HEX3Atmp_541916_839829468)) goto LA42;
k_541793_839829468 = res_541919_839829468;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC44 = (Ropeobj178006*)0;
LOC44 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC43, 0);
add_178482_2381377266(&result0, LOC44);
LOC45 = (Ropeobj178006*)0;
LOC45 = genotherarg_539277_839829468(p0, ri0, k_541793_839829468, typ0);
add_178482_2381377266(&result0, LOC45);
res_541919_839829468 += ((NI) 1);
} LA42: ;
}
}
memset((void*)LOC46, 0, sizeof(LOC46));
LOC47 = (Ropeobj178006*)0;
LOC47 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_117), LOC46, 0);
add_178482_2381377266(&result0, LOC47);
}
goto LA22;
LA24: ;
{
/* Not a call-like argument: report the error string T839829468_502. */
localerror_196085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_502));
}
LA22: ;
i0 += ((NI) 1);
}
goto LA18;
LA20: ;
{
Ropeobj178006* LOC52;
/* "#.": the argument is the method receiver. */
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(46))) goto LA50;
LOC52 = (Ropeobj178006*)0;
LOC52 = genthisarg_541475_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC52);
i0 += ((NI) 1);
}
goto LA18;
LA50: ;
{
Tnode292802* arg0;
Ropeobj178006* LOC58;
/* "#[": strip all addr/deref/conversion wrappers before emitting. */
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(91))) goto LA54;
arg0 = skipaddrderef_541433_839829468((*ri_541702_839829468).kindU.S6.sons->data[j0]);
{
while (1) {
if (!((*arg0).kind == ((Tnodekind292020) 63) || (*arg0).kind == ((Tnodekind292020) 64) || (*arg0).kind == ((Tnodekind292020) 66))) goto LA57;
arg0 = HEX5BHEX5D_293238_850551059(arg0, ((NI) 0));
} LA57: ;
}
LOC58 = (Ropeobj178006*)0;
LOC58 = genargnoparam_539938_839829468(p0, arg0);
add_178482_2381377266(&result0, LOC58);
}
goto LA18;
LA54: ;
{
/* Plain '#': emit argument j0 as-is. */
Ropeobj178006* LOC60;
LOC60 = (Ropeobj178006*)0;
LOC60 = genotherarg_539277_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC60);
}
LA18: ;
j0 += ((NI) 1);
i0 += ((NI) 1);
}
break;
case 39:
/* '\'': a generic-parameter type slot; scan the index/star count and
 * substitute the resolved type's C name (or T839829468_26 when the
 * slot does not resolve). */
{
NI idx0;
NI stars0;
idx0 = (NI)0;
stars0 = (NI)0;
{
NIM_BOOL LOC64;
Ttype292840* t0;
LOC64 = (NIM_BOOL)0;
LOC64 = scancppgenericslot_534827_839829468(pat0, (&i0), (&idx0), (&stars0));
if (!LOC64) goto LA65;
t0 = resolvestarsincpptype_534891_839829468(typ_541704_839829468, idx0, stars0);
{
TY533289 LOC71;
Ropeobj178006* LOC72;
if (!(t0 == NIM_NIL)) goto LA69;
memset((void*)LOC71, 0, sizeof(LOC71));
LOC72 = (Ropeobj178006*)0;
LOC72 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC71, 0);
add_178482_2381377266(&result0, LOC72);
}
goto LA67;
LA69: ;
{
Ropeobj178006* LOC74;
LOC74 = (Ropeobj178006*)0;
LOC74 = gettypedesc_535673_839829468((*p0).module, t0);
add_178482_2381377266(&result0, LOC74);
}
LA67: ;
}
LA65: ;
}
break;
default:
/* Literal text: copy characters verbatim until the next '@'/'#'/'\''. */
{
NI start0;
start0 = i0;
{
while (1) {
if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA77;
{
if (!!((((NU8)(pat0->data[i0])) == ((NU8)(64)) || ((NU8)(pat0->data[i0])) == ((NU8)(35)) || ((NU8)(pat0->data[i0])) == ((NU8)(39))))) goto LA80;
i0 += ((NI) 1);
}
goto LA78;
LA80: ;
{
goto LA76;
}
LA78: ;
} LA77: ;
} LA76: ;
{
NimStringDesc* LOC87;
if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA85;
LOC87 = (NimStringDesc*)0;
LOC87 = copyStrLast(pat0, start0, (NI)(i0 - ((NI) 1)));
add_178487_2381377266(&result0, LOC87);
}
LA85: ;
}
break;
}
} LA2: ;
}
return result0;
}
/* Finishes a generated call: joins callee0, an opening token (T839829468_118,
 * presumably "(") and params0 into pl0, then emits the statement according to
 * the callee's return type.  d0 is the destination loc the caller wants the
 * result in; le0/ri0 are the assignment's lhs/rhs AST nodes.  Three paths:
 * returns via hidden out-parameter, returns a plain value, or returns void. */
N_NIMCALL(void, fixupcall_539410_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0, Ropeobj178006* callee0, Ropeobj178006* params0) {
Ropeobj178006* pl0;
TY533289 LOC1;
Ropeobj178006* LOC2;
Ropeobj178006* LOC3;
Ttype292840* typ0;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (Ropeobj178006*)0;
LOC2 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_118), LOC1, 0);
LOC3 = (Ropeobj178006*)0;
LOC3 = HEX26_178418_2381377266(callee0, LOC2);
pl0 = HEX26_178418_2381377266(LOC3, params0);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* Non-void return type? */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA6;
{
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = isinvalidreturntype_533550_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC10) goto LA11;
/* Path 1: the return type cannot be returned by value in C, so the
 * result is passed as an extra address argument. */
{
TY533289 LOC17;
Ropeobj178006* LOC18;
/* Separate the extra argument with T839829468_110 (presumably ", ")
 * when there already are parameters. */
if (!!((params0 == NIM_NIL))) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC18 = (Ropeobj178006*)0;
LOC18 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC17, 0);
add_178482_2381377266(&pl0, LOC18);
}
LA15: ;
{
NIM_BOOL LOC21;
NIM_BOOL LOC23;
Ropeobj178006* LOC36;
TY533289 LOC37;
Ropeobj178006* LOC38;
LOC21 = (NIM_BOOL)0;
/* d0 already holds a usable loc kind (bitmask 3 over d0->k), or the
 * lhs provably does not appear on the rhs -- then write directly
 * into d0. */
LOC21 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0);
if (LOC21) goto LA22;
LOC23 = (NIM_BOOL)0;
LOC23 = leftappearsonrightside_539329_839829468(le0, ri0);
LOC21 = !(LOC23);
LA22: ;
if (!LOC21) goto LA24;
{
/* No destination yet: allocate a temporary loc for the result. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA28;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
goto LA26;
LA28: ;
{
NIM_BOOL LOC31;
NIM_BOOL LOC33;
LOC31 = (NIM_BOOL)0;
/* Existing destination of a kind outside bitmask 66, and the call
 * is not marked no-init: reset it before the call. */
LOC31 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0));
if (!(LOC31)) goto LA32;
LOC33 = (NIM_BOOL)0;
LOC33 = hasnoinit_539383_839829468(ri0);
LOC31 = !(LOC33);
LA32: ;
if (!LOC31) goto LA34;
resetloc_538350_839829468(p0, d0);
}
goto LA26;
LA34: ;
LA26: ;
LOC36 = (Ropeobj178006*)0;
LOC36 = addrloc_538204_839829468((&(*d0)));
add_178482_2381377266(&pl0, LOC36);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC38 = (Ropeobj178006*)0;
LOC38 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC37, 0);
add_178482_2381377266(&pl0, LOC38);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
goto LA19;
LA24: ;
{
/* lhs aliases the rhs: call into a fresh temporary, then assign. */
Tloc292816 tmp0;
Ropeobj178006* LOC40;
TY533289 LOC41;
Ropeobj178006* LOC42;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC40 = (Ropeobj178006*)0;
LOC40 = addrloc_538204_839829468((&tmp0));
add_178482_2381377266(&pl0, LOC40);
memset((void*)LOC41, 0, sizeof(LOC41));
LOC42 = (Ropeobj178006*)0;
LOC42 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC41, 0);
add_178482_2381377266(&pl0, LOC42);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA19: ;
}
goto LA8;
LA11: ;
{
/* Path 2: ordinary value return -- close the call with T839829468_117
 * (presumably ")"). */
TY533289 LOC44;
Ropeobj178006* LOC45;
memset((void*)LOC44, 0, sizeof(LOC44));
LOC45 = (Ropeobj178006*)0;
LOC45 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_117), LOC44, 0);
add_178482_2381377266(&pl0, LOC45);
{
NIM_BOOL LOC48;
NIM_BOOL LOC49;
LOC48 = (NIM_BOOL)0;
LOC49 = (NIM_BOOL)0;
/* Special backend mode (gcmd == 2 or module flag 27) with loc flag 8
 * on d0: keep the call expression itself as the loc's rope instead of
 * emitting a statement (deferred/expression-style destination). */
LOC49 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC49) goto LA50;
LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA50: ;
LOC48 = LOC49;
if (!(LOC48)) goto LA51;
LOC48 = (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 8))&15U)))!=0);
LA51: ;
if (!LOC48) goto LA52;
(*d0).k = ((Tlockind292808) 9);
unsureAsgnRef((void**) (&(*d0).r), pl0);
(*d0).flags &= ~(((NU16)1) << ((((Tlocflag292810) 8)) % (sizeof(NU16)*8)));
}
goto LA46;
LA52: ;
{
/* Otherwise wrap the expression in a kind-9 loc and assign into d0. */
Tloc292816 list0;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA57;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA57: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0);
}
LA46: ;
}
LA8: ;
}
goto LA4;
LA6: ;
{
/* Path 3: void return -- terminate the call statement (T839829468_505,
 * presumably ");\n") and emit it. */
TY533289 LOC60;
Ropeobj178006* LOC61;
memset((void*)LOC60, 0, sizeof(LOC60));
LOC61 = (Ropeobj178006*)0;
LOC61 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC60, 0);
add_178482_2381377266(&pl0, LOC61);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA4: ;
}
/* Generates a method/infix-style call (callee imported with a pattern).
 * If the callee's pattern string contains the metacharacter set
 * T839829468_500, the whole call is expanded via genpatterncall and emitted
 * here; otherwise a "this.op(args...)" form is built and handed to
 * fixupcall.  NOTE(review): restored "&params0" in the two add_... calls in
 * the else-branch -- the text had been corrupted to the pilcrow mojibake
 * "(paragraph-sign)ms0" by HTML-entity mangling of the "&para" prefix of
 * "&params0"; params0 is the only matching declared identifier and is what
 * fixupcall receives below. */
N_NIMCALL(void, geninfixcall_541929_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ttype292840* typ_541940_839829468;
NI length0;
NimStringDesc* pat0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
typ_541940_839829468 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data;
{
NimStringDesc* LOC5;
/* Internal error if the callee has no pattern string at all. */
if (!!(!((pat0 == NIM_NIL)))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_498);
internalerror_196113_155036129(LOC5);
}
LA3: ;
{
NIM_BOOL LOC8;
Ropeobj178006* pl0;
Ttype292840* typ0;
LOC8 = (NIM_BOOL)0;
LOC8 = contains_110056_4286263276(pat0, T839829468_500);
if (!LOC8) goto LA9;
/* Pattern branch: expand the pattern into the full call text. */
pl0 = genpatterncall_541699_839829468(p0, ri0, pat0, typ_541940_839829468);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* Non-void return: either stash the expression in d0 (deferred mode)
 * or assign it via a kind-9 loc; mirrors fixupcall's value path. */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA13;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC18) goto LA19;
LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA19: ;
LOC17 = LOC18;
if (!(LOC17)) goto LA20;
LOC17 = (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 8))&15U)))!=0);
LA20: ;
if (!LOC17) goto LA21;
(*d0).k = ((Tlockind292808) 9);
unsureAsgnRef((void**) (&(*d0).r), pl0);
(*d0).flags &= ~(((NU16)1) << ((((Tlocflag292810) 8)) % (sizeof(NU16)*8)));
}
goto LA15;
LA21: ;
{
Tloc292816 list0;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA26;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA26: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0);
}
LA15: ;
}
goto LA11;
LA13: ;
{
/* Void return: terminate with T839829468_497 and emit the statement. */
TY533289 LOC29;
Ropeobj178006* LOC30;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC30 = (Ropeobj178006*)0;
LOC30 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_497), LOC29, 0);
add_178482_2381377266(&pl0, LOC30);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA11: ;
}
goto LA6;
LA9: ;
{
/* No metacharacters: build "<this><op>" plus a comma-separated params
 * rope from the arguments after the receiver, then let fixupcall
 * finish the statement. */
Ropeobj178006* pl0;
Ropeobj178006* params0;
pl0 = NIM_NIL;
{
NI LOC34;
Ropeobj178006* LOC37;
LOC34 = (NI)0;
LOC34 = len_293081_850551059(ri0);
if (!(((NI) 1) < LOC34)) goto LA35;
LOC37 = (Ropeobj178006*)0;
LOC37 = genthisarg_541475_839829468(p0, ri0, ((NI) 1), typ_541940_839829468);
add_178482_2381377266(&pl0, LOC37);
}
LA35: ;
add_178482_2381377266(&pl0, op0.r);
params0 = (Ropeobj178006*)0;
{
NI i_542425_839829468;
NI HEX3Atmp_542609_839829468;
NI res_542612_839829468;
i_542425_839829468 = (NI)0;
HEX3Atmp_542609_839829468 = (NI)0;
HEX3Atmp_542609_839829468 = (NI)(length0 - ((NI) 1));
res_542612_839829468 = ((NI) 2);
{
while (1) {
Ropeobj178006* LOC47;
if (!(res_542612_839829468 <= HEX3Atmp_542609_839829468)) goto LA40;
i_542425_839829468 = res_542612_839829468;
{
TY533289 LOC45;
Ropeobj178006* LOC46;
if (!!((params0 == NIM_NIL))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC45, 0);
add_178482_2381377266(&params0, LOC46);
}
LA43: ;
LOC47 = (Ropeobj178006*)0;
LOC47 = genotherarg_539277_839829468(p0, ri0, i_542425_839829468, typ_541940_839829468);
add_178482_2381377266(&params0, LOC47);
res_542612_839829468 += ((NI) 1);
} LA40: ;
}
}
fixupcall_539410_839829468(p0, le0, ri0, d0, pl0, params0);
}
LA6: ;
}
/* Generates a named-parameter call (presumably an Objective-C style message
 * send -- the result starts with the T839829468_506 constant, likely "[",
 * and interleaves parameter names with arguments; TODO confirm against the
 * generating Nim sources).  ri0 is the call node, d0 the destination loc. */
N_NIMCALL(void, gennamedparamcall_542616_839829468)(Tcproc529021* p0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* pl0;
TY533289 LOC1;
Ttype292840* typ0;
NI length0;
NimStringDesc* pat0;
NI start0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
memset((void*)LOC1, 0, sizeof(LOC1));
pl0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_506), LOC1, 0);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data;
{
NimStringDesc* LOC6;
/* Internal error when the callee carries no pattern string. */
if (!!(!((pat0 == NIM_NIL)))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_507);
internalerror_196113_155036129(LOC6);
}
LA4: ;
start0 = ((NI) 3);
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
/* Pattern contains a space (char code 32): selector-first layout --
 * emit the selector, then the first argument after T839829468_244. */
LOC9 = contains_110046_4286263276(pat0, 32);
if (!LOC9) goto LA10;
start0 = ((NI) 1);
add_178482_2381377266(&pl0, op0.r);
{
TY533289 LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC18;
if (!(((NI) 1) < length0)) goto LA14;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC16, 0);
add_178482_2381377266(&pl0, LOC17);
LOC18 = (Ropeobj178006*)0;
LOC18 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC18);
start0 = ((NI) 2);
}
LA14: ;
}
goto LA7;
LA10: ;
{
/* No space in the pattern: receiver-first layout -- first argument,
 * separator T839829468_111, selector, then the second argument. */
{
Ropeobj178006* LOC24;
TY533289 LOC25;
Ropeobj178006* LOC26;
if (!(((NI) 1) < length0)) goto LA22;
LOC24 = (Ropeobj178006*)0;
LOC24 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC24);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC26 = (Ropeobj178006*)0;
LOC26 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC25, 0);
add_178482_2381377266(&pl0, LOC26);
}
LA22: ;
add_178482_2381377266(&pl0, op0.r);
{
TY533289 LOC31;
Ropeobj178006* LOC32;
Ropeobj178006* LOC33;
if (!(((NI) 2) < length0)) goto LA29;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC31, 0);
add_178482_2381377266(&pl0, LOC32);
LOC33 = (Ropeobj178006*)0;
LOC33 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 2)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 2)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC33);
}
LA29: ;
}
LA7: ;
/* Remaining arguments: emit "<sep><param-name><colon-sep><arg>" each. */
{
NI i_543051_839829468;
NI HEX3Atmp_543617_839829468;
NI res_543620_839829468;
i_543051_839829468 = (NI)0;
HEX3Atmp_543617_839829468 = (NI)0;
HEX3Atmp_543617_839829468 = (NI)(length0 - ((NI) 1));
res_543620_839829468 = start0;
{
while (1) {
Tsym292834* param0;
TY533289 LOC42;
Ropeobj178006* LOC43;
TY533289 LOC44;
Ropeobj178006* LOC45;
Ropeobj178006* LOC46;
if (!(res_543620_839829468 <= HEX3Atmp_543617_839829468)) goto LA36;
i_543051_839829468 = res_543620_839829468;
{
NI LOC39;
/* More actual args than formals is an internal error here. */
LOC39 = (NI)0;
LOC39 = sonslen_295327_850551059(typ0);
if (!(LOC39 <= i_543051_839829468)) goto LA40;
internalerror_196100_155036129((*ri0).info, ((NimStringDesc*) &T839829468_508));
}
LA40: ;
param0 = (*(*(*typ0).n).kindU.S6.sons->data[i_543051_839829468]).kindU.S4.sym;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC43 = (Ropeobj178006*)0;
LOC43 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC42, 0);
add_178482_2381377266(&pl0, LOC43);
add_178487_2381377266(&pl0, (*(*param0).name).s);
memset((void*)LOC44, 0, sizeof(LOC44));
LOC45 = (Ropeobj178006*)0;
LOC45 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC44, 0);
add_178482_2381377266(&pl0, LOC45);
LOC46 = (Ropeobj178006*)0;
LOC46 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_543051_839829468], param0, ri0);
add_178482_2381377266(&pl0, LOC46);
res_543620_839829468 += ((NI) 1);
} LA36: ;
}
}
{
/* Non-void return type? */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA49;
{
NIM_BOOL LOC53;
LOC53 = (NIM_BOOL)0;
LOC53 = isinvalidreturntype_533550_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC53) goto LA54;
/* Return passed through a hidden address argument (same idea as in
 * fixupcall's out-parameter path). */
{
NI LOC58;
TY533289 LOC61;
Ropeobj178006* LOC62;
LOC58 = (NI)0;
LOC58 = sonslen_295351_850551059(ri0);
if (!(((NI) 1) < LOC58)) goto LA59;
memset((void*)LOC61, 0, sizeof(LOC61));
LOC62 = (Ropeobj178006*)0;
LOC62 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC61, 0);
add_178482_2381377266(&pl0, LOC62);
}
LA59: ;
{
TY533289 LOC71;
Ropeobj178006* LOC72;
Ropeobj178006* LOC73;
TY533289 LOC74;
Ropeobj178006* LOC75;
/* d0 is already an addressable loc kind (bitmask 3): write directly. */
if (!((3 &(1U<<((NU)((*d0).k)&15U)))!=0)) goto LA65;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA69;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
LA69: ;
memset((void*)LOC71, 0, sizeof(LOC71));
LOC72 = (Ropeobj178006*)0;
LOC72 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_509), LOC71, 0);
add_178482_2381377266(&pl0, LOC72);
LOC73 = (Ropeobj178006*)0;
LOC73 = addrloc_538204_839829468((&(*d0)));
add_178482_2381377266(&pl0, LOC73);
memset((void*)LOC74, 0, sizeof(LOC74));
LOC75 = (Ropeobj178006*)0;
LOC75 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC74, 0);
add_178482_2381377266(&pl0, LOC75);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
goto LA63;
LA65: ;
{
/* Otherwise go through a temporary and assign into d0 afterwards. */
Tloc292816 tmp0;
Ropeobj178006* LOC77;
TY533289 LOC78;
Ropeobj178006* LOC79;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC77 = (Ropeobj178006*)0;
LOC77 = addrloc_538204_839829468((&tmp0));
add_178482_2381377266(&pl0, LOC77);
memset((void*)LOC78, 0, sizeof(LOC78));
LOC79 = (Ropeobj178006*)0;
LOC79 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC78, 0);
add_178482_2381377266(&pl0, LOC79);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA63: ;
}
goto LA51;
LA54: ;
{
/* Plain value return: close with T839829468_511 and assign the whole
 * expression via a kind-9 loc. */
TY533289 LOC81;
Ropeobj178006* LOC82;
Tloc292816 list0;
memset((void*)LOC81, 0, sizeof(LOC81));
LOC82 = (Ropeobj178006*)0;
LOC82 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_511), LOC81, 0);
add_178482_2381377266(&pl0, LOC82);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA85;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA85: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), NIM_NIL, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0);
}
LA51: ;
}
goto LA47;
LA49: ;
{
/* Void return: terminate with T839829468_510 and emit the statement. */
TY533289 LOC88;
Ropeobj178006* LOC89;
memset((void*)LOC88, 0, sizeof(LOC88));
LOC89 = (Ropeobj178006*)0;
LOC89 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC88, 0);
add_178482_2381377266(&pl0, LOC89);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA47: ;
}
/* Generates an ordinary prefix call f(a, b, ...): evaluates the callee into
 * op0, builds a comma-separated params0 rope from all arguments (skipping
 * compile-time-only formals), then hands everything to fixupcall.
 * NOTE(review): restored "&params0" in the four add_... calls -- the text
 * had been corrupted to the pilcrow mojibake "(paragraph-sign)ms0" by
 * HTML-entity mangling of the "&para" prefix of "&params0"; params0 is the
 * only matching declared identifier and is what fixupcall receives below. */
N_NIMCALL(void, genprefixcall_539960_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* params0;
Ttype292840* typ0;
NI length0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
params0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
{
NI i_540213_839829468;
NI HEX3Atmp_540445_839829468;
NI res_540448_839829468;
i_540213_839829468 = (NI)0;
HEX3Atmp_540445_839829468 = (NI)0;
HEX3Atmp_540445_839829468 = (NI)(length0 - ((NI) 1));
res_540448_839829468 = ((NI) 1);
{
while (1) {
if (!(res_540448_839829468 <= HEX3Atmp_540445_839829468)) goto LA3;
i_540213_839829468 = res_540448_839829468;
{
NI LOC6;
Tnode292802* paramtype0;
LOC6 = (NI)0;
LOC6 = sonslen_295327_850551059(typ0);
if (!(i_540213_839829468 < LOC6)) goto LA7;
/* A formal exists for this position. */
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_540213_839829468];
{
NIM_BOOL LOC11;
Ropeobj178006* LOC20;
LOC11 = (NIM_BOOL)0;
LOC11 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!!(LOC11)) goto LA12;
{
TY533289 LOC18;
Ropeobj178006* LOC19;
/* Prepend separator T839829468_110 (presumably ", ") when params0
 * already has content. */
if (!!((params0 == NIM_NIL))) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (Ropeobj178006*)0;
LOC19 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0);
add_178482_2381377266(&params0, LOC19);
}
LA16: ;
LOC20 = (Ropeobj178006*)0;
LOC20 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_540213_839829468], (*paramtype0).kindU.S4.sym, ri0);
add_178482_2381377266(&params0, LOC20);
}
LA12: ;
}
goto LA4;
LA7: ;
{
/* Extra argument past the formal list (varargs): no formal symbol. */
Ropeobj178006* LOC28;
{
TY533289 LOC26;
Ropeobj178006* LOC27;
if (!!((params0 == NIM_NIL))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC27 = (Ropeobj178006*)0;
LOC27 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0);
add_178482_2381377266(&params0, LOC27);
}
LA24: ;
LOC28 = (Ropeobj178006*)0;
LOC28 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i_540213_839829468]);
add_178482_2381377266(&params0, LOC28);
}
LA4: ;
res_540448_839829468 += ((NI) 1);
} LA3: ;
}
}
fixupcall_539410_839829468(p0, le0, ri0, d0, op0.r, params0);
}
static N_INLINE(void, poststmtactions_532942_839829468)(Tcproc529021* p0) {
/* Append the module-level "inject statement" rope to this proc's
   statement section (Tcprocsection value 2 — presumably the body/stmts
   section; confirm against the enum in the generating compiler).
   Behavior-identical restyle: the section pointer is initialized at its
   declaration instead of the generated zero-then-assign pair. */
Ropeobj178006** stmtSection = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(stmtSection, (*(*p0).module).injectstmt);
}
N_NIMCALL(void, gencall_543632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
/* Dispatch code generation for a call node `e0` into destination loc `d0`.
   Machine-generated (Nim C backend) if/elif ladder encoded with gotos:
     1. callee type (after skipTypes with mask 2048) has calling
        convention 8                      -> genclosurecall
     2. callee node is a symbol (node kind 3) whose sym flags have
        bit 27 set                        -> geninfixcall
     3. callee node is a symbol whose sym flags have bit 28 set
                                          -> gennamedparamcall
     4. otherwise                         -> genprefixcall
   NOTE(review): the meanings of calling convention 8 and flag bits
   27/28 are inferred from structure only; confirm against the Nim
   compiler sources that produced this file.
   Always runs poststmtactions afterwards. */
{
Ttype292840* LOC3;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, 2048);
if (!((*LOC3).callconv == ((Tcallingconvention292002) 8))) goto LA4;
genclosurecall_540452_839829468(p0, NIM_NIL, e0, d0);
}
goto LA1;
LA4: ;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
/* short-circuit AND: kind check guards the sym-flag check */
LOC7 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC7)) goto LA8;
LOC7 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA8: ;
if (!LOC7) goto LA9;
geninfixcall_541929_839829468(p0, NIM_NIL, e0, d0);
}
goto LA1;
LA9: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC12)) goto LA13;
LOC12 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0);
LA13: ;
if (!LOC12) goto LA14;
gennamedparamcall_542616_839829468(p0, e0, d0);
}
goto LA1;
LA14: ;
{
/* default: ordinary prefix-style call */
genprefixcall_539960_839829468(p0, NIM_NIL, e0, d0);
}
LA1: ;
poststmtactions_532942_839829468(p0);
}
N_NIMCALL(void, genreset_554731_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* Emit the runtime "reset" call for the location denoted by son 1 of
   `n0`: the generated line (template T839829468_496) receives the
   address of the location and the type info of its skipped type.
   Behavior-identical restyle of the generated code: locals renamed,
   independent statements regrouped (zero-inits first, then the type
   skip, then the two format arguments in order). */
Tloc292816 target0;
TY532811 fmtargs0;
Ttype292840* skipped0;
memset((void*)(&target0), 0, sizeof(target0));
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&target0));
skipped0 = skiptypes_296099_850551059(target0.t, IL64(211106242013440));
fmtargs0[0] = addrloc_538204_839829468((&target0));
fmtargs0[1] = gentypeinfo_535941_839829468((*p0).module, skipped0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_496), fmtargs0, 2);
}
N_NIMCALL(void, genecho_554369_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* Generate code for an `echo` call. `n0` must be a node of kind 41
   (presumably nkBracket — the collected arguments); otherwise an
   internal error is raised. For each argument a format placeholder is
   accumulated into `args0`; a format string is then built by repeating
   template T839829468_517 once per argument and appending the
   platform newline (`tnl`), and two output lines are emitted
   (the print itself, then template T839829468_518 — presumably a
   flush; confirm against the compiler's string table). */
NIM_BOOL LOC6;
Ropeobj178006* args0;
Tloc292816 a0;
TY532811 LOC18;
NimStringDesc* LOC19;
NI LOC20;
NimStringDesc* LOC21;
TY533289 LOC22;
{
/* sanity check: argument node must have the expected kind */
NimStringDesc* LOC5;
if (!!(((*n0).kind == ((Tnodekind292020) 41)))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_512);
internalerror_196113_155036129(LOC5);
}
LA3: ;
/* pull in the required header file (T839829468_513) for the module;
   the include-added? result LOC6 is intentionally unused */
LOC6 = (NIM_BOOL)0;
LOC6 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_513));
args0 = NIM_NIL;
memset((void*)(&a0), 0, sizeof(a0));
{
/* for i in 0 .. len(n0)-1: append one argument to `args0` */
NI i_554404_839829468;
NI HEX3Atmp_554431_839829468;
NI LOC8;
NI res_554434_839829468;
i_554404_839829468 = (NI)0;
HEX3Atmp_554431_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = len_293081_850551059(n0);
HEX3Atmp_554431_839829468 = (NI)(LOC8 - ((NI) 1));
res_554434_839829468 = ((NI) 0);
{
while (1) {
if (!(res_554434_839829468 <= HEX3Atmp_554431_839829468)) goto LA10;
i_554404_839829468 = res_554434_839829468;
{
/* conversion-skipped node of kind 23 gets the literal placeholder
   T839829468_514 (presumably the nil-literal case — confirm) */
Tnode292802* LOC13;
LOC13 = (Tnode292802*)0;
LOC13 = skipconv_328882_3876443242((*n0).kindU.S6.sons->data[i_554404_839829468]);
if (!((*LOC13).kind == ((Tnodekind292020) 23))) goto LA14;
add_178487_2381377266(&args0, ((NimStringDesc*) &T839829468_514));
}
goto LA11;
LA14: ;
{
/* ordinary argument: evaluate into `a0` and splice its rdLoc
   through format template T839829468_515 */
TY178507 LOC17;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[i_554404_839829468], (&a0));
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468((&a0));
addf_179205_2381377266(&args0, ((NimStringDesc*) &T839829468_515), LOC17, 1);
}
LA11: ;
res_554434_839829468 += ((NI) 1);
} LA10: ;
}
}
/* build the C format string: T839829468_517 repeated len(n0) times,
   followed by the target newline sequence */
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (NimStringDesc*)0;
LOC20 = (NI)0;
LOC20 = len_293081_850551059(n0);
LOC21 = (NimStringDesc*)0;
LOC21 = nsuRepeatStr(((NimStringDesc*) &T839829468_517), ((NI) (LOC20)));
LOC19 = rawNewString(LOC21->Sup.len + tnl_176644_4151366050->Sup.len + 0);
appendString(LOC19, LOC21);
appendString(LOC19, tnl_176644_4151366050);
LOC18[0] = makecstring_191638_155036129(LOC19);
LOC18[1] = args0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_516), LOC18, 2);
memset((void*)LOC22, 0, sizeof(LOC22));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_518), LOC22, 0);
}
N_NIMCALL(void, genseqconstr_555004_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
/* Generate a sequence constructor: allocate a new seq of sonslen(t0)
   elements into `d0` (creating a temporary if `d0` is an empty loc,
   k == 0), then emit an assignment of each son into the indexed
   element location. Finally records GC usage for the node. */
Tloc292816 arr0;
NI LOC5;
Ropeobj178006* LOC6;
memset((void*)(&arr0), 0, sizeof(arr0));
{
/* no destination yet -> materialize a temporary of the seq type */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA3: ;
LOC5 = (NI)0;
LOC5 = sonslen_295351_850551059(t0);
LOC6 = (Ropeobj178006*)0;
LOC6 = intliteral_539270_839829468(((NI64) (LOC5)));
gennewseqaux_554795_839829468(p0, (&(*d0)), LOC6);
{
/* for i in 0 .. sonslen(t0)-1: evaluate son i into d[i] */
NI i_555031_839829468;
NI HEX3Atmp_555039_839829468;
NI LOC8;
NI res_555042_839829468;
i_555031_839829468 = (NI)0;
HEX3Atmp_555039_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = sonslen_295351_850551059(t0);
HEX3Atmp_555039_839829468 = (NI)(LOC8 - ((NI) 1));
res_555042_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC11;
Ttype292840* LOC12;
TY532811 LOC13;
if (!(res_555042_839829468 <= HEX3Atmp_555039_839829468)) goto LA10;
i_555031_839829468 = res_555042_839829468;
/* element loc: kind 6 (indexable deref?), element type of the seq,
   storage 3; its rope is built from template T839829468_187 with
   (rdLoc(d), i) — presumably "d->data[i]" */
LOC11 = (Ttype292840*)0;
LOC11 = skiptypes_296099_850551059((*t0).typ, IL64(211106232576256));
LOC12 = (Ttype292840*)0;
LOC12 = elemtype_320394_3876443242(LOC11);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC12, ((Tstorageloc292812) 3));
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = rdloc_538188_839829468((&(*d0)));
LOC13[1] = intliteral_539270_839829468(((NI64) (i_555031_839829468)));
arr0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC13, 2);
arr0.s = ((Tstorageloc292812) 3);
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[i_555031_839829468], (&arr0));
res_555042_839829468 += ((NI) 1);
} LA10: ;
}
}
gcusage_554439_839829468(t0);
}
N_NIMCALL(void, genarrtoseq_555046_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
/* Generate an array-to-seq conversion.
   Fast path: if `t0` is itself a node of kind 41 (presumably a bracket
   literal), retype son 1 to the seq type and delegate to
   genseqconstr. Otherwise: allocate a new seq of the array's ordinal
   length, evaluate the array expression once into `a0`, then copy each
   element with genassignment from a[i] into d[i]. */
Tloc292816 elem0;
Tloc292816 a0;
Tloc292816 arr0;
NI L0;
NI64 LOC9;
Ropeobj178006* LOC10;
{ memset((void*)(&elem0), 0, sizeof(elem0));
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&arr0), 0, sizeof(arr0));
{
/* literal source: reuse the seq-constructor path and return early */
if (!((*t0).kind == ((Tnodekind292020) 41))) goto LA3;
asgnRefNoCycle((void**) (&(*(*t0).kindU.S6.sons->data[((NI) 1)]).typ), (*t0).typ);
genseqconstr_555004_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], d0);
goto BeforeRet;
}
LA3: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA7;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA7: ;
/* L0 = ordinal length of the source array type */
LOC9 = (NI64)0;
LOC9 = lengthord_320007_3876443242((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ);
L0 = ((NI) (LOC9));
LOC10 = (Ropeobj178006*)0;
LOC10 = intliteral_539270_839829468(((NI64) (L0)));
gennewseqaux_554795_839829468(p0, (&(*d0)), LOC10);
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], (&a0));
{
/* for i in 0 .. L0-1: genassignment(d[i], a[i]) */
NI i_555090_839829468;
NI HEX3Atmp_555104_839829468;
NI res_555107_839829468;
i_555090_839829468 = (NI)0;
HEX3Atmp_555104_839829468 = (NI)0;
HEX3Atmp_555104_839829468 = (NI)(L0 - ((NI) 1));
res_555107_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC14;
Ttype292840* LOC15;
TY532811 LOC16;
Ttype292840* LOC17;
Ttype292840* LOC18;
TY532811 LOC19;
if (!(res_555107_839829468 <= HEX3Atmp_555104_839829468)) goto LA13;
i_555090_839829468 = res_555107_839829468;
/* destination element loc d[i] (seq indexing, template T..._187) */
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*t0).typ, IL64(211106232576256));
LOC15 = (Ttype292840*)0;
LOC15 = elemtype_320394_3876443242(LOC14);
initloc_532273_839829468((&elem0), ((Tlockind292808) 6), LOC15, ((Tstorageloc292812) 3));
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&(*d0)));
LOC16[1] = intliteral_539270_839829468(((NI64) (i_555090_839829468)));
elem0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC16, 2);
elem0.s = ((Tstorageloc292812) 3);
/* source element loc a[i] (array indexing, template T..._138);
   inherits the source's storage class a0.s */
LOC17 = (Ttype292840*)0;
LOC17 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106232576256));
LOC18 = (Ttype292840*)0;
LOC18 = elemtype_320394_3876443242(LOC17);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC18, a0.s);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((&a0));
LOC19[1] = intliteral_539270_839829468(((NI64) (i_555090_839829468)));
arr0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC19, 2);
genassignment_539264_839829468(p0, (&elem0), (&arr0), 3);
res_555107_839829468 += ((NI) 1);
} LA13: ;
}
}
}BeforeRet: ;
}
N_NIMCALL(void, gendeepcopy_550374_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0) {
/* Generate a deepCopy(dest, src) for the (skipped) destination type.
   Dispatches on the type kind to one of several runtime templates:
     - one group passes addr(dest), addr(src), typeinfo (T..._519)
     - one group passes addr(dest), rdLoc(src), typeinfo (T..._520)
     - one group passes addr(dest), addr(src), typeinfo (T..._521)
     - kind 19: if its mapped C type is 17 -> raw memcpy of getSize
       bytes (T..._268, pulls in <string.h>), else plain assignment
     - scalar-like kinds: plain C assignment (template T..._123)
     - anything else: internal error with the enum's repr.
   NOTE(review): the numeric Ttypekind values are opaque here; the
   grouping above is read off the generated switch only — confirm the
   kind names against the Nim compiler enum. */
Ttype292840* ty0;
ty0 = skiptypes_296099_850551059((*dest0).t, IL64(211106242013440));
switch ((*ty0).kind) {
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 25):
case ((Ttypekind292244) 18):
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
TY535238 LOC2;
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = addrloc_538204_839829468(dest0);
LOC2[1] = addrloc_538204_839829468(src0);
LOC2[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_519), LOC2, 3);
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 28):
{
/* source is passed by value (rdloc) here, unlike the group above */
TY535238 LOC4;
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = addrloc_538204_839829468(dest0);
LOC4[1] = rdloc_538188_839829468(src0);
LOC4[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_520), LOC4, 3);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
TY535238 LOC6;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = addrloc_538204_839829468(dest0);
LOC6[1] = addrloc_538204_839829468(src0);
LOC6[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_521), LOC6, 3);
}
break;
case ((Ttypekind292244) 19):
{
{
/* mapped-C-type 17 -> byte-wise copy via memcpy-style template */
Tctypekind529007 LOC10;
TY535238 LOC13;
NI64 LOC14;
LOC10 = (Tctypekind529007)0;
LOC10 = maptype_533394_839829468(ty0);
if (!(LOC10 == ((Tctypekind529007) 17))) goto LA11;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = rdloc_538188_839829468(dest0);
LOC13[1] = rdloc_538188_839829468(src0);
LOC14 = (NI64)0;
LOC14 = getsize_320135_3876443242((*dest0).t);
LOC13[2] = rope_178401_2381377266(LOC14);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_268), LOC13, 3);
}
goto LA8;
LA11: ;
{
/* otherwise a plain C assignment suffices */
TY532811 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468(dest0);
LOC16[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC16, 2);
}
LA8: ;
}
break;
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 23):
{
/* scalar-like types: deep copy degenerates to assignment */
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(dest0);
LOC18[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC18, 2);
}
break;
default:
{
/* unsupported type kind: internal compiler error with repr */
NimStringDesc* LOC20;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC20, ((NimStringDesc*) &T839829468_522));
appendString(LOC20, reprEnum((NI)(*ty0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC20);
}
break;
}
}
N_NIMCALL(void, genmagicexpr_557033_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
/* Generate C code for a "magic" (compiler-intrinsic) call `e0` with
   magic id `op0`, result into `d0`. One large switch dispatching to
   the specialized generators elsewhere in this file. The numeric
   Tmagic values are opaque in this generated output; the branch
   descriptions below are inferred from the called helpers only —
   confirm against the Nim compiler's magic enum. Unknown magics fall
   through to an internal error carrying the enum's repr. */
switch (op0) {
case ((Tmagic292524) 127):
case ((Tmagic292524) 126):
{
/* short-circuit and/or */
genandor_554311_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 99) ... ((Tmagic292524) 117):
{
unaryarith_552646_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 96) ... ((Tmagic292524) 98):
{
/* unary arithmetic with overflow checking */
unaryarithoverflow_551633_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 52) ... ((Tmagic292524) 55):
{
binaryfloatarith_556729_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 56) ... ((Tmagic292524) 93):
{
binaryarith_551819_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 95):
{
geneqproc_552214_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 45) ... ((Tmagic292524) 51):
{
/* binary arithmetic with overflow checking */
binaryarithoverflow_551262_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 149):
{
genrepr_555339_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 259):
{
gengettypeinfo_555383_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 156):
{
genswap_555638_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 25):
{
{
/* expression template depends on proc option bit 5 (presumably a
   checks-enabled flag — confirm) */
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0))) goto LA14;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_385));
}
goto LA12;
LA14: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_386));
}
LA12: ;
}
break;
case ((Tmagic292524) 26):
case ((Tmagic292524) 27):
{
/* inc/dec-like ops: plain statement when checks are off or the
   underlying type is in the bit-mask set; otherwise an
   overflow-checked helper with a cast back to the ranged type. */
Ttype292840* underlying0;
underlying0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 9439232);
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = !((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0));
if (LOC20) goto LA21;
LOC20 = ((IL64(34084860461056) &((NU64)1<<((NU)((*underlying0).kind)&63U)))!=0);
LA21: ;
if (!LOC20) goto LA22;
binarystmt_550501_839829468(p0, e0, d0, opr_557050_839829468[(op0)- 26]);
}
goto LA18;
LA22: ;
{
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ranged0;
Ropeobj178006* res0;
NimStringDesc* LOC25;
TY532811 LOC31;
Ropeobj178006* LOC32;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
ranged0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 8390656);
LOC25 = (NimStringDesc*)0;
{
/* 64-bit underlying type (kind 35) selects the 64-bit helper table */
if (!((*underlying0).kind == ((Ttypekind292244) 35))) goto LA28;
LOC25 = copyString(fun64_557055_839829468[(op0)- 26]);
}
goto LA26;
LA28: ;
{
LOC25 = copyString(fun_557060_839829468[(op0)- 26]);
}
LA26: ;
res0 = binaryarithoverflowraw_551235_839829468(p0, ranged0, (&a0), (&b0), LOC25);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = gettypedesc_535673_839829468((*p0).module, ranged0);
LOC31[1] = res0;
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_370), LOC31, 2);
/* note: result is stored back into `a0` (the first operand's loc) */
putintodest_550468_839829468(p0, (&a0), ranged0, LOC32, ((Tstorageloc292812) 0));
}
LA18: ;
}
break;
case ((Tmagic292524) 138):
{
genstrconcat_554452_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 144):
{
binarystmt_550501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_394));
}
break;
case ((Tmagic292524) 145):
{
genstrappend_554554_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 146):
{
genseqelemappend_554683_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 128):
{
genstrequals_556667_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 129):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_402));
}
break;
case ((Tmagic292524) 130):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_403));
}
break;
case ((Tmagic292524) 157):
{
genisnil_552620_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 120):
{
/* $-style stringify family: same generator, different templates */
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_406));
}
break;
case ((Tmagic292524) 121):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_407));
}
break;
case ((Tmagic292524) 119):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_408));
}
break;
case ((Tmagic292524) 118):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_409));
}
break;
case ((Tmagic292524) 122):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_410));
}
break;
case ((Tmagic292524) 123):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_411));
}
break;
case ((Tmagic292524) 124):
{
/* identity-like magic: just evaluate the argument into the dest */
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Tmagic292524) 125):
{
genrepr_555339_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 12):
{
genof_555331_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 29):
{
gennew_554782_839829468(p0, e0);
}
break;
case ((Tmagic292524) 30):
{
gennewfinalize_555111_839829468(p0, e0);
}
break;
case ((Tmagic292524) 31):
{
gennewseq_554824_839829468(p0, e0);
}
break;
case ((Tmagic292524) 32):
{
gennewseqofcap_554836_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 9):
{
/* sizeof-like magic: expands to template T..._428 applied to the
   type descriptor of the argument's (skipped) type */
Ttype292840* t0;
TY178507 LOC55;
Ropeobj178006* LOC56;
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 256);
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC56 = (Ropeobj178006*)0;
LOC56 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_428), LOC55, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC56, ((Tstorageloc292812) 0));
}
break;
case ((Tmagic292524) 42):
{
gensomecast_556481_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 28):
{
genord_556475_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 35):
case ((Tmagic292524) 8):
case ((Tmagic292524) 34):
case ((Tmagic292524) 36):
case ((Tmagic292524) 33):
{
genarraylen_555415_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 37):
case ((Tmagic292524) 38):
{
{
/* template choice depends on the compile command (gcmd == 2) or a
   flag bit on the owning module's symbol */
NIM_BOOL LOC63;
LOC63 = (NIM_BOOL)0;
LOC63 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC63) goto LA64;
LOC63 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA64: ;
if (!!(LOC63)) goto LA65;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_440));
}
goto LA61;
LA65: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_441));
}
LA61: ;
}
break;
case ((Tmagic292524) 43):
{
unarystmt_550527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_443));
}
break;
case ((Tmagic292524) 44):
{
unarystmt_550527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_444));
}
break;
case ((Tmagic292524) 151):
{
gensetlengthstr_555632_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 152):
{
gensetlengthseq_555500_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 39):
case ((Tmagic292524) 40):
case ((Tmagic292524) 41):
case ((Tmagic292524) 133):
case ((Tmagic292524) 132):
case ((Tmagic292524) 131):
case ((Tmagic292524) 134):
case ((Tmagic292524) 135):
case ((Tmagic292524) 136):
case ((Tmagic292524) 148):
{
/* set operations (incl/excl/card/union/... — confirm exact list) */
gensetop_556419_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 161):
case ((Tmagic292524) 162):
case ((Tmagic292524) 159):
case ((Tmagic292524) 160):
case ((Tmagic292524) 150):
case ((Tmagic292524) 163):
{
/* runtime-backed magics: ensure the runtime symbol is pulled into
   the module (cgsym) unless the op's loc has flag bit 3, then emit
   a normal call */
Tsym292834* opr0;
opr0 = (*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NimStringDesc* LOC78;
Ropeobj178006* LOC79;
if (!!((((*opr0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0))) goto LA76;
LOC78 = (NimStringDesc*)0;
LOC78 = HEX24_178856_2381377266((*opr0).loc.r);
LOC79 = (Ropeobj178006*)0;
LOC79 = cgsym_532403_839829468((*p0).module, LOC78);
}
LA76: ;
gencall_543632_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 164):
{
genreset_554731_839829468(p0, e0);
}
break;
case ((Tmagic292524) 17):
{
/* echo: unwrap the (conversion-skipped) bracket argument */
Tnode292802* LOC82;
Tnode292802* LOC83;
LOC82 = (Tnode292802*)0;
LOC82 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
LOC83 = (Tnode292802*)0;
LOC83 = skipconv_328882_3876443242(LOC82);
genecho_554369_839829468(p0, LOC83);
}
break;
case ((Tmagic292524) 158):
{
genarrtoseq_555046_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 223) ... ((Tmagic292524) 257):
case ((Tmagic292524) 19) ... ((Tmagic292524) 24):
{
/* compile-time-only magics reaching codegen: report a user error
   naming the offending symbol */
localerror_196080_155036129((*e0).info, ((Tmsgkind191002) 229), (*(*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s);
}
break;
case ((Tmagic292524) 208):
{
/* spawn: wrap the call in a proc and generate that expression */
Tnode292802* n0;
n0 = wrapprocforspawn_435501_2218250499((*(*p0).module).module, e0, (*e0).typ, NIM_NIL, NIM_NIL);
expr_539248_839829468(p0, n0, d0);
}
break;
case ((Tmagic292524) 155):
{
/* parallel: lower via liftparallel, then generate the result */
Tnode292802* n0;
n0 = liftparallel_478822_1773027539((*(*p0).module).module, e0);
expr_539248_839829468(p0, n0, d0);
}
break;
case ((Tmagic292524) 209):
{
/* deepCopy(x, y): if argument 1 is a node of kind 63/64 (presumably
   a hidden deref/addr wrapper — confirm), unwrap to its son 0 first */
Tloc292816 a0;
Tloc292816 b0;
Tnode292802* x0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
{
Tnode292802* LOC91;
Tnode292802* LOC94;
LOC91 = (Tnode292802*)0;
LOC91 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
if (!((*LOC91).kind == ((Tnodekind292020) 63) || (*LOC91).kind == ((Tnodekind292020) 64))) goto LA92;
LOC94 = (Tnode292802*)0;
LOC94 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
x0 = HEX5BHEX5D_293238_850551059(LOC94, ((NI) 0));
}
goto LA89;
LA92: ;
{
x0 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
}
LA89: ;
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
gendeepcopy_550374_839829468(p0, (&a0), (&b0));
}
break;
case ((Tmagic292524) 140):
case ((Tmagic292524) 94):
{
gencall_543632_839829468(p0, e0, d0);
}
break;
default:
{
/* unknown magic id: internal compiler error with its repr */
NimStringDesc* LOC98;
LOC98 = (NimStringDesc*)0;
LOC98 = rawNewString(reprEnum((NI)op0, (&NTI292524))->Sup.len + 14);
appendString(LOC98, ((NimStringDesc*) &T839829468_523));
appendString(LOC98, reprEnum((NI)op0, (&NTI292524)));
internalerror_196100_155036129((*e0).info, LOC98);
}
break;
}
}
N_NIMCALL(Ropeobj178006*, gensetnode_549664_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* Produce the rope for a constant set literal `n0`.
   The node is first lowered to a bitset. For sets larger than 8 bytes
   the data is emitted (once, keyed via the module's datacache/labels)
   as a named constant in file section 8 and the constant's name rope
   is returned; small sets return the raw set data literal directly. */
Ropeobj178006* result0;
Tbitset339004* cs0;
NI size0;
NI64 LOC1;
result0 = (Ropeobj178006*)0;
cs0 = (Tbitset339004*)0;
LOC1 = (NI64)0;
LOC1 = getsize_320135_3876443242((*n0).typ);
size0 = ((NI) (LOC1));
tobitset_340001_452470228(n0, (&cs0));
{
NI id0;
Ropeobj178006* LOC6;
/* big set (> 8 bytes): use/allocate a cached named constant */
if (!(((NI) 8) < size0)) goto LA4;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC6 = (Ropeobj178006*)0;
LOC6 = rope_178401_2381377266(((NI64) (id0)));
result0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC6);
{
TY535238 LOC11;
/* id == current label count means the cache entry is new: bump the
   counter and emit the constant's definition exactly once */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA9;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535673_839829468((*p0).module, (*n0).typ);
LOC11[1] = result0;
LOC11[2] = genrawsetdata_549629_839829468(cs0, size0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC11, 3);
}
LA9: ;
}
goto LA2;
LA4: ;
{
/* small set: inline data literal */
result0 = genrawsetdata_549629_839829468(cs0, size0);
}
LA2: ;
return result0;
}
N_NIMCALL(void, gensetconstr_557496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
/* Generate a set constructor `e0` into `d0`.
   Fully-constant sets (node flag bit 4 — presumably nfAllConst;
   confirm) use the precomputed data from gensetnode. Otherwise a
   temporary is created if needed and the set is built element by
   element; node kind 44 sons are ranges (loop from lower to upper
   bound via a temp index), everything else is a single element.
   Two code shapes exist: big sets (> 8 bytes) use array-of-bytes
   templates (T..._525/526/527), small sets use shift/mask arithmetic
   with a "(NU<bits>)" cast string `ts0` spliced into the format. */
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 idx0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&idx0), 0, sizeof(idx0));
{
Ropeobj178006* LOC5;
if (!(((*e0).flags &(1U<<((NU)(((Tnodeflag292427) 4))&15U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = gensetnode_549664_839829468(p0, e0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC5, ((Tstorageloc292812) 0));
}
goto LA1;
LA3: ;
{
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA9;
gettemp_537032_839829468(p0, (*e0).typ, d0, NIM_FALSE);
}
LA9: ;
{
/* big-set path: zero the destination (template T..._525, needs
   <string.h>), then set bits per element/range */
NI64 LOC13;
TY178507 LOC16;
LOC13 = (NI64)0;
LOC13 = getsize_320135_3876443242((*e0).typ);
if (!(IL64(8) < LOC13)) goto LA14;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&(*d0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_525), LOC16, 1);
{
NI i_557537_839829468;
NI HEX3Atmp_557603_839829468;
NI LOC18;
NI res_557606_839829468;
i_557537_839829468 = (NI)0;
HEX3Atmp_557603_839829468 = (NI)0;
LOC18 = (NI)0;
LOC18 = sonslen_295351_850551059(e0);
HEX3Atmp_557603_839829468 = (NI)(LOC18 - ((NI) 1));
res_557606_839829468 = ((NI) 0);
{
while (1) {
if (!(res_557606_839829468 <= HEX3Atmp_557603_839829468)) goto LA20;
i_557537_839829468 = res_557606_839829468;
{
/* range son (kind 44): emit a for-loop over [a, b] using a fresh
   temp index of system type 31 (presumably int — confirm) */
Ttype292840* LOC25;
TY535235 LOC26;
if (!((*(*e0).kindU.S6.sons->data[i_557537_839829468]).kind == ((Tnodekind292020) 44))) goto LA23;
LOC25 = (Ttype292840*)0;
LOC25 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC25, (&idx0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557537_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557537_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0));
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rdloc_538188_839829468((&idx0));
LOC26[1] = rdloc_538188_839829468((&(*d0)));
LOC26[2] = rdsetelemloc_555662_839829468((&a0), (*e0).typ);
LOC26[3] = rdsetelemloc_555662_839829468((&b0), (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_526), LOC26, 4);
}
goto LA21;
LA23: ;
{
/* single element: one bit-set line */
TY532811 LOC28;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[i_557537_839829468], (&a0));
memset((void*)LOC28, 0, sizeof(LOC28));
LOC28[0] = rdloc_538188_839829468((&(*d0)));
LOC28[1] = rdsetelemloc_555662_839829468((&a0), (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_527), LOC28, 2);
}
LA21: ;
res_557606_839829468 += ((NI) 1);
} LA20: ;
}
}
}
goto LA11;
LA14: ;
{
/* small-set path: build the unsigned cast string
   ts0 = T..._45 & (size-in-bits), e.g. "(NU8)"-style — confirm */
NimStringDesc* ts0;
NimStringDesc* LOC30;
NI64 LOC31;
NimStringDesc* LOC32;
TY178507 LOC33;
LOC30 = (NimStringDesc*)0;
LOC31 = (NI64)0;
LOC31 = getsize_320135_3876443242((*e0).typ);
LOC32 = (NimStringDesc*)0;
LOC32 = nimInt64ToStr((NI64)(LOC31 * IL64(8)));
LOC30 = rawNewString(LOC32->Sup.len + 2);
appendString(LOC30, ((NimStringDesc*) &T839829468_45));
appendString(LOC30, LOC32);
ts0 = LOC30;
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rdloc_538188_839829468((&(*d0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_494), LOC33, 1);
{
NI i_557575_839829468;
NI HEX3Atmp_557611_839829468;
NI LOC35;
NI res_557614_839829468;
i_557575_839829468 = (NI)0;
HEX3Atmp_557611_839829468 = (NI)0;
LOC35 = (NI)0;
LOC35 = sonslen_295351_850551059(e0);
HEX3Atmp_557611_839829468 = (NI)(LOC35 - ((NI) 1));
res_557614_839829468 = ((NI) 0);
{
while (1) {
if (!(res_557614_839829468 <= HEX3Atmp_557611_839829468)) goto LA37;
i_557575_839829468 = res_557614_839829468;
{
/* range son: format string is assembled at compile time from the
   fixed fragments with `ts0` spliced in twice */
Ttype292840* LOC42;
NimStringDesc* LOC43;
TY535235 LOC44;
if (!((*(*e0).kindU.S6.sons->data[i_557575_839829468]).kind == ((Tnodekind292020) 44))) goto LA40;
LOC42 = (Ttype292840*)0;
LOC42 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC42, (&idx0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557575_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557575_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0));
LOC43 = (NimStringDesc*)0;
LOC43 = rawNewString(ts0->Sup.len + ts0->Sup.len + 68);
appendString(LOC43, ((NimStringDesc*) &T839829468_528));
appendString(LOC43, ts0);
appendString(LOC43, ((NimStringDesc*) &T839829468_529));
appendString(LOC43, ts0);
appendString(LOC43, ((NimStringDesc*) &T839829468_454));
memset((void*)LOC44, 0, sizeof(LOC44));
LOC44[0] = rdloc_538188_839829468((&idx0));
LOC44[1] = rdloc_538188_839829468((&(*d0)));
LOC44[2] = rdsetelemloc_555662_839829468((&a0), (*e0).typ);
LOC44[3] = rdsetelemloc_555662_839829468((&b0), (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC43, LOC44, 4);
}
goto LA38;
LA40: ;
{
/* single element: one shift/or line with `ts0` spliced in */
NimStringDesc* LOC46;
TY532811 LOC47;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[i_557575_839829468], (&a0));
LOC46 = (NimStringDesc*)0;
LOC46 = rawNewString(ts0->Sup.len + ts0->Sup.len + 36);
appendString(LOC46, ((NimStringDesc*) &T839829468_530));
appendString(LOC46, ts0);
appendString(LOC46, ((NimStringDesc*) &T839829468_531));
appendString(LOC46, ts0);
appendString(LOC46, ((NimStringDesc*) &T839829468_454));
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = rdloc_538188_839829468((&(*d0)));
LOC47[1] = rdsetelemloc_555662_839829468((&a0), (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC46, LOC47, 2);
}
LA38: ;
res_557614_839829468 += ((NI) 1);
} LA37: ;
}
}
}
LA11: ;
}
LA1: ;
}
N_NIMCALL(void, exprcomplexconst_558684_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
/* Emit a complex constant expression `n0` as (cached) static data and
   bind the destination `d0` to it. The constant is identified via the
   module's datacache; its definition is written to file section 8 at
   most once (guarded by the labels counter). If `d0` is still empty
   (k == 0) it is filled to point directly at the constant; otherwise
   the data is copied/assigned into the existing destination, and for
   type kinds outside the bit-mask 285212672 the destination's storage
   class is switched to 1 (meaning inferred from structure only —
   confirm against the generating compiler). */
Ttype292840* t0;
Ropeobj178006* LOC1;
NI id0;
Ropeobj178006* tmp0;
Ropeobj178006* LOC2;
t0 = getuniquetype_528640_2036603609((*n0).typ);
LOC1 = (Ropeobj178006*)0;
LOC1 = gettypedesc_535673_839829468((*p0).module, t0);
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC2 = (Ropeobj178006*)0;
LOC2 = rope_178401_2381377266(((NI64) (id0)));
tmp0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC2);
{
TY535238 LOC7;
/* first time we see this node: emit its data definition once */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA5;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC7[1] = tmp0;
LOC7[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC7, 3);
}
LA5: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA10;
fillloc_532282_839829468(d0, ((Tlockind292808) 8), t0, tmp0, ((Tstorageloc292812) 1));
}
goto LA8;
LA10: ;
{
putdataintodest_550436_839829468(p0, d0, t0, tmp0);
{
if (!!(((285212672 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0))) goto LA15;
(*d0).s = ((Tstorageloc292812) 1);
}
LA15: ;
}
LA8: ;
}
N_NIMCALL(NIM_BOOL, handleconstexpr_554853_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
/* Try to handle constructor node `n0` as a compile-time constant.
   Returns true (and fills `d0`) only when all of:
     - `d0` is still an empty loc (k == 0),
     - `n0` has more sons than the 0/1 threshold derived from its
       being node kind 38 (presumably an object-constructor header son
       offset — confirm),
     - isDeepConstExpr(n0) holds.
   On success, binds `d0` to a cached named constant (datacache +
   labels, same once-only emission scheme as exprcomplexconst) in file
   section 8. Returns false otherwise, leaving `d0` untouched. */
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
NI LOC6;
Ttype292840* t0;
Ropeobj178006* LOC10;
NI id0;
Ropeobj178006* LOC11;
Ropeobj178006* LOC12;
/* short-circuit chain: empty-dest AND son-count AND deep-const */
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = ((*d0).k == ((Tlockind292808) 0));
if (!(LOC4)) goto LA5;
LOC6 = (NI)0;
LOC6 = len_293081_850551059(n0);
LOC4 = (((NI) (((*n0).kind == ((Tnodekind292020) 38)))) < LOC6);
LA5: ;
LOC3 = LOC4;
if (!(LOC3)) goto LA7;
LOC3 = isdeepconstexpr_318566_2616423590(n0);
LA7: ;
if (!LOC3) goto LA8;
t0 = getuniquetype_528640_2036603609((*n0).typ);
LOC10 = (Ropeobj178006*)0;
LOC10 = gettypedesc_535673_839829468((*p0).module, t0);
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC11 = (Ropeobj178006*)0;
LOC11 = rope_178401_2381377266(((NI64) (id0)));
LOC12 = (Ropeobj178006*)0;
LOC12 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC11);
fillloc_532282_839829468(d0, ((Tlockind292808) 8), t0, LOC12, ((Tstorageloc292812) 1));
{
TY535238 LOC17;
/* new cache entry: emit the constant's definition exactly once */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA15;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC17[1] = (*d0).r;
LOC17[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3);
}
LA15: ;
result0 = NIM_TRUE;
}
goto LA1;
LA8: ;
{
result0 = NIM_FALSE;
}
LA1: ;
return result0;
}
N_NIMCALL(void, genarrayconstr_558207_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
/* Generate an array constructor `n0` into `d0`. If the whole node can
   be emitted as a compile-time constant (handleconstexpr returns
   true) nothing else is done. Otherwise a temporary is created if
   needed and each son is generated into the corresponding indexed
   element location (array-index template T839829468_138). */
Tloc292816 arr0;
memset((void*)(&arr0), 0, sizeof(arr0));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = handleconstexpr_554853_839829468(p0, n0, d0);
if (!!(LOC3)) goto LA4;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA8;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA8: ;
{
/* for i in 0 .. sonslen(n0)-1: evaluate son i into d[i] */
NI i_558234_839829468;
NI HEX3Atmp_558242_839829468;
NI LOC11;
NI res_558245_839829468;
i_558234_839829468 = (NI)0;
HEX3Atmp_558242_839829468 = (NI)0;
LOC11 = (NI)0;
LOC11 = sonslen_295351_850551059(n0);
HEX3Atmp_558242_839829468 = (NI)(LOC11 - ((NI) 1));
res_558245_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC14;
Ttype292840* LOC15;
TY532811 LOC16;
if (!(res_558245_839829468 <= HEX3Atmp_558242_839829468)) goto LA13;
i_558234_839829468 = res_558245_839829468;
/* element loc inherits the destination's storage class (*d0).s */
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC15 = (Ttype292840*)0;
LOC15 = elemtype_320394_3876443242(LOC14);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC15, (*d0).s);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&(*d0)));
LOC16[1] = intliteral_539270_839829468(((NI64) (i_558234_839829468)));
arr0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_138), LOC16, 2);
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[i_558234_839829468], (&arr0));
res_558245_839829468 += ((NI) 1);
} LA13: ;
}
}
}
LA4: ;
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for a tuple-constructor node `n0` into `d0`. Mirrors
 * genarrayconstr: skip if handleconstexpr() folded it, allocate a temp
 * for an unset `d0`, then generate each son into a field loc `rec0`.
 * A son of kind 34 (presumably a key/value pair node — confirm against
 * the Nim node-kind enum) is unwrapped to its value (son[1]) first.
 */
N_NIMCALL(void, gentupleconstr_557618_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 rec0;
memset((void*)(&rec0), 0, sizeof(rec0));
{
NIM_BOOL LOC3;
Ttype292840* t0;
Ropeobj178006* LOC6;
LOC3 = (NIM_BOOL)0;
LOC3 = handleconstexpr_554853_839829468(p0, n0, d0);
if (!!(LOC3)) goto LA4;
t0 = getuniquetype_528640_2036603609((*n0).typ);
/* LOC6 is only used for its side effect of registering the type desc */
LOC6 = (Ropeobj178006*)0;
LOC6 = gettypedesc_535673_839829468((*p0).module, t0);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA9;
gettemp_537032_839829468(p0, t0, d0, NIM_FALSE);
}
LA9: ;
{
NI i_557646_839829468;
NI HEX3Atmp_557803_839829468;
NI LOC12;
NI res_557806_839829468;
i_557646_839829468 = (NI)0;
HEX3Atmp_557803_839829468 = (NI)0;
LOC12 = (NI)0;
LOC12 = sonslen_295351_850551059(n0);
HEX3Atmp_557803_839829468 = (NI)(LOC12 - ((NI) 1));
res_557806_839829468 = ((NI) 0);
{
/* for i in 0 .. sonslen(n0)-1: generate field i into rec0 */
while (1) {
Tnode292802* it0;
TY532811 LOC19;
if (!(res_557806_839829468 <= HEX3Atmp_557803_839829468)) goto LA14;
i_557646_839829468 = res_557806_839829468;
it0 = (*n0).kindU.S6.sons->data[i_557646_839829468];
{
if (!((*it0).kind == ((Tnodekind292020) 34))) goto LA17;
it0 = (*it0).kindU.S6.sons->data[((NI) 1)];
}
LA17: ;
initloc_532273_839829468((&rec0), ((Tlockind292808) 6), (*it0).typ, (*d0).s);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((&(*d0)));
LOC19[1] = rope_178401_2381377266(((NI64) (i_557646_839829468)));
rec0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_185), LOC19, 2);
expr_539248_839829468(p0, it0, (&rec0));
res_557806_839829468 += ((NI) 1);
} LA14: ;
}
}
}
LA4: ;
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Resolves `field0` against object type `ty0`, walking up the
 * inheritance chain (sons[0]) until lookupinrecord() finds the symbol.
 * While ascending, appends a string (T839829468_153, likely a ".Sup"
 * style super-field accessor — confirm) to *r0 unless in a special
 * backend mode (gcmd == 2 or module flag 27 set — presumably the C++
 * backend, where base fields need no explicit path).
 * internalerror()s if the field is never found.
 */
N_NIMCALL(Tsym292834*, lookupfieldagain_553154_839829468)(Tcproc529021* p0, Ttype292840* ty_553157_839829468, Tsym292834* field0, Ropeobj178006** r0) {
Tsym292834* result0;
Ttype292840* ty0;
result0 = (Tsym292834*)0;
ty0 = ty_553157_839829468;
{
while (1) {
if (!!((ty0 == NIM_NIL))) goto LA2;
ty0 = skiptypes_296099_850551059(ty0, IL64(211106247215360));
result0 = lookupinrecord_299119_2984716966((*ty0).n, (*field0).name);
{
if (!!((result0 == NIM_NIL))) goto LA5;
goto LA1;
}
LA5: ;
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
LOC9 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC9) goto LA10;
LOC9 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA10: ;
if (!!(LOC9)) goto LA11;
add_178487_2381377266(r0, ((NimStringDesc*) &T839829468_153));
}
LA11: ;
/* ascend to the base type and keep searching */
ty0 = getuniquetype_528640_2036603609((*ty0).sons->data[((NI) 0)]);
} LA2: ;
} LA1: ;
{
if (!(result0 == NIM_NIL)) goto LA15;
internalerror_196100_155036129((*field0).info, ((NimStringDesc*) &T839829468_532));
}
LA15: ;
return result0;
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits runtime discriminant checks for accessing `field0` of a case
 * object. Iterates the check expressions in `e0` (sons 1..n-1); each
 * check compares the discriminator value (via geninexpraux) and emits
 * one of two runtime-check templates (T839829468_534 vs _535) depending
 * on whether the check op's magic is 99 (presumably "not in" vs "in" —
 * confirm against the Nim Tmagic enum). The field-name string literal
 * is deduplicated through the module's datacache: a fresh literal is
 * created only when the cache id equals the current label counter.
 */
N_NIMCALL(void, genfieldcheck_553504_839829468)(Tcproc529021* p0, Tnode292802* e0, Ropeobj178006* obj0, Tsym292834* field0, Ttype292840* origty0) {
Tloc292816 test0;
Tloc292816 u0;
Tloc292816 v0;
memset((void*)(&test0), 0, sizeof(test0));
memset((void*)(&u0), 0, sizeof(u0));
memset((void*)(&v0), 0, sizeof(v0));
{
NI i_553525_839829468;
NI HEX3Atmp_554039_839829468;
NI LOC2;
NI res_554042_839829468;
i_553525_839829468 = (NI)0;
HEX3Atmp_554039_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554039_839829468 = (NI)(LOC2 - ((NI) 1));
res_554042_839829468 = ((NI) 1);
{
/* for i in 1 .. sonslen(e0)-1: emit one discriminant check each */
while (1) {
Tnode292802* it0;
Tsym292834* op0;
Tnode292802* disc0;
Ropeobj178006* o0;
Tsym292834* d0;
NI id0;
Tnode292802* LOC9;
Ropeobj178006* strlit0;
if (!(res_554042_839829468 <= HEX3Atmp_554039_839829468)) goto LA4;
i_553525_839829468 = res_554042_839829468;
it0 = (*e0).kindU.S6.sons->data[i_553525_839829468];
op0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
/* magic 99: unwrap to the inner expression (negated check) */
if (!((*op0).magic == ((Tmagic292524) 99))) goto LA7;
it0 = (*it0).kindU.S6.sons->data[((NI) 1)];
}
LA7: ;
disc0 = skipconv_328882_3876443242((*it0).kindU.S6.sons->data[((NI) 2)]);
initloc_532273_839829468((&test0), ((Tlockind292808) 0), (*it0).typ, ((Tstorageloc292812) 2));
initlocexpr_539283_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&u0));
o0 = obj0;
d0 = lookupfieldagain_553154_839829468(p0, origty0, (*disc0).kindU.S4.sym, &o0);
initloc_532273_839829468((&v0), ((Tlockind292808) 6), (*d0).typ, ((Tstorageloc292812) 0));
v0.r = o0;
add_178487_2381377266(&v0.r, ((NimStringDesc*) &T839829468_257));
add_178482_2381377266(&v0.r, (*d0).loc.r);
geninexpraux_553496_839829468(p0, it0, (&u0), (&v0), (&test0));
/* deduplicate the field-name string literal via the datacache */
LOC9 = (Tnode292802*)0;
LOC9 = newstrnode_293677_850551059(((Tnodekind292020) 20), (*(*field0).name).s);
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), LOC9, ((NI) ((*(*p0).module).labels)));
{
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA12;
strlit0 = getstrlit_549468_839829468((*p0).module, (*(*field0).name).s);
}
goto LA10;
LA12: ;
{
Ropeobj178006* LOC15;
LOC15 = (Ropeobj178006*)0;
LOC15 = rope_178401_2381377266(((NI64) (id0)));
strlit0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC15);
}
LA10: ;
{
TY532811 LOC20;
if (!((*op0).magic == ((Tmagic292524) 99))) goto LA18;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rdloc_538188_839829468((&test0));
LOC20[1] = strlit0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_534), LOC20, 2);
}
goto LA16;
LA18: ;
{
TY532811 LOC22;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = rdloc_538188_839829468((&test0));
LOC22[1] = strlit0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_535), LOC22, 2);
}
LA16: ;
res_554042_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for an object-constructor node `e0` into `d0`:
 *   1. Early-out if handleconstexpr() folded it to a constant.
 *   2. Allocate a temp `tmp0`; if the constructed type is a ref
 *      (kind 22), emit a heap allocation (rawgennew), dereference to
 *      the underlying object type, and record GC usage; otherwise
 *      zero-construct the temp in place (constructloc).
 *   3. For each field son (1..n-1): resolve the field through the
 *      inheritance chain (lookupfieldagain), optionally emit a
 *      discriminant check when the son has 3 children and option bit 2
 *      is set (presumably field checks enabled — confirm), then
 *      generate the value expression into the field's loc.
 *   4. Copy the finished temp into `d0` — genericAssign for an unset
 *      destination, genassignment otherwise.
 */
N_NIMCALL(void, genobjconstr_554903_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 tmp0;
Ttype292840* t0;
NIM_BOOL isref0;
Ropeobj178006* r0;
Ropeobj178006* LOC13;
Ttype292840* ty0;
{ {
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = handleconstexpr_554853_839829468(p0, e0, d0);
if (!LOC3) goto LA4;
goto BeforeRet;
}
LA4: ;
memset((void*)(&tmp0), 0, sizeof(tmp0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106232576256));
gettemp_537032_839829468(p0, t0, (&tmp0), NIM_FALSE);
isref0 = ((*t0).kind == ((Ttypekind292244) 22));
r0 = rdloc_538188_839829468((&tmp0));
{
Ttype292840* LOC10;
TY178507 LOC11;
if (!isref0) goto LA8;
/* ref object: heap-allocate, then work on the dereferenced value */
rawgennew_554741_839829468(p0, (&tmp0), NIM_NIL);
LOC10 = (Ttype292840*)0;
LOC10 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC10, IL64(211106232576256));
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = r0;
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC11, 1);
gcusage_554439_839829468(e0);
}
goto LA6;
LA8: ;
{
constructloc_538388_839829468(p0, (&tmp0), NIM_FALSE);
}
LA6: ;
LOC13 = (Ropeobj178006*)0;
LOC13 = gettypedesc_535673_839829468((*p0).module, t0);
ty0 = getuniquetype_528640_2036603609(t0);
{
NI i_554944_839829468;
NI HEX3Atmp_554997_839829468;
NI LOC15;
NI res_555000_839829468;
i_554944_839829468 = (NI)0;
HEX3Atmp_554997_839829468 = (NI)0;
LOC15 = (NI)0;
LOC15 = len_293081_850551059(e0);
HEX3Atmp_554997_839829468 = (LOC15 - 1);
res_555000_839829468 = ((NI) 1);
{
/* for i in 1 .. len(e0)-1: assign each field initializer */
while (1) {
Tnode292802* it0;
Tloc292816 tmp20;
Tsym292834* field0;
if (!(res_555000_839829468 <= HEX3Atmp_554997_839829468)) goto LA17;
i_554944_839829468 = res_555000_839829468;
it0 = (*e0).kindU.S6.sons->data[i_554944_839829468];
memset((void*)(&tmp20), 0, sizeof(tmp20));
tmp20.r = r0;
field0 = lookupfieldagain_553154_839829468(p0, ty0, (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym, &tmp20.r);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA20;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_533));
}
LA20: ;
{
NIM_BOOL LOC24;
NI LOC25;
LOC24 = (NIM_BOOL)0;
LOC25 = (NI)0;
LOC25 = len_293081_850551059(it0);
LOC24 = (LOC25 == ((NI) 3));
if (!(LOC24)) goto LA26;
LOC24 = (((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0);
LA26: ;
if (!LOC24) goto LA27;
genfieldcheck_553504_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 2)], r0, field0, ty0);
}
LA27: ;
add_178487_2381377266(&tmp20.r, ((NimStringDesc*) &T839829468_257));
add_178482_2381377266(&tmp20.r, (*field0).loc.r);
tmp20.k = ((Tlockind292808) 1);
tmp20.t = (*field0).loc.t;
{
/* storage class of the field loc depends on heap vs local object */
if (!isref0) goto LA31;
tmp20.s = ((Tstorageloc292812) 3);
}
goto LA29;
LA31: ;
{
tmp20.s = ((Tstorageloc292812) 2);
}
LA29: ;
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&tmp20));
res_555000_839829468 += ((NI) 1);
} LA17: ;
}
}
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA36;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA34;
LA36: ;
{
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA34: ;
}BeforeRet: ;
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for a `cast` node `e0` into `d0`. If either source or
 * destination type kind is in the bitmask IL64(1030792609808)
 * (presumably "unconvertible" aggregate kinds — confirm), the cast is
 * lowered through a fresh labelled union temp: a union declaration
 * (template T839829468_537) is emitted, the operand is written into
 * its source-typed member, and the destination-typed member is read
 * back. Otherwise the simple path gensomecast() is taken.
 */
N_NIMCALL(void, gencast_556538_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* destt0;
Ttype292840* srct0;
destt0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
srct0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
Ropeobj178006* lbl0;
Tloc292816 tmp0;
TY178507 LOC7;
TY535238 LOC8;
TY178507 LOC9;
Ropeobj178006* LOC10;
LOC3 = (NIM_BOOL)0;
LOC3 = ((IL64(1030792609808) &((NU64)1<<((NU)((*destt0).kind)&63U)))!=0);
if (LOC3) goto LA4;
LOC3 = ((IL64(1030792609808) &((NU64)1<<((NU)((*srct0).kind)&63U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
/* fresh label id for the temp's name */
(*p0).labels += ((NI) 1);
lbl0 = rope_178401_2381377266(((NI64) ((*p0).labels)));
memset((void*)(&tmp0), 0, sizeof(tmp0));
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = lbl0;
tmp0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_536), LOC7, 1);
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = gettypedesc_535673_839829468((*p0).module, srct0);
LOC8[1] = gettypedesc_535673_839829468((*p0).module, destt0);
LOC8[2] = lbl0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_537), LOC8, 3);
tmp0.k = ((Tlockind292808) 6);
tmp0.t = srct0;
tmp0.s = ((Tstorageloc292812) 2);
tmp0.flags = 0;
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = lbl0;
LOC10 = (Ropeobj178006*)0;
LOC10 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_538), LOC9, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC10, tmp0.s);
}
goto LA1;
LA5: ;
{
gensomecast_556481_839829468(p0, e0, d0);
}
LA1: ;
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for a type-conversion node `e0` into `d0`. When the
 * destination and operand types compare equal (comparetypes with mode
 * 1 — presumably ignoring distinct-ness), the conversion is a no-op
 * and the operand is generated directly; otherwise gensomecast()
 * performs the real conversion.
 */
N_NIMCALL(void, genconv_556633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* desttype0;
desttype0 = skiptypes_296099_850551059((*e0).typ, 8390656);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = comparetypes_326214_3876443242(desttype0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, ((Tdistinctcompare324427) 1), 0);
if (!LOC3) goto LA4;
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0);
}
goto LA1;
LA4: ;
{
gensomecast_556481_839829468(p0, e0, d0);
}
LA1: ;
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Predicate: true when `typ0` should be treated as a C++ reference.
 * Requires all of: (a) backend mode is gcmd == 2 or module flag 27 set
 * (presumably "compiling to C++" — confirm), (b) the skipped type's
 * kind is 23 (a ptr-like kind), and (c) type flag 18 is NOT set.
 * Short-circuit && chains are flattened into the LOC1/LOC2/LOC3 gotos.
 */
static N_INLINE(NIM_BOOL, iscppref_552807_839829468)(Tcproc529021* p0, Ttype292840* typ0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
NIM_BOOL LOC2;
NIM_BOOL LOC3;
Ttype292840* LOC6;
Ttype292840* LOC8;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC2 = (NIM_BOOL)0;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
LOC2 = LOC3;
if (!(LOC2)) goto LA5;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
LOC2 = ((*LOC6).kind == ((Ttypekind292244) 23));
LA5: ;
LOC1 = LOC2;
if (!(LOC1)) goto LA7;
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
LOC1 = !((((*LOC8).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA7: ;
result0 = LOC1;
return result0;
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for an address-of node `e0` into `d0`, with three
 * cases on the operand's type:
 *   1. type kind in mask 6291456: evaluate the operand and prefix it
 *      with template T839829468_52 (a cast/`&` form — exact text not
 *      visible here);
 *   2. the operand maps to ctype kind 17 or is a C++ reference
 *      (iscppref): the address-of is a no-op, generate the operand
 *      directly;
 *   3. otherwise: take addrloc() of the evaluated operand.
 */
N_NIMCALL(void, genaddr_553051_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
{
Ttype292840* LOC3;
Tloc292816 a0;
Ropeobj178006* LOC6;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
if (!((6291456 &((NU64)1<<((NU)((*LOC3).kind)&63U)))!=0)) goto LA4;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC6 = (Ropeobj178006*)0;
LOC6 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), a0.r);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC6, a0.s);
}
goto LA1;
LA4: ;
{
NIM_BOOL LOC8;
Tctypekind529007 LOC9;
LOC8 = (NIM_BOOL)0;
LOC9 = (Tctypekind529007)0;
LOC9 = maptype_533394_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
LOC8 = (LOC9 == ((Tctypekind529007) 17));
if (LOC8) goto LA10;
LOC8 = iscppref_552807_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
LA10: ;
if (!LOC8) goto LA11;
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
}
goto LA1;
LA11: ;
{
Tloc292816 a0;
Ropeobj178006* LOC14;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC14 = (Ropeobj178006*)0;
LOC14 = addrloc_538204_839829468((&a0));
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC14, a0.s);
}
LA1: ;
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for fixed-array indexing `x0[y0]` into `d0`.
 * If bounds checks are on (option bit 4) and the array type isn't
 * flagged unchecked (flag 0):
 *   - non-constant index: emit a one-sided check (template _539) when
 *     firstord == 0 and the index type's range could exceed the array,
 *     otherwise a two-sided check (template _540);
 *   - constant index: verify it statically and localerror() (msg 86,
 *     index out of bounds) when outside [firstord, lastord].
 * The final element access is produced through template T839829468_541
 * using the loc, the (char-read) index, and the first-ordinal offset.
 */
N_NIMCALL(void, genarrayelem_554093_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC1;
Ropeobj178006* first0;
NI64 LOC2;
Ttype292840* LOC47;
Ttype292840* LOC48;
TY535238 LOC49;
Ropeobj178006* LOC50;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
ty0 = skiptypes_296099_850551059(LOC1, IL64(211106247256320));
LOC2 = (NI64)0;
LOC2 = firstord_320001_3876443242(ty0);
first0 = intliteral_539270_839829468(LOC2);
{
NIM_BOOL LOC5;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0);
if (!(LOC5)) goto LA6;
LOC5 = !((((*ty0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0));
LA6: ;
if (!LOC5) goto LA7;
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = isconstexpr_318510_2616423590(y0);
if (!!(LOC11)) goto LA12;
{
/* runtime (non-constant) index: emit a runtime bounds check */
NI64 LOC16;
LOC16 = (NI64)0;
LOC16 = firstord_320001_3876443242(ty0);
if (!(LOC16 == IL64(0))) goto LA17;
{
NIM_BOOL LOC21;
NI64 LOC22;
NI64 LOC23;
NI64 LOC25;
NI64 LOC26;
TY532811 LOC29;
NI64 LOC30;
LOC21 = (NIM_BOOL)0;
LOC22 = (NI64)0;
LOC22 = firstord_320001_3876443242(b0.t);
LOC23 = (NI64)0;
LOC23 = firstord_320001_3876443242(ty0);
LOC21 = (LOC22 < LOC23);
if (LOC21) goto LA24;
LOC25 = (NI64)0;
LOC25 = lastord_320004_3876443242(ty0);
LOC26 = (NI64)0;
LOC26 = lastord_320004_3876443242(b0.t);
LOC21 = (LOC25 < LOC26);
LA24: ;
/* check only needed when the index type's range can escape the array */
if (!LOC21) goto LA27;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdcharloc_538227_839829468((&b0));
LOC30 = (NI64)0;
LOC30 = lastord_320004_3876443242(ty0);
LOC29[1] = intliteral_539270_839829468(LOC30);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_539), LOC29, 2);
}
LA27: ;
}
goto LA14;
LA17: ;
{
/* non-zero lower bound: two-sided check against first..last */
TY535238 LOC32;
NI64 LOC33;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = rdcharloc_538227_839829468((&b0));
LOC32[1] = first0;
LOC33 = (NI64)0;
LOC33 = lastord_320004_3876443242(ty0);
LOC32[2] = intliteral_539270_839829468(LOC33);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_540), LOC32, 3);
}
LA14: ;
}
goto LA9;
LA12: ;
{
/* constant index: verify statically, no runtime code emitted */
NI64 idx0;
idx0 = getordvalue_320129_3876443242(y0);
{
NIM_BOOL LOC37;
NI64 LOC38;
NI64 LOC40;
LOC37 = (NIM_BOOL)0;
LOC38 = (NI64)0;
LOC38 = firstord_320001_3876443242(ty0);
LOC37 = (idx0 < LOC38);
if (LOC37) goto LA39;
LOC40 = (NI64)0;
LOC40 = lastord_320004_3876443242(ty0);
LOC37 = (LOC40 < idx0);
LA39: ;
if (!LOC37) goto LA41;
localerror_196080_155036129((*x0).info, ((Tmsgkind191002) 86), ((NimStringDesc*) &T839829468_490));
}
LA41: ;
}
LA9: ;
}
LA7: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA45;
(*d0).s = a0.s;
}
LA45: ;
LOC47 = (Ttype292840*)0;
LOC47 = skiptypes_296099_850551059(ty0, IL64(211106240964864));
LOC48 = (Ttype292840*)0;
LOC48 = elemtype_320394_3876443242(LOC47);
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468((&a0));
LOC49[1] = rdcharloc_538227_839829468((&b0));
LOC49[2] = first0;
LOC50 = (Ropeobj178006*)0;
LOC50 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_541), LOC49, 3);
putintodest_550468_839829468(p0, d0, LOC48, LOC50, a0.s);
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for openarray indexing `x0[y0]` into `d0`. When bounds
 * checks are enabled (option bit 4), emits a runtime check (template
 * _542) using the index and the openarray loc (which carries its own
 * length). The element access itself uses template T839829468_138.
 */
N_NIMCALL(void, genopenarrayelem_554169_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* LOC10;
Ttype292840* LOC11;
TY532811 LOC12;
Ropeobj178006* LOC13;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
{
TY532811 LOC5;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&b0));
LOC5[1] = rdloc_538188_839829468((&a0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_542), LOC5, 2);
}
LA3: ;
{
/* unset destination inherits the array's storage class */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA8;
(*d0).s = a0.s;
}
LA8: ;
LOC10 = (Ttype292840*)0;
LOC10 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
LOC11 = (Ttype292840*)0;
LOC11 = elemtype_320394_3876443242(LOC10);
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468((&a0));
LOC12[1] = rdcharloc_538227_839829468((&b0));
LOC13 = (Ropeobj178006*)0;
LOC13 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC12, 2);
putintodest_550468_839829468(p0, d0, LOC11, LOC13, a0.s);
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for seq/string indexing `x0[y0]` into `d0`. The seq
 * type is unwrapped through one level of ref/ptr (kind mask 6291456).
 * With bounds checks on (option bit 4) the emitted check template
 * differs for type kind 28 (template _543) vs other seq-like kinds
 * (template _544) — presumably string vs seq, whose valid index ranges
 * differ by the terminating NUL (confirm against the templates).
 * An unset destination becomes heap storage (s = 3). If the loc's
 * type is still ref/ptr-like, it is dereferenced (template _124)
 * before the element access (template _187) is produced.
 */
N_NIMCALL(void, genseqelem_554205_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC27;
Ttype292840* LOC28;
TY532811 LOC29;
Ropeobj178006* LOC30;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
ty0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
{
Ttype292840* LOC5;
if (!((6291456 &((NU64)1<<((NU)((*ty0).kind)&63U)))!=0)) goto LA3;
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(ty0);
ty0 = skiptypes_296099_850551059(LOC5, IL64(211106242013440));
}
LA3: ;
{
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0)) goto LA8;
{
TY535238 LOC14;
if (!((*ty0).kind == ((Ttypekind292244) 28))) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468((&b0));
LOC14[1] = rdloc_538188_839829468((&a0));
LOC14[2] = lenfield_539305_839829468(p0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_543), LOC14, 3);
}
goto LA10;
LA12: ;
{
TY535238 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&b0));
LOC16[1] = rdloc_538188_839829468((&a0));
LOC16[2] = lenfield_539305_839829468(p0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_544), LOC16, 3);
}
LA10: ;
}
LA8: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA19;
(*d0).s = ((Tstorageloc292812) 3);
}
LA19: ;
{
/* deref through ref/ptr before indexing the seq payload */
Ttype292840* LOC23;
TY178507 LOC26;
LOC23 = (Ttype292840*)0;
LOC23 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
if (!((6291456 &((NU64)1<<((NU)((*LOC23).kind)&63U)))!=0)) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = a0.r;
a0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC26, 1);
}
LA24: ;
LOC27 = (Ttype292840*)0;
LOC27 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
LOC28 = (Ttype292840*)0;
LOC28 = elemtype_320394_3876443242(LOC27);
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdloc_538188_839829468((&a0));
LOC29[1] = rdcharloc_538227_839829468((&b0));
LOC30 = (Ropeobj178006*)0;
LOC30 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC29, 2);
putintodest_550468_839829468(p0, d0, LOC28, LOC30, a0.s);
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for cstring indexing `x0[y0]` into `d0`. No bounds
 * check is possible (a cstring carries no length), so the element
 * access (template T839829468_138) is emitted directly; an unset
 * destination inherits the cstring loc's storage class.
 */
N_NIMCALL(void, gencstringelem_554144_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC5;
Ttype292840* LOC6;
TY532811 LOC7;
Ropeobj178006* LOC8;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
ty0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
(*d0).s = a0.s;
}
LA3: ;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059(ty0, IL64(211106240964864));
LOC6 = (Ttype292840*)0;
LOC6 = elemtype_320394_3876443242(LOC5);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468((&a0));
LOC7[1] = rdcharloc_538227_839829468((&b0));
LOC8 = (Ropeobj178006*)0;
LOC8 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC7, 2);
putintodest_550468_839829468(p0, d0, LOC6, LOC8, a0.s);
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for tuple-field access `e0[0].fieldI` into `d0`. The
 * field index must be an integer literal node (kinds 6..15); anything
 * else is an internalerror. The field accessor is appended via
 * template T839829468_546 and the result typed from the tuple type's
 * sons[i].
 */
N_NIMCALL(void, gentupleelem_553124_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
NI i0;
Ropeobj178006* LOC5;
Ttype292840* ty0;
Ropeobj178006* r0;
TY178507 LOC8;
memset((void*)(&a0), 0, sizeof(a0));
i0 = (NI)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
(*d0).s = a0.s;
}
LA3: ;
/* LOC5 registers the tuple's type desc; only its side effect matters */
LOC5 = (Ropeobj178006*)0;
LOC5 = gettypedesc_535673_839829468((*p0).module, a0.t);
ty0 = getuniquetype_528640_2036603609(a0.t);
r0 = rdloc_538188_839829468((&a0));
switch ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind) {
case ((Tnodekind292020) 6) ... ((Tnodekind292020) 15):
{
i0 = ((NI) ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval));
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_545));
}
break;
}
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rope_178401_2381377266(((NI64) (i0)));
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC8, 1);
putintodest_550468_839829468(p0, d0, (*ty0).sons->data[i0], r0, a0.s);
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Dispatcher for bracket expressions `n0[0][n0[1]]`: skips one level
 * of ref/ptr (kind mask 6291456) on the container type, then routes
 * by type kind to the specialized generators — array (16, 4),
 * openarray-like (27, 48), seq/string (24, 28), cstring (29),
 * tuple (18). Any other kind is an internalerror with the offending
 * kind name appended.
 */
N_NIMCALL(void, genbracketexpr_554277_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Ttype292840* ty0;
ty0 = skiptypes_296099_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440));
{
Ttype292840* LOC5;
if (!((6291456 &((NU64)1<<((NU)((*ty0).kind)&63U)))!=0)) goto LA3;
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(ty0);
ty0 = skiptypes_296099_850551059(LOC5, IL64(211106242013440));
}
LA3: ;
switch ((*ty0).kind) {
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
genarrayelem_554093_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
genopenarrayelem_554169_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 28):
{
genseqelem_554205_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 29):
{
gencstringelem_554144_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 18):
{
gentupleelem_553124_839829468(p0, n0, d0);
}
break;
default:
{
NimStringDesc* LOC12;
LOC12 = (NimStringDesc*)0;
LOC12 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 21);
appendString(LOC12, ((NimStringDesc*) &T839829468_547));
appendString(LOC12, reprEnum((NI)(*ty0).kind, (&NTI292244)));
appendChar(LOC12, 41);
internalerror_196100_155036129((*n0).info, LOC12);
}
break;
}
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for a pointer dereference `e0[0][]` into `d0`.
 * Top-level split on maptype(): when the operand maps to ctype kinds
 * in mask 393216 and the deref is not enforced, the deref is a no-op
 * (the C representation is already a value) — the operand is generated
 * directly, and a ref operand (type kind 22) forces heap storage.
 * Otherwise the operand is evaluated into `a0`; several special cases
 * then short-circuit via BeforeRet for C++-backend references
 * (kind 23 without flag 18 when gcmd == 2 or module flag 27 — same
 * predicate as iscppref, presumably C++ mode, confirm), where the
 * loc is passed through unchanged. In the general case an unset
 * destination gets its storage class from the pointer kind (22 ref →
 * heap, 23 → static, 21 → static, else internalerror), and the final
 * value is either the pointee loc (enforced deref of ctype 18) or a
 * "(*x)"-style wrap via template T839829468_124.
 */
N_NIMCALL(void, genderef_543921_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NIM_BOOL enforcederef0) {
Tctypekind529007 mt0;
{ mt0 = maptype_533394_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((393216 &(1U<<((NU)(mt0)&31U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = !(enforcederef0);
LA4: ;
if (!LOC3) goto LA5;
/* deref is a no-op for these C representations */
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
{
Ttype292840* LOC9;
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
if (!((*LOC9).kind == ((Ttypekind292244) 22))) goto LA10;
(*d0).s = ((Tstorageloc292812) 3);
}
LA10: ;
}
goto LA1;
LA5: ;
{
Tloc292816 a0;
Ttype292840* typ0;
memset((void*)(&a0), 0, sizeof(a0));
typ0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* C++-ref special case: collapse addr-of-deref (node kind 64) */
NIM_BOOL LOC15;
NIM_BOOL LOC16;
NIM_BOOL LOC17;
NIM_BOOL LOC20;
Tnode292802* LOC25;
Tnode292802* LOC26;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*typ0).kind == ((Ttypekind292244) 23));
if (!(LOC17)) goto LA18;
LOC17 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA18: ;
LOC16 = LOC17;
if (!(LOC16)) goto LA19;
LOC20 = (NIM_BOOL)0;
LOC20 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC20) goto LA21;
LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA21: ;
LOC16 = LOC20;
LA19: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA22;
LOC15 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 64));
LA22: ;
if (!LOC15) goto LA23;
LOC25 = (Tnode292802*)0;
LOC25 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
LOC26 = (Tnode292802*)0;
LOC26 = HEX5BHEX5D_293238_850551059(LOC25, ((NI) 0));
initlocexprsingleuse_539289_839829468(p0, LOC26, d0);
goto BeforeRet;
}
goto LA13;
LA23: ;
{
initlocexprsingleuse_539289_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA13: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA30;
switch ((*typ0).kind) {
case ((Ttypekind292244) 22):
{
(*d0).s = ((Tstorageloc292812) 3);
}
break;
case ((Ttypekind292244) 23):
{
(*d0).s = ((Tstorageloc292812) 0);
{
/* C++ ref deref of node kind 65: pass the loc straight through */
NIM_BOOL LOC36;
NIM_BOOL LOC37;
NIM_BOOL LOC39;
Ropeobj178006* LOC44;
LOC36 = (NIM_BOOL)0;
LOC37 = (NIM_BOOL)0;
LOC37 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
if (!(LOC37)) goto LA38;
LOC39 = (NIM_BOOL)0;
LOC39 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC39) goto LA40;
LOC39 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA40: ;
LOC37 = LOC39;
LA38: ;
LOC36 = LOC37;
if (!(LOC36)) goto LA41;
LOC36 = ((*e0).kind == ((Tnodekind292020) 65));
LA41: ;
if (!LOC36) goto LA42;
LOC44 = (Ropeobj178006*)0;
LOC44 = rdloc_538188_839829468((&a0));
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC44, a0.s);
goto BeforeRet;
}
LA42: ;
}
break;
case ((Ttypekind292244) 21):
{
(*d0).s = ((Tstorageloc292812) 0);
}
break;
default:
{
NimStringDesc* LOC47;
LOC47 = (NimStringDesc*)0;
LOC47 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 9);
appendString(LOC47, ((NimStringDesc*) &T839829468_548));
appendString(LOC47, reprEnum((NI)(*typ0).kind, (&NTI292244)));
internalerror_196100_155036129((*e0).info, LOC47);
}
break;
}
}
goto LA28;
LA30: ;
{
/* destination already set: repeat the C++-ref passthrough check */
NIM_BOOL LOC49;
LOC49 = (NIM_BOOL)0;
LOC49 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC49) goto LA50;
LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA50: ;
if (!LOC49) goto LA51;
{
NIM_BOOL LOC55;
NIM_BOOL LOC56;
Ropeobj178006* LOC61;
LOC55 = (NIM_BOOL)0;
LOC56 = (NIM_BOOL)0;
LOC56 = ((*typ0).kind == ((Ttypekind292244) 23));
if (!(LOC56)) goto LA57;
LOC56 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA57: ;
LOC55 = LOC56;
if (!(LOC55)) goto LA58;
LOC55 = ((*e0).kind == ((Tnodekind292020) 65));
LA58: ;
if (!LOC55) goto LA59;
LOC61 = (Ropeobj178006*)0;
LOC61 = rdloc_538188_839829468((&a0));
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC61, a0.s);
goto BeforeRet;
}
LA59: ;
}
goto LA28;
LA51: ;
LA28: ;
{
NIM_BOOL LOC64;
Ropeobj178006* LOC68;
LOC64 = (NIM_BOOL)0;
LOC64 = enforcederef0;
if (!(LOC64)) goto LA65;
LOC64 = (mt0 == ((Tctypekind529007) 18));
LA65: ;
if (!LOC64) goto LA66;
LOC68 = (Ropeobj178006*)0;
LOC68 = rdloc_538188_839829468((&a0));
putintodest_550468_839829468(p0, d0, (*a0.t).sons->data[((NI) 0)], LOC68, a0.s);
}
goto LA62;
LA66: ;
{
TY178507 LOC70;
Ropeobj178006* LOC71;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC70[0] = rdloc_538188_839829468((&a0));
LOC71 = (Ropeobj178006*)0;
LOC71 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC70, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC71, a0.s);
}
LA62: ;
}
LA1: ;
}BeforeRet: ;
}
N_NIMCALL(Ttype292840*, genrecordfieldaux_553096_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tloc292816* a0) {
Ttype292840* result0;
Ropeobj178006* LOC9;
result0 = (Ttype292840*)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], a0);
{
if (!!(((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 3)))) goto LA3;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_549));
}
LA3: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA7;
(*d0).s = (*a0).s;
}
LA7: ;
LOC9 = (Ropeobj178006*)0;
LOC9 = gettypedesc_535673_839829468((*p0).module, (*a0).t);
result0 = getuniquetype_528640_2036603609((*a0).t);
return result0;
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for record-field access `e0[0].e0[1]` into `d0`.
 * Tuples (type kind 18) use the field's position with template _546;
 * objects resolve the field through lookupfieldagain() (appending any
 * inheritance path) and use the field's mangled loc name with
 * template _551, internalerror()ing if the field has no loc yet.
 */
N_NIMCALL(void, genrecordfield_553448_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* ty0;
Ropeobj178006* r0;
Tsym292834* f0;
memset((void*)(&a0), 0, sizeof(a0));
ty0 = genrecordfieldaux_553096_839829468(p0, e0, d0, (&a0));
r0 = rdloc_538188_839829468((&a0));
f0 = (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
{
TY178507 LOC5;
if (!((*ty0).kind == ((Ttypekind292244) 18))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178401_2381377266(((NI64) ((*f0).position)));
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC5, 1);
putintodest_550468_839829468(p0, d0, (*f0).typ, r0, a0.s);
}
goto LA1;
LA3: ;
{
Tsym292834* field0;
TY178507 LOC11;
field0 = lookupfieldagain_553154_839829468(p0, ty0, f0, &r0);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA9;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_550));
}
LA9: ;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = (*field0).loc.r;
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_551), LOC11, 1);
putintodest_550468_839829468(p0, d0, (*field0).typ, r0, a0.s);
}
LA1: ;
}
/*
 * Machine-generated C (Nim compiler backend output) — do not hand-edit.
 *
 * Emits C code for a checked record-field access: when field checks
 * are enabled (option bit 2), resolves the field on the inner access
 * node e0[0], emits the discriminant checks (genfieldcheck), then the
 * field access itself; otherwise falls back to plain genrecordfield
 * on e0[0].
 */
N_NIMCALL(void, gencheckedrecordfield_554046_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
{
Tloc292816 a0;
Ttype292840* ty0;
Ropeobj178006* r0;
Tsym292834* f0;
Tsym292834* field0;
TY178507 LOC9;
Ropeobj178006* LOC10;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0)) goto LA3;
memset((void*)(&a0), 0, sizeof(a0));
ty0 = genrecordfieldaux_553096_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0, (&a0));
r0 = rdloc_538188_839829468((&a0));
f0 = (*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
field0 = lookupfieldagain_553154_839829468(p0, ty0, f0, &r0);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA7;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_532));
}
LA7: ;
genfieldcheck_553504_839829468(p0, e0, r0, field0, ty0);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = (*field0).loc.r;
LOC10 = (Ropeobj178006*)0;
LOC10 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_551), LOC9, 1);
add_178482_2381377266(&r0, LOC10);
putintodest_550468_839829468(p0, d0, (*field0).typ, r0, a0.s);
}
goto LA1;
LA3: ;
{
genrecordfield_553448_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
}
LA1: ;
}
/* Opens a new lexical block in the generated C output.
 * Emits the block-opening statement "start0" (with its format args) into the
 * statement section (section 2), allocates a fresh label id, and pushes a new
 * entry onto p0->blocks recording the current try/except nesting depths.
 * Returns the index of the newly pushed block entry. */
N_NIMCALL(NI, startblock_543978_839829468)(Tcproc529021* p0, NimStringDesc* start0, Ropeobj178006** args0, NI args0Len0) {
    NI newidx;
    linecg_532707_839829468(p0, ((Tcprocsection529011) 2), start0, args0, args0Len0);
    (*p0).labels += ((NI) 1);
    /* the new entry's index is the old sequence length */
    newidx = ((*p0).blocks ? (*p0).blocks->Sup.len : 0);
    (*p0).blocks = (TY529095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock529019), ((NI) (newidx + ((NI) 1))));
    {
        /* take the pointer only after setLengthSeq, which may reallocate */
        Tblock529019* blk = &(*p0).blocks->data[newidx];
        blk->id = ((NI) ((*p0).labels));
        blk->nestedtrystmts = ((NI16) (((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0)));
        blk->nestedexceptstmts = ((NI16) ((*p0).inexceptblock));
    }
    return newidx;
}
/* Concatenates the three text sections of a generated block into one rope.
 * Starts from section 0; if the block has a positive frame length, an extra
 * fragment (format T839829468_554, parameterised by framelen) is appended
 * before sections 1 and 2. Returns the combined rope. */
N_NIMCALL(Ropeobj178006*, blockbody_544025_839829468)(Tblock529019* b0) {
    Ropeobj178006* body = (*b0).sections[(((Tcprocsection529011) 0))- 0];
    if (((NI16) 0) < (*b0).framelen) {
        TY178507 fmtargs;
        memset((void*)fmtargs, 0, sizeof(fmtargs));
        fmtargs[0] = rope_178401_2381377266(((NI64) ((*b0).framelen)));
        addf_179205_2381377266(&body, ((NimStringDesc*) &T839829468_554), fmtargs, 1);
    }
    add_178482_2381377266(&body, (*b0).sections[(((Tcprocsection529011) 1))- 0]);
    add_178482_2381377266(&body, (*b0).sections[(((Tcprocsection529011) 2))- 0]);
    return body;
}
/* Closes the topmost generated block: renders its body, splices that body
 * into the parent block's statement section (section 2), pops the block
 * stack, and finally emits the closing text "blockend0". */
N_NIMCALL(void, endblock_544035_839829468)(Tcproc529021* p0, Ropeobj178006* blockend0) {
    NI top = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
    Ropeobj178006* body = blockbody_544025_839829468((&(*p0).blocks->data[top]));
    /* append the rendered body to the parent (top - 1) block's section 2 */
    add_178482_2381377266(&(*p0).blocks->data[(NI)(top - ((NI) 1))].sections[(((Tcprocsection529011) 2))- 0], body);
    /* pop: shrink the block stack by one entry */
    (*p0).blocks = (TY529095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock529019), ((NI) (top)));
    line_532690_839829468(p0, ((Tcprocsection529011) 2), blockend0);
}
/* Closes the topmost generated block, synthesising its closing text.
 * If the block carries a label, the closing text is built from format
 * T839829468_552 with that label; otherwise a plain closing fragment
 * (format T839829468_160, zero args) is used. A positive frame length
 * appends an extra fragment (format T839829468_553). Delegates the actual
 * splice/pop work to endblock_544035_839829468. */
N_NIMCALL(void, endblock_544060_839829468)(Tcproc529021* p0) {
NI topblock0;
Ropeobj178006* blockend0;
NI16 framelen0;
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
{
TY178507 LOC5;
/* double negation: taken when the block's label is non-nil */
if (!!(((*p0).blocks->data[topblock0].label == NIM_NIL))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = (*p0).blocks->data[topblock0].label;
blockend0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_552), LOC5, 1);
}
goto LA1;
LA3: ;
{
/* no label: emit the plain closing fragment */
TY533289 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
blockend0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_160), LOC7, 0);
}
LA1: ;
framelen0 = (*p0).blocks->data[topblock0].framelen;
{
TY178507 LOC12;
if (!(((NI16) 0) < framelen0)) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178401_2381377266(((NI64) (framelen0)));
addf_179205_2381377266(&blockend0, ((NimStringDesc*) &T839829468_553), LOC12, 1);
}
LA10: ;
endblock_544035_839829468(p0, blockend0);
}
/* Generates code for a `block` AST node (n0) into destination d0.
 * If the node has a non-empty type and the destination is still unset
 * (kind 0), a temporary is allocated first. Saves/restores p0->breakidx
 * around the nested block so `break` targets resolve correctly.
 * Son 0 is the optional block label symbol; son 1 is the body. */
N_NIMCALL(void, genblock_546083_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI oldbreakidx_546099_839829468;
TY533289 LOC8;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297441_850551059((*n0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
/* block yields a value but d0 is unset: allocate a temp to hold it */
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA6: ;
oldbreakidx_546099_839829468 = (*p0).breakidx;
memset((void*)LOC8, 0, sizeof(LOC8));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC8, 0);
{
Tsym292834* sym0;
/* if son 0 is not node-kind 1 (i.e. a real label symbol is present),
 * record the break target on that symbol */
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA11;
sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
(*sym0).loc.k = ((Tlockind292808) 10);
(*sym0).position = (NI)((*p0).breakidx + ((NI) 1));
}
LA11: ;
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], d0);
endblock_544060_839829468(p0);
(*p0).breakidx = oldbreakidx_546099_839829468;
}
/* Generates code for a statement-list expression: all sons except the last
 * are emitted as plain statements; the final son is the expression whose
 * value is placed into d0 (only when the list is non-empty). */
N_NIMCALL(void, genstmtlistexpr_558402_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
    NI count = sonslen_295351_850551059(n0);
    NI idx;
    for (idx = ((NI) 0); idx <= (NI)(count - ((NI) 2)); idx += ((NI) 1)) {
        genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[idx]);
    }
    if (((NI) 0) < count) {
        expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(count - ((NI) 1))], d0);
    }
}
/* Generates code for an if/elif/else expression or statement (node n0)
 * into destination d0. Each two-son branch (condition, body) becomes a
 * nested block with a conditional jump (format T839829468_555) to the
 * per-branch "else" label; one-son branches are the trailing `else`.
 * A shared end label is emitted after each branch when there is more
 * than one branch, and fixed up at the end.  Machine-generated C; the
 * goto structure mirrors the original compiler source's control flow. */
N_NIMCALL(void, genif_544982_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ropeobj178006* lelse0;
Ropeobj178006* lend0;
memset((void*)(&a0), 0, sizeof(a0));
lelse0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297441_850551059((*n0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
/* if-expression with unset destination: allocate a temp for the value */
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA6: ;
genlinedir_532823_839829468(p0, n0);
lend0 = getlabel_539217_839829468(p0);
{
NI i_545011_839829468;
NI HEX3Atmp_545435_839829468;
NI LOC9;
NI res_545438_839829468;
i_545011_839829468 = (NI)0;
HEX3Atmp_545435_839829468 = (NI)0;
LOC9 = (NI)0;
LOC9 = sonslen_295351_850551059(n0);
HEX3Atmp_545435_839829468 = (NI)(LOC9 - ((NI) 1));
res_545438_839829468 = ((NI) 0);
{
/* iterate over all branches of the if node */
while (1) {
Tnode292802* it0;
if (!(res_545438_839829468 <= HEX3Atmp_545435_839829468)) goto LA11;
i_545011_839829468 = res_545438_839829468;
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC14)) goto LA15;
LOC14 = isemptytype_297441_850551059((*n0).typ);
LA15: ;
if (!LOC14) goto LA16;
(*d0).k = ((Tlockind292808) 0);
}
LA16: ;
it0 = (*n0).kindU.S6.sons->data[i_545011_839829468];
{
NI LOC20;
TY533289 LOC23;
NI LOC24;
TY532811 LOC25;
LOC20 = (NI)0;
LOC20 = len_293081_850551059(it0);
/* two sons: (condition, body) branch */
if (!(LOC20 == ((NI) 2))) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC24 = (NI)0;
LOC24 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC23, 0);
initlocexprsingleuse_539289_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], (&a0));
lelse0 = getlabel_539217_839829468(p0);
(*p0).labels += ((NI) 1);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = rdloc_538188_839829468((&a0));
LOC25[1] = lelse0;
/* "if (!cond) goto lelse" via format T839829468_555 */
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_555), LOC25, 2);
{
NIM_BOOL LOC28;
Ropeobj178006** LOC32;
Ropeobj178006** LOC33;
LOC28 = (NIM_BOOL)0;
LOC28 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC28) goto LA29;
LOC28 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA29: ;
if (!LOC28) goto LA30;
/* special command/flag mode: wrap the branch body in extra fragments */
LOC32 = (Ropeobj178006**)0;
LOC32 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178487_2381377266(LOC32, ((NimStringDesc*) &T839829468_223));
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0);
LOC33 = (Ropeobj178006**)0;
LOC33 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178487_2381377266(LOC33, ((NimStringDesc*) &T839829468_280));
}
goto LA26;
LA30: ;
{
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0);
}
LA26: ;
endblock_544060_839829468(p0);
{
NI LOC37;
TY178507 LOC40;
LOC37 = (NI)0;
LOC37 = sonslen_295351_850551059(n0);
/* more than one branch: jump over the remaining branches to lend */
if (!(((NI) 1) < LOC37)) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = lend0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC40, 1);
}
LA38: ;
fixlabel_539230_839829468(p0, lelse0);
}
goto LA18;
LA21: ;
{
NI LOC42;
TY533289 LOC45;
NI LOC46;
LOC42 = (NI)0;
LOC42 = len_293081_850551059(it0);
/* one son: the else branch */
if (!(LOC42 == ((NI) 1))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (NI)0;
LOC46 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0);
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], d0);
endblock_544060_839829468(p0);
}
goto LA18;
LA43: ;
{
/* any other arity is a malformed if node */
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_557));
}
LA18: ;
res_545438_839829468 += ((NI) 1);
} LA11: ;
}
}
{
NI LOC50;
LOC50 = (NI)0;
LOC50 = sonslen_295351_850551059(n0);
if (!(((NI) 1) < LOC50)) goto LA51;
fixlabel_539230_839829468(p0, lend0);
}
LA51: ;
}
/* Generates code for an object down-conversion (base -> derived view).
 * In the special command/flag mode the conversion is a no-op and son 0 is
 * emitted directly.  Otherwise the source is read, "->Sup"/".Sup"-style
 * suffixes (fragments T839829468_558 / _153) are appended once per level of
 * the inheritance distance, and the result is stored into d0 — with an
 * extra temporary when both the source and the unset destination are
 * ref-like (bitmask test over type kinds).  Machine-generated C. */
N_NIMCALL(void, downconv_558581_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
/* special mode: conversion is transparent, just emit the operand */
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], d0);
}
goto LA1;
LA5: ;
{
Ttype292840* dest0;
Tnode292802* arg0;
Ttype292840* src0;
Tloc292816 a0;
Ropeobj178006* r0;
NIM_BOOL isref0;
Ttype292840* LOC10;
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106247256320));
arg0 = (*n0).kindU.S6.sons->data[((NI) 0)];
{
/* skip nested conversion nodes (kind 66) down to the real operand */
while (1) {
if (!((*arg0).kind == ((Tnodekind292020) 66))) goto LA9;
arg0 = (*arg0).kindU.S6.sons->data[((NI) 0)];
} LA9: ;
}
src0 = skiptypes_296099_850551059((*arg0).typ, IL64(211106247256320));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, arg0, (&a0));
r0 = rdloc_538188_839829468((&a0));
LOC10 = (Ttype292840*)0;
LOC10 = skiptypes_296099_850551059((*arg0).typ, IL64(211106232576256));
/* bitmask 14680064 = type kinds 21..23: ref-like source */
isref0 = ((14680064 &((NU64)1<<((NU)((*LOC10).kind)&63U)))!=0);
{
if (!isref0) goto LA13;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_558));
}
goto LA11;
LA13: ;
{
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
}
LA11: ;
{
NI i_558650_839829468;
NI HEX3Atmp_558677_839829468;
NI LOC17;
NI res_558680_839829468;
i_558650_839829468 = (NI)0;
HEX3Atmp_558677_839829468 = (NI)0;
LOC17 = (NI)0;
LOC17 = inheritancediff_326252_3876443242(dest0, src0);
/* one extra suffix per level of |inheritance distance|, starting at 2 */
HEX3Atmp_558677_839829468 = (LOC17 > 0? (LOC17) : -(LOC17));
res_558680_839829468 = ((NI) 2);
{
while (1) {
if (!(res_558680_839829468 <= HEX3Atmp_558677_839829468)) goto LA19;
i_558650_839829468 = res_558680_839829468;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
res_558680_839829468 += ((NI) 1);
} LA19: ;
}
}
{
if (!isref0) goto LA22;
{
NIM_BOOL LOC26;
Ttype292840* LOC28;
TY532811 LOC31;
LOC26 = (NIM_BOOL)0;
LOC26 = ((*d0).k == ((Tlockind292808) 0));
if (!(LOC26)) goto LA27;
LOC28 = (Ttype292840*)0;
LOC28 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC26 = ((14680064 &((NU64)1<<((NU)((*LOC28).kind)&63U)))!=0);
LA27: ;
if (!LOC26) goto LA29;
/* ref result with unset destination: store through a temp via fmt _559 */
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = rdloc_538188_839829468((&(*d0)));
LOC31[1] = r0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_559), LOC31, 2);
}
goto LA24;
LA29: ;
{
r0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), r0);
putintodest_550468_839829468(p0, d0, (*n0).typ, r0, a0.s);
}
LA24: ;
}
goto LA20;
LA22: ;
{
putintodest_550468_839829468(p0, d0, (*n0).typ, r0, a0.s);
}
LA20: ;
}
LA1: ;
}
/* Generates code for an object up-conversion (derived -> base / checked
 * object conversion).  When object-check option bit 1 is on and the
 * destination type has a type field, it walks the ref-like layers of the
 * source type collecting a nil-check expression and a type-field access,
 * then emits a checked conversion (formats T839829468_560/_561 with the
 * destination's RTTI from gentypeinfo).  Finally the value is cast into
 * the destination: a plain value cast (fmt _430) for non-object (kind !=
 * 17) operands, or an address cast (fmt _429) otherwise.
 * Machine-generated C; control flow preserved verbatim. */
N_NIMCALL(void, upconv_558431_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* dest0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106247256320));
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
Ropeobj178006* r0;
Ropeobj178006* nilcheck0;
Ttype292840* t0;
LOC3 = (NIM_BOOL)0;
LOC3 = (((*p0).options &(1U<<((NU)(((Toption169009) 1))&31U)))!=0);
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = isobjlackingtypefield_533515_839829468(dest0);
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
/* object checks enabled and the destination has a type field */
r0 = rdloc_538188_839829468((&a0));
nilcheck0 = NIM_NIL;
t0 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
{
/* peel ref-like layers (kind bits in mask 14680064) of the source type */
while (1) {
Ttype292840* LOC23;
if (!((14680064 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0)) goto LA9;
{
if (!!(((*t0).kind == ((Ttypekind292244) 23)))) goto LA12;
nilcheck0 = r0;
}
LA12: ;
{
NIM_BOOL LOC16;
NIM_BOOL LOC18;
TY178507 LOC22;
LOC16 = (NIM_BOOL)0;
LOC16 = !(((*t0).kind == ((Ttypekind292244) 23)));
if (LOC16) goto LA17;
LOC18 = (NIM_BOOL)0;
LOC18 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC18) goto LA19;
LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA19: ;
LOC16 = !(LOC18);
LA17: ;
if (!LOC16) goto LA20;
/* wrap the access via format T839829468_124 (dereference step) */
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = r0;
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC22, 1);
}
LA20: ;
LOC23 = (Ttype292840*)0;
LOC23 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC23, IL64(211106232576256));
} LA9: ;
}
{
NIM_BOOL LOC26;
LOC26 = (NIM_BOOL)0;
LOC26 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC26) goto LA27;
LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA27: ;
if (!!(LOC26)) goto LA28;
{
/* walk object base types (kind 17 with a non-nil son 0), appending a
 * suffix (_153) per inheritance level */
while (1) {
NIM_BOOL LOC32;
LOC32 = (NIM_BOOL)0;
LOC32 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC32)) goto LA33;
LOC32 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
LA33: ;
if (!LOC32) goto LA31;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
t0 = skiptypes_296099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360));
} LA31: ;
}
}
LA28: ;
{
TY535238 LOC38;
/* nil-guarded check (fmt _560) when a nilcheck expression was collected */
if (!!((nilcheck0 == NIM_NIL))) goto LA36;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = nilcheck0;
LOC38[1] = r0;
LOC38[2] = gentypeinfo_535941_839829468((*p0).module, dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_560), LOC38, 3);
}
goto LA34;
LA36: ;
{
TY532811 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = r0;
LOC40[1] = gentypeinfo_535941_839829468((*p0).module, dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_561), LOC40, 2);
}
LA34: ;
}
LA6: ;
{
TY532811 LOC45;
Ropeobj178006* LOC46;
/* operand type is not an object (kind != 17): plain value cast (_430) */
if (!!(((*(*(*n0).kindU.S6.sons->data[((NI) 0)]).typ).kind == ((Ttypekind292244) 17)))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = gettypedesc_535673_839829468((*p0).module, (*n0).typ);
LOC45[1] = rdloc_538188_839829468((&a0));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC45, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC46, a0.s);
}
goto LA41;
LA43: ;
{
TY532811 LOC48;
Ropeobj178006* LOC49;
/* object operand: cast through the address of the location (_429) */
memset((void*)LOC48, 0, sizeof(LOC48));
LOC48[0] = gettypedesc_535673_839829468((*p0).module, dest0);
LOC48[1] = addrloc_538204_839829468((&a0));
LOC49 = (Ropeobj178006*)0;
LOC49 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_429), LOC48, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC49, a0.s);
}
LA41: ;
}
/* Generates code for a range-checked numeric conversion.
 * When range checks are disabled (option bit 3 off) or the destination's
 * underlying kind is outside the checkable mask, a plain cast (fmt _430)
 * is emitted; otherwise the runtime check helper named by `magic0` is
 * called via format T839829468_562 with the low/high bound literals taken
 * from sons 1 and 2 of the conversion node.  Machine-generated C. */
N_NIMCALL(void, genrangechck_556591_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* magic0) {
Tloc292816 a0;
Ttype292840* dest0;
memset((void*)(&a0), 0, sizeof(a0));
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
{
NIM_BOOL LOC3;
Ttype292840* LOC5;
TY532811 LOC8;
Ropeobj178006* LOC9;
LOC3 = (NIM_BOOL)0;
/* taken when range-check option bit 3 is OFF ... */
LOC3 = !((((*p0).options &(1U<<((NU)(((Toption169009) 3))&31U)))!=0));
if (LOC3) goto LA4;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059(dest0, 1048576);
/* ... or when the destination kind is in this (uncheckable) kind mask */
LOC3 = ((IL64(34084860461056) &((NU64)1<<((NU)((*LOC5).kind)&63U)))!=0);
LA4: ;
if (!LOC3) goto LA6;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = gettypedesc_535673_839829468((*p0).module, dest0);
LOC8[1] = rdcharloc_538227_839829468((&a0));
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC8, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC9, a0.s);
}
goto LA1;
LA6: ;
{
TY536475 LOC11;
Ropeobj178006* LOC12;
/* checked path: call the runtime helper `magic0` with (type, value,
 * low-bound literal, high-bound literal) */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535673_839829468((*p0).module, dest0);
LOC11[1] = rdcharloc_538227_839829468((&a0));
LOC11[2] = genliteral_549476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], dest0);
LOC11[3] = genliteral_549476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 2)], dest0);
LOC11[4] = rope_178277_2381377266(magic0);
LOC12 = (Ropeobj178006*)0;
LOC12 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_562), LOC11, 5);
putintodest_550468_839829468(p0, d0, dest0, LOC12, a0.s);
}
LA1: ;
}
/* Generates code converting a string value (son 0 of n0) to a cstring by
 * wrapping the source location in format T839829468_485 and storing the
 * result into destination d0. */
N_NIMCALL(void, convstrtocstr_556643_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
    Tloc292816 src;
    Ttype292840* desttype;
    TY178507 fmtargs;
    Ropeobj178006* converted;
    memset((void*)(&src), 0, sizeof(src));
    initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&src));
    desttype = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fmtargs[0] = rdloc_538188_839829468((&src));
    converted = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_485), fmtargs, 1);
    putintodest_550468_839829468(p0, d0, desttype, converted, src.s);
}
/* Generates code converting a cstring (son 0 of n0) to a string by calling
 * the runtime conversion emitted through format T839829468_411; the result
 * is stored into d0 and GC usage is recorded for the node. */
N_NIMCALL(void, convcstrtostr_556655_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
    Tloc292816 src;
    Ttype292840* desttype;
    TY178507 fmtargs;
    Ropeobj178006* converted;
    memset((void*)(&src), 0, sizeof(src));
    initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&src));
    desttype = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fmtargs[0] = rdloc_538188_839829468((&src));
    converted = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_411), fmtargs, 1);
    putintodest_550468_839829468(p0, d0, desttype, converted, src.s);
    /* conversion allocates: note GC usage for this node */
    gcusage_554439_839829468(n0);
}
/* Returns true when the symbol's kind falls inside bitmask 258048
 * (0x3F000, i.e. kind values 12..17) — presumably the routine-like symbol
 * kinds; confirm against the Tsymkind enum definition. */
static N_INLINE(NIM_BOOL, isroutine_297324_850551059)(Tsym292834* s0) {
    return ((258048 & (1U << ((NU)((*s0).kind) & 31U))) != 0);
}
/* Returns true when a closure-construction node is constant: its callee
 * (son 0) is a symbol node (kind 3) naming a routine, and its environment
 * (son 1) is a node of kind 23 (short-circuit: son 1 is only inspected
 * when the callee test passes, matching the original evaluation order). */
static N_INLINE(NIM_BOOL, isconstclosure_557810_839829468)(Tnode292802* n0) {
    Tnode292802* callee = (*n0).kindU.S6.sons->data[((NI) 0)];
    if (((*callee).kind == ((Tnodekind292020) 3)) && isroutine_297324_850551059((*callee).kindU.S4.sym)) {
        return ((*(*n0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 23));
    }
    return NIM_FALSE;
}
/* Generates code for a closure construction node (n0) into d0.
 * Constant closures (see isconstclosure) become a module-level constant
 * (fresh "TMP"-style name from the module label counter, emitted into
 * module section 8 via format T839829468_524).  Non-constant closures
 * evaluate the proc (son 0) and environment (son 1), reject a closure
 * callee (node kind 155) as an internal error, and assign both into a
 * fresh temporary via format T839829468_568.  Machine-generated C. */
N_NIMCALL(void, genclosure_557836_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
{
NIM_BOOL LOC3;
Ropeobj178006* tmp0;
Ropeobj178006* LOC6;
TY535238 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = isconstclosure_557810_839829468(n0);
if (!LOC3) goto LA4;
/* constant closure: emit a module-level constant and reference it */
(*(*p0).module).labels += ((NI) 1);
LOC6 = (Ropeobj178006*)0;
LOC6 = rope_178401_2381377266(((NI64) ((*(*p0).module).labels)));
tmp0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_566), LOC6);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535673_839829468((*p0).module, (*n0).typ);
LOC7[1] = tmp0;
LOC7[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC7, 3);
putintodest_550468_839829468(p0, d0, (*n0).typ, tmp0, ((Tstorageloc292812) 1));
}
goto LA1;
LA4: ;
{
Tloc292816 tmp0;
Tloc292816 a0;
Tloc292816 b0;
TY535238 LOC14;
memset((void*)(&tmp0), 0, sizeof(tmp0));
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&b0));
{
Tnode292802* LOC11;
LOC11 = (Tnode292802*)0;
LOC11 = skipconv_328882_3876443242((*n0).kindU.S6.sons->data[((NI) 0)]);
/* the callee must not itself be a closure (node kind 155) */
if (!((*LOC11).kind == ((Tnodekind292020) 155))) goto LA12;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_567));
}
LA12: ;
gettemp_537032_839829468(p0, (*n0).typ, (&tmp0), NIM_FALSE);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468((&tmp0));
LOC14[1] = rdloc_538188_839829468((&a0));
LOC14[2] = rdloc_538188_839829468((&b0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_568), LOC14, 3);
putlocintodest_539258_839829468(p0, d0, (&tmp0));
}
LA1: ;
}
/* Assigns a label name to a generated block, derived from the block's id
 * via format prefix T839829468_296, storing it through the GC write
 * barrier (unsureAsgnRef).  Returns the newly assigned label rope. */
static N_INLINE(Ropeobj178006*, assignlabel_544020_839829468)(Tblock529019* b0) {
    Ropeobj178006* idrope = rope_178401_2381377266(((NI64) ((*b0).id)));
    unsureAsgnRef((void**) (&(*b0).label), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), idrope));
    return (*b0).label;
}
/* Generates a computed-goto dispatch loop from a statement list containing
 * a case statement (node kind 97) over a small ordinal (first ordinal must
 * be 0, length capped at 10000, every branch ending in a break — node kind
 * 85 — is rejected, `when` arms with node kind 44 selectors are rejected).
 * Builds a static label-address array (formats _575/_576/_577), splits the
 * surrounding statements into the parts before and after the case
 * statement (stitched around each branch body), and emits a goto through
 * the array (format _578) after each branch.  Machine-generated C; the
 * statement order (section save/restore via asgnRefNoCycle) is critical —
 * do not reorder. */
N_NIMCALL(void, gencomputedgoto_545744_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NI casepos0;
NI arraysize0;
NI id0;
Ropeobj178006* tmp0;
TY178507 LOC27;
Ropeobj178006* gotoarray0;
TY532811 LOC28;
TY178507 LOC33;
NI topblock0;
Ropeobj178006* oldbody0;
Ropeobj178006* tailb0;
Ropeobj178006* taila0;
Tnode292802* casestmt0;
Tloc292816 a_545871_839829468;
TY532811 LOC41;
{ casepos0 = ((NI) -1);
arraysize0 = (NI)0;
{
/* scan the statement list for the case statement and validate it */
NI i_545768_839829468;
NI HEX3Atmp_545934_839829468;
NI LOC2;
NI res_545937_839829468;
i_545768_839829468 = (NI)0;
HEX3Atmp_545934_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(n0);
HEX3Atmp_545934_839829468 = (LOC2 - 1);
res_545937_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
if (!(res_545937_839829468 <= HEX3Atmp_545934_839829468)) goto LA4;
i_545768_839829468 = res_545937_839829468;
it0 = (*n0).kindU.S6.sons->data[i_545768_839829468];
{
NI64 asize0;
if (!((*it0).kind == ((Tnodekind292020) 97))) goto LA7;
{
Tnode292802* LOC11;
LOC11 = (Tnode292802*)0;
LOC11 = lastson_295364_850551059(it0);
/* every branch must end in a break statement (node kind 85) */
if (!!(((*LOC11).kind == ((Tnodekind292020) 85)))) goto LA12;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_570));
goto BeforeRet;
}
LA12: ;
casepos0 = i_545768_839829468;
asize0 = lengthord_320007_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ);
{
/* selector range capped at 10000 entries */
if (!(IL64(10000) < asize0)) goto LA16;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_571));
goto BeforeRet;
}
LA16: ;
arraysize0 = ((NI) (asize0));
{
NI64 LOC20;
LOC20 = (NI64)0;
LOC20 = firstord_320001_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ);
/* selector type must start at ordinal 0 */
if (!!((LOC20 == IL64(0)))) goto LA21;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_572));
goto BeforeRet;
}
LA21: ;
}
LA7: ;
res_545937_839829468 += ((NI) 1);
} LA4: ;
}
}
{
/* no case statement found in the loop body */
if (!(casepos0 < ((NI) 0))) goto LA25;
localerror_196085_155036129((*n0).info, ((NimStringDesc*) &T839829468_573));
goto BeforeRet;
}
LA25: ;
/* reserve arraysize+1 labels and build the label-address array literal */
id0 = (NI)(((NI) ((*p0).labels)) + ((NI) 1));
(*p0).labels += (NI)(arraysize0 + ((NI) 1));
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = rope_178401_2381377266(((NI64) (id0)));
tmp0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_574), LOC27, 1);
memset((void*)LOC28, 0, sizeof(LOC28));
LOC28[0] = tmp0;
LOC28[1] = rope_178401_2381377266(((NI64) (arraysize0)));
gotoarray0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_575), LOC28, 2);
{
NI i_545819_839829468;
NI HEX3Atmp_545942_839829468;
NI res_545945_839829468;
i_545819_839829468 = (NI)0;
HEX3Atmp_545942_839829468 = (NI)0;
HEX3Atmp_545942_839829468 = (NI)(arraysize0 - ((NI) 1));
res_545945_839829468 = ((NI) 1);
{
while (1) {
TY178507 LOC32;
if (!(res_545945_839829468 <= HEX3Atmp_545942_839829468)) goto LA31;
i_545819_839829468 = res_545945_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (id0)) + i_545819_839829468))));
addf_179205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_576), LOC32, 1);
res_545945_839829468 += ((NI) 1);
} LA31: ;
}
}
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (id0)) + arraysize0))));
addf_179205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_577), LOC33, 1);
line_532690_839829468(p0, ((Tcprocsection529011) 0), gotoarray0);
/* save the current statement-section text, then generate the statements
 * AFTER the case (tailb0) and BEFORE the case (taila0) into fresh ropes */
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
oldbody0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), NIM_NIL);
{
NI j_545854_839829468;
NI HEX3Atmp_545950_839829468;
NI HEX3Atmp_545951_839829468;
NI LOC35;
NI res_545954_839829468;
j_545854_839829468 = (NI)0;
HEX3Atmp_545950_839829468 = (NI)0;
HEX3Atmp_545951_839829468 = (NI)0;
HEX3Atmp_545950_839829468 = (NI)(casepos0 + ((NI) 1));
LOC35 = (NI)0;
LOC35 = len_293081_850551059(n0);
HEX3Atmp_545951_839829468 = (LOC35 - 1);
res_545954_839829468 = HEX3Atmp_545950_839829468;
{
while (1) {
if (!(res_545954_839829468 <= HEX3Atmp_545951_839829468)) goto LA37;
j_545854_839829468 = res_545954_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[j_545854_839829468]);
res_545954_839829468 += ((NI) 1);
} LA37: ;
}
}
tailb0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), NIM_NIL);
{
NI j_545866_839829468;
NI HEX3Atmp_545959_839829468;
NI res_545962_839829468;
j_545866_839829468 = (NI)0;
HEX3Atmp_545959_839829468 = (NI)0;
HEX3Atmp_545959_839829468 = (NI)(casepos0 - ((NI) 1));
res_545962_839829468 = ((NI) 0);
{
while (1) {
if (!(res_545962_839829468 <= HEX3Atmp_545959_839829468)) goto LA40;
j_545866_839829468 = res_545962_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[j_545866_839829468]);
res_545962_839829468 += ((NI) 1);
} LA40: ;
}
}
taila0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), HEX26_178418_2381377266(oldbody0, taila0));
casestmt0 = (*n0).kindU.S6.sons->data[casepos0];
/* emit the initial dispatch: goto gotoarray[selector] (format _578) */
memset((void*)(&a_545871_839829468), 0, sizeof(a_545871_839829468));
initlocexpr_539283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a_545871_839829468));
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = tmp0;
LOC41[1] = rdloc_538188_839829468((&a_545871_839829468));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_578), LOC41, 2);
{
/* one labelled block per case branch */
NI i_545894_839829468;
NI HEX3Atmp_545978_839829468;
NI LOC43;
NI res_545981_839829468;
i_545894_839829468 = (NI)0;
HEX3Atmp_545978_839829468 = (NI)0;
LOC43 = (NI)0;
LOC43 = len_293081_850551059(casestmt0);
HEX3Atmp_545978_839829468 = (LOC43 - 1);
res_545981_839829468 = ((NI) 1);
{
while (1) {
TY533289 LOC46;
NI LOC47;
Tnode292802* it0;
Tnode292802* LOC57;
Ropeobj178006** LOC58;
Ropeobj178006** LOC59;
Tloc292816 a0;
TY532811 LOC60;
if (!(res_545981_839829468 <= HEX3Atmp_545978_839829468)) goto LA45;
i_545894_839829468 = res_545981_839829468;
memset((void*)LOC46, 0, sizeof(LOC46));
LOC47 = (NI)0;
LOC47 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC46, 0);
it0 = (*casestmt0).kindU.S6.sons->data[i_545894_839829468];
{
/* emit one case label (format _580) per selector value of the branch */
NI j_545910_839829468;
NI HEX3Atmp_545970_839829468;
NI LOC49;
NI res_545973_839829468;
j_545910_839829468 = (NI)0;
HEX3Atmp_545970_839829468 = (NI)0;
LOC49 = (NI)0;
LOC49 = len_293081_850551059(it0);
HEX3Atmp_545970_839829468 = (NI)(LOC49 - ((NI) 2));
res_545973_839829468 = ((NI) 0);
{
while (1) {
NI64 val0;
TY178507 LOC56;
if (!(res_545973_839829468 <= HEX3Atmp_545970_839829468)) goto LA51;
j_545910_839829468 = res_545973_839829468;
{
if (!((*(*it0).kindU.S6.sons->data[j_545910_839829468]).kind == ((Tnodekind292020) 44))) goto LA54;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579));
goto BeforeRet;
}
LA54: ;
val0 = getordvalue_320129_3876443242((*it0).kindU.S6.sons->data[j_545910_839829468]);
memset((void*)LOC56, 0, sizeof(LOC56));
LOC56[0] = intliteral_539270_839829468((NI64)((NI64)(val0 + ((NI64) (id0))) + IL64(1)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_580), LOC56, 1);
res_545973_839829468 += ((NI) 1);
} LA51: ;
}
}
/* branch body, then the saved after-/before-case statements, then the
 * next dispatch goto (format _578) */
LOC57 = (Tnode292802*)0;
LOC57 = lastson_295364_850551059(it0);
genstmts_539244_839829468(p0, LOC57);
LOC58 = (Ropeobj178006**)0;
LOC58 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC58, tailb0);
LOC59 = (Ropeobj178006**)0;
LOC59 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC59, taila0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC60, 0, sizeof(LOC60));
LOC60[0] = tmp0;
LOC60[1] = rdloc_538188_839829468((&a0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_578), LOC60, 2);
endblock_544060_839829468(p0);
res_545981_839829468 += ((NI) 1);
} LA45: ;
}
}
}BeforeRet: ;
}
/* Generates code for a `while` statement (node t0: son 0 condition,
 * son 1 body).  Opens a loop block (format T839829468_569, isloop set),
 * evaluates the condition, and — unless the condition is the literal
 * `true` (int node kind 6 with value != 0 skips this) — emits a
 * conditional break to the block's label.  When the body carries the
 * computed-goto pragma (special word 182) and the C compiler supports it
 * (Cc table bit), gencomputedgoto is used instead of plain statements.
 * Saves/restores breakidx and tracks withinloop depth. */
N_NIMCALL(void, genwhilestmt_545985_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Tloc292816 a0;
NI oldbreakidx_546011_839829468;
TY533289 LOC1;
Tnode292802* loopbody0;
memset((void*)(&a0), 0, sizeof(a0));
(*p0).withinloop += ((NI) 1);
genlinedir_532823_839829468(p0, t0);
oldbreakidx_546011_839829468 = (*p0).breakidx;
memset((void*)LOC1, 0, sizeof(LOC1));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_569), LOC1, 0);
(*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE;
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
NIM_BOOL LOC4;
Ropeobj178006* label0;
TY532811 LOC8;
LOC4 = (NIM_BOOL)0;
/* skip the conditional break only for a literal non-zero condition */
LOC4 = !(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 6)));
if (LOC4) goto LA5;
LOC4 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval == IL64(0));
LA5: ;
if (!LOC4) goto LA6;
label0 = assignlabel_544020_839829468((&(*p0).blocks->data[(*p0).breakidx]));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468((&a0));
LOC8[1] = label0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_555), LOC8, 2);
}
LA6: ;
loopbody0 = (*t0).kindU.S6.sons->data[((NI) 1)];
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = stmtscontainpragma_528083_2036603609(loopbody0, ((Tspecialword275003) 182));
if (!(LOC11)) goto LA12;
LOC11 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 1))&7U)))!=0);
LA12: ;
if (!LOC11) goto LA13;
{
NIM_BOOL LOC17;
NI LOC18;
LOC17 = (NIM_BOOL)0;
LOC18 = (NI)0;
LOC18 = len_293081_850551059(loopbody0);
LOC17 = (LOC18 == ((NI) 2));
if (!(LOC17)) goto LA19;
LOC17 = ((*(*loopbody0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1));
LA19: ;
if (!LOC17) goto LA20;
/* unwrap a two-son body whose first son is node kind 1 */
loopbody0 = (*loopbody0).kindU.S6.sons->data[((NI) 1)];
}
LA20: ;
gencomputedgoto_545744_839829468(p0, loopbody0);
}
goto LA9;
LA13: ;
{
genstmts_539244_839829468(p0, loopbody0);
}
LA9: ;
{
TY533289 LOC27;
/* option bit 19 on: emit the per-iteration fragment (format _581) */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 19))&31U)))!=0)) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_581), LOC27, 0);
}
LA25: ;
endblock_544060_839829468(p0);
(*p0).breakidx = oldbreakidx_546011_839829468;
(*p0).withinloop -= ((NI) 1);
}
/* Generates code for a goto whose target comes from an ordinal literal.
 * Only literal nodes (kinds 5..15) are accepted: their integer value is
 * emitted through format T839829468_583; anything else reports a local
 * error (message T839829468_582). */
N_NIMCALL(void, gengotovar_544258_839829468)(Tcproc529021* p0, Tnode292802* value0) {
    if ((*value0).kind >= ((Tnodekind292020) 5) && (*value0).kind <= ((Tnodekind292020) 15)) {
        TY178507 fmtargs;
        memset((void*)fmtargs, 0, sizeof(fmtargs));
        fmtargs[0] = rope_178401_2381377266((*value0).kindU.S1.intval);
        linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_583), fmtargs, 1);
    } else {
        localerror_196085_155036129((*value0).info, ((NimStringDesc*) &T839829468_582));
    }
}
/* Emits the loading glue for a variable imported from a dynamic library.
 * Loads the library (sym0->annex), marks the symbol's location (flag bit
 * 0), replaces its name rope with the mangled dynlib accessor, emits the
 * load call (format T839829468_584: accessor, type, lib name, C-string of
 * the external name) into module section 16, and the accessor's variable
 * declaration (format _585) into section 9. */
N_NIMCALL(void, varindynamiclib_538812_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Tlib292820* lib0;
Ropeobj178006* extname0;
Ropeobj178006* tmp0;
TY535235 LOC1;
NimStringDesc* LOC2;
TY532811 LOC3;
lib0 = (*sym0).annex;
extname0 = (*sym0).loc.r;
loaddynamiclib_559481_839829468(m0, lib0);
(*sym0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
/* from here on the symbol is referenced through the mangled accessor */
tmp0 = mangledynlibproc_538816_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0);
(*m0).labels += ((NI) 2);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = tmp0;
LOC1[1] = gettypedesc_535673_839829468(m0, (*sym0).typ);
LOC1[2] = (*lib0).name;
LOC2 = (NimStringDesc*)0;
LOC2 = HEX24_178856_2381377266(extname0);
LOC1[3] = makecstring_191638_155036129(LOC2);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_584), LOC1, 4);
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = (*sym0).loc.r;
LOC3[1] = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_585), LOC3, 2);
}
N_NIMCALL(void, assignglobalvar_538819_839829468)(Tcproc529021* p0, Tsym292834* s0) {
{ {
Ropeobj178006* LOC5;
if (!((*s0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(s0);
fillloc_532282_839829468((&(*s0).loc), ((Tlockind292808) 3), (*s0).typ, LOC5, ((Tstorageloc292812) 3));
}
LA3: ;
{
Tcgen529027* q0;
if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA8;
q0 = findpendingmodule_532241_839829468((*p0).module, s0);
{
NIM_BOOL LOC12;
NIM_BOOL LOC14;
LOC12 = (NIM_BOOL)0;
LOC12 = !((q0 == NIM_NIL));
if (!(LOC12)) goto LA13;
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*s0).Sup.id);
LOC12 = !(LOC14);
LA13: ;
if (!LOC12) goto LA15;
varindynamiclib_538812_839829468(q0, s0);
}
goto LA10;
LA15: ;
{
asgnRefNoCycle((void**) (&(*s0).loc.r), mangledynlibproc_538816_839829468(s0));
}
LA10: ;
goto BeforeRet;
}
LA8: ;
useheader_532369_839829468((*p0).module, s0);
{
if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA20;
goto BeforeRet;
}
LA20: ;
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA24;
declarethreadvar_538676_839829468((*p0).module, s0, (((*s0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0));
}
goto LA22;
LA24: ;
{
Ropeobj178006* decl0;
Ropeobj178006* td0;
decl0 = NIM_NIL;
td0 = gettypedesc_535673_839829468((*p0).module, (*s0).loc.t);
{
TY178507 LOC43;
if (!(*s0).constraint == 0) goto LA29;
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0)) goto LA33;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_240));
}
LA33: ;
add_178482_2381377266(&decl0, td0);
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA37;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_121));
}
LA37: ;
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA41;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_122));
}
LA41: ;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = (*s0).loc.r;
addf_179205_2381377266(&decl0, ((NimStringDesc*) &T839829468_242), LOC43, 1);
}
goto LA27;
LA29: ;
{
NimStringDesc* LOC45;
TY532811 LOC46;
LOC45 = (NimStringDesc*)0;
LOC45 = rawNewString((*(*s0).constraint).kindU.S3.strval->Sup.len + 3);
appendString(LOC45, (*(*s0).constraint).kindU.S3.strval);
appendString(LOC45, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC46, 0, sizeof(LOC46));
LOC46[0] = td0;
LOC46[1] = (*s0).loc.r;
decl0 = HEX25_178905_2381377266(LOC45, LOC46, 2);
}
LA27: ;
add_178482_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 9))- 0], decl0);
}
LA22: ;
{
if (!(((NI) 0) < (*p0).withinloop)) goto LA49;
resetloc_538350_839829468(p0, (&(*s0).loc));
}
LA49: ;
{
TY535238 LOC55;
NimStringDesc* LOC56;
NimStringDesc* LOC57;
if (!(((*(*(*p0).module).module).options & 163840) == 163840)) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC56 = (NimStringDesc*)0;
LOC56 = rawNewString((*(*(*s0).owner).name).s->Sup.len + (*(*s0).name).s->Sup.len + 1);
appendString(LOC56, (*(*(*s0).owner).name).s);
appendChar(LOC56, 46);
appendString(LOC56, (*(*s0).name).s);
LOC57 = (NimStringDesc*)0;
LOC57 = nsuNormalize(LOC56);
LOC55[0] = makecstring_191638_155036129(LOC57);
LOC55[1] = (*s0).loc.r;
LOC55[2] = gentypeinfo_535941_839829468((*p0).module, (*s0).typ);
appcg_532632_839829468((*p0).module, &(*(*p0).module).s[(((Tcfilesection529005) 15))- 0], ((NimStringDesc*) &T839829468_586), LOC55, 3);
}
LA53: ;
}BeforeRet: ;
}
N_NIMCALL(Ropeobj178006*, gentraverseprocforglobal_538032_839829468)(Tcgen529027* m0, Tsym292834* s0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
Ttraversalclosure537019 c0;
Tcproc529021* p0;
Ropeobj178006* sloc0;
Ropeobj178006* header0;
TY178507 LOC8;
Ropeobj178006* generatedproc0;
TY535235 LOC9;
Ropeobj178006** LOC10;
Ropeobj178006** LOC11;
Ropeobj178006** LOC12;
TY178507 LOC13;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468(m0, (*s0).loc.t);
memset((void*)(&c0), 0, sizeof(c0));
p0 = newproc_529206_3723162438(NIM_NIL, m0);
sloc0 = (*s0).loc.r;
result0 = gettempname_533598_839829468(m0);
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
LOC4 = (((*s0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = emulatedthreadvars_532949_839829468();
LA5: ;
if (!LOC4) goto LA6;
accessthreadlocalvar_532945_839829468(p0, s0);
sloc0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_288), sloc0);
}
LA6: ;
c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_587));
c0.p = p0;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = result0;
header0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_588), LOC8, 1);
gentraverseproc_537022_839829468((&c0), sloc0, (*s0).loc.t);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = header0;
LOC10 = (Ropeobj178006**)0;
LOC10 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC9[1] = (*LOC10);
LOC11 = (Ropeobj178006**)0;
LOC11 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC9[2] = (*LOC11);
LOC12 = (Ropeobj178006**)0;
LOC12 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC9[3] = (*LOC12);
generatedproc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_190), LOC9, 4);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = header0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC13, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
return result0;
}
N_NIMCALL(void, registergcroot_543762_839829468)(Tcproc529021* p0, Tsym292834* v0) {
{
NIM_BOOL LOC3;
Ropeobj178006* prc0;
Ropeobj178006** LOC7;
TY178507 LOC8;
LOC3 = (NIM_BOOL)0;
LOC3 = ((240 &(1U<<((NU)(gselectedgc_169133_2607990831)&7U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = containsgarbagecollectedref_320117_3876443242((*v0).loc.t);
LA4: ;
if (!LOC3) goto LA5;
prc0 = gentraverseprocforglobal_538032_839829468((*p0).module, v0);
LOC7 = (Ropeobj178006**)0;
LOC7 = procsec_529194_3723162438((*(*p0).module).initproc, ((Tcprocsection529011) 1));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = prc0;
appcg_532632_839829468((*p0).module, LOC7, ((NimStringDesc*) &T839829468_589), LOC8, 1);
}
LA5: ;
}
static N_INLINE(NIM_BOOL, isassignedimmediately_543781_839829468)(Tnode292802* n0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
if (!((*n0).kind == ((Tnodekind292020) 1))) goto LA3;
result0 = NIM_FALSE;
goto BeforeRet;
}
LA3: ;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = isinvalidreturntype_533550_839829468((*n0).typ);
if (!LOC7) goto LA8;
result0 = NIM_FALSE;
goto BeforeRet;
}
LA8: ;
result0 = NIM_TRUE;
}BeforeRet: ;
return result0;
}
N_NIMCALL(void, genasgncall_543695_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
{
Ttype292840* LOC3;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, 2048);
if (!((*LOC3).callconv == ((Tcallingconvention292002) 8))) goto LA4;
genclosurecall_540452_839829468(p0, le0, ri0, d0);
}
goto LA1;
LA4: ;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC7)) goto LA8;
LOC7 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA8: ;
if (!LOC7) goto LA9;
geninfixcall_541929_839829468(p0, le0, ri0, d0);
}
goto LA1;
LA9: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC12)) goto LA13;
LOC12 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0);
LA13: ;
if (!LOC12) goto LA14;
gennamedparamcall_542616_839829468(p0, ri0, d0);
}
goto LA1;
LA14: ;
{
genprefixcall_539960_839829468(p0, le0, ri0, d0);
}
LA1: ;
poststmtactions_532942_839829468(p0);
}
static N_INLINE(void, loadinto_543928_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* a0) {
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*ri0).kind == ((Tnodekind292020) 27) || (*ri0).kind == ((Tnodekind292020) 29) || (*ri0).kind == ((Tnodekind292020) 30) || (*ri0).kind == ((Tnodekind292020) 31) || (*ri0).kind == ((Tnodekind292020) 26) || (*ri0).kind == ((Tnodekind292020) 28) || (*ri0).kind == ((Tnodekind292020) 32));
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = !(((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)));
if (LOC5) goto LA6;
LOC5 = ((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).magic == ((Tmagic292524) 0));
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
genasgncall_543695_839829468(p0, le0, ri0, a0);
}
goto LA1;
LA7: ;
{
if (!((*ri0).kind == ((Tnodekind292020) 47) || (*ri0).kind == ((Tnodekind292020) 65))) goto LA10;
genderef_543921_839829468(p0, ri0, a0, NIM_TRUE);
}
goto LA1;
LA10: ;
{
expr_539248_839829468(p0, ri0, a0);
}
LA1: ;
}
N_NIMCALL(void, gensinglevar_544276_839829468)(Tcproc529021* p0, Tnode292802* a0) {
Tsym292834* v0;
Tcproc529021* targetproc0;
{ v0 = (*(*a0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
if (!!(((1082130432 & (*v0).flags) == 0))) goto LA3;
{
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0)) goto LA7;
gengotovar_544258_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 2)]);
}
LA7: ;
goto BeforeRet;
}
LA3: ;
targetproc0 = p0;
{
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 3))&31U)))!=0)) goto LA11;
{
NIM_BOOL LOC15;
NIM_BOOL LOC16;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
LOC16 = (((*v0).flags & 96) == 32);
if (!(LOC16)) goto LA17;
LOC16 = ((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1));
LA17: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA18;
LOC15 = !((((*v0).loc.flags & 72) == 0));
LA18: ;
if (!LOC15) goto LA19;
goto BeforeRet;
}
LA19: ;
{
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)) goto LA23;
targetproc0 = (*(*p0).module).preinitproc;
}
LA23: ;
assignglobalvar_538819_839829468(targetproc0, v0);
genobjectinit_538242_839829468((*(*p0).module).preinitproc, ((Tcprocsection529011) 1), (*v0).typ, (&(*v0).loc), NIM_TRUE);
{
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
LOC27 = (((*v0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
if (!(LOC27)) goto LA28;
LOC27 = !((generatedheader_532201_839829468 == NIM_NIL));
LA28: ;
if (!LOC27) goto LA29;
genvarprototypeaux_544254_839829468(generatedheader_532201_839829468, v0);
}
LA29: ;
registergcroot_543762_839829468(p0, v0);
}
goto LA9;
LA11: ;
{
Tnode292802* value0;
NIM_BOOL imm0;
value0 = (*a0).kindU.S6.sons->data[((NI) 2)];
imm0 = isassignedimmediately_543781_839829468(value0);
{
NIM_BOOL LOC34;
NIM_BOOL LOC35;
NIM_BOOL LOC36;
NIM_BOOL LOC38;
NIM_BOOL LOC42;
Ropeobj178006* decl0;
Tloc292816 tmp0;
LOC34 = (NIM_BOOL)0;
LOC35 = (NIM_BOOL)0;
LOC36 = (NIM_BOOL)0;
LOC36 = imm0;
if (!(LOC36)) goto LA37;
LOC38 = (NIM_BOOL)0;
LOC38 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC38) goto LA39;
LOC38 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA39: ;
LOC36 = LOC38;
LA37: ;
LOC35 = LOC36;
if (!(LOC35)) goto LA40;
LOC35 = ((*p0).splitdecls == ((NI) 0));
LA40: ;
LOC34 = LOC35;
if (!(LOC34)) goto LA41;
LOC42 = (NIM_BOOL)0;
LOC42 = containshiddenpointer_320120_3876443242((*v0).typ);
LOC34 = !(LOC42);
LA41: ;
if (!LOC34) goto LA43;
genlinedir_532823_839829468(p0, a0);
decl0 = localvardecl_538532_839829468(p0, v0);
memset((void*)(&tmp0), 0, sizeof(tmp0));
{
NIM_BOOL LOC47;
NIM_BOOL LOC48;
Tnode292802* LOC50;
Tnode292802* LOC52;
Ropeobj178006* params0;
Ttype292840* typ0;
TY532811 LOC66;
LOC47 = (NIM_BOOL)0;
LOC48 = (NIM_BOOL)0;
LOC48 = ((*value0).kind == ((Tnodekind292020) 27) || (*value0).kind == ((Tnodekind292020) 29) || (*value0).kind == ((Tnodekind292020) 30) || (*value0).kind == ((Tnodekind292020) 31) || (*value0).kind == ((Tnodekind292020) 26) || (*value0).kind == ((Tnodekind292020) 28) || (*value0).kind == ((Tnodekind292020) 32));
if (!(LOC48)) goto LA49;
LOC50 = (Tnode292802*)0;
LOC50 = HEX5BHEX5D_293238_850551059(value0, ((NI) 0));
LOC48 = ((*LOC50).kind == ((Tnodekind292020) 3));
LA49: ;
LOC47 = LOC48;
if (!(LOC47)) goto LA51;
LOC52 = (Tnode292802*)0;
LOC52 = HEX5BHEX5D_293238_850551059(value0, ((NI) 0));
LOC47 = (((*(*LOC52).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 24))&31U)))!=0);
LA51: ;
if (!LOC47) goto LA53;
params0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*value0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
NI i_544619_839829468;
NI HEX3Atmp_544825_839829468;
NI LOC56;
NI res_544828_839829468;
i_544619_839829468 = (NI)0;
HEX3Atmp_544825_839829468 = (NI)0;
LOC56 = (NI)0;
LOC56 = len_293081_850551059(value0);
HEX3Atmp_544825_839829468 = (LOC56 - 1);
res_544828_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC65;
if (!(res_544828_839829468 <= HEX3Atmp_544825_839829468)) goto LA58;
i_544619_839829468 = res_544828_839829468;
{
TY533289 LOC63;
Ropeobj178006* LOC64;
if (!!((params0 == NIM_NIL))) goto LA61;
memset((void*)LOC63, 0, sizeof(LOC63));
LOC64 = (Ropeobj178006*)0;
LOC64 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC63, 0);
add_178482_2381377266(¶ms0, LOC64);
}
LA61: ;
LOC65 = (Ropeobj178006*)0;
LOC65 = genotherarg_539277_839829468(p0, value0, i_544619_839829468, typ0);
add_178482_2381377266(¶ms0, LOC65);
res_544828_839829468 += ((NI) 1);
} LA58: ;
}
}
memset((void*)LOC66, 0, sizeof(LOC66));
LOC66[0] = decl0;
LOC66[1] = params0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_590), LOC66, 2);
}
goto LA45;
LA53: ;
{
TY532811 LOC68;
initlocexprsingleuse_539289_839829468(p0, value0, (&tmp0));
memset((void*)LOC68, 0, sizeof(LOC68));
LOC68[0] = decl0;
LOC68[1] = rdloc_538188_839829468((&tmp0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_591), LOC68, 2);
}
LA45: ;
goto BeforeRet;
}
LA43: ;
assignlocalvar_538614_839829468(p0, v0);
initlocalvar_538398_839829468(p0, v0, imm0);
}
LA9: ;
{
if (!!(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1)))) goto LA71;
genlinedir_532823_839829468(targetproc0, a0);
loadinto_543928_839829468(targetproc0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&(*v0).loc));
}
LA71: ;
}BeforeRet: ;
}
N_NIMCALL(void, genclosurevar_544832_839829468)(Tcproc529021* p0, Tnode292802* a0) {
NIM_BOOL immediateasgn0;
immediateasgn0 = !(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1)));
{
Tloc292816 v0;
if (!immediateasgn0) goto LA3;
memset((void*)(&v0), 0, sizeof(v0));
initlocexpr_539283_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (&v0));
genlinedir_532823_839829468(p0, a0);
loadinto_543928_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&v0));
}
LA3: ;
}
N_NIMCALL(void, genvartuple_543794_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 tup0;
Tloc292816 field0;
NI L0;
NIM_BOOL uselowering0;
Ttype292840* t0;
{ memset((void*)(&tup0), 0, sizeof(tup0));
memset((void*)(&field0), 0, sizeof(field0));
{
if (!!(((*n0).kind == ((Tnodekind292020) 36)))) goto LA3;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592));
}
LA3: ;
L0 = sonslen_295351_850551059(n0);
uselowering0 = NIM_FALSE;
{
NI i_543822_839829468;
NI HEX3Atmp_543905_839829468;
NI res_543908_839829468;
i_543822_839829468 = (NI)0;
HEX3Atmp_543905_839829468 = (NI)0;
HEX3Atmp_543905_839829468 = (NI)(L0 - ((NI) 3));
res_543908_839829468 = ((NI) 0);
{
while (1) {
if (!(res_543908_839829468 <= HEX3Atmp_543905_839829468)) goto LA7;
i_543822_839829468 = res_543908_839829468;
{
Tnode292802* LOC10;
LOC10 = (Tnode292802*)0;
LOC10 = HEX5BHEX5D_293238_850551059(n0, i_543822_839829468);
if (!!(((*LOC10).kind == ((Tnodekind292020) 3)))) goto LA11;
uselowering0 = NIM_TRUE;
goto LA5;
}
LA11: ;
res_543908_839829468 += ((NI) 1);
} LA7: ;
}
} LA5: ;
{
Tnode292802* LOC17;
if (!uselowering0) goto LA15;
LOC17 = (Tnode292802*)0;
LOC17 = lowertupleunpacking_433037_2218250499(n0, (*p0).prc);
genstmts_539244_839829468(p0, LOC17);
goto BeforeRet;
}
LA15: ;
genlinedir_532823_839829468(p0, n0);
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(L0 - ((NI) 1))], (&tup0));
t0 = getuniquetype_528640_2036603609(tup0.t);
{
NI i_543846_839829468;
NI HEX3Atmp_543914_839829468;
NI res_543917_839829468;
i_543846_839829468 = (NI)0;
HEX3Atmp_543914_839829468 = (NI)0;
HEX3Atmp_543914_839829468 = (NI)(L0 - ((NI) 3));
res_543917_839829468 = ((NI) 0);
{
while (1) {
if (!(res_543917_839829468 <= HEX3Atmp_543914_839829468)) goto LA20;
i_543846_839829468 = res_543917_839829468;
{
Tsym292834* v0;
v0 = (*(*n0).kindU.S6.sons->data[i_543846_839829468]).kindU.S4.sym;
{
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)) goto LA24;
goto LA21;
}
LA24: ;
{
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 3))&31U)))!=0)) goto LA28;
assignglobalvar_538819_839829468(p0, v0);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 1), (*v0).typ, (&(*v0).loc), NIM_TRUE);
registergcroot_543762_839829468(p0, v0);
}
goto LA26;
LA28: ;
{
Tnode292802* LOC31;
NIM_BOOL LOC32;
assignlocalvar_538614_839829468(p0, v0);
LOC31 = (Tnode292802*)0;
LOC31 = HEX5BHEX5D_293238_850551059(n0, (NI)(L0 - ((NI) 1)));
LOC32 = (NIM_BOOL)0;
LOC32 = isassignedimmediately_543781_839829468(LOC31);
initlocalvar_538398_839829468(p0, v0, LOC32);
}
LA26: ;
initloc_532273_839829468((&field0), ((Tlockind292808) 6), (*t0).sons->data[i_543846_839829468], tup0.s);
{
TY532811 LOC37;
if (!((*t0).kind == ((Ttypekind292244) 18))) goto LA35;
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = rdloc_538188_839829468((&tup0));
LOC37[1] = rope_178401_2381377266(((NI64) (i_543846_839829468)));
field0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_185), LOC37, 2);
}
goto LA33;
LA35: ;
{
TY532811 LOC43;
{
if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_543846_839829468]).kind == ((Tnodekind292020) 3)))) goto LA41;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592));
}
LA41: ;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = rdloc_538188_839829468((&tup0));
LOC43[1] = manglerecfieldname_534361_839829468((*(*(*t0).n).kindU.S6.sons->data[i_543846_839829468]).kindU.S4.sym, t0);
field0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC43, 2);
}
LA33: ;
putlocintodest_539258_839829468(p0, (&(*v0).loc), (&field0));
} LA21: ;
res_543917_839829468 += ((NI) 1);
} LA20: ;
}
}
}BeforeRet: ;
}
N_NIMCALL(void, genvarstmt_544854_839829468)(Tcproc529021* p0, Tnode292802* n0) {
{
NI i_544869_839829468;
NI HEX3Atmp_544902_839829468;
NI LOC2;
NI res_544905_839829468;
i_544869_839829468 = (NI)0;
HEX3Atmp_544902_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(n0);
HEX3Atmp_544902_839829468 = (NI)(LOC2 - ((NI) 1));
res_544905_839829468 = ((NI) 0);
{
while (1) {
if (!(res_544905_839829468 <= HEX3Atmp_544902_839829468)) goto LA4;
i_544869_839829468 = res_544905_839829468;
{
Tnode292802* a0;
a0 = (*n0).kindU.S6.sons->data[i_544869_839829468];
{
if (!((*a0).kind == ((Tnodekind292020) 125))) goto LA8;
goto LA5;
}
LA8: ;
{
if (!((*a0).kind == ((Tnodekind292020) 35))) goto LA12;
{
if (!((*(*a0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3))) goto LA16;
gensinglevar_544276_839829468(p0, a0);
}
goto LA14;
LA16: ;
{
genclosurevar_544832_839829468(p0, a0);
}
LA14: ;
}
goto LA10;
LA12: ;
{
genvartuple_543794_839829468(p0, a0);
}
LA10: ;
} LA5: ;
res_544905_839829468 += ((NI) 1);
} LA4: ;
}
}
}
static N_INLINE(NIM_BOOL, emitlazily_532248_839829468)(Tsym292834* s0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
Tsym292834* LOC3;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC1 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0);
if (LOC1) goto LA2;
LOC3 = (Tsym292834*)0;
LOC3 = getmodule_299123_2984716966(s0);
LOC1 = (((*LOC3).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0);
LA2: ;
result0 = LOC1;
return result0;
}
N_NIMCALL(void, genconststmt_544909_839829468)(Tcproc529021* p0, Tnode292802* t0) {
{
NI i_544924_839829468;
NI HEX3Atmp_544975_839829468;
NI LOC2;
NI res_544978_839829468;
i_544924_839829468 = (NI)0;
HEX3Atmp_544975_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_544975_839829468 = (NI)(LOC2 - ((NI) 1));
res_544978_839829468 = ((NI) 0);
{
while (1) {
if (!(res_544978_839829468 <= HEX3Atmp_544975_839829468)) goto LA4;
i_544924_839829468 = res_544978_839829468;
{
Tnode292802* it0;
Tsym292834* c0;
it0 = (*t0).kindU.S6.sons->data[i_544924_839829468];
{
if (!((*it0).kind == ((Tnodekind292020) 125))) goto LA8;
goto LA5;
}
LA8: ;
{
if (!!(((*it0).kind == ((Tnodekind292020) 102)))) goto LA12;
internalerror_196100_155036129((*t0).info, ((NimStringDesc*) &T839829468_593));
}
LA12: ;
c0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NIM_BOOL LOC16;
LOC16 = (NIM_BOOL)0;
LOC16 = containscompiletimeonly_328721_3876443242((*c0).typ);
if (!LOC16) goto LA17;
goto LA5;
}
goto LA14;
LA17: ;
{
NIM_BOOL LOC20;
NIM_BOOL LOC21;
NI LOC24;
LOC20 = (NIM_BOOL)0;
LOC21 = (NIM_BOOL)0;
LOC21 = ((17629200 &((NU64)1<<((NU)((*(*c0).typ).kind)&63U)))!=0);
if (!(LOC21)) goto LA22;
LOC21 = !((((*c0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0));
LA22: ;
LOC20 = LOC21;
if (!(LOC20)) goto LA23;
LOC24 = (NI)0;
LOC24 = len_293081_850551059((*c0).ast);
LOC20 = !((LOC24 == ((NI) 0)));
LA23: ;
if (!LOC20) goto LA25;
{
NIM_BOOL LOC29;
LOC29 = (NIM_BOOL)0;
LOC29 = emitlazily_532248_839829468(c0);
if (!!(LOC29)) goto LA30;
requestconstimpl_539240_839829468(p0, c0);
}
LA30: ;
}
goto LA14;
LA25: ;
LA14: ;
} LA5: ;
res_544978_839829468 += ((NI) 1);
} LA4: ;
}
}
}
N_NIMCALL(void, gencasestringbranch_547100_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, Ropeobj178006* labl0, Ropeobj178006** branches0, NI branches0Len0) {
Tloc292816 x0;
NI length0;
memset((void*)(&x0), 0, sizeof(x0));
length0 = sonslen_295351_850551059(b0);
{
NI i_547122_839829468;
NI HEX3Atmp_547410_839829468;
NI res_547413_839829468;
i_547122_839829468 = (NI)0;
HEX3Atmp_547410_839829468 = (NI)0;
HEX3Atmp_547410_839829468 = (NI)(length0 - ((NI) 2));
res_547413_839829468 = ((NI) 0);
{
while (1) {
NI j0;
NI64 LOC4;
TY535238 LOC5;
if (!(res_547413_839829468 <= HEX3Atmp_547410_839829468)) goto LA3;
i_547122_839829468 = res_547413_839829468;
initlocexpr_539283_839829468(p0, (*b0).kindU.S6.sons->data[i_547122_839829468], (&x0));
LOC4 = (NI64)0;
LOC4 = hashstring_528100_2036603609((*(*b0).kindU.S6.sons->data[i_547122_839829468]).kindU.S3.strval);
j0 = ((NI) ((NI64)(LOC4 & ((NI64) ((branches0Len0-1))))));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468(e0);
LOC5[1] = rdloc_538188_839829468((&x0));
LOC5[2] = labl0;
appcg_532632_839829468((*p0).module, &branches0[j0], ((NimStringDesc*) &T839829468_595), LOC5, 3);
res_547413_839829468 += ((NI) 1);
} LA3: ;
}
}
}
N_NIMCALL(void, exprblock_544103_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
TY533289 LOC1;
NI LOC2;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (NI)0;
LOC2 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC1, 0);
expr_539248_839829468(p0, n0, d0);
endblock_544060_839829468(p0);
}
N_NIMCALL(Ropeobj178006*, gencasesecondpass_546965_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NI labid0, NI until0) {
Ropeobj178006* result0;
Ropeobj178006* lend0;
result0 = (Ropeobj178006*)0;
lend0 = getlabel_539217_839829468(p0);
{
NI i_546984_839829468;
NI res_547017_839829468;
i_546984_839829468 = (NI)0;
res_547017_839829468 = ((NI) 1);
{
while (1) {
TY178507 LOC10;
if (!(res_547017_839829468 <= until0)) goto LA3;
i_546984_839829468 = res_547017_839829468;
{
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
LOC6 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC6)) goto LA7;
LOC6 = isemptytype_297441_850551059((*t0).typ);
LA7: ;
if (!LOC6) goto LA8;
(*d0).k = ((Tlockind292808) 0);
}
LA8: ;
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rope_178401_2381377266(((NI64) ((NI)(labid0 + i_546984_839829468))));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_599), LOC10, 1);
{
NI length0;
TY178507 LOC15;
if (!((*(*t0).kindU.S6.sons->data[i_546984_839829468]).kind == ((Tnodekind292020) 85))) goto LA13;
length0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i_546984_839829468]);
exprblock_544103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_546984_839829468]).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = lend0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC15, 1);
}
goto LA11;
LA13: ;
{
exprblock_544103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_546984_839829468]).kindU.S6.sons->data[((NI) 0)], d0);
}
LA11: ;
res_547017_839829468 += ((NI) 1);
} LA3: ;
}
}
result0 = lend0;
return result0;
}
N_NIMCALL(void, gencasegenericbranch_546910_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj178006* labl0) {
Tloc292816 x0;
Tloc292816 y0;
NI length0;
memset((void*)(&x0), 0, sizeof(x0));
memset((void*)(&y0), 0, sizeof(y0));
length0 = sonslen_295351_850551059(b0);
{
NI i_546932_839829468;
NI HEX3Atmp_546958_839829468;
NI res_546961_839829468;
i_546932_839829468 = (NI)0;
HEX3Atmp_546958_839829468 = (NI)0;
HEX3Atmp_546958_839829468 = (NI)(length0 - ((NI) 2));
res_546961_839829468 = ((NI) 0);
{
while (1) {
if (!(res_546961_839829468 <= HEX3Atmp_546958_839829468)) goto LA3;
i_546932_839829468 = res_546961_839829468;
{
TY535235 LOC8;
if (!((*(*b0).kindU.S6.sons->data[i_546932_839829468]).kind == ((Tnodekind292020) 44))) goto LA6;
initlocexpr_539283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_546932_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0));
initlocexpr_539283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_546932_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdcharloc_538227_839829468(e0);
LOC8[1] = rdcharloc_538227_839829468((&x0));
LOC8[2] = rdcharloc_538227_839829468((&y0));
LOC8[3] = labl0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), rangeformat0, LOC8, 4);
}
goto LA4;
LA6: ;
{
TY535238 LOC10;
initlocexpr_539283_839829468(p0, (*b0).kindU.S6.sons->data[i_546932_839829468], (&x0));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdcharloc_538227_839829468(e0);
LOC10[1] = rdcharloc_538227_839829468((&x0));
LOC10[2] = labl0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), eqformat0, LOC10, 3);
}
LA4: ;
res_546961_839829468 += ((NI) 1);
} LA3: ;
}
}
}
N_NIMCALL(Ropeobj178006*, genifforcaseuntil_547021_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc292816* a0) {
Ropeobj178006* result0;
NI labid0;
result0 = (Ropeobj178006*)0;
labid0 = (*p0).labels;
{
NI i_547042_839829468;
NI res_547083_839829468;
i_547042_839829468 = (NI)0;
res_547083_839829468 = ((NI) 1);
{
while (1) {
if (!(res_547083_839829468 <= until0)) goto LA3;
i_547042_839829468 = res_547083_839829468;
(*p0).labels += ((NI) 1);
{
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
if (!((*(*t0).kindU.S6.sons->data[i_547042_839829468]).kind == ((Tnodekind292020) 85))) goto LA6;
LOC8 = (Ropeobj178006*)0;
LOC8 = rope_178401_2381377266(((NI64) ((*p0).labels)));
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC8);
gencasegenericbranch_546910_839829468(p0, (*t0).kindU.S6.sons->data[i_547042_839829468], a0, rangeformat0, eqformat0, LOC9);
}
goto LA4;
LA6: ;
{
TY178507 LOC11;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rope_178401_2381377266(((NI64) ((*p0).labels)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC11, 1);
}
LA4: ;
res_547083_839829468 += ((NI) 1);
} LA3: ;
}
}
{
NI LOC14;
NI gototarget0;
TY178507 LOC17;
TY178507 LOC18;
LOC14 = (NI)0;
LOC14 = len_293081_850551059(t0);
if (!(until0 < (NI)(LOC14 - ((NI) 1)))) goto LA15;
(*p0).labels += ((NI) 1);
gototarget0 = (*p0).labels;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rope_178401_2381377266(((NI64) (gototarget0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC17, 1);
result0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), until0);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rope_178401_2381377266(((NI64) (gototarget0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_599), LOC18, 1);
}
goto LA12;
LA15: ;
{
result0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), until0);
}
LA12: ;
return result0;
}
N_NIMCALL(void, gencasegeneric_547087_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0) {
Tloc292816 a0;
Ropeobj178006* lend0;
NI LOC1;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC1 = (NI)0;
LOC1 = sonslen_295351_850551059(t0);
lend0 = genifforcaseuntil_547021_839829468(p0, t0, d0, rangeformat0, eqformat0, (NI)(LOC1 - ((NI) 1)), (&a0));
fixlabel_539230_839829468(p0, lend0);
}
/* Machine-generated C (Nim compiler C backend; appears to correspond to
 * ccgstmts' genStringCase -- TODO confirm against compiler sources).
 * Emits C code for a Nim `case` statement whose selector is a string:
 * when more than 8 string literals are present, it builds a hash-based
 * `switch` over a power-of-two bitmask of buckets; otherwise it falls back
 * to the generic linear-comparison case generator.
 * p0: current proc codegen context; t0: the case AST node; d0: destination loc. */
N_NIMCALL(void, genstringcase_547417_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
NI strings0;
strings0 = ((NI) 0);
/* First pass: count string literals across all branches.
 * Node kind 85 presumably marks an of-branch (its last son is the body,
 * hence the `len - 1`) -- TODO confirm kind numbering. */
{
NI i_547435_839829468;
NI HEX3Atmp_547550_839829468;
NI LOC2;
NI res_547553_839829468;
i_547435_839829468 = (NI)0;
HEX3Atmp_547550_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_547550_839829468 = (NI)(LOC2 - ((NI) 1));
res_547553_839829468 = ((NI) 1);
{
while (1) {
if (!(res_547553_839829468 <= HEX3Atmp_547550_839829468)) goto LA4;
i_547435_839829468 = res_547553_839829468;
{
NI LOC9;
if (!((*(*t0).kindU.S6.sons->data[i_547435_839829468]).kind == ((Tnodekind292020) 85))) goto LA7;
LOC9 = (NI)0;
LOC9 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i_547435_839829468]);
strings0 += (NI)(LOC9 - ((NI) 1));
}
LA7: ;
res_547553_839829468 += ((NI) 1);
} LA4: ;
}
}
{
NI bitmask0;
NI LOC14;
TY191350* branches0;
Tloc292816 a0;
NI labid0;
TY532811 LOC26;
TY533289 LOC35;
Ropeobj178006* lend0;
NI LOC42;
/* Hash-switch path: only worthwhile when there are more than 8 literals. */
if (!(((NI) 8) < strings0)) goto LA12;
/* Bucket count is the next power of two; bitmask = count - 1. */
LOC14 = (NI)0;
LOC14 = nextpoweroftwo_101629_1009420244(strings0);
bitmask0 = (NI)(LOC14 - ((NI) 1));
branches0 = (TY191350*)0;
branches0 = (TY191350*) newSeq((&NTI191350), ((NI) ((NI)(bitmask0 + ((NI) 1)))));
memset((void*)(&a0), 0, sizeof(a0));
/* Evaluate the case selector (son 0) into `a0`. */
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
labid0 = (*p0).labels;
/* Second pass: allocate a fresh label per branch and distribute the
 * generated comparison code of each of-branch into the hash buckets. */
{
NI i_547484_839829468;
NI HEX3Atmp_547560_839829468;
NI LOC16;
NI res_547563_839829468;
i_547484_839829468 = (NI)0;
HEX3Atmp_547560_839829468 = (NI)0;
LOC16 = (NI)0;
LOC16 = sonslen_295351_850551059(t0);
HEX3Atmp_547560_839829468 = (NI)(LOC16 - ((NI) 1));
res_547563_839829468 = ((NI) 1);
{
while (1) {
if (!(res_547563_839829468 <= HEX3Atmp_547560_839829468)) goto LA18;
i_547484_839829468 = res_547563_839829468;
(*p0).labels += ((NI) 1);
{
Ropeobj178006* LOC23;
Ropeobj178006* LOC24;
if (!((*(*t0).kindU.S6.sons->data[i_547484_839829468]).kind == ((Tnodekind292020) 85))) goto LA21;
LOC23 = (Ropeobj178006*)0;
LOC23 = rope_178401_2381377266(((NI64) ((*p0).labels)));
LOC24 = (Ropeobj178006*)0;
LOC24 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC23);
gencasestringbranch_547100_839829468(p0, (*t0).kindU.S6.sons->data[i_547484_839829468], (&a0), LOC24, branches0->data, branches0->Sup.len);
}
goto LA19;
LA21: ;
{
}
LA19: ;
res_547563_839829468 += ((NI) 1);
} LA18: ;
}
}
/* Emit the switch header over the masked string hash (format text lives in
 * the string table, not visible here). */
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rdloc_538188_839829468((&a0));
LOC26[1] = rope_178401_2381377266(((NI64) (bitmask0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_596), LOC26, 2);
/* Emit one `case <bucket>:` arm for every non-empty bucket. */
{
NI j_547518_839829468;
NI HEX3Atmp_547568_839829468;
NI res_547571_839829468;
j_547518_839829468 = (NI)0;
HEX3Atmp_547568_839829468 = (NI)0;
HEX3Atmp_547568_839829468 = (branches0 ? (branches0->Sup.len-1) : -1);
res_547571_839829468 = ((NI) 0);
{
while (1) {
if (!(res_547571_839829468 <= HEX3Atmp_547568_839829468)) goto LA29;
j_547518_839829468 = res_547571_839829468;
{
TY532811 LOC34;
if (!!((branches0->data[j_547518_839829468] == NIM_NIL))) goto LA32;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = intliteral_539270_839829468(((NI64) (j_547518_839829468)));
LOC34[1] = branches0->data[j_547518_839829468];
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_597), LOC34, 2);
}
LA32: ;
res_547571_839829468 += ((NI) 1);
} LA29: ;
}
}
/* Close the switch. */
memset((void*)LOC35, 0, sizeof(LOC35));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC35, 0);
/* If the last branch is still an of-branch (i.e. no else part), emit a
 * jump target line using the current label counter. */
{
NI LOC38;
TY178507 LOC41;
LOC38 = (NI)0;
LOC38 = sonslen_295351_850551059(t0);
if (!!(((*(*t0).kindU.S6.sons->data[(NI)(LOC38 - ((NI) 1))]).kind == ((Tnodekind292020) 85)))) goto LA39;
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = rope_178401_2381377266(((NI64) ((*p0).labels)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC41, 1);
}
LA39: ;
/* Second pass generator emits the branch bodies; `lend0` is the label of
 * the case end, fixed up afterwards. */
LOC42 = (NI)0;
LOC42 = sonslen_295351_850551059(t0);
lend0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), (NI)(LOC42 - ((NI) 1)));
fixlabel_539230_839829468(p0, lend0);
}
goto LA10;
LA12: ;
/* <= 8 literals: generic if-else-chain case generation. */
{
gencasegeneric_547087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_490), ((NimStringDesc*) &T839829468_595));
}
LA10: ;
}
/* Machine-generated C (Nim compiler backend; presumably ccgstmts'
 * genGotoForCase -- TODO confirm). Emits the body of a computed-goto style
 * `case` used for goto-based state machines: for each branch it opens a
 * block, emits one label line per ordinal value of the branch, then the
 * branch body. Range values (node kind 44, presumably nkRange) are rejected
 * with a local error since a range cannot be expanded into discrete labels. */
N_NIMCALL(void, gengotoforcase_545673_839829468)(Tcproc529021* p0, Tnode292802* casestmt0) {
{ {
NI i_545695_839829468;
NI HEX3Atmp_545737_839829468;
NI LOC2;
NI res_545740_839829468;
i_545695_839829468 = (NI)0;
HEX3Atmp_545737_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(casestmt0);
HEX3Atmp_545737_839829468 = (LOC2 - 1);
res_545740_839829468 = ((NI) 1);
{
while (1) {
TY533289 LOC5;
NI LOC6;
Tnode292802* it0;
Tnode292802* LOC16;
if (!(res_545740_839829468 <= HEX3Atmp_545737_839829468)) goto LA4;
i_545695_839829468 = res_545740_839829468;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (NI)0;
LOC6 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC5, 0);
it0 = (*casestmt0).kindU.S6.sons->data[i_545695_839829468];
/* Walk every value son of the branch (all sons except the last, which
 * is the branch body). */
{
NI j_545711_839829468;
NI HEX3Atmp_545730_839829468;
NI LOC8;
NI res_545733_839829468;
j_545711_839829468 = (NI)0;
HEX3Atmp_545730_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = len_293081_850551059(it0);
HEX3Atmp_545730_839829468 = (NI)(LOC8 - ((NI) 2));
res_545733_839829468 = ((NI) 0);
{
while (1) {
NI64 val0;
TY178507 LOC15;
if (!(res_545733_839829468 <= HEX3Atmp_545730_839829468)) goto LA10;
j_545711_839829468 = res_545733_839829468;
{
/* Range in a goto-case is not supported: report and bail out. */
if (!((*(*it0).kindU.S6.sons->data[j_545711_839829468]).kind == ((Tnodekind292020) 44))) goto LA13;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579));
goto BeforeRet;
}
LA13: ;
/* Emit one label line for this ordinal value. */
val0 = getordvalue_320129_3876443242((*it0).kindU.S6.sons->data[j_545711_839829468]);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = rope_178401_2381377266(val0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_602), LOC15, 1);
res_545733_839829468 += ((NI) 1);
} LA10: ;
}
}
/* Generate the branch body (last son) and close the block. */
LOC16 = (Tnode292802*)0;
LOC16 = lastson_295364_850551059(it0);
genstmts_539244_839829468(p0, LOC16);
endblock_544060_839829468(p0);
res_545740_839829468 += ((NI) 1);
} LA4: ;
}
}
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler backend). Returns true if any value of
 * the of-branch `b0` is a range node (kind 44) spanning more than 256
 * ordinal values (hi - lo > 256) -- such ranges are considered too big to
 * expand into a C `case lo ... hi:` arm on compilers lacking range-case
 * support. Only the value sons are scanned (last son is the branch body). */
N_NIMCALL(NIM_BOOL, branchhastoobigrange_547576_839829468)(Tnode292802* b0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
NI i_547591_839829468;
NI HEX3Atmp_547609_839829468;
NI LOC2;
NI res_547612_839829468;
i_547591_839829468 = (NI)0;
HEX3Atmp_547609_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(b0);
HEX3Atmp_547609_839829468 = (NI)(LOC2 - ((NI) 2));
res_547612_839829468 = ((NI) 0);
{
while (1) {
if (!(res_547612_839829468 <= HEX3Atmp_547609_839829468)) goto LA4;
i_547591_839829468 = res_547612_839829468;
{
NIM_BOOL LOC7;
/* Short-circuit AND: the node is a range AND its span exceeds 256
 * (sons 0/1 of the range are the low/high int literals). */
LOC7 = (NIM_BOOL)0;
LOC7 = ((*(*b0).kindU.S6.sons->data[i_547591_839829468]).kind == ((Tnodekind292020) 44));
if (!(LOC7)) goto LA8;
LOC7 = (IL64(256) < (NI64)((*(*(*b0).kindU.S6.sons->data[i_547591_839829468]).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval - (*(*(*b0).kindU.S6.sons->data[i_547591_839829468]).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval));
LA8: ;
if (!LOC7) goto LA9;
result0 = NIM_TRUE;
goto BeforeRet;
}
LA9: ;
res_547612_839829468 += ((NI) 1);
} LA4: ;
}
}
}BeforeRet: ;
return result0;
}
/* Machine-generated C (Nim compiler backend; presumably ccgstmts'
 * ifSwitchSplitPoint -- TODO confirm). Determines up to which branch index
 * of the case node `n0` the code must be emitted as an if-chain instead of
 * a C `switch`. A branch forces the split when either its statement block
 * contains a particular pragma (special word 181 -- TODO confirm which one),
 * or the target C compiler lacks a capability (Tinfoccprop 0, presumably
 * range-case support) and the branch has a too-big range. Returns 0 when no
 * split is needed; the last qualifying branch index otherwise. */
N_NIMCALL(NI, ifswitchsplitpoint_547616_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NI result0;
result0 = (NI)0;
{
NI i_547631_839829468;
NI HEX3Atmp_547655_839829468;
NI LOC2;
NI res_547658_839829468;
i_547631_839829468 = (NI)0;
HEX3Atmp_547655_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(n0);
HEX3Atmp_547655_839829468 = (NI)(LOC2 - ((NI) 1));
res_547658_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* branch0;
Tnode292802* stmtblock0;
if (!(res_547658_839829468 <= HEX3Atmp_547655_839829468)) goto LA4;
i_547631_839829468 = res_547658_839829468;
branch0 = HEX5BHEX5D_293238_850551059(n0, i_547631_839829468);
stmtblock0 = lastson_295364_850551059(branch0);
{
/* Case 1: the branch body carries the special pragma. */
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = stmtscontainpragma_528083_2036603609(stmtblock0, ((Tspecialword275003) 181));
if (!LOC7) goto LA8;
result0 = i_547631_839829468;
}
goto LA5;
LA8: ;
{
/* Case 2: current C compiler lacks the capability bit (Field20 is a
 * per-compiler property bitset) ... */
if (!!(((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 0))&7U)))!=0))) goto LA11;
{
/* ... and the branch is an of-branch (kind 85) with a too-big range. */
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = ((*branch0).kind == ((Tnodekind292020) 85));
if (!(LOC15)) goto LA16;
LOC15 = branchhastoobigrange_547576_839829468(branch0);
LA16: ;
if (!LOC15) goto LA17;
result0 = i_547631_839829468;
}
LA17: ;
}
goto LA5;
LA11: ;
LA5: ;
res_547658_839829468 += ((NI) 1);
} LA4: ;
}
}
return result0;
}
/* Machine-generated C (Nim compiler backend; presumably ccgstmts'
 * genOrdinalCase -- TODO confirm). Emits a `case` over an ordinal selector
 * as a hybrid: branches up to `splitpoint0` become an if-chain, the rest a
 * C `switch`. A default arm and an unreachable-hint are emitted as needed;
 * `lend0` is the end label produced by the if-chain part (nil if none). */
N_NIMCALL(void, genordinalcase_547725_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI splitpoint0;
Tloc292816 a0;
Ropeobj178006* lend0;
splitpoint0 = ifswitchsplitpoint_547616_839829468(p0, n0);
memset((void*)(&a0), 0, sizeof(a0));
/* Evaluate the selector (son 0) into `a0`. */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
/* If-chain part for branches [1, splitpoint0]. */
if (!(((NI) 0) < splitpoint0)) goto LA3;
lend0 = genifforcaseuntil_547021_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601), splitpoint0, (&a0));
}
goto LA1;
LA3: ;
{
lend0 = NIM_NIL;
}
LA1: ;
{
NI LOC8;
TY178507 LOC11;
NIM_BOOL hasdefault0;
TY533289 LOC37;
/* Switch part, only if branches remain past the split point. */
LOC8 = (NI)0;
LOC8 = len_293081_850551059(n0);
if (!((NI)(splitpoint0 + ((NI) 1)) < LOC8)) goto LA9;
/* Emit `switch (<selector>)` header. */
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdcharloc_538227_839829468((&a0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_603), LOC11, 1);
hasdefault0 = NIM_FALSE;
{
NI i_547758_839829468;
NI HEX3Atmp_547817_839829468;
NI HEX3Atmp_547818_839829468;
NI LOC13;
NI res_547821_839829468;
i_547758_839829468 = (NI)0;
HEX3Atmp_547817_839829468 = (NI)0;
HEX3Atmp_547818_839829468 = (NI)0;
HEX3Atmp_547817_839829468 = (NI)(splitpoint0 + ((NI) 1));
LOC13 = (NI)0;
LOC13 = len_293081_850551059(n0);
HEX3Atmp_547818_839829468 = (LOC13 - 1);
res_547821_839829468 = HEX3Atmp_547817_839829468;
{
while (1) {
Tnode292802* branch0;
Tnode292802* LOC28;
TY533289 LOC29;
if (!(res_547821_839829468 <= HEX3Atmp_547818_839829468)) goto LA15;
i_547758_839829468 = res_547821_839829468;
{
/* If the destination expects an expression but the case type is
 * empty, downgrade the loc kind so no value is produced. */
NIM_BOOL LOC18;
LOC18 = (NIM_BOOL)0;
LOC18 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC18)) goto LA19;
LOC18 = isemptytype_297441_850551059((*n0).typ);
LA19: ;
if (!LOC18) goto LA20;
(*d0).k = ((Tlockind292808) 0);
}
LA20: ;
branch0 = HEX5BHEX5D_293238_850551059(n0, i_547758_839829468);
{
/* Of-branch (kind 85): emit its `case ...:` labels (incl. ranges). */
if (!((*branch0).kind == ((Tnodekind292020) 85))) goto LA24;
gencaserange_537028_839829468(p0, branch0);
}
goto LA22;
LA24: ;
{
/* Else-branch: emit `default:`. */
TY533289 LOC27;
memset((void*)LOC27, 0, sizeof(LOC27));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_181), LOC27, 0);
hasdefault0 = NIM_TRUE;
}
LA22: ;
/* Branch body (last son) followed by a closing `break`-style line. */
LOC28 = (Tnode292802*)0;
LOC28 = lastson_295364_850551059(branch0);
exprblock_544103_839829468(p0, LOC28, d0);
memset((void*)LOC29, 0, sizeof(LOC29));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_182), LOC29, 0);
res_547821_839829468 += ((NI) 1);
} LA15: ;
}
}
{
/* If the target compiler has capability bit 3 (presumably "needs a
 * default arm / unreachable hint" -- TODO confirm) and no default was
 * emitted, emit the extra line. */
NIM_BOOL LOC32;
TY533289 LOC36;
LOC32 = (NIM_BOOL)0;
LOC32 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 3))&7U)))!=0);
if (!(LOC32)) goto LA33;
LOC32 = !(hasdefault0);
LA33: ;
if (!LOC32) goto LA34;
memset((void*)LOC36, 0, sizeof(LOC36));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_604), LOC36, 0);
}
LA34: ;
/* Close the switch. */
memset((void*)LOC37, 0, sizeof(LOC37));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC37, 0);
}
LA9: ;
{
/* Fix up the end label produced by the if-chain part, if any. */
if (!!((lend0 == NIM_NIL))) goto LA40;
fixlabel_539230_839829468(p0, lend0);
}
LA40: ;
}
/* Machine-generated C (Nim compiler backend; presumably ccgstmts' genCase
 * -- TODO confirm). Top-level dispatcher for `case` statements: picks the
 * generation strategy from the selector's (skipped) type kind --
 * kind 28 -> string case, kinds 36..39 -> generic if-chain (presumably the
 * float kinds -- TODO confirm), otherwise an ordinal case; a selector
 * symbol carrying flag 30 triggers the computed-goto variant instead. */
N_NIMCALL(void, gencase_547827_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Ttype292840* LOC8;
genlinedir_532823_839829468(p0, t0);
{
/* Non-empty case type with no destination yet: allocate a temp for the
 * case expression's value. */
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297441_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
/* Skip selector type wrappers (the IL64 constant is a type-kind skip mask). */
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440));
switch ((*LOC8).kind) {
case ((Ttypekind292244) 28):
{
genstringcase_547417_839829468(p0, t0, d0);
}
break;
case ((Ttypekind292244) 36) ... ((Ttypekind292244) 39):
{
gencasegeneric_547087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601));
}
break;
default:
{
{
/* Selector is a symbol (node kind 3) with sym flag 30 set: use the
 * goto-based case generator. */
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC14)) goto LA15;
LOC14 = (((*(*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0);
LA15: ;
if (!LOC14) goto LA16;
gengotoforcase_545673_839829468(p0, t0);
}
goto LA12;
LA16: ;
{
genordinalcase_547725_839829468(p0, t0, d0);
}
LA12: ;
}
break;
}
}
/* Removes and returns the last element of a Nim seq of node pointers,
 * shrinking the seq by one via setLengthSeq. Assumes the seq is non-empty
 * (an empty/nil seq would index at -1 -- matches the original's behavior). */
static N_INLINE(Tnode292802*, pop_318246_1689653243)(Tnodeseq292796** s0) {
NI lastIdx = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1));
Tnode292802* popped = (*s0)->data[lastIdx];
(*s0) = (Tnodeseq292796*) setLengthSeq(&((*s0))->Sup, sizeof(Tnode292802*), ((NI) (lastIdx)));
return popped;
}
/* Machine-generated C (Nim compiler backend; presumably ccgstmts'
 * blockLeaveActions -- TODO confirm). Emits the cleanup code needed when
 * control leaves `howmanytrys0` enclosing try statements and
 * `howmanyexcepts0` except blocks: pops each try from p0's nested-try
 * stack, emits a safe-point pop line unless already popped by an enclosing
 * except block (or suppressed for the JS/native-exceptions mode checked
 * below -- TODO confirm what gcmd 2 / module flag 27 mean), runs any
 * `finally` body, then restores the nested-try stack and emits one
 * exception-pop line per except block. */
N_NIMCALL(void, blockleaveactions_545442_839829468)(Tcproc529021* p0, NI howmanytrys0, NI howmanyexcepts0) {
Tnodeseq292796* stack0;
NI alreadypoppedcnt0;
stack0 = (Tnodeseq292796*)0;
stack0 = (Tnodeseq292796*) newSeq((&NTI292796), ((NI) 0));
alreadypoppedcnt0 = (*p0).inexceptblock;
{
NI i_545471_839829468;
NI res_545596_839829468;
i_545471_839829468 = (NI)0;
res_545596_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* trystmt0;
Tnode292802* finallystmt0;
if (!(res_545596_839829468 <= howmanytrys0)) goto LA3;
i_545471_839829468 = res_545596_839829468;
{
/* Skip safe-point emission when command mode 2 is active or the
 * module carries flag 27 (both appear to select a non-setjmp
 * exception scheme -- TODO confirm). */
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
LOC6 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC6) goto LA7;
LOC6 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA7: ;
if (!!(LOC6)) goto LA8;
{
/* Safe points popped by surrounding except blocks are accounted
 * for first; only after that do we emit explicit pop lines. */
if (!(((NI) 0) < alreadypoppedcnt0)) goto LA12;
alreadypoppedcnt0 -= ((NI) 1);
}
goto LA10;
LA12: ;
{
TY533289 LOC15;
memset((void*)LOC15, 0, sizeof(LOC15));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC15, 0);
}
LA10: ;
}
LA8: ;
/* Pop the try stmt off the proc's nested-try stack, remember it on a
 * local stack, and run its finally body (node kind 107, presumably
 * nkFinally) if present. */
trystmt0 = pop_318246_1689653243((&(*p0).nestedtrystmts));
stack0 = (Tnodeseq292796*) incrSeqV2(&(stack0)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&stack0->data[stack0->Sup.len]), trystmt0);
++stack0->Sup.len;
finallystmt0 = lastson_295364_850551059(trystmt0);
{
if (!((*finallystmt0).kind == ((Tnodekind292020) 107))) goto LA18;
genstmts_539244_839829468(p0, (*finallystmt0).kindU.S6.sons->data[((NI) 0)]);
}
LA18: ;
res_545596_839829468 += ((NI) 1);
} LA3: ;
}
}
/* Push the try statements back in reverse order, restoring the stack. */
{
NI i_545546_839829468;
NI HEX3Atmp_545601_839829468;
NI res_545604_839829468;
i_545546_839829468 = (NI)0;
HEX3Atmp_545601_839829468 = (NI)0;
HEX3Atmp_545601_839829468 = (NI)(howmanytrys0 - ((NI) 1));
res_545604_839829468 = HEX3Atmp_545601_839829468;
{
while (1) {
if (!(((NI) 0) <= res_545604_839829468)) goto LA22;
i_545546_839829468 = res_545604_839829468;
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), stack0->data[i_545546_839829468]);
++(*p0).nestedtrystmts->Sup.len;
res_545604_839829468 -= ((NI) 1);
} LA22: ;
}
}
{
/* Same mode guard as above: emit one exception-pop line per except
 * block being left. */
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC25) goto LA26;
LOC25 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA26: ;
if (!!(LOC25)) goto LA27;
{
NI i_545587_839829468;
NI HEX3Atmp_545610_839829468;
NI res_545613_839829468;
i_545587_839829468 = (NI)0;
HEX3Atmp_545610_839829468 = (NI)0;
HEX3Atmp_545610_839829468 = (NI)(howmanyexcepts0 - ((NI) 1));
res_545613_839829468 = HEX3Atmp_545610_839829468;
{
while (1) {
TY533289 LOC32;
if (!(((NI) 0) <= res_545613_839829468)) goto LA31;
i_545587_839829468 = res_545613_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC32, 0);
res_545613_839829468 -= ((NI) 1);
} LA31: ;
}
}
}
LA27: ;
}
/* Machine-generated C (Nim compiler backend; presumably ccgstmts'
 * genReturnStmt -- TODO confirm). Emits code for a `return` statement:
 * skips nodes marked with node flag 14, generates the optional return
 * expression, runs all pending try/except leave actions, marks the
 * innermost finally safe point as "returning" if one is active, and emits
 * the final goto-BeforeRet line. */
N_NIMCALL(void, genreturnstmt_545617_839829468)(Tcproc529021* p0, Tnode292802* t0) {
TY533289 LOC14;
{ {
/* Node flag 14 set: nothing to emit for this return. */
if (!(((*t0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0)) goto LA3;
goto BeforeRet;
}
LA3: ;
/* The proc now needs a BeforeRet label. */
(*p0).beforeretneeded = NIM_TRUE;
genlinedir_532823_839829468(p0, t0);
{
/* Son 0 is the return value assignment unless it is an empty node
 * (kind 1). */
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA7;
genstmts_539244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)]);
}
LA7: ;
/* Leave every enclosing try and except block. */
blockleaveactions_545442_839829468(p0, ((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0), (*p0).inexceptblock);
{
/* If inside a finally with a safe point, flag the safe point so the
 * finally machinery knows a return is in flight. */
Ropeobj178006* safepoint0;
TY178507 LOC13;
if (!(((NI) 0) < ((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0))) goto LA11;
safepoint0 = (*p0).finallysafepoints->data[(NI)(((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0) - ((NI) 1))];
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_607), LOC13, 1);
}
LA11: ;
memset((void*)LOC14, 0, sizeof(LOC14));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_608), LOC14, 0);
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler backend; presumably ccgstmts'
 * genBreakStmt -- TODO confirm). Emits code for `break`: a labeled break
 * (son 0 is a symbol) targets the block recorded in the symbol's position;
 * an unlabeled break searches outward from p0's current break index for the
 * nearest enclosing loop block, erroring if none exists. Leave actions for
 * try/except blocks crossed by the jump are emitted before the goto. */
N_NIMCALL(void, genbreakstmt_546444_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NI idx0;
Ropeobj178006* label0;
TY178507 LOC16;
idx0 = (*p0).breakidx;
{
/* Labeled break: son 0 is a non-empty symbol node; its position holds
 * the 1-based block index. */
Tsym292834* sym0;
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA3;
sym0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
idx0 = (NI)((*sym0).position - ((NI) 1));
}
goto LA1;
LA3: ;
{
/* Unlabeled break: walk outward until a loop block is found. */
{
while (1) {
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = (((NI) 0) <= idx0);
if (!(LOC8)) goto LA9;
LOC8 = !((*p0).blocks->data[idx0].isloop);
LA9: ;
if (!LOC8) goto LA7;
idx0 -= ((NI) 1);
} LA7: ;
}
{
/* No enclosing loop found: internal error. */
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = (idx0 < ((NI) 0));
if (LOC12) goto LA13;
LOC12 = !((*p0).blocks->data[idx0].isloop);
LA13: ;
if (!LOC12) goto LA14;
internalerror_196100_155036129((*t0).info, ((NimStringDesc*) &T839829468_609));
}
LA14: ;
}
LA1: ;
/* Materialize the target block's label, emit leave actions for any
 * try/except nesting crossed relative to that block, then the goto line. */
label0 = assignlabel_544020_839829468((&(*p0).blocks->data[idx0]));
blockleaveactions_545442_839829468(p0, (NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) ((*p0).blocks->data[idx0].nestedtrystmts))), (NI)((*p0).inexceptblock - ((NI) ((*p0).blocks->data[idx0].nestedexceptstmts))));
genlinedir_532823_839829468(p0, t0);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = label0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC16, 1);
}
/* Machine-generated C (Nim compiler backend). Decides whether an assignment
 * `asgn0` writes to an object-variant discriminator field and therefore
 * needs a runtime discriminant check. Only relevant when proc option bit 2
 * is set (presumably a field-check option -- TODO confirm). Handles the LHS
 * being a checked field access (node kind 46, wrapping a dot expr) or a
 * plain dot expr (kind 45); returns whether the accessed field's sym flag
 * 18 (presumably sfDiscriminant -- TODO confirm) is set. */
N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_549080_839829468)(Tcproc529021* p0, Tnode292802* asgn0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
Tnode292802* le0;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0)) goto LA3;
le0 = (*asgn0).kindU.S6.sons->data[((NI) 0)];
{
/* Checked-field wrapper: the field sym is sons[0].sons[1]. */
Tsym292834* field0;
if (!((*le0).kind == ((Tnodekind292020) 46))) goto LA7;
field0 = (*(*(*le0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag292184) 18))&31U)))!=0);
}
goto LA5;
LA7: ;
{
/* Plain dot expression: the field sym is sons[1]. */
Tsym292834* field0;
if (!((*le0).kind == ((Tnodekind292020) 45))) goto LA10;
field0 = (*(*le0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag292184) 18))&31U)))!=0);
}
goto LA5;
LA10: ;
LA5: ;
}
LA3: ;
return result0;
}
/* Machine-generated C (Nim compiler backend). Builds the C declaration rope
 * for a discriminant-check table of object type `objtype0` / discriminator
 * field `d0`: the table name comes from discriminatortablename, its length
 * is lengthord(field type) + 1. Also pulls in a compiler-proc symbol via
 * cgsym (string-table entry T..._130 -- its content is not visible here). */
N_NIMCALL(Ropeobj178006*, discriminatortabledecl_536094_839829468)(Tcgen529027* m0, Ttype292840* objtype0, Tsym292834* d0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
Ropeobj178006* tmp0;
TY532811 LOC2;
NI64 LOC3;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_130));
tmp0 = discriminatortablename_536057_839829468(m0, objtype0, d0);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = tmp0;
LOC3 = (NI64)0;
LOC3 = lengthord_320007_3876443242((*d0).typ);
/* Table size is the ordinal cardinality of the discriminator type + 1. */
LOC2[1] = rope_178401_2381377266((NI64)(LOC3 + IL64(1)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_203), LOC2, 2);
return result0;
}
/* Machine-generated C (Nim compiler backend). Emits the runtime check for
 * assigning a new value (`tmp0`) to an object-variant discriminator of
 * `a0`: declares the per-type discriminant table once per module (guarded
 * by the declaredthings set) and then emits the check call line with the
 * old loc, new value, table name and table length. */
N_NIMCALL(void, gendiscriminantcheck_549144_839829468)(Tcproc529021* p0, Tloc292816* a0, Tloc292816* tmp0, Ttype292840* objtype0, Tsym292834* field0) {
Ttype292840* t0;
Ropeobj178006* LOC1;
NI64 L0;
TY535235 LOC8;
/* Strip type wrappers (the IL64 constant is a type-kind skip mask). */
t0 = skiptypes_296099_850551059(objtype0, IL64(211106240964864));
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468((*p0).module, t0);
L0 = lengthord_320007_3876443242((*field0).typ);
{
/* Declare the discriminant table only once per module (keyed by the
 * field's symbol id). */
NIM_BOOL LOC4;
TY178507 LOC7;
LOC4 = (NIM_BOOL)0;
LOC4 = containsorincl_268862_2627731572((&(*(*p0).module).declaredthings), (*field0).Sup.id);
if (!!(LOC4)) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = discriminatortabledecl_536094_839829468((*p0).module, t0, field0);
appcg_532640_839829468((*p0).module, ((Tcfilesection529005) 9), ((NimStringDesc*) &T839829468_610), LOC7, 1);
}
LA5: ;
/* Emit the actual check call: (old loc, new value, table, length+1). */
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(a0);
LOC8[1] = rdloc_538188_839829468(tmp0);
LOC8[2] = discriminatortablename_536057_839829468((*p0).module, t0, field0);
LOC8[3] = intliteral_539270_839829468((NI64)(L0 + IL64(1)));
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_611), LOC8, 4);
}
/* Machine-generated C (Nim compiler backend). Generates an assignment to an
 * object-variant discriminator field: evaluates the LHS, computes the RHS
 * into a temp, runs the discriminant check (which may raise at runtime),
 * and only then performs the actual assignment. A checked-field wrapper
 * (node kind 46) around the dot expr is unwrapped first. */
N_NIMCALL(void, asgnfielddiscriminant_549209_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
Tloc292816 tmp0;
Tnode292802* dotexpr0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
dotexpr0 = (*e0).kindU.S6.sons->data[((NI) 0)];
{
/* Unwrap nkCheckedField-style wrapper to reach the dot expression. */
if (!((*dotexpr0).kind == ((Tnodekind292020) 46))) goto LA3;
dotexpr0 = (*dotexpr0).kindU.S6.sons->data[((NI) 0)];
}
LA3: ;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
/* RHS goes into a temp so the check can inspect it before the store. */
gettemp_537032_839829468(p0, a0.t, (&tmp0), NIM_FALSE);
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
gendiscriminantcheck_549144_839829468(p0, (&a0), (&tmp0), (*(*dotexpr0).kindU.S6.sons->data[((NI) 0)]).typ, (*(*dotexpr0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym);
genassignment_539264_839829468(p0, (&a0), (&tmp0), 0);
}
/* Machine-generated C (Nim compiler backend; presumably ccgstmts' genAsgn
 * -- TODO confirm). Emits an assignment statement. Three cases: (1) LHS is
 * a symbol with flag 30 (goto-variable) -> generate a computed goto; (2) no
 * discriminant check needed -> evaluate the LHS loc (with special deref
 * handling for node kinds 47/65), optionally mark it for a shallow/"fast"
 * assignment, then load the RHS into it; (3) discriminant assignment with
 * runtime check. */
N_NIMCALL(void, genasgn_549239_839829468)(Tcproc529021* p0, Tnode292802* e0, NIM_BOOL fastasgn0) {
genlinedir_532823_839829468(p0, e0);
{
/* Case 1: goto-variable assignment (symbol with sym flag 30). */
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC3)) goto LA4;
LOC3 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
gengotovar_544258_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)]);
}
goto LA1;
LA5: ;
{
/* Case 2: ordinary assignment (no discriminant check required). */
NIM_BOOL LOC8;
Tloc292816 a0;
LOC8 = (NIM_BOOL)0;
LOC8 = fielddiscriminantcheckneeded_549080_839829468(p0, e0);
if (!!(LOC8)) goto LA9;
memset((void*)(&a0), 0, sizeof(a0));
{
/* Deref-like LHS (node kinds 47/65) gets dedicated deref codegen. */
Tnode292802* LOC13;
Tnode292802* LOC16;
LOC13 = (Tnode292802*)0;
LOC13 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
if (!((*LOC13).kind == ((Tnodekind292020) 47) || (*LOC13).kind == ((Tnodekind292020) 65))) goto LA14;
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
genderef_543921_839829468(p0, LOC16, (&a0), NIM_TRUE);
}
goto LA11;
LA14: ;
{
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA11: ;
{
/* "Fast" assignment: set loc flag 2 so the copy can skip deep-copy
 * machinery (presumably shallow-copy semantics -- TODO confirm). */
if (!fastasgn0) goto LA20;
a0.flags |= ((NU16)1)<<((((Tlocflag292810) 2))%(sizeof(NU16)*8));
}
LA20: ;
loadinto_543928_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
}
goto LA1;
LA9: ;
{
/* Case 3: discriminator field assignment with runtime check. */
asgnfielddiscriminant_549209_839829468(p0, e0);
}
LA1: ;
}
/* Machine-generated C (Nim compiler backend; presumably ccgstmts'
 * genAsmOrEmitStmt -- TODO confirm). Flattens an `asm`/`emit` statement
 * node into one rope: string-literal sons are appended verbatim, symbol
 * sons are substituted by their generated C name (locals/params via their
 * loc, type symbols via their C type name, anything else via its mangled
 * name), and any other node kind is an internal error. For asm statements
 * on compilers with capability bit 5 set, the collected text is re-split
 * into lines and each line is quoted/decorated for the compiler's inline
 * asm syntax. */
N_NIMCALL(Ropeobj178006*, genasmoremitstmt_548529_839829468)(Tcproc529021* p0, Tnode292802* t0, NIM_BOOL isasmstmt0) {
Ropeobj178006* result0;
NimStringDesc* res0;
result0 = (Ropeobj178006*)0;
res0 = copyString(((NimStringDesc*) &T839829468_490));
{
NI i_548547_839829468;
NI HEX3Atmp_548644_839829468;
NI LOC2;
NI res_548647_839829468;
i_548547_839829468 = (NI)0;
HEX3Atmp_548644_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_548644_839829468 = (NI)(LOC2 - ((NI) 1));
res_548647_839829468 = ((NI) 0);
{
while (1) {
if (!(res_548647_839829468 <= HEX3Atmp_548644_839829468)) goto LA4;
i_548547_839829468 = res_548647_839829468;
switch ((*(*t0).kindU.S6.sons->data[i_548547_839829468]).kind) {
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
/* String-literal kinds: append the raw text. */
res0 = resizeString(res0, (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S3.strval->Sup.len + 0);
appendString(res0, (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S3.strval);
}
break;
case ((Tnodekind292020) 3):
{
/* Symbol: substitute its generated C representation. */
Tsym292834* sym0;
sym0 = (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S4.sym;
{
/* Sym-kind mask 28672 appears to select var/let/param-like
 * symbols -- TODO confirm; use the evaluated loc's C name. */
Tloc292816 a0;
Ropeobj178006* LOC11;
NimStringDesc* LOC12;
if (!((28672 &(1U<<((NU)((*sym0).kind)&31U)))!=0)) goto LA9;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[i_548547_839829468], (&a0));
LOC11 = (Ropeobj178006*)0;
LOC11 = rdloc_538188_839829468((&a0));
LOC12 = (NimStringDesc*)0;
LOC12 = HEX24_178856_2381377266(LOC11);
res0 = resizeString(res0, LOC12->Sup.len + 0);
appendString(res0, LOC12);
}
goto LA7;
LA9: ;
{
/* Type symbol (sym kind 7): substitute the C type descriptor. */
Ropeobj178006* LOC16;
NimStringDesc* LOC17;
if (!((*sym0).kind == ((Tsymkind292435) 7))) goto LA14;
LOC16 = (Ropeobj178006*)0;
LOC16 = gettypedesc_535673_839829468((*p0).module, (*sym0).typ);
LOC17 = (NimStringDesc*)0;
LOC17 = HEX24_178856_2381377266(LOC16);
res0 = resizeString(res0, LOC17->Sup.len + 0);
appendString(res0, LOC17);
}
goto LA7;
LA14: ;
{
/* Anything else: substitute (and cache) the mangled name. */
Ropeobj178006* r0;
NimStringDesc* LOC23;
r0 = (*sym0).loc.r;
{
if (!(r0 == NIM_NIL)) goto LA21;
r0 = manglename_533205_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), r0);
}
LA21: ;
LOC23 = (NimStringDesc*)0;
LOC23 = HEX24_178856_2381377266(r0);
res0 = resizeString(res0, LOC23->Sup.len + 0);
appendString(res0, LOC23);
}
LA7: ;
}
break;
default:
{
internalerror_196100_155036129((*(*t0).kindU.S6.sons->data[i_548547_839829468]).info, ((NimStringDesc*) &T839829468_612));
}
break;
}
res_548647_839829468 += ((NI) 1);
} LA4: ;
}
}
{
/* Asm-statement post-processing for compilers with capability bit 5
 * (presumably "needs each asm line quoted" -- TODO confirm). */
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
LOC27 = isasmstmt0;
if (!(LOC27)) goto LA28;
LOC27 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 5))&7U)))!=0);
LA28: ;
if (!LOC27) goto LA29;
{
NimStringDesc* x_548604_839829468;
NI first_548656_839829468;
NI last_548658_839829468;
x_548604_839829468 = (NimStringDesc*)0;
first_548656_839829468 = ((NI) 0);
last_548658_839829468 = ((NI) 0);
{
/* Split `res0` on CR/LF/NUL into lines; `first`..`last-1` is the
 * current line. */
while (1) {
NI j0;
{
while (1) {
if (!!((((NU8)(res0->data[last_548658_839829468])) == ((NU8)(0)) || ((NU8)(res0->data[last_548658_839829468])) == ((NU8)(13)) || ((NU8)(res0->data[last_548658_839829468])) == ((NU8)(10))))) goto LA35;
last_548658_839829468 += ((NI) 1);
} LA35: ;
}
x_548604_839829468 = copyStrLast(res0, first_548656_839829468, (NI)(last_548658_839829468 - ((NI) 1)));
/* Skip leading spaces/tabs of the line. */
j0 = ((NI) 0);
{
while (1) {
if (!(((NU8)(x_548604_839829468->data[j0])) == ((NU8)(32)) || ((NU8)(x_548604_839829468->data[j0])) == ((NU8)(9)))) goto LA37;
j0 += ((NI) 1);
} LA37: ;
}
{
/* Line already starts with '"' or ':' -> emit as-is + newline. */
if (!(((NU8)(x_548604_839829468->data[j0])) == ((NU8)(34)) || ((NU8)(x_548604_839829468->data[j0])) == ((NU8)(58)))) goto LA40;
add_178487_2381377266(&result0, x_548604_839829468);
add_178487_2381377266(&result0, tnl_176644_4151366050);
}
goto LA38;
LA40: ;
{
/* Non-empty line -> wrap it in the quoting pre/postfix strings. */
if (!!(((NU8)(x_548604_839829468->data[j0]) == (NU8)(0)))) goto LA43;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_613));
add_178487_2381377266(&result0, x_548604_839829468);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_614));
}
goto LA38;
LA43: ;
LA38: ;
{
/* Consume the line terminator: LF, or CR optionally followed by LF;
 * NUL ends the scan. */
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(10))) goto LA47;
last_548658_839829468 += ((NI) 1);
}
goto LA45;
LA47: ;
{
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(13))) goto LA50;
last_548658_839829468 += ((NI) 1);
{
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(10))) goto LA54;
last_548658_839829468 += ((NI) 1);
}
LA54: ;
}
goto LA45;
LA50: ;
{
goto LA32;
}
LA45: ;
first_548656_839829468 = last_548658_839829468;
}
} LA32: ;
}
}
goto LA25;
LA29: ;
{
/* No per-line quoting required: just append a newline and convert. */
res0 = resizeString(res0, tnl_176644_4151366050->Sup.len + 0);
appendString(res0, tnl_176644_4151366050);
result0 = rope_178277_2381377266(res0);
}
LA25: ;
return result0;
}
/* Machine-generated C (Nim compiler backend; presumably ccgstmts'
 * genAsmStmt -- TODO confirm). Emits an `asm` statement: flattens it via
 * genasmoremitstmt, then writes it either into the module-level section
 * (Tcfilesection 7) when there is no enclosing proc, or into the current
 * proc's statement section otherwise. Field17 of the compiler-info record
 * appears to hold the compiler's asm-statement format string. */
N_NIMCALL(void, genasmstmt_548659_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Ropeobj178006* s0;
genlinedir_532823_839829468(p0, t0);
s0 = genasmoremitstmt_548529_839829468(p0, t0, NIM_TRUE);
{
/* Top-level asm (no enclosing proc): goes to a module file section. */
TY178507 LOC5;
if (!((*p0).prc == NIM_NIL)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = s0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 7))- 0], Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field17, LOC5, 1);
}
goto LA1;
LA3: ;
{
/* Asm inside a proc: goes into the proc's statement section. */
TY178507 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = s0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field17, LOC7, 1);
}
LA1: ;
}
/* Machine-generated C (Nim compiler backend). Convenience helper: opens an
 * anonymous block, generates the statements `stmts0` inside it, and closes
 * the block again. */
static N_INLINE(void, gensimpleblock_544095_839829468)(Tcproc529021* p0, Tnode292802* stmts0) {
TY533289 LOC1;
NI LOC2;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (NI)0;
LOC2 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC1, 0);
genstmts_539244_839829468(p0, stmts0);
endblock_544060_839829468(p0);
}
/* Machine-generated C (Nim compiler backend; presumably ccgstmts'
 * genTryCpp -- TODO confirm). Emits a `try` statement using C++-style
 * exception handling: the try body becomes a `try { ... }` block caught
 * into a temp name `exc0`; each except branch (node kind 87) is either a
 * catch-all or a type-tested chain built from `orexpr0`; if no catch-all
 * exists, a reraising fall-through arm is appended; a trailing finally
 * branch (node kind 107) is generated at the end. The try statement is
 * pushed onto p0's nested-try stack for the duration of the body. */
N_NIMCALL(void, gentrycpp_547866_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Ropeobj178006* exc0;
TY533289 LOC16;
NI LOC17;
NI length0;
TY178507 LOC18;
Ropeobj178006* LOC19;
NI i0;
NIM_BOOL catchallpresent0;
TY533289 LOC78;
Tnode292802* LOC79;
{
/* Non-empty try type with no destination: allocate a temp. */
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297441_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
genlinedir_532823_839829468(p0, t0);
exc0 = gettempname_533598_839829468((*p0).module);
{
/* Pull in the appropriate runtime symbol: a compilerproc if one is
 * registered under the first name, else the fallback symbol. */
Tsym292834* LOC10;
Ropeobj178006* LOC13;
LOC10 = (Tsym292834*)0;
LOC10 = getcompilerproc_338748_3937434831(((NimStringDesc*) &T839829468_615));
if (!!((LOC10 == NIM_NIL))) goto LA11;
LOC13 = (Ropeobj178006*)0;
LOC13 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615));
}
goto LA8;
LA11: ;
{
Ropeobj178006* LOC15;
LOC15 = (Ropeobj178006*)0;
LOC15 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616));
}
LA8: ;
/* Push this try statement onto the proc's nested-try stack. */
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0);
++(*p0).nestedtrystmts->Sup.len;
/* Open the `try` block, generate the try body (son 0), and close it with
 * the catch header that binds the exception into `exc0`. */
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (NI)0;
LOC17 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_617), LOC16, 0);
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0);
length0 = sonslen_295351_850551059(t0);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = exc0;
LOC19 = (Ropeobj178006*)0;
LOC19 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_618), LOC18, 1);
endblock_544035_839829468(p0, LOC19);
{
/* Proc option bit 15 set: emit an extra line (content not visible
 * here -- presumably stack-trace bookkeeping, TODO confirm). */
TY533289 LOC24;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_619), LOC24, 0);
}
LA22: ;
(*p0).inexceptblock += ((NI) 1);
i0 = ((NI) 1);
catchallpresent0 = NIM_FALSE;
{
/* Walk the except branches (node kind 87) following the try body. */
while (1) {
NIM_BOOL LOC27;
NI blen0;
LOC27 = (NIM_BOOL)0;
LOC27 = (i0 < length0);
if (!(LOC27)) goto LA28;
LOC27 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 87));
LA28: ;
if (!LOC27) goto LA26;
{
/* Empty try type with expression destination: drop the value. */
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC31)) goto LA32;
LOC31 = isemptytype_297441_850551059((*t0).typ);
LA32: ;
if (!LOC31) goto LA33;
(*d0).k = ((Tlockind292808) 0);
}
LA33: ;
blen0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i0]);
{
/* Second and later branches get an `else` connective line. */
Ropeobj178006** LOC39;
TY533289 LOC40;
if (!(((NI) 1) < i0)) goto LA37;
LOC39 = (Ropeobj178006**)0;
LOC39 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
memset((void*)LOC40, 0, sizeof(LOC40));
addf_179205_2381377266(LOC39, ((NimStringDesc*) &T839829468_620), LOC40, 0);
}
LA37: ;
{
/* Branch with one son = bare `except:` -> catch-all body. */
TY533289 LOC45;
NI LOC46;
TY533289 LOC47;
if (!(blen0 == ((NI) 1))) goto LA43;
catchallpresent0 = NIM_TRUE;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (NI)0;
LOC46 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC47, 0, sizeof(LOC47));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC47, 0);
endblock_544060_839829468(p0);
}
goto LA41;
LA43: ;
{
/* Typed except branch: build an `||`-joined type-test expression
 * over all listed exception types, then the guarded body. */
Ropeobj178006* orexpr0;
TY178507 LOC57;
TY533289 LOC58;
NI LOC59;
TY533289 LOC60;
orexpr0 = NIM_NIL;
{
NI j_547979_839829468;
NI HEX3Atmp_548101_839829468;
NI res_548104_839829468;
j_547979_839829468 = (NI)0;
HEX3Atmp_548101_839829468 = (NI)0;
HEX3Atmp_548101_839829468 = (NI)(blen0 - ((NI) 2));
res_548104_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC56;
if (!(res_548104_839829468 <= HEX3Atmp_548101_839829468)) goto LA51;
j_547979_839829468 = res_548104_839829468;
{
/* Join successive tests with the connective string. */
if (!!((orexpr0 == NIM_NIL))) goto LA54;
add_178487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229));
}
LA54: ;
memset((void*)LOC56, 0, sizeof(LOC56));
LOC56[0] = exc0;
LOC56[1] = gentypeinfo_535941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_547979_839829468]).typ);
appcg_532632_839829468((*p0).module, &orexpr0, ((NimStringDesc*) &T839829468_621), LOC56, 2);
res_548104_839829468 += ((NI) 1);
} LA51: ;
}
}
/* Emit `if (<orexpr>)` then the branch body (last son). */
memset((void*)LOC57, 0, sizeof(LOC57));
LOC57[0] = orexpr0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_622), LOC57, 1);
memset((void*)LOC58, 0, sizeof(LOC58));
LOC59 = (NI)0;
LOC59 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC58, 0);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0);
memset((void*)LOC60, 0, sizeof(LOC60));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC60, 0);
endblock_544060_839829468(p0);
}
LA41: ;
i0 += ((NI) 1);
} LA26: ;
}
{
/* No catch-all: append a fall-through arm that runs any finally body
 * and reraises (format string T..._623). */
TY533289 LOC70;
NI LOC71;
Tnode292802* finallyblock0;
TY533289 LOC76;
Ropeobj178006* LOC77;
if (!!(catchallpresent0)) goto LA63;
{
TY533289 LOC69;
if (!(((NI) 1) < i0)) goto LA67;
memset((void*)LOC69, 0, sizeof(LOC69));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_620), LOC69, 0);
}
LA67: ;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC71 = (NI)0;
LOC71 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC70, 0);
finallyblock0 = lastson_295364_850551059(t0);
{
/* Run the finally body (node kind 107) before reraising. */
if (!((*finallyblock0).kind == ((Tnodekind292020) 107))) goto LA74;
genstmts_539244_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]);
}
LA74: ;
memset((void*)LOC76, 0, sizeof(LOC76));
LOC77 = (Ropeobj178006*)0;
LOC77 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_623), LOC76, 0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), LOC77);
endblock_544060_839829468(p0);
}
LA63: ;
/* Close the whole catch construct, pop exception depth and try stack. */
memset((void*)LOC78, 0, sizeof(LOC78));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC78, 0);
(*p0).inexceptblock -= ((NI) 1);
LOC79 = (Tnode292802*)0;
LOC79 = pop_318246_1689653243((&(*p0).nestedtrystmts));
{
/* Trailing finally branch (node kind 107): generate it in its own
 * simple block after the catch machinery. */
NIM_BOOL LOC82;
LOC82 = (NIM_BOOL)0;
LOC82 = (i0 < length0);
if (!(LOC82)) goto LA83;
LOC82 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 107));
LA83: ;
if (!LOC82) goto LA84;
gensimpleblock_544095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]);
}
LA84: ;
}
/* Converts the string `r0` into a rope, indents it to the current proc
 * indentation level, and appends it to the requested section of proc `p0`. */
N_NIMCALL(void, line_532695_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* r0) {
Ropeobj178006** section = s_529179_3723162438(p0, s0);
Ropeobj178006* asRope = rope_178277_2381377266(r0);
Ropeobj178006* indented = indentline_532656_839829468(p0, asRope);
add_178482_2381377266(section, indented);
}
static N_INLINE(Ropeobj178006*, pop_178530_1689653243)(TY191350** s0) {
/* Remove and return the last element of the rope seq *s0, shrinking its
   length by one via setLengthSeq. A nil seq yields index -1 (same as the
   original generated code; caller must ensure the seq is non-empty). */
NI last0 = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1));
Ropeobj178006* popped0 = (*s0)->data[last0];
(*s0) = (TY191350*) setLengthSeq(&((*s0))->Sup, sizeof(Ropeobj178006*), ((NI) (last0)));
return popped0;
}
/* Machine-generated (Nim C backend). Appears to emit code for a Nim `try`
   statement using safepoint-based exception handling: a safepoint temp is
   declared and pushed, the try body runs in a block, then except branches
   are emitted as setjmp-status tests, and a trailing finally branch (node
   kind 107) is handled at the end. Assumptions below are hedged — confirm
   against the Nim compiler's ccgstmts module if modifying. */
N_NIMCALL(void, gentry_548114_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
NIM_BOOL LOC8;
Ropeobj178006* safepoint0;
TY178507 LOC17;
TY178507 LOC18;
TY178507 LOC37;
NI LOC38;
NI length0;
TY533289 LOC39;
TY533289 LOC40;
NI LOC41;
TY533289 LOC42;
NI i0;
Tnode292802* LOC95;
TY178507 LOC103;
/* If the try expression has a non-empty type and no destination yet,
   allocate a temporary to receive its value. */
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297441_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
/* Ensure the required header is included (return value of includestr is
   deliberately unused) and create the safepoint temp name. */
LOC8 = (NIM_BOOL)0;
LOC8 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_624));
genlinedir_532823_839829468(p0, t0);
safepoint0 = gettempname_533598_839829468((*p0).module);
/* Pull in one of two compilerprocs, preferring the first if it exists. */
{
Tsym292834* LOC11;
Ropeobj178006* LOC14;
LOC11 = (Tsym292834*)0;
LOC11 = getcompilerproc_338748_3937434831(((NimStringDesc*) &T839829468_615));
if (!!((LOC11 == NIM_NIL))) goto LA12;
LOC14 = (Ropeobj178006*)0;
LOC14 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615));
}
goto LA9;
LA12: ;
{
Ropeobj178006* LOC16;
LOC16 = (Ropeobj178006*)0;
LOC16 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616));
}
LA9: ;
/* Declare the safepoint (section 0 = locals) and push it (section 2 =
   statements). */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_625), LOC17, 1);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_626), LOC18, 1);
/* Emit the setjmp call in one of several flavors depending on which
   define (presumably setjmp variant selection) is active; the last arm
   is the default. */
{
NIM_BOOL LOC21;
TY178507 LOC24;
LOC21 = (NIM_BOOL)0;
LOC21 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_627));
if (!LOC21) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_628), LOC24, 1);
}
goto LA19;
LA22: ;
{
NIM_BOOL LOC26;
TY178507 LOC29;
LOC26 = (NIM_BOOL)0;
LOC26 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_629));
if (!LOC26) goto LA27;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_630), LOC29, 1);
}
goto LA19;
LA27: ;
{
NIM_BOOL LOC31;
TY178507 LOC34;
LOC31 = (NIM_BOOL)0;
LOC31 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_631));
if (!LOC31) goto LA32;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_632), LOC34, 1);
}
goto LA19;
LA32: ;
{
TY178507 LOC36;
memset((void*)LOC36, 0, sizeof(LOC36));
LOC36[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_628), LOC36, 1);
}
LA19: ;
/* Open the "status == 0" block, register this try on the nested-try
   stack, and generate the try body (son 0). */
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = safepoint0;
LOC38 = (NI)0;
LOC38 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_633), LOC37, 1);
length0 = sonslen_295351_850551059(t0);
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0);
++(*p0).nestedtrystmts->Sup.len;
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC39, 0, sizeof(LOC39));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC39, 0);
endblock_544060_839829468(p0);
/* Open the "an exception occurred" block. */
memset((void*)LOC40, 0, sizeof(LOC40));
LOC41 = (NI)0;
LOC41 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_634), LOC40, 0);
memset((void*)LOC42, 0, sizeof(LOC42));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC42, 0);
/* Optional extra statement when option bit 15 is set (stack-trace
   related, presumably — confirm against the backend source). */
{
TY533289 LOC47;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA45;
memset((void*)LOC47, 0, sizeof(LOC47));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_619), LOC47, 0);
}
LA45: ;
/* Walk the except branches (sons of kind 87) starting at son 1. */
(*p0).inexceptblock += ((NI) 1);
i0 = ((NI) 1);
{
while (1) {
NIM_BOOL LOC50;
NI blen0;
LOC50 = (NIM_BOOL)0;
LOC50 = (i0 < length0);
if (!(LOC50)) goto LA51;
LOC50 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 87));
LA51: ;
if (!LOC50) goto LA49;
/* If the destination is of kind 1 but the try has an empty type, drop
   the destination so branches are generated as statements. */
{
NIM_BOOL LOC54;
LOC54 = (NIM_BOOL)0;
LOC54 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC54)) goto LA55;
LOC54 = isemptytype_297441_850551059((*t0).typ);
LA55: ;
if (!LOC54) goto LA56;
(*d0).k = ((Tlockind292808) 0);
}
LA56: ;
blen0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i0]);
{
TY533289 LOC67;
NI LOC68;
TY178507 LOC69;
TY533289 LOC70;
/* blen == 1: a bare `except:` catch-all branch. */
if (!(blen0 == ((NI) 1))) goto LA60;
{
TY533289 LOC66;
if (!(((NI) 1) < i0)) goto LA64;
memset((void*)LOC66, 0, sizeof(LOC66));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_635), LOC66, 0);
}
LA64: ;
memset((void*)LOC67, 0, sizeof(LOC67));
LOC68 = (NI)0;
LOC68 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC67, 0);
memset((void*)LOC69, 0, sizeof(LOC69));
LOC69[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_636), LOC69, 1);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC70, 0, sizeof(LOC70));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC70, 0);
endblock_544060_839829468(p0);
}
goto LA58;
LA60: ;
/* blen > 1: typed except branch — build an OR-expression of exception
   type tests, one per listed type (sons 0..blen-2). */
{
Ropeobj178006* orexpr0;
TY178507 LOC91;
NI LOC92;
TY178507 LOC93;
TY533289 LOC94;
orexpr0 = NIM_NIL;
{
NI j_548247_839829468;
NI HEX3Atmp_548521_839829468;
NI res_548524_839829468;
j_548247_839829468 = (NI)0;
HEX3Atmp_548521_839829468 = (NI)0;
HEX3Atmp_548521_839829468 = (NI)(blen0 - ((NI) 2));
res_548524_839829468 = ((NI) 0);
{
while (1) {
NimStringDesc* isobjformat0;
TY178507 LOC86;
if (!(res_548524_839829468 <= HEX3Atmp_548521_839829468)) goto LA74;
j_548247_839829468 = res_548524_839829468;
{
if (!!((orexpr0 == NIM_NIL))) goto LA77;
add_178487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229));
}
LA77: ;
/* Pick the type-test format string based on the backend command and a
   module flag (presumably C vs. C++ style checks — confirm). */
{
NIM_BOOL LOC81;
LOC81 = (NIM_BOOL)0;
LOC81 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC81) goto LA82;
LOC81 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA82: ;
if (!!(LOC81)) goto LA83;
isobjformat0 = copyString(((NimStringDesc*) &T839829468_637));
}
goto LA79;
LA83: ;
{
isobjformat0 = copyString(((NimStringDesc*) &T839829468_638));
}
LA79: ;
memset((void*)LOC86, 0, sizeof(LOC86));
LOC86[0] = gentypeinfo_535941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_548247_839829468]).typ);
appcg_532632_839829468((*p0).module, &orexpr0, isobjformat0, LOC86, 1);
res_548524_839829468 += ((NI) 1);
} LA74: ;
}
}
{
if (!(((NI) 1) < i0)) goto LA89;
line_532695_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_620));
}
LA89: ;
/* Emit `if (<orexpr>) { ... }` around the branch body (last son). */
memset((void*)LOC91, 0, sizeof(LOC91));
LOC91[0] = orexpr0;
LOC92 = (NI)0;
LOC92 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_639), LOC91, 1);
memset((void*)LOC93, 0, sizeof(LOC93));
LOC93[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_636), LOC93, 1);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0);
memset((void*)LOC94, 0, sizeof(LOC94));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC94, 0);
endblock_544060_839829468(p0);
}
LA58: ;
i0 += ((NI) 1);
} LA49: ;
}
/* Done with except branches: pop the nested-try entry and close the
   exception block. */
(*p0).inexceptblock -= ((NI) 1);
LOC95 = (Tnode292802*)0;
LOC95 = pop_318246_1689653243((&(*p0).nestedtrystmts));
endblock_544060_839829468(p0);
/* Trailing `finally` branch (node kind 107): record the safepoint for
   re-raise handling while generating the finally body. */
{
NIM_BOOL LOC98;
Ropeobj178006* LOC102;
LOC98 = (NIM_BOOL)0;
LOC98 = (i0 < length0);
if (!(LOC98)) goto LA99;
LOC98 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 107));
LA99: ;
if (!LOC98) goto LA100;
(*p0).finallysafepoints = (TY191350*) incrSeqV2(&((*p0).finallysafepoints)->Sup, sizeof(Ropeobj178006*));
asgnRefNoCycle((void**) (&(*p0).finallysafepoints->data[(*p0).finallysafepoints->Sup.len]), safepoint0);
++(*p0).finallysafepoints->Sup.len;
gensimpleblock_544095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]);
LOC102 = (Ropeobj178006*)0;
LOC102 = pop_178530_1689653243((&(*p0).finallysafepoints));
}
LA100: ;
/* Final safepoint cleanup / re-raise check. */
memset((void*)LOC103, 0, sizeof(LOC103));
LOC103[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_640), LOC103, 1);
}
N_NIMCALL(NimStringDesc*, getraisefrmt_546824_839829468)(Tcproc529021* p0) {
/* Return a fresh copy of the constant format template used when emitting
   a raise statement. The proc argument is unused by this implementation. */
return copyString(((NimStringDesc*) &T839829468_641));
}
/* Machine-generated (Nim C backend). Emits code for a `raise` statement:
   first runs any enclosing finally block when inside an except handler,
   then either raises the given exception expression or re-raises the
   current one when the raise has no argument (son 0 is kind 1). */
N_NIMCALL(void, genraisestmt_546828_839829468)(Tcproc529021* p0, Tnode292802* t0) {
/* If we are inside an except block, the innermost enclosing try's
   finally part (last son, kind 107) must be executed before raising. */
{
Tnode292802* finallyblock0;
if (!(((NI) 0) < (*p0).inexceptblock)) goto LA3;
finallyblock0 = lastson_295364_850551059((*p0).nestedtrystmts->data[(NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) 1))]);
{
if (!((*finallyblock0).kind == ((Tnodekind292020) 107))) goto LA7;
gensimpleblock_544095_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]);
}
LA7: ;
}
LA3: ;
/* raise with an explicit exception expression. */
{
Tloc292816 a0;
Ropeobj178006* e0;
Ttype292840* typ0;
NimStringDesc* LOC13;
TY532811 LOC14;
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA11;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
e0 = rdloc_538188_839829468((&a0));
typ0 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106247256320));
genlinedir_532823_839829468(p0, t0);
LOC13 = (NimStringDesc*)0;
LOC13 = getraisefrmt_546824_839829468(p0);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = e0;
/* Pass the exception's type name as a C string for runtime reporting. */
LOC14[1] = makecstring_191638_155036129((*(*(*typ0).sym).name).s);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), LOC13, LOC14, 2);
}
goto LA9;
LA11: ;
/* bare `raise`: re-raise the exception currently being handled. */
{
genlinedir_532823_839829468(p0, t0);
{
NIM_BOOL LOC18;
NIM_BOOL LOC19;
TY533289 LOC24;
Ropeobj178006* LOC25;
LOC18 = (NIM_BOOL)0;
LOC19 = (NIM_BOOL)0;
/* Choose the re-raise form based on the backend command, a module
   flag, and a global option bit (exact semantics live in the Nim
   compiler source — confirm there before changing). */
LOC19 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC19) goto LA20;
LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA20: ;
LOC18 = LOC19;
if (!(LOC18)) goto LA21;
LOC18 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 31))&63U)))!=0));
LA21: ;
if (!LOC18) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC25 = (Ropeobj178006*)0;
LOC25 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_623), LOC24, 0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), LOC25);
}
goto LA16;
LA22: ;
{
TY533289 LOC27;
memset((void*)LOC27, 0, sizeof(LOC27));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_642), LOC27, 0);
}
LA16: ;
}
LA9: ;
}
/* Intentionally empty: type sections generate no C statements here
   (types are emitted on demand elsewhere in the backend). */
N_NIMCALL(void, gentypesection_538184_839829468)(Tcgen529027* m0, Tnode292802* n0) {
}
/* Determine which C file section an emit pragma's text belongs in.
   Defaults to section 7; if the node's first son is a string literal
   (kind 20..22), its prefix selects section 3, 9 or 1 instead. */
N_NIMCALL(Tcfilesection529005, determinesection_548819_839829468)(Tnode292802* n0) {
Tcfilesection529005 result0 = ((Tcfilesection529005) 7);
NI soncount0 = len_293081_850551059(n0);
if (((NI) 1) <= soncount0) {
Tnode292802* first0 = (*n0).kindU.S6.sons->data[((NI) 0)];
if ((*first0).kind >= ((Tnodekind292020) 20) && (*first0).kind <= ((Tnodekind292020) 22)) {
NimStringDesc* sec0 = (*first0).kindU.S3.strval;
if (nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_643))) {
result0 = ((Tcfilesection529005) 3);
}
else if (nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_644))) {
result0 = ((Tcfilesection529005) 9);
}
else if (nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_645))) {
result0 = ((Tcfilesection529005) 1);
}
}
}
return result0;
}
/* Generate code for an emit pragma: render the user-supplied snippet and
   paste it either into a module-level file section (when not inside a
   proc) or into the current proc's statement section. */
N_NIMCALL(void, genemit_548839_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Ropeobj178006* code0 = genasmoremitstmt_548529_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], NIM_FALSE);
if ((*p0).prc == NIM_NIL) {
/* Top level: the target file section is derived from the emit text. */
Tnode292802* arg0 = HEX5BHEX5D_293238_850551059(t0, ((NI) 1));
Tcfilesection529005 section0 = determinesection_548819_839829468(arg0);
genclinedir_532813_839829468(&(*(*p0).module).s[(section0)- 0], (*t0).info);
add_178482_2381377266(&(*(*p0).module).s[(section0)- 0], code0);
}
else {
/* Inside a proc: emit into the statement section with a line directive. */
genlinedir_532823_839829468(p0, t0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), code0);
}
}
/* Machine-generated (Nim C backend). Registers a breakpoint in the global
   breakpoint table (gbreakpoints) when the debugger option bit 17 is set.
   The breakpoint name comes from the pragma argument when the node is of
   kind 34, otherwise an auto-numbered name is synthesized from a global
   counter. */
N_NIMCALL(void, genbreakpoint_548862_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NimStringDesc* name0;
name0 = (NimStringDesc*)0;
{
TY535238 LOC12;
NI LOC13;
NimStringDesc* LOC14;
/* Entire body is a no-op unless option bit 17 (debugger support,
   presumably — confirm) is enabled. */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) goto LA3;
{
if (!((*t0).kind == ((Tnodekind292020) 34))) goto LA7;
name0 = nsuNormalize((*(*t0).kindU.S6.sons->data[((NI) 1)]).kindU.S3.strval);
}
goto LA5;
LA7: ;
/* No explicit name: build "<prefix><counter>" from the global id. */
{
NimStringDesc* LOC10;
NimStringDesc* LOC11;
breakpointid_548860_839829468 += ((NI) 1);
LOC10 = (NimStringDesc*)0;
LOC11 = (NimStringDesc*)0;
LOC11 = nimIntToStr(breakpointid_548860_839829468);
LOC10 = rawNewString(LOC11->Sup.len + 2);
appendString(LOC10, ((NimStringDesc*) &T839829468_646));
appendString(LOC10, LOC11);
name0 = LOC10;
}
LA5: ;
/* Append (line, filename, name) to the global breakpoint registry. */
genlinedir_532823_839829468(p0, t0);
memset((void*)LOC12, 0, sizeof(LOC12));
LOC13 = (NI)0;
LOC13 = tolinenumber_192415_155036129((*t0).info);
LOC12[0] = rope_178401_2381377266(((NI64) (LOC13)));
LOC14 = (NimStringDesc*)0;
LOC14 = tofilename_192257_155036129((*t0).info.fileindex);
LOC12[1] = makecstring_191638_155036129(LOC14);
LOC12[2] = makecstring_191638_155036129(name0);
appcg_532632_839829468((*p0).module, &gbreakpoints_548861_839829468, ((NimStringDesc*) &T839829468_647), LOC12, 3);
}
LA3: ;
}
/* Generate a watchpoint registration for the expression in son 1 of n0,
   passing its address, its rendered source text and its type info to the
   runtime. Does nothing unless option bit 17 (debugger support,
   presumably) is enabled. */
N_NIMCALL(void, genwatchpoint_549016_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 a0;
Ttype292840* typ0;
TY535238 args0;
NimStringDesc* rendered0;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) {
return;
}
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0));
typ0 = skiptypes_296099_850551059((*(*n0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
memset((void*)args0, 0, sizeof(args0));
args0[0] = addrloc_538204_839829468((&a0));
rendered0 = rendertree_311044_382274130((*n0).kindU.S6.sons->data[((NI) 1)], 0);
args0[1] = makecstring_191638_155036129(rendered0);
args0[2] = gentypeinfo_535941_839829468((*p0).module, typ0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_648), args0, 3);
}
/* Machine-generated (Nim C backend). Walks all children of a pragma node
   and dispatches each recognized pragma word to its code generator:
   191 -> emit, 131 -> breakpoint, 176 -> watchpoint, 183 -> an "inject
   statement" pragma that compiles its argument with a fresh proc context
   and stores the result in the module's injectstmt slot. Unknown pragma
   words are ignored. */
N_NIMCALL(void, genpragma_549039_839829468)(Tcproc529021* p_549041_839829468, Tnode292802* n0) {
{
NI i_549054_839829468;
NI HEX3Atmp_549073_839829468;
NI LOC2;
NI res_549076_839829468;
i_549054_839829468 = (NI)0;
HEX3Atmp_549073_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(n0);
HEX3Atmp_549073_839829468 = (NI)(LOC2 - ((NI) 1));
res_549076_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
Tspecialword275003 LOC5;
if (!(res_549076_839829468 <= HEX3Atmp_549073_839829468)) goto LA4;
i_549054_839829468 = res_549076_839829468;
it0 = (*n0).kindU.S6.sons->data[i_549054_839829468];
LOC5 = (Tspecialword275003)0;
LOC5 = whichpragma_318911_2616423590(it0);
switch (LOC5) {
case ((Tspecialword275003) 191):
{
genemit_548839_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 131):
{
genbreakpoint_548862_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 176):
{
genwatchpoint_549016_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 183):
{
/* Compile the pragma body in a throwaway proc context (with the
   option bits in mask 98304 cleared) and capture its statement
   section as the module's injected statement. */
Tcproc529021* p0;
Ropeobj178006** LOC10;
p0 = newproc_529206_3723162438(NIM_NIL, (*p_549041_839829468).module);
(*p0).options = ((*p0).options & ~ 98304);
genstmts_539244_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)]);
LOC10 = (Ropeobj178006**)0;
LOC10 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
asgnRefNoCycle((void**) (&(*(*p0).module).injectstmt), (*LOC10));
}
break;
default:
{
}
break;
}
res_549076_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/* Machine-generated (Nim C backend). Emits a parallel for statement
   (OpenMP-style, presumably): son 0 is the loop variable symbol, son 1
   a range call whose sons 1/2 are the bounds and son 3 a pragma string,
   son 2 the loop body. The body runs inside a new block marked as a
   loop so `break` resolves correctly; breakidx is saved and restored. */
N_NIMCALL(void, genparforstmt_546208_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NI oldbreakidx_546411_839829468;
Tsym292834* forloopvar0;
Tloc292816 rangea0;
Tloc292816 rangeb0;
Tnode292802* call0;
TY535235 LOC1;
NimStringDesc* LOC2;
TY533289 LOC3;
(*p0).withinloop += ((NI) 1);
genlinedir_532823_839829468(p0, t0);
oldbreakidx_546411_839829468 = (*p0).breakidx;
forloopvar0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
memset((void*)(&rangea0), 0, sizeof(rangea0));
memset((void*)(&rangeb0), 0, sizeof(rangeb0));
assignlocalvar_538614_839829468(p0, forloopvar0);
call0 = (*t0).kindU.S6.sons->data[((NI) 1)];
initlocexpr_539283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 1)], (&rangea0));
initlocexpr_539283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 2)], (&rangeb0));
/* Format args: loop var, lower bound, upper bound, pragma annotation. */
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&(*forloopvar0).loc));
LOC1[1] = rdloc_538188_839829468((&rangea0));
LOC1[2] = rdloc_538188_839829468((&rangeb0));
LOC2 = (NimStringDesc*)0;
LOC2 = getstr_297230_850551059((*call0).kindU.S6.sons->data[((NI) 3)]);
LOC1[3] = rope_178277_2381377266(LOC2);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_649), LOC1, 4);
memset((void*)LOC3, 0, sizeof(LOC3));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC3, 0);
(*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE;
genstmts_539244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 2)]);
endblock_544060_839829468(p0);
(*p0).breakidx = oldbreakidx_546411_839829468;
(*p0).withinloop -= ((NI) 1);
}
/* Machine-generated (Nim C backend). Emits a state label for a closure
   iterator state machine. Requires n0 to have exactly one son of kind 6
   (an int literal); otherwise raises an internal compiler error. */
N_NIMCALL(void, genstate_544117_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NI64 idx0;
TY178507 LOC9;
{
NIM_BOOL LOC3;
NI LOC4;
NimStringDesc* LOC8;
LOC3 = (NIM_BOOL)0;
LOC4 = (NI)0;
LOC4 = len_293081_850551059(n0);
LOC3 = (LOC4 == ((NI) 1));
if (!(LOC3)) goto LA5;
LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 6));
LA5: ;
if (!!(LOC3)) goto LA6;
LOC8 = (NimStringDesc*)0;
LOC8 = HEX24_196185_1689653243(T839829468_650);
internalerror_196113_155036129(LOC8);
}
LA6: ;
/* Emit the label for state number idx0. */
idx0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = rope_178401_2381377266(idx0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_652), LOC9, 1);
}
/* Machine-generated (Nim C backend). Emits a computed goto to a state
   label: assigns the state expression, opens a switch-like dispatch and
   emits one case/goto per possible state value from 0 up to the last
   ordinal of the expression's type. Sets beforeretneeded so the proc
   epilogue label is generated. */
N_NIMCALL(void, gengotostate_544144_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 a0;
TY178507 LOC1;
TY533289 LOC2;
TY533289 LOC7;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&a0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_603), LOC1, 1);
(*p0).beforeretneeded = NIM_TRUE;
memset((void*)LOC2, 0, sizeof(LOC2));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_653), LOC2, 0);
/* One dispatch entry per state in 0 .. lastOrd(state type). */
{
NI64 i_544214_839829468;
NI64 HEX3Atmp_544223_839829468;
NI64 res_544226_839829468;
i_544214_839829468 = (NI64)0;
HEX3Atmp_544223_839829468 = (NI64)0;
HEX3Atmp_544223_839829468 = lastord_320004_3876443242((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ);
res_544226_839829468 = IL64(0);
{
while (1) {
TY178507 LOC6;
if (!(res_544226_839829468 <= HEX3Atmp_544223_839829468)) goto LA5;
i_544214_839829468 = res_544226_839829468;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rope_178401_2381377266(i_544214_839829468);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_654), LOC6, 1);
res_544226_839829468 += ((NI) 1);
} LA5: ;
}
}
/* Close the dispatch construct. */
memset((void*)LOC7, 0, sizeof(LOC7));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC7, 0);
}
/* Machine-generated (Nim C backend). Emits the "break out of the state
   machine" statement. If son 0 is a closure node (kind 155), the state
   is read from its environment field (son 1); otherwise the expression
   itself holds the state. Two different format strings are used
   accordingly. */
N_NIMCALL(void, genbreakstate_544229_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 a0;
memset((void*)(&a0), 0, sizeof(a0));
{
TY178507 LOC5;
if (!((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 155))) goto LA3;
initlocexpr_539283_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&a0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_655), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY178507 LOC7;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468((&a0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_656), LOC7, 1);
}
LA1: ;
}
N_NIMCALL(void, expr_539248_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
switch ((*n0).kind) {
case ((Tnodekind292020) 3):
{
Tsym292834* sym0;
sym0 = (*n0).kindU.S4.sym;
switch ((*sym0).kind) {
case ((Tsymkind292435) 13):
{
{
if (!!(((33554448 & (*sym0).flags) == 0))) goto LA5;
fillprocloc_539201_839829468(sym0);
genprocprototype_539254_839829468((*p0).module, sym0);
}
goto LA3;
LA5: ;
{
genproc_532951_839829468((*p0).module, sym0);
}
LA3: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
case ((Tsymkind292435) 12):
case ((Tsymkind292435) 15):
case ((Tsymkind292435) 14):
{
{
NimStringDesc* LOC13;
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)) goto LA11;
LOC13 = (NimStringDesc*)0;
LOC13 = rawNewString((*(*sym0).name).s->Sup.len + 48);
appendString(LOC13, ((NimStringDesc*) &T839829468_270));
appendString(LOC13, (*(*sym0).name).s);
localerror_196085_155036129((*n0).info, LOC13);
}
LA11: ;
genproc_532951_839829468((*p0).module, sym0);
{
NIM_BOOL LOC16;
NimStringDesc* LOC20;
LOC16 = (NIM_BOOL)0;
LOC16 = ((*sym0).loc.r == NIM_NIL);
if (LOC16) goto LA17;
LOC16 = ((*sym0).loc.t == NIM_NIL);
LA17: ;
if (!LOC16) goto LA18;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString((*(*sym0).name).s->Sup.len + 20);
appendString(LOC20, ((NimStringDesc*) &T839829468_271));
appendString(LOC20, (*(*sym0).name).s);
internalerror_196100_155036129((*n0).info, LOC20);
}
LA18: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
case ((Tsymkind292435) 10):
{
{
NIM_BOOL LOC24;
Ropeobj178006* LOC27;
LOC24 = (NIM_BOOL)0;
LOC24 = issimpleconst_532311_839829468((*sym0).typ);
if (!LOC24) goto LA25;
LOC27 = (Ropeobj178006*)0;
LOC27 = genliteral_549476_839829468(p0, (*sym0).ast, (*sym0).typ);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC27, ((Tstorageloc292812) 1));
}
goto LA22;
LA25: ;
{
gencomplexconst_558249_839829468(p0, sym0, d0);
}
LA22: ;
}
break;
case ((Tsymkind292435) 19):
{
Ropeobj178006* LOC30;
LOC30 = (Ropeobj178006*)0;
LOC30 = rope_178401_2381377266(((NI64) ((*sym0).position)));
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC30, ((Tstorageloc292812) 0));
}
break;
case ((Tsymkind292435) 8):
case ((Tsymkind292435) 20):
case ((Tsymkind292435) 11):
case ((Tsymkind292435) 9):
{
{
if (!!(((4194312 & (*sym0).flags) == 0))) goto LA34;
genvarprototype_539236_839829468((*p0).module, sym0);
}
LA34: ;
{
NIM_BOOL LOC38;
NimStringDesc* LOC42;
NimStringDesc* LOC43;
LOC38 = (NIM_BOOL)0;
LOC38 = ((*sym0).loc.r == NIM_NIL);
if (LOC38) goto LA39;
LOC38 = ((*sym0).loc.t == NIM_NIL);
LA39: ;
if (!LOC38) goto LA40;
LOC42 = (NimStringDesc*)0;
LOC43 = (NimStringDesc*)0;
LOC43 = nimIntToStr((*sym0).Sup.id);
LOC42 = rawNewString((*(*sym0).name).s->Sup.len + LOC43->Sup.len + 20);
appendString(LOC42, ((NimStringDesc*) &T839829468_285));
appendString(LOC42, (*(*sym0).name).s);
appendString(LOC42, ((NimStringDesc*) &T839829468_12));
appendString(LOC42, LOC43);
internalerror_196100_155036129((*n0).info, LOC42);
}
LA40: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA46;
accessthreadlocalvar_532945_839829468(p0, sym0);
{
NIM_BOOL LOC50;
Ropeobj178006* LOC53;
LOC50 = (NIM_BOOL)0;
LOC50 = emulatedthreadvars_532949_839829468();
if (!LOC50) goto LA51;
LOC53 = (Ropeobj178006*)0;
LOC53 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_288), (*sym0).loc.r);
putintodest_550468_839829468(p0, d0, (*sym0).loc.t, LOC53, ((Tstorageloc292812) 0));
}
goto LA48;
LA51: ;
{
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
LA48: ;
}
goto LA44;
LA46: ;
{
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
LA44: ;
}
break;
case ((Tsymkind292435) 5):
{
{
NIM_BOOL LOC59;
NimStringDesc* LOC63;
NimStringDesc* LOC64;
LOC59 = (NIM_BOOL)0;
LOC59 = ((*sym0).loc.r == NIM_NIL);
if (LOC59) goto LA60;
LOC59 = ((*sym0).loc.t == NIM_NIL);
LA60: ;
if (!LOC59) goto LA61;
LOC63 = (NimStringDesc*)0;
LOC64 = (NimStringDesc*)0;
LOC64 = nimIntToStr((*sym0).Sup.id);
LOC63 = rawNewString((*(*sym0).name).s->Sup.len + LOC64->Sup.len + 21);
appendString(LOC63, ((NimStringDesc*) &T839829468_289));
appendString(LOC63, (*(*sym0).name).s);
appendString(LOC63, ((NimStringDesc*) &T839829468_12));
appendString(LOC63, LOC64);
internalerror_196100_155036129((*n0).info, LOC63);
}
LA61: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
case ((Tsymkind292435) 3):
{
{
NIM_BOOL LOC68;
NimStringDesc* LOC72;
NimStringDesc* LOC73;
LOC68 = (NIM_BOOL)0;
LOC68 = ((*sym0).loc.r == NIM_NIL);
if (LOC68) goto LA69;
LOC68 = ((*sym0).loc.t == NIM_NIL);
LA69: ;
if (!LOC68) goto LA70;
LOC72 = (NimStringDesc*)0;
LOC73 = (NimStringDesc*)0;
LOC73 = nimIntToStr((*sym0).Sup.id);
LOC72 = rawNewString((*(*sym0).name).s->Sup.len + LOC73->Sup.len + 22);
appendString(LOC72, ((NimStringDesc*) &T839829468_290));
appendString(LOC72, (*(*sym0).name).s);
appendString(LOC72, ((NimStringDesc*) &T839829468_12));
appendString(LOC72, LOC73);
internalerror_196100_155036129((*n0).info, LOC72);
}
LA70: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
default:
{
NimStringDesc* LOC75;
LOC75 = (NimStringDesc*)0;
LOC75 = rawNewString(reprEnum((NI)(*sym0).kind, (&NTI292435))->Sup.len + 22);
appendString(LOC75, ((NimStringDesc*) &T839829468_291));
appendString(LOC75, reprEnum((NI)(*sym0).kind, (&NTI292435)));
appendString(LOC75, ((NimStringDesc*) &T839829468_292));
internalerror_196100_155036129((*n0).info, LOC75);
}
break;
}
}
break;
case ((Tnodekind292020) 23):
{
{
NIM_BOOL LOC79;
Ropeobj178006* LOC82;
LOC79 = (NIM_BOOL)0;
LOC79 = isemptytype_297441_850551059((*n0).typ);
if (!!(LOC79)) goto LA80;
LOC82 = (Ropeobj178006*)0;
LOC82 = genliteral_539273_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC82, ((Tstorageloc292812) 0));
}
LA80: ;
}
break;
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
Ropeobj178006* LOC84;
LOC84 = (Ropeobj178006*)0;
LOC84 = genliteral_539273_839829468(p0, n0);
putdataintodest_550436_839829468(p0, d0, (*n0).typ, LOC84);
}
break;
case ((Tnodekind292020) 6) ... ((Tnodekind292020) 15):
case ((Tnodekind292020) 16) ... ((Tnodekind292020) 19):
case ((Tnodekind292020) 5):
{
Ropeobj178006* LOC86;
LOC86 = (Ropeobj178006*)0;
LOC86 = genliteral_539273_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC86, ((Tstorageloc292812) 0));
}
break;
case ((Tnodekind292020) 27):
case ((Tnodekind292020) 32):
case ((Tnodekind292020) 29):
case ((Tnodekind292020) 30):
case ((Tnodekind292020) 31):
case ((Tnodekind292020) 26):
case ((Tnodekind292020) 28):
{
Tnode292802* op0;
genlinedir_532823_839829468(p0, n0);
op0 = (*n0).kindU.S6.sons->data[((NI) 0)];
{
Tloc292816 a0;
if (!(*n0).typ == 0) goto LA90;
memset((void*)(&a0), 0, sizeof(a0));
{
NIM_BOOL LOC94;
LOC94 = (NIM_BOOL)0;
LOC94 = ((*op0).kind == ((Tnodekind292020) 3));
if (!(LOC94)) goto LA95;
LOC94 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic292524) 0)));
LA95: ;
if (!LOC94) goto LA96;
genmagicexpr_557033_839829468(p0, n0, (&a0), (*(*op0).kindU.S4.sym).magic);
}
goto LA92;
LA96: ;
{
gencall_543632_839829468(p0, n0, (&a0));
}
LA92: ;
}
goto LA88;
LA90: ;
{
{
NIM_BOOL LOC102;
LOC102 = (NIM_BOOL)0;
LOC102 = ((*op0).kind == ((Tnodekind292020) 3));
if (!(LOC102)) goto LA103;
LOC102 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic292524) 0)));
LA103: ;
if (!LOC102) goto LA104;
genmagicexpr_557033_839829468(p0, n0, d0, (*(*op0).kindU.S4.sym).magic);
}
goto LA100;
LA104: ;
{
gencall_543632_839829468(p0, n0, d0);
}
LA100: ;
}
LA88: ;
}
break;
case ((Tnodekind292020) 39):
{
{
NIM_BOOL LOC110;
NI LOC112;
Ropeobj178006* LOC115;
LOC110 = (NIM_BOOL)0;
LOC110 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC110)) goto LA111;
LOC112 = (NI)0;
LOC112 = len_293081_850551059(n0);
LOC110 = !((LOC112 == ((NI) 0)));
LA111: ;
if (!LOC110) goto LA113;
LOC115 = (Ropeobj178006*)0;
LOC115 = gensetnode_549664_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC115, ((Tstorageloc292812) 0));
}
goto LA108;
LA113: ;
{
gensetconstr_557496_839829468(p0, n0, d0);
}
LA108: ;
}
break;
case ((Tnodekind292020) 41):
{
{
NIM_BOOL LOC120;
NI LOC122;
LOC120 = (NIM_BOOL)0;
LOC120 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC120)) goto LA121;
LOC122 = (NI)0;
LOC122 = len_293081_850551059(n0);
LOC120 = !((LOC122 == ((NI) 0)));
LA121: ;
if (!LOC120) goto LA123;
exprcomplexconst_558684_839829468(p0, n0, d0);
}
goto LA118;
LA123: ;
{
Ttype292840* LOC126;
LOC126 = (Ttype292840*)0;
LOC126 = skiptypes_296099_850551059((*n0).typ, IL64(211106242013440));
if (!((*LOC126).kind == ((Ttypekind292244) 24))) goto LA127;
genseqconstr_555004_839829468(p0, n0, d0);
}
goto LA118;
LA127: ;
{
genarrayconstr_558207_839829468(p0, n0, d0);
}
LA118: ;
}
break;
case ((Tnodekind292020) 37):
{
{
NIM_BOOL LOC133;
NI LOC135;
LOC133 = (NIM_BOOL)0;
LOC133 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC133)) goto LA134;
LOC135 = (NI)0;
LOC135 = len_293081_850551059(n0);
LOC133 = !((LOC135 == ((NI) 0)));
LA134: ;
if (!LOC133) goto LA136;
exprcomplexconst_558684_839829468(p0, n0, d0);
}
goto LA131;
LA136: ;
{
gentupleconstr_557618_839829468(p0, n0, d0);
}
LA131: ;
}
break;
case ((Tnodekind292020) 38):
{
genobjconstr_554903_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 61):
{
gencast_556538_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 58):
case ((Tnodekind292020) 59):
case ((Tnodekind292020) 60):
{
genconv_556633_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 64):
case ((Tnodekind292020) 63):
{
genaddr_553051_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 42):
{
genbracketexpr_554277_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 47):
case ((Tnodekind292020) 65):
{
genderef_543921_839829468(p0, n0, d0, NIM_FALSE);
}
break;
case ((Tnodekind292020) 45):
{
genrecordfield_553448_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 46):
{
gencheckedrecordfield_554046_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 127):
case ((Tnodekind292020) 112):
{
genblock_546083_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 126):
{
genstmtlistexpr_558402_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 115):
{
{
NI i_559023_839829468;
NI HEX3Atmp_559276_839829468;
NI LOC151;
NI res_559279_839829468;
i_559023_839829468 = (NI)0;
HEX3Atmp_559276_839829468 = (NI)0;
LOC151 = (NI)0;
LOC151 = sonslen_295351_850551059(n0);
HEX3Atmp_559276_839829468 = (NI)(LOC151 - ((NI) 1));
res_559279_839829468 = ((NI) 0);
{
while (1) {
if (!(res_559279_839829468 <= HEX3Atmp_559276_839829468)) goto LA153;
i_559023_839829468 = res_559279_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[i_559023_839829468]);
res_559279_839829468 += ((NI) 1);
} LA153: ;
}
}
}
break;
case ((Tnodekind292020) 48):
case ((Tnodekind292020) 92):
{
genif_544982_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 93):
{
expr_539248_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[((NI) 0)], d0);
}
break;
case ((Tnodekind292020) 66):
{
downconv_558581_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 67):
{
upconv_558431_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 68):
{
genrangechck_556591_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_563));
}
break;
case ((Tnodekind292020) 69):
{
genrangechck_556591_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_564));
}
break;
case ((Tnodekind292020) 70):
{
genrangechck_556591_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_565));
}
break;
case ((Tnodekind292020) 71):
{
convstrtocstr_556643_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 72):
{
convcstrtostr_556655_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 51):
case ((Tnodekind292020) 52):
{
Tsym292834* sym0;
sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
genproc_532951_839829468((*p0).module, sym0);
{
NIM_BOOL LOC166;
NimStringDesc* LOC170;
LOC166 = (NIM_BOOL)0;
LOC166 = ((*sym0).loc.r == NIM_NIL);
if (LOC166) goto LA167;
LOC166 = ((*sym0).loc.t == NIM_NIL);
LA167: ;
if (!LOC166) goto LA168;
LOC170 = (NimStringDesc*)0;
LOC170 = rawNewString((*(*sym0).name).s->Sup.len + 20);
appendString(LOC170, ((NimStringDesc*) &T839829468_271));
appendString(LOC170, (*(*sym0).name).s);
internalerror_196100_155036129((*n0).info, LOC170);
}
LA168: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
case ((Tnodekind292020) 155):
{
genclosure_557836_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 1):
{
}
break;
case ((Tnodekind292020) 96):
{
genwhilestmt_545985_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 99):
case ((Tnodekind292020) 100):
{
genvarstmt_544854_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 101):
{
genconststmt_544909_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 94):
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_594));
}
break;
case ((Tnodekind292020) 97):
{
gencase_547827_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 109):
{
genreturnstmt_545617_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 110):
{
genbreakstmt_546444_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 73):
{
{
if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0))) goto LA183;
genasgn_549239_839829468(p0, n0, NIM_FALSE);
}
LA183: ;
}
break;
case ((Tnodekind292020) 74):
{
{
if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0))) goto LA188;
genasgn_549239_839829468(p0, n0, !(((*p0).prc == NIM_NIL)));
}
LA188: ;
}
break;
case ((Tnodekind292020) 114):
{
{
Tloc292816 a0;
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA193;
genlinedir_532823_839829468(p0, n0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA193: ;
}
break;
case ((Tnodekind292020) 89):
{
genasmstmt_548659_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 106):
{
{
NIM_BOOL LOC199;
NIM_BOOL LOC200;
LOC199 = (NIM_BOOL)0;
LOC200 = (NIM_BOOL)0;
LOC200 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC200) goto LA201;
LOC200 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA201: ;
LOC199 = LOC200;
if (!(LOC199)) goto LA202;
LOC199 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 31))&63U)))!=0));
LA202: ;
if (!LOC199) goto LA203;
gentrycpp_547866_839829468(p0, n0, d0);
}
goto LA197;
LA203: ;
{
gentry_548114_839829468(p0, n0, d0);
}
LA197: ;
}
break;
case ((Tnodekind292020) 108):
{
genraisestmt_546828_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 98):
{
gentypesection_538184_839829468((*p0).module, n0);
}
break;
case ((Tnodekind292020) 125):
case ((Tnodekind292020) 84):
case ((Tnodekind292020) 121):
case ((Tnodekind292020) 116):
case ((Tnodekind292020) 117):
case ((Tnodekind292020) 118):
case ((Tnodekind292020) 119):
case ((Tnodekind292020) 120):
case ((Tnodekind292020) 83):
case ((Tnodekind292020) 82):
{
}
break;
case ((Tnodekind292020) 90):
{
genpragma_549039_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 91):
{
Tnode292802* LOC211;
LOC211 = (Tnode292802*)0;
LOC211 = lastson_295364_850551059(n0);
expr_539248_839829468(p0, LOC211, d0);
}
break;
case ((Tnodekind292020) 79):
case ((Tnodekind292020) 80):
case ((Tnodekind292020) 81):
{
{
Tsym292834* prc0;
if (!((*(*n0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1))) goto LA215;
prc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NIM_BOOL LOC219;
Tsym292834* LOC220;
LOC219 = (NIM_BOOL)0;
LOC220 = (Tsym292834*)0;
LOC220 = skipgenericowner_297280_850551059(prc0);
LOC219 = ((*LOC220).kind == ((Tsymkind292435) 6));
if (!(LOC219)) goto LA221;
LOC219 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0));
LA221: ;
if (!LOC219) goto LA222;
{
NIM_BOOL LOC226;
NIM_BOOL LOC227;
NIM_BOOL LOC228;
NIM_BOOL LOC229;
Tsym292834* LOC231;
NIM_BOOL LOC234;
LOC226 = (NIM_BOOL)0;
LOC227 = (NIM_BOOL)0;
LOC228 = (NIM_BOOL)0;
LOC229 = (NIM_BOOL)0;
LOC229 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0));
if (!(LOC229)) goto LA230;
LOC231 = (Tsym292834*)0;
LOC231 = getmodule_299123_2984716966(prc0);
LOC229 = !((((*LOC231).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0));
LA230: ;
LOC228 = LOC229;
if (LOC228) goto LA232;
LOC228 = ((65600 & (*prc0).flags) == 64);
LA232: ;
LOC227 = LOC228;
if (LOC227) goto LA233;
LOC234 = (NIM_BOOL)0;
LOC234 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
if (!(LOC234)) goto LA235;
LOC234 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 5))&15U)))!=0);
LA235: ;
LOC227 = LOC234;
LA233: ;
LOC226 = LOC227;
if (LOC226) goto LA236;
LOC226 = ((*prc0).kind == ((Tsymkind292435) 13));
LA236: ;
if (!LOC226) goto LA237;
{
NIM_BOOL LOC241;
Tnode292802* LOC242;
LOC241 = (NIM_BOOL)0;
LOC242 = (Tnode292802*)0;
LOC242 = getbody_335226_1724185294(prc0);
LOC241 = !(((*LOC242).kind == ((Tnodekind292020) 1)));
if (LOC241) goto LA243;
LOC241 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0);
LA243: ;
if (!LOC241) goto LA244;
genproc_532951_839829468((*p0).module, prc0);
}
LA244: ;
}
LA237: ;
}
LA222: ;
}
LA215: ;
}
break;
case ((Tnodekind292020) 95):
{
genparforstmt_546208_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 157):
{
genstate_544117_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 156):
{
gengotostate_544144_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 158):
{
genbreakstate_544229_839829468(p0, n0);
}
break;
default:
{
NimStringDesc* LOC251;
LOC251 = (NimStringDesc*)0;
LOC251 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI292020))->Sup.len + 25);
appendString(LOC251, ((NimStringDesc*) &T839829468_291));
appendString(LOC251, reprEnum((NI)(*n0).kind, (&NTI292020)));
appendString(LOC251, ((NimStringDesc*) &T839829468_657));
internalerror_196100_155036129((*n0).info, LOC251);
}
break;
}
}
/* Machine-generated (Nim compiler C backend) helper, restyled for readability.
 * Evaluates statement node `t0` into a zeroed scratch location and then
 * raises an internal error unless the resulting location kind `scratch.k`
 * falls in the low three-bit mask 7 (presumably the "no value" loc kinds —
 * TODO confirm against the Nim compiler's TLocKind enum). */
N_NIMCALL(void, genstmts_539244_839829468)(Tcproc529021* p0, Tnode292802* t0) {
	Tloc292816 scratch;
	memset((void*)(&scratch), 0, sizeof(scratch));
	expr_539248_839829468(p0, t0, (&scratch));
	/* Structured form of the original goto-based guard. */
	if (!((7 & (1U << ((NU)(scratch.k) & 15U))) != 0)) {
		NimStringDesc* msg = HEX24_196185_1689653243(T839829468_658);
		internalerror_196113_155036129(msg);
	}
}
/* Machine-generated (Nim compiler C backend) pass callback, restyled.
 * Feeds statement node `n0` into the module's init procedure and always
 * returns `n0` unchanged. Bails out early when the pass context is nil or
 * codegen is to be skipped for this node. */
N_NIMCALL(Tnode292802*, myprocess_563402_839829468)(Tpasscontext341002* b0, Tnode292802* n0) {
	Tcgen529027* gen;
	/* Short-circuit || replaces the goto ladder of the generated original:
	 * skipcodegen is only evaluated when b0 is non-nil, as before. */
	if ((b0 == NIM_NIL) || skipcodegen_341085_2355241294(n0)) {
		return n0;
	}
	gen = ((Tcgen529027*) (b0));
	(*(*gen).initproc).options = initprocoptions_562635_839829468(gen);
	genstmts_539244_839829468((*gen).initproc, n0);
	return n0;
}
/* Machine-generated (Nim compiler C backend), restyled.
 * Builds "<owner>_<name><suffix>"-style rope for a module's init symbol.
 * The owner prefix is only emitted when neither flag bit in mask 12288
 * (bits 12 and 13 of the symbol flags) is set — TODO confirm which
 * Tsymflag values those bits correspond to. */
N_NIMCALL(Ropeobj178006*, getsomeinitname_561904_839829468)(Tsym292834* m0, NimStringDesc* suffix0) {
	Ropeobj178006* result0;
	result0 = (Ropeobj178006*)0;
	if (((12288 & (*m0).flags) == 0)) {
		/* Prefix: mangled owner name followed by the separator rope. */
		NimStringDesc* mangled = mangle_528847_2036603609((*(*(*m0).owner).name).s);
		result0 = rope_178277_2381377266(mangled);
		add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_12));
	}
	add_178487_2381377266(&result0, (*(*m0).name).s);
	add_178487_2381377266(&result0, suffix0);
	return result0;
}
/* Machine-generated (Nim compiler C backend), restyled.
 * Convenience wrapper: init-proc name = base name + the Init suffix
 * string constant T839829468_659. */
N_NIMCALL(Ropeobj178006*, getinitname_562235_839829468)(Tsym292834* m0) {
	return getsomeinitname_561904_839829468(m0, ((NimStringDesc*) &T839829468_659));
}
/* Machine-generated (Nim compiler C backend), restyled.
 * Convenience wrapper: data-init-proc name = base name + the DatInit
 * suffix string constant T839829468_660. */
N_NIMCALL(Ropeobj178006*, getdatinitname_562239_839829468)(Tsym292834* m0) {
	return getsomeinitname_561904_839829468(m0, ((NimStringDesc*) &T839829468_660));
}
/* Machine-generated by the Nim compiler's C backend — do not edit by hand.
 * Registers module `m0`'s Init and DatInit procedures with the global main
 * ropes: both get a forward declaration appended to `mainmodprocs`, and —
 * unless symbol-flag bit 13 is set (presumably a "no init needed" flag;
 * TODO confirm against Tsymflag) — a call to DatInit is appended to
 * `maindatinit` and a call to Init is routed to either `mainmodinit`
 * (flag bit 12 set — looks like the main-module flag, same bit that
 * triggers genmainproc in myclose below; verify) or `othermodsinit`. */
N_NIMCALL(void, registermoduletomain_562243_839829468)(Tsym292834* m0) {
Ropeobj178006* init0;
Ropeobj178006* datinit0;
TY178507 LOC1;
TY178507 LOC2;
init0 = getinitname_562235_839829468(m0);
datinit0 = getdatinitname_562239_839829468(m0);
/* Emit forward declarations for both procs (format T839829468_661). */
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = init0;
addf_179205_2381377266(&mainmodprocs_529148_3723162438, ((NimStringDesc*) &T839829468_661), LOC1, 1);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = datinit0;
addf_179205_2381377266(&mainmodprocs_529148_3723162438, ((NimStringDesc*) &T839829468_661), LOC2, 1);
{
TY178507 LOC7;
Ropeobj178006* initcall0;
TY178507 LOC8;
/* Skip the call emission entirely when flag bit 13 is set. */
if (!!((((*m0).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0))) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = datinit0;
addf_179205_2381377266(&maindatinit_529151_3723162438, ((NimStringDesc*) &T839829468_662), LOC7, 1);
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = init0;
/* Build the Init call text once, then append it to exactly one rope. */
initcall0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_662), LOC8, 1);
{
if (!(((*m0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA11;
add_178482_2381377266(&mainmodinit_529149_3723162438, initcall0);
}
goto LA9;
LA11: ;
{
add_178482_2381377266(&othermodsinit_529150_3723162438, initcall0);
}
LA9: ;
}
LA5: ;
}
/* Machine-generated (Nim compiler C backend), restyled.
 * Builds a rope containing one formatted entry (format T839829468_674) per
 * registered file info, each wrapping the C-string form of its projpath.
 * The leading cgsym call's result is intentionally unused — it appears to
 * be invoked only for its side effect of pulling in a runtime symbol
 * (TODO confirm against cgsym's definition). */
N_NIMCALL(Ropeobj178006*, genfilenames_561688_839829468)(Tcgen529027* m0) {
	Ropeobj178006* result0;
	Ropeobj178006* cgsymres;
	NI idx;
	NI last;
	cgsymres = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_673));
	result0 = NIM_NIL;
	/* Bound computed once, as in the original's HEX3Atmp temporary. */
	last = ((fileinfos_191629_155036129 ? fileinfos_191629_155036129->Sup.len : 0) - 1);
	for (idx = ((NI) 0); idx <= last; idx += ((NI) 1)) {
		TY178507 args;
		memset((void*)args, 0, sizeof(args));
		args[0] = makecstring_191638_155036129(fileinfos_191629_155036129->data[idx].projpath);
		addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_674), args, 1);
	}
	return result0;
}
/* Machine-generated by the Nim compiler's C backend — do not edit by hand.
 * Emits the program entry point(s) for module `m0`: picks the NimMain /
 * platform-main format strings based on target OS and global options,
 * optionally emits breakpoint/filename support code, chooses the
 * stack-bottom initialization call, then writes the combined main-proc
 * text into file section 10. Control flow uses the generated LAxx labels;
 * statement order is significant throughout. */
N_NIMCALL(void, genmainproc_561729_839829468)(Tcgen529027* m0) {
NimStringDesc* nimmain0;
NimStringDesc* othermain0;
Ropeobj178006* initstackbottomcall0;
TY536475 LOC38;
TY535238 LOC47;
nimmain0 = (NimStringDesc*)0;
othermain0 = (NimStringDesc*)0;
/* Branch 1: target OS 2 with one of the option bits in mask 1280 set —
 * looks like the Windows GUI/DLL case (TODO confirm enum values); picks
 * WinMain/DllMain-style templates and includes header T839829468_667. */
{
NIM_BOOL LOC3;
NIM_BOOL LOC12;
LOC3 = (NIM_BOOL)0;
LOC3 = (targetos_176629_4151366050 == ((Tsystemos176004) 2));
if (!(LOC3)) goto LA4;
LOC3 = !(((gglobaloptions_169130_2607990831 & 1280) == 0));
LA4: ;
if (!LOC3) goto LA5;
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 10))&63U)))!=0)) goto LA9;
nimmain0 = copyString(((NimStringDesc*) &T839829468_663));
othermain0 = copyString(((NimStringDesc*) &T839829468_664));
}
goto LA7;
LA9: ;
{
nimmain0 = copyString(((NimStringDesc*) &T839829468_665));
othermain0 = copyString(((NimStringDesc*) &T839829468_666));
}
LA7: ;
LOC12 = (NIM_BOOL)0;
/* Result deliberately ignored: includestr registers the header as a
 * side effect (presumably; verify against includestr's definition). */
LOC12 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_667));
}
goto LA1;
LA5: ;
/* Branch 2: global option bit 8 set — shared-library-style entry. */
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 8))&63U)))!=0)) goto LA14;
nimmain0 = copyString(((NimStringDesc*) &T839829468_665));
othermain0 = copyString(((NimStringDesc*) &T839829468_668));
}
goto LA1;
LA14: ;
/* Branch 3: target OS 24 (a standalone/embedded target — confirm). */
{
if (!(targetos_176629_4151366050 == ((Tsystemos176004) 24))) goto LA17;
nimmain0 = copyString(((NimStringDesc*) &T839829468_669));
othermain0 = copyString(((NimStringDesc*) &T839829468_670));
}
goto LA1;
LA17: ;
/* Branch 4: default console program. */
{
nimmain0 = copyString(((NimStringDesc*) &T839829468_669));
othermain0 = copyString(((NimStringDesc*) &T839829468_671));
}
LA1: ;
/* Pull in breakpoint support when breakpoints were recorded. */
{
Ropeobj178006* LOC24;
if (!!((gbreakpoints_548861_839829468 == NIM_NIL))) goto LA22;
LOC24 = (Ropeobj178006*)0;
LOC24 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_672));
}
LA22: ;
/* With option bit 17 set, append the generated file-name table to the
 * breakpoint rope. */
{
Ropeobj178006* LOC29;
if (!((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) goto LA27;
LOC29 = (Ropeobj178006*)0;
LOC29 = genfilenames_561688_839829468(m0);
add_178482_2381377266(&gbreakpoints_548861_839829468, LOC29);
}
LA27: ;
/* Stack-bottom init: empty placeholder (T839829468_490) for OS 24 or
 * GC mode 0, otherwise a formatted call built by ropecg. */
{
NIM_BOOL LOC32;
LOC32 = (NIM_BOOL)0;
LOC32 = (targetos_176629_4151366050 == ((Tsystemos176004) 24));
if (LOC32) goto LA33;
LOC32 = (gselectedgc_169133_2607990831 == ((Tgcmode169080) 0));
LA33: ;
if (!LOC32) goto LA34;
initstackbottomcall0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_490));
}
goto LA30;
LA34: ;
{
TY533289 LOC37;
memset((void*)LOC37, 0, sizeof(LOC37));
initstackbottomcall0 = ropecg_532407_839829468(m0, ((NimStringDesc*) &T839829468_675), LOC37, 0);
}
LA30: ;
(*m0).labels += ((NI) 1);
/* Five-slot argument pack for the main-proc template T839829468_676:
 * data init, breakpoints, other-module inits, thread-var setup (or an
 * empty rope), stack-bottom call. */
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = maindatinit_529151_3723162438;
LOC38[1] = gbreakpoints_548861_839829468;
LOC38[2] = othermodsinit_529150_3723162438;
{
NIM_BOOL LOC41;
TY533289 LOC45;
LOC41 = (NIM_BOOL)0;
LOC41 = emulatedthreadvars_532949_839829468();
if (!(LOC41)) goto LA42;
LOC41 = !((targetos_176629_4151366050 == ((Tsystemos176004) 24)));
LA42: ;
if (!LOC41) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC38[3] = ropecg_532407_839829468(m0, ((NimStringDesc*) &T839829468_677), LOC45, 0);
}
goto LA39;
LA43: ;
{
LOC38[3] = rope_178277_2381377266(((NimStringDesc*) &T839829468_490));
}
LA39: ;
LOC38[4] = initstackbottomcall0;
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], ((NimStringDesc*) &T839829468_676), LOC38, 5);
/* Emit the NimMain body (main-module init, stack bottom, label count). */
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = mainmodinit_529149_3723162438;
LOC47[1] = initstackbottomcall0;
LOC47[2] = rope_178401_2381377266(((NI64) ((*m0).labels)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], nimmain0, LOC47, 3);
/* Unless global option bit 20 is set (presumably --noMain; verify),
 * also emit the platform main wrapper. */
{
TY533289 LOC52;
if (!!(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 20))&63U)))!=0))) goto LA50;
memset((void*)LOC52, 0, sizeof(LOC52));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], othermain0, LOC52, 0);
}
LA50: ;
}
/* Machine-generated by the Nim compiler's C backend — do not edit by hand.
 * Pass-close callback: flushes a final statement node (if any) into the
 * module's init proc, registers the module with the main-proc ropes, and —
 * when module flag bit 12 is set (looks like the main-module flag; verify) —
 * generates all method dispatchers plus the program's main procedure.
 * Always returns `n0` unchanged. */
N_NIMCALL(Tnode292802*, myclose_563830_839829468)(Tpasscontext341002* b0, Tnode292802* n0) {
Tnode292802* result0;
Tcgen529027* m0;
{	result0 = (Tnode292802*)0;
result0 = n0;
/* Early exit when the pass context is nil or codegen is skipped;
 * skipcodegen is only evaluated for non-nil b0 (short-circuit via goto). */
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (b0 == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = skipcodegen_341085_2355241294(n0);
LA4: ;
if (!LOC3) goto LA5;
goto BeforeRet;
}
LA5: ;
m0 = ((Tcgen529027*) (b0));
/* Generate code for the trailing node, if the frontend passed one. */
{
if (!!((n0 == NIM_NIL))) goto LA9;
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
genstmts_539244_839829468((*m0).initproc, n0);
}
LA9: ;
registermoduletomain_562243_839829468((*m0).module);
{
Tnode292802* disp0;
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA13;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 5))%(sizeof(NU8)*8));
disp0 = generatemethoddispatchers_432151_3853300031();
/* Emit one proc per son of the dispatcher list node. */
{
NI i_563891_839829468;
NI HEX3Atmp_563895_839829468;
NI LOC16;
NI res_563898_839829468;
i_563891_839829468 = (NI)0;
HEX3Atmp_563895_839829468 = (NI)0;
LOC16 = (NI)0;
LOC16 = sonslen_295351_850551059(disp0);
HEX3Atmp_563895_839829468 = (NI)(LOC16 - ((NI) 1));
res_563898_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563898_839829468 <= HEX3Atmp_563895_839829468)) goto LA18;
i_563891_839829468 = res_563898_839829468;
genprocaux_560284_839829468(m0, (*(*disp0).kindU.S6.sons->data[i_563891_839829468]).kindU.S4.sym);
res_563898_839829468 += ((NI) 1);
} LA18: ;
}
}
genmainproc_561729_839829468(m0);
}
LA13: ;
}BeforeRet: ;
return result0;
}
/* Machine-generated by the Nim compiler's C backend — do not edit by hand.
 * Drains `m0->forwardedprocs`: generates each pending proc (raising an
 * internal error first if symbol-flag bit 4 is still set — presumably
 * "still forward-declared"; verify against Tsymflag), then subtracts the
 * processed count from the global forwarded-procs counter and truncates
 * the sequence to length 0. Note the loop bound is re-read every
 * iteration, so procs appended during generation are also processed. */
N_NIMCALL(void, finishmodule_563420_839829468)(Tcgen529027* m0) {
NI i0;
i0 = ((NI) 0);
{
while (1) {
Tsym292834* prc0;
if (!(i0 <= ((*m0).forwardedprocs ? ((*m0).forwardedprocs->Sup.len-1) : -1))) goto LA2;
prc0 = (*m0).forwardedprocs->data[i0];
/* Guard: complain about a proc that never got a real implementation. */
{
NimStringDesc* LOC7;
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 4))&31U)))!=0)) goto LA5;
LOC7 = (NimStringDesc*)0;
LOC7 = rawNewString((*(*prc0).name).s->Sup.len + 17);
appendString(LOC7, ((NimStringDesc*) &T839829468_678));
appendString(LOC7, (*(*prc0).name).s);
internalerror_196100_155036129((*prc0).info, LOC7);
}
LA5: ;
genprocnoforward_560906_839829468(m0, prc0);
i0 += ((NI) 1);
} LA2: ;
}
gforwardedprocscounter_529171_3723162438 -= i0;
(*m0).forwardedprocs = (Tsymseq292804*) setLengthSeq(&((*m0).forwardedprocs)->Sup, sizeof(Tsym292834*), ((NI) 0));
}
/* Machine-generated by the Nim compiler's C backend — do not edit by hand.
 * Assembles the module's Init procedure as one rope `prc0`: optional
 * type-node/nim-type array declarations, GC-frame setup, the three proc
 * sections (0, 1, 2) of preinitproc/initproc/postinitproc in order,
 * optional frame push/pop, GC-frame teardown, an (empty-bodied) DatInit
 * procedure, file sections 12..16, and finally any extension-loader
 * stubs indexed by the characters '0'..'9'. The exact append order is
 * what makes the emitted C valid — do not reorder. */
N_NIMCALL(void, geninitcode_562286_839829468)(Tcgen529027* m0) {
Ropeobj178006* initname0;
Ropeobj178006* prc0;
TY178507 LOC1;
Ropeobj178006* LOC12;
Ropeobj178006* LOC13;
Ropeobj178006** LOC14;
Ropeobj178006** LOC15;
Ropeobj178006** LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC33;
Ropeobj178006** LOC34;
Ropeobj178006** LOC35;
Ropeobj178006** LOC36;
Ropeobj178006* LOC37;
Ropeobj178006* LOC38;
Ropeobj178006** LOC39;
Ropeobj178006** LOC40;
Ropeobj178006** LOC41;
Ropeobj178006* LOC42;
Ropeobj178006* LOC50;
TY533289 LOC51;
TY178507 LOC52;
TY533289 LOC58;
initname0 = getinitname_562235_839829468((*m0).module);
/* Open the Init proc with header template T839829468_679. */
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = initname0;
prc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_679), LOC1, 1);
/* Declare the TNimNode array when any type nodes were allocated. */
{
TY532811 LOC6;
if (!(((NI) 0) < (*m0).typenodes)) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = (*m0).typenodesname;
LOC6[1] = rope_178401_2381377266(((NI64) ((*m0).typenodes)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_680), LOC6, 2);
}
LA4: ;
/* Likewise for the TNimType array. */
{
TY532811 LOC11;
if (!(((NI) 0) < (*m0).nimtypes)) goto LA9;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = (*m0).nimtypesname;
LOC11[1] = rope_178401_2381377266(((NI64) ((*m0).nimtypes)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_681), LOC11, 2);
}
LA9: ;
LOC12 = (Ropeobj178006*)0;
LOC12 = initgcframe_538435_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC12);
/* Proc section 0: pre-init, init, post-init, in that order. */
LOC13 = (Ropeobj178006*)0;
LOC13 = gensectionstart_530081_2760143328(((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, LOC13);
LOC14 = (Ropeobj178006**)0;
LOC14 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC14));
LOC15 = (Ropeobj178006**)0;
LOC15 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC15));
LOC16 = (Ropeobj178006**)0;
LOC16 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = gensectionend_530116_2760143328(((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, LOC17);
/* Frame push: only once (codegen flag bit 2 latched) and only when
 * init-proc option bit 15 is on. */
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0);
if (!(LOC20)) goto LA21;
LOC20 = !((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 2))&7U)))!=0));
LA21: ;
if (!LOC20) goto LA22;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 2))%(sizeof(NU8)*8));
{
Ropeobj178006* procname0;
Ropeobj178006* LOC28;
Ropeobj178006* LOC29;
if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 0))&7U)))!=0))) goto LA26;
procname0 = makecstring_191638_155036129((*(*(*m0).module).name).s);
LOC28 = (Ropeobj178006*)0;
LOC28 = quotedfilename_196818_155036129((*(*m0).module).info);
LOC29 = (Ropeobj178006*)0;
LOC29 = initframe_560140_839829468((*m0).initproc, procname0, LOC28);
add_178482_2381377266(&prc0, LOC29);
}
goto LA24;
LA26: ;
{
TY533289 LOC31;
Ropeobj178006* LOC32;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_682), LOC31, 0);
add_178482_2381377266(&prc0, LOC32);
}
LA24: ;
}
LA22: ;
/* Proc section 1. */
LOC33 = (Ropeobj178006*)0;
LOC33 = gensectionstart_530081_2760143328(((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, LOC33);
LOC34 = (Ropeobj178006**)0;
LOC34 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC34));
LOC35 = (Ropeobj178006**)0;
LOC35 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC35));
LOC36 = (Ropeobj178006**)0;
LOC36 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC36));
LOC37 = (Ropeobj178006*)0;
LOC37 = gensectionend_530116_2760143328(((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, LOC37);
/* Proc section 2. */
LOC38 = (Ropeobj178006*)0;
LOC38 = gensectionstart_530081_2760143328(((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, LOC38);
LOC39 = (Ropeobj178006**)0;
LOC39 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC39));
LOC40 = (Ropeobj178006**)0;
LOC40 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC40));
LOC41 = (Ropeobj178006**)0;
LOC41 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC41));
LOC42 = (Ropeobj178006*)0;
LOC42 = gensectionend_530116_2760143328(((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, LOC42);
/* Frame pop, mirroring the push condition above (note: bit 0, not 2). */
{
NIM_BOOL LOC45;
Ropeobj178006* LOC49;
LOC45 = (NIM_BOOL)0;
LOC45 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0);
if (!(LOC45)) goto LA46;
LOC45 = !((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 0))&7U)))!=0));
LA46: ;
if (!LOC45) goto LA47;
LOC49 = (Ropeobj178006*)0;
LOC49 = deinitframe_560150_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC49);
}
LA47: ;
LOC50 = (Ropeobj178006*)0;
LOC50 = deinitgcframe_538441_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC50);
/* Close Init (T839829468_683), then open the DatInit proc header. */
memset((void*)LOC51, 0, sizeof(LOC51));
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC51, 0);
memset((void*)LOC52, 0, sizeof(LOC52));
LOC52[0] = getdatinitname_562239_839829468((*m0).module);
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_679), LOC52, 1);
/* Splice file sections 12..16 (inclusive) into the DatInit body. */
{
Tcfilesection529005 i_562401_839829468;
NI res_562482_839829468;
i_562401_839829468 = (Tcfilesection529005)0;
res_562482_839829468 = ((NI) 12);
{
while (1) {
Ropeobj178006* LOC56;
Ropeobj178006* LOC57;
if (!(res_562482_839829468 <= ((NI) 16))) goto LA55;
i_562401_839829468 = ((Tcfilesection529005) (res_562482_839829468));
LOC56 = (Ropeobj178006*)0;
LOC56 = gensectionstart_530015_2760143328(i_562401_839829468);
add_178482_2381377266(&prc0, LOC56);
add_178482_2381377266(&prc0, (*m0).s[(i_562401_839829468)- 0]);
LOC57 = (Ropeobj178006*)0;
LOC57 = gensectionend_530050_2760143328(i_562401_839829468);
add_178482_2381377266(&prc0, LOC57);
res_562482_839829468 += ((NI) 1);
} LA55: ;
}
}
/* Close DatInit and park the whole text in file section 11. */
memset((void*)LOC58, 0, sizeof(LOC58));
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC58, 0);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 11))- 0], prc0);
/* Emit one loader stub per non-nil extensionloaders['0'..'9'] slot;
 * the char loop over 48..57 is the generated form of a Nim range-for. */
{
NIM_CHAR i_562442_839829468;
Ropeobj178006* el_562443_839829468;
TY529136 HEX3Atmp_562487_839829468;
NIM_CHAR i_562490_839829468;
i_562442_839829468 = (NIM_CHAR)0;
el_562443_839829468 = (Ropeobj178006*)0;
memset((void*)HEX3Atmp_562487_839829468, 0, sizeof(HEX3Atmp_562487_839829468));
memcpy((void*)HEX3Atmp_562487_839829468, (NIM_CONST void*)(*m0).extensionloaders, sizeof(HEX3Atmp_562487_839829468));
i_562490_839829468 = 48;
{
if (!((NU8)(((NIM_CHAR) (((NU8)(i_562490_839829468))))) <= (NU8)(57))) goto LA62;
{
while (1) {
i_562442_839829468 = i_562490_839829468;
el_562443_839829468 = HEX3Atmp_562487_839829468[(((NU8)(i_562490_839829468)))- 48];
{
Ropeobj178006* ex0;
TY532811 LOC70;
if (!!((el_562443_839829468 == NIM_NIL))) goto LA68;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC70[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (((NU8)(i_562442_839829468)))) - ((NI) 48)))));
LOC70[1] = el_562443_839829468;
ex0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_684), LOC70, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 11))- 0], ex0);
}
LA68: ;
{
if (!((NU8)(57) <= (NU8)(((NIM_CHAR) (((NU8)(i_562490_839829468))))))) goto LA73;
goto LA64;
}
LA73: ;
i_562490_839829468 += ((NI) 1);
}
} LA64: ;
}
LA62: ;
}
}
/* Machine-generated (Nim compiler C backend), restyled.
 * Drains m0->typestack by generating a type descriptor for each entry.
 * The bound is re-read every iteration, exactly as in the generated
 * original — gettypedesc may push further types onto the stack
 * (TODO confirm against gettypedesc's definition). */
N_NIMCALL(void, finishtypedescriptions_535842_839829468)(Tcgen529027* m0) {
	NI idx;
	for (idx = ((NI) 0); idx < ((*m0).typestack ? (*m0).typestack->Sup.len : 0); idx += ((NI) 1)) {
		Ropeobj178006* desc = gettypedesc_535673_839829468(m0, (*m0).typestack->data[idx]);
		(void)desc; /* result intentionally discarded, as in the original */
	}
}
/* Machine-generated by the Nim compiler's C backend — do not edit by hand.
 * Produces the banner comment placed at the top of a generated C file.
 * With global option bit 4 set (presumably a "short header" switch;
 * verify against TGlobalOption) only the version string is formatted;
 * otherwise the long form also embeds target OS/CPU names, the chosen C
 * compiler, and the compile command for `cfile0`. */
N_NIMCALL(Ropeobj178006*, getcopyright_561665_839829468)(NimStringDesc* cfile0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
/* Short banner: one argument (version rope T839829468_686). */
{
TY178507 LOC5;
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 4))&63U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178277_2381377266(((NimStringDesc*) &T839829468_686));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_685), LOC5, 1);
}
goto LA1;
LA3: ;
/* Long banner: version, OS name, CPU name, C compiler name, and the
 * full compile command (note the `- 1` enum-to-index adjustments). */
{
TY536475 LOC7;
NimStringDesc* LOC8;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rope_178277_2381377266(((NimStringDesc*) &T839829468_686));
LOC7[1] = rope_178277_2381377266(Os_176068_4151366050[(targetos_176629_4151366050)- 1].Field0);
LOC7[2] = rope_178277_2381377266(Cpu_176496_4151366050[(targetcpu_176627_4151366050)- 1].Field0);
LOC7[3] = rope_178277_2381377266(Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field0);
LOC8 = (NimStringDesc*)0;
LOC8 = getcompilecfilecmd_274284_2528170400(cfile0, NIM_FALSE);
LOC7[4] = rope_178277_2381377266(LOC8);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_687), LOC7, 5);
}
LA1: ;
return result0;
}
/* Generated Nim cgen code. Appends the integer-size check/definitions
 * (format string T..._688 plus a newline) to *result0, substituting the
 * target CPU's integer width (Field1 of the CPU descriptor). */
static N_INLINE(void, addinttypes_561659_839829468)(Ropeobj178006** result0) {
NimStringDesc* LOC1;
TY178507 LOC2;
LOC1 = (NimStringDesc*)0;
/* concatenate the static format string with the platform newline */
LOC1 = rawNewString(tnl_176644_4151366050->Sup.len + 22);
appendString(LOC1, ((NimStringDesc*) &T839829468_688));
appendString(LOC1, tnl_176644_4151366050);
memset((void*)LOC2, 0, sizeof(LOC2));
/* single format argument: the target's int bit width */
LOC2[0] = rope_178401_2381377266(((NI64) (Cpu_176496_4151366050[(targetcpu_176627_4151366050)- 1].Field1)));
addf_179205_2381377266(result0, LOC1, LOC2, 1);
}
/* Generated Nim cgen code. Produces the full header block for a
 * generated C file: copyright banner followed by the integer-type
 * definitions for the target. */
N_NIMCALL(Ropeobj178006*, getfileheader_561683_839829468)(NimStringDesc* cfile0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = getcopyright_561665_839829468(cfile0);
addinttypes_561659_839829468(&result0);
return result0;
}
/* Generated Nim cgen code. If thread-variable code has accumulated
 * (nimtv... is non-nil) and this module either has codegen flag bit 1
 * set or its symbol flag bit 12, forces type descriptions for all
 * thread-var dependency types and then emits the TLS declaration
 * (format T..._689) into file section 4 of the module. */
N_NIMCALL(void, generatethreadlocalstorage_538717_839829468)(Tcgen529027* m0) {
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY178507 LOC13;
LOC3 = (NIM_BOOL)0;
/* condition: nimtv... != nil AND (module flag OR symbol flag) */
LOC3 = !((nimtv_538656_839829468 == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*m0).flags &(1U<<((NU)(((Codegenflag529025) 1))&7U)))!=0);
if (LOC5) goto LA6;
LOC5 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
{
Ttype292840* t_538761_839829468;
NI i_538768_839829468;
NI L_538770_839829468;
t_538761_839829468 = (Ttype292840*)0;
i_538768_839829468 = ((NI) 0);
L_538770_839829468 = (nimtvdeps_538674_839829468 ? nimtvdeps_538674_839829468->Sup.len : 0);
{
while (1) {
Ropeobj178006* LOC12;
if (!(i_538768_839829468 < L_538770_839829468)) goto LA11;
t_538761_839829468 = nimtvdeps_538674_839829468->data[i_538768_839829468];
LOC12 = (Ropeobj178006*)0;
/* side effect only: declare each dependent type in this module */
LOC12 = gettypedesc_535673_839829468(m0, t_538761_839829468);
i_538768_839829468 += ((NI) 1);
} LA11: ;
}
}
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = nimtv_538656_839829468;
/* append the thread-local storage block to section 4 */
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 4))- 0], ((NimStringDesc*) &T839829468_689), LOC13, 1);
}
LA7: ;
}
/* Generated Nim cgen code. Writes the header-include block of the
 * module into file section 1: a fixed preamble, then each entry of
 * m0->headerfiles is emitted verbatim when it starts with '#'
 * (backticks translated to '"'), unquoted when it already starts with
 * '"' or '<', and with default quoting otherwise. */
N_NIMCALL(void, generateheaders_560104_839829468)(Tcgen529027* m0) {
NimStringDesc* LOC1;
Tstrentry147009* it0;
LOC1 = (NimStringDesc*)0;
/* preamble: newline + static string T..._690 + newline */
LOC1 = rawNewString(tnl_176644_4151366050->Sup.len + tnl_176644_4151366050->Sup.len + 20);
appendString(LOC1, tnl_176644_4151366050);
appendString(LOC1, ((NimStringDesc*) &T839829468_690));
appendString(LOC1, tnl_176644_4151366050);
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], LOC1);
/* walk the singly linked list of header file entries */
it0 = ((Tstrentry147009*) ((*m0).headerfiles.head));
{
while (1) {
if (!!((it0 == NIM_NIL))) goto LA3;
{
NimStringDesc* LOC8;
NimStringDesc* LOC9;
Ropeobj178006* LOC10;
/* case 1: entry begins with '#' (35): raw directive */
if (!((NU8)((*it0).data->data[((NI) 0)]) == (NU8)(35))) goto LA6;
LOC8 = (NimStringDesc*)0;
LOC9 = (NimStringDesc*)0;
/* replace backtick (96) with double quote (34) */
LOC9 = nsuReplaceChar((*it0).data, 96, 34);
LOC8 = rawNewString(LOC9->Sup.len + tnl_176644_4151366050->Sup.len + 0);
appendString(LOC8, LOC9);
appendString(LOC8, tnl_176644_4151366050);
LOC10 = (Ropeobj178006*)0;
LOC10 = rope_178277_2381377266(LOC8);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], LOC10);
}
goto LA4;
LA6: ;
{
TY178507 LOC14;
/* case 2: entry already starts with '"' (34) or '<' (60) */
if (!!((((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(34)) || ((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(60))))) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_178277_2381377266((*it0).data);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], ((NimStringDesc*) &T839829468_691), LOC14, 1);
}
goto LA4;
LA12: ;
{
TY178507 LOC16;
/* case 3: bare name — emit with the default quoting format */
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rope_178277_2381377266((*it0).data);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], ((NimStringDesc*) &T839829468_692), LOC16, 1);
}
LA4: ;
it0 = ((Tstrentry147009*) ((*it0).Sup.next));
} LA3: ;
}
}
/* Generated Nim cgen code. Assembles the complete C source text of a
 * module: file header, merge info, TLS + header sections, then all
 * file sections 1..10 (each wrapped in section start/end markers),
 * and finally section 11. */
N_NIMCALL(Ropeobj178006*, genmodule_562491_839829468)(Tcgen529027* m0, NimStringDesc* cfile0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
result0 = getfileheader_561683_839829468(cfile0);
LOC1 = (Ropeobj178006*)0;
LOC1 = genmergeinfo_530203_2760143328(m0);
add_178482_2381377266(&result0, LOC1);
/* these populate sections in m0->s before they are flushed below */
generatethreadlocalstorage_538717_839829468(m0);
generateheaders_560104_839829468(m0);
{
Tcfilesection529005 i_562614_839829468;
NI res_562622_839829468;
i_562614_839829468 = (Tcfilesection529005)0;
res_562622_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC5;
Ropeobj178006* LOC6;
/* sections 1 through 10 inclusive */
if (!(res_562622_839829468 <= ((NI) 10))) goto LA4;
i_562614_839829468 = ((Tcfilesection529005) (res_562622_839829468));
LOC5 = (Ropeobj178006*)0;
LOC5 = gensectionstart_530015_2760143328(i_562614_839829468);
add_178482_2381377266(&result0, LOC5);
add_178482_2381377266(&result0, (*m0).s[(i_562614_839829468)- 0]);
LOC6 = (Ropeobj178006*)0;
LOC6 = gensectionend_530050_2760143328(i_562614_839829468);
add_178482_2381377266(&result0, LOC6);
res_562622_839829468 += ((NI) 1);
} LA4: ;
}
}
/* section 11 is appended without start/end markers */
add_178482_2381377266(&result0, (*m0).s[(((Tcfilesection529005) 11))- 0]);
return result0;
}
/* Generated Nim cgen code. For a module restored from the compilation
 * cache: when a merge is required and the module is not the main one
 * (symbol flag bit 12 clear), merges the old file, regenerates init
 * code and type descriptions, writes the new C file and queues it for
 * compilation. The object file is always queued for linking. */
N_NIMCALL(void, updatecachedmodule_563813_839829468)(Tcgen529027* m0) {
NimStringDesc* cfile0;
NimStringDesc* cfilenoext0;
cfile0 = getcfile_563201_839829468(m0);
cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490));
{
NIM_BOOL LOC3;
Ropeobj178006* code0;
LOC3 = (NIM_BOOL)0;
LOC3 = mergerequired_530832_2760143328(m0);
if (!(LOC3)) goto LA4;
/* short-circuit AND: also require symbol flag 12 to be clear */
LOC3 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
LA4: ;
if (!LOC3) goto LA5;
mergefiles_531241_2760143328(cfile0, m0);
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
code0 = genmodule_562491_839829468(m0, cfile0);
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
addfiletocompile_273863_2528170400(cfile0);
}
LA5: ;
/* unconditionally link the (possibly unchanged) object file */
addfiletolink_273872_2528170400(cfilenoext0);
}
/* Generated Nim cgen code. When thread variables exist, emits the
 * NimThreadVars size accessor (format T..._694) into file section 10,
 * prefixed with an extern-"C" marker (T..._693) when the command is
 * not command 2 and the module carries symbol flag 27; otherwise an
 * empty prefix (T..._490). */
N_NIMCALL(void, generatethreadvarssize_538771_839829468)(Tcgen529027* m0) {
{
NimStringDesc* externc0;
TY178507 LOC12;
if (!!((nimtv_538656_839829468 == NIM_NIL))) goto LA3;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = !((gcmd_169132_2607990831 == ((Tcommands169076) 2)));
if (!(LOC7)) goto LA8;
LOC7 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA8: ;
if (!LOC7) goto LA9;
externc0 = copyString(((NimStringDesc*) &T839829468_693));
}
goto LA5;
LA9: ;
{
/* T..._490 is the empty-string constant used elsewhere in this file */
externc0 = copyString(((NimStringDesc*) &T839829468_490));
}
LA5: ;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178277_2381377266(externc0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], ((NimStringDesc*) &T839829468_694), LOC12, 1);
}
LA3: ;
}
/* Generated Nim cgen code. Writes `code0` to `cfile0` and decides
 * whether the C file must be recompiled. Default result is true.
 * Normal path (global option bit 1 clear): write only if the content
 * changed; if it did change, recompile; if unchanged and a newer
 * object file exists, skip recompilation. With option bit 1 set the
 * file is always (re)written and recompiled. */
N_NIMCALL(NIM_BOOL, shouldrecompile_563621_839829468)(Ropeobj178006* code0, NimStringDesc* cfile0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
result0 = NIM_TRUE;
{
NimStringDesc* objfile0;
if (!!(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 1))&63U)))!=0))) goto LA3;
objfile0 = toobjfile_273859_2528170400(cfile0);
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
/* true when the file content changed and was rewritten */
LOC7 = writeropeifnotequal_179511_2381377266(code0, cfile0);
if (!LOC7) goto LA8;
/* changed -> keep result true (recompile) and return early */
goto BeforeRet;
}
LA8: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = nosexistsFile(objfile0);
if (!(LOC12)) goto LA13;
/* object exists AND is newer than the C file -> no recompile */
LOC12 = nosfileNewer(objfile0, cfile0);
LA13: ;
if (!LOC12) goto LA14;
result0 = NIM_FALSE;
}
LA14: ;
}
goto LA1;
LA3: ;
{
/* forced path: always rewrite the file */
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
}
LA1: ;
}BeforeRet: ;
return result0;
}
/* Generated Nim cgen code. Writes out a module's C file following one
 * of three paths:
 *  1) fresh module (or global option bit 1 set): generate init code,
 *     main-module extras, full source; recompile only when needed;
 *  2) pending cached module needing a merge (and not the main module):
 *     merge, regenerate and always write + compile;
 *  3) otherwise: compile only when the object file is missing.
 * The object file is always queued for linking. */
N_NIMCALL(void, writemodule_563637_839829468)(Tcgen529027* m0, NIM_BOOL pending0) {
NimStringDesc* cfile0;
NimStringDesc* cfilenoext0;
cfile0 = getcfile_563201_839829468(m0);
cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490));
{
NIM_BOOL LOC3;
Ropeobj178006* code0;
LOC3 = (NIM_BOOL)0;
/* path 1 condition: not from cache OR forced by option bit 1 */
LOC3 = !((*m0).Sup.fromcache);
if (LOC3) goto LA4;
LOC3 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 1))&63U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
{
/* main module (symbol flag 12): add main procs and TLS size query */
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA9;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], mainmodprocs_529148_3723162438);
generatethreadvarssize_538771_839829468(m0);
}
LA9: ;
code0 = genmodule_562491_839829468(m0, cfile0);
{
NIM_BOOL LOC13;
LOC13 = (NIM_BOOL)0;
/* shouldrecompile also performs the actual file write */
LOC13 = shouldrecompile_563621_839829468(code0, cfile0);
if (!LOC13) goto LA14;
addfiletocompile_273863_2528170400(cfile0);
}
LA14: ;
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
Ropeobj178006* code0;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
/* path 2 condition: pending AND merge required AND not main module */
LOC18 = pending0;
if (!(LOC18)) goto LA19;
LOC18 = mergerequired_530832_2760143328(m0);
LA19: ;
LOC17 = LOC18;
if (!(LOC17)) goto LA20;
LOC17 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
LA20: ;
if (!LOC17) goto LA21;
mergefiles_531241_2760143328(cfile0, m0);
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
code0 = genmodule_562491_839829468(m0, cfile0);
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
addfiletocompile_273863_2528170400(cfile0);
}
goto LA1;
LA21: ;
{
NimStringDesc* LOC24;
NIM_BOOL LOC25;
LOC24 = (NimStringDesc*)0;
LOC24 = toobjfile_273859_2528170400(cfilenoext0);
LOC25 = (NIM_BOOL)0;
LOC25 = nosexistsFile(LOC24);
/* path 3: compile only if the object file is missing */
if (!!(LOC25)) goto LA26;
addfiletocompile_273863_2528170400(cfile0);
}
goto LA1;
LA26: ;
LA1: ;
addfiletolink_273872_2528170400(cfilenoext0);
}
/* Generated Nim cgen code. Writes the exported C header for module m0:
 * copyright, an include guard derived from the file's base name,
 * integer types, sections 1..10 plus 11, an optional extra chunk when
 * global option bit 8 is set, a trailer (T..._697), and the closing
 * guard (T..._698). */
N_NIMCALL(void, writeheader_563149_839829468)(Tcgen529027* m0) {
Ropeobj178006* result0;
Ropeobj178006* guard0;
TY178507 LOC1;
TY128506 LOC2;
TY178507 LOC3;
TY533289 LOC13;
TY178507 LOC14;
result0 = getcopyright_561665_839829468((*m0).filename);
memset((void*)LOC1, 0, sizeof(LOC1));
memset((void*)(&LOC2), 0, sizeof(LOC2));
/* split the path; Field1 is presumably the base name — used for the guard */
nossplitFile((*m0).filename, (&LOC2));
LOC1[0] = rope_178277_2381377266(LOC2.Field1);
guard0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_695), LOC1, 1);
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = guard0;
/* opening include guard */
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_696), LOC3, 1);
addinttypes_561659_839829468(&result0);
generateheaders_560104_839829468(m0);
generatethreadlocalstorage_538717_839829468(m0);
{
Tcfilesection529005 i_563171_839829468;
NI res_563197_839829468;
i_563171_839829468 = (Tcfilesection529005)0;
res_563197_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
/* sections 1 through 10, same layout as genmodule */
if (!(res_563197_839829468 <= ((NI) 10))) goto LA6;
i_563171_839829468 = ((Tcfilesection529005) (res_563197_839829468));
LOC7 = (Ropeobj178006*)0;
LOC7 = gensectionstart_530015_2760143328(i_563171_839829468);
add_178482_2381377266(&result0, LOC7);
add_178482_2381377266(&result0, (*m0).s[(i_563171_839829468)- 0]);
LOC8 = (Ropeobj178006*)0;
LOC8 = gensectionend_530050_2760143328(i_563171_839829468);
add_178482_2381377266(&result0, LOC8);
res_563197_839829468 += ((NI) 1);
} LA6: ;
}
}
add_178482_2381377266(&result0, (*m0).s[(((Tcfilesection529005) 11))- 0]);
{
/* extra snippet (T..._22) gated on global option bit 8 */
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 8))&63U)))!=0)) goto LA11;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_22));
}
LA11: ;
memset((void*)LOC13, 0, sizeof(LOC13));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_697), LOC13, 0);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = guard0;
/* closing #endif of the include guard */
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_698), LOC14, 1);
writerope_178836_2381377266(result0, (*m0).filename, NIM_FALSE);
}
/* Generated Nim cgen code. Top-level flush of the C code generator:
 * finish the generated header module (if any); repeatedly finish
 * non-cached modules while forwarded procs remain; then write every
 * module (cached ones via updatecachedmodule, others via writemodule);
 * finally write the file mapping and the exported header. */
N_NIMCALL(void, cgenwritemodules_563902_839829468)(void) {
{
if (!!((generatedheader_532201_839829468 == NIM_NIL))) goto LA3;
finishmodule_563420_839829468(generatedheader_532201_839829468);
}
LA3: ;
{
/* keep finishing modules until no forwarded procs are pending */
while (1) {
if (!(((NI) 0) < gforwardedprocscounter_529171_3723162438)) goto LA6;
{
Tcgen529027* m_563916_839829468;
m_563916_839829468 = (Tcgen529027*)0;
{
NI i_563935_839829468;
NI HEX3Atmp_563937_839829468;
NI res_563939_839829468;
i_563935_839829468 = (NI)0;
HEX3Atmp_563937_839829468 = (NI)0;
HEX3Atmp_563937_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563939_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563939_839829468 <= HEX3Atmp_563937_839829468)) goto LA10;
i_563935_839829468 = res_563939_839829468;
{
/* skip nil slots in the module list */
if (!!((gmodules_529170_3723162438->data[i_563935_839829468] == NIM_NIL))) goto LA13;
m_563916_839829468 = gmodules_529170_3723162438->data[i_563935_839829468];
{
/* cached modules are not re-finished here */
if (!!((*m_563916_839829468).Sup.fromcache)) goto LA17;
finishmodule_563420_839829468(m_563916_839829468);
}
LA17: ;
}
LA13: ;
res_563939_839829468 += ((NI) 1);
} LA10: ;
}
}
}
} LA6: ;
}
{
Tcgen529027* m_563917_839829468;
m_563917_839829468 = (Tcgen529027*)0;
{
NI i_563946_839829468;
NI HEX3Atmp_563948_839829468;
NI res_563950_839829468;
i_563946_839829468 = (NI)0;
HEX3Atmp_563948_839829468 = (NI)0;
HEX3Atmp_563948_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563950_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563950_839829468 <= HEX3Atmp_563948_839829468)) goto LA22;
i_563946_839829468 = res_563950_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563946_839829468] == NIM_NIL))) goto LA25;
m_563917_839829468 = gmodules_529170_3723162438->data[i_563946_839829468];
{
/* cached vs fresh module: different write paths */
if (!(*m_563917_839829468).Sup.fromcache) goto LA29;
updatecachedmodule_563813_839829468(m_563917_839829468);
}
goto LA27;
LA29: ;
{
writemodule_563637_839829468(m_563917_839829468, NIM_TRUE);
}
LA27: ;
}
LA25: ;
res_563950_839829468 += ((NI) 1);
} LA22: ;
}
}
}
writemapping_274789_2528170400(gmapping_529152_3723162438);
{
if (!!((generatedheader_532201_839829468 == NIM_NIL))) goto LA34;
writeheader_563149_839829468(generatedheader_532201_839829468);
}
LA34: ;
}
/* Generated Nim cgen code. Sets all 18 file-section rope slots
 * (indices 0..17) of a section array to nil with GC-aware assignment. */
N_NIMCALL(void, nullify_562833_839829468)(Ropeobj178006** arr0) {
{
Tcfilesection529005 i_562848_839829468;
NI res_562853_839829468;
i_562848_839829468 = (Tcfilesection529005)0;
res_562853_839829468 = ((NI) 0);
{
while (1) {
if (!(res_562853_839829468 <= ((NI) 17))) goto LA3;
i_562848_839829468 = ((Tcfilesection529005) (res_562853_839829468));
/* unsureAsgnRef keeps the GC write barrier consistent */
unsureAsgnRef((void**) (&arr0[(i_562848_839829468)- 0]), NIM_NIL);
res_562853_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Generated Nim cgen code. Sets the extension-loader rope slots,
 * indexed by the characters '0'..'9' (ASCII 48..57, rebased to 0..9),
 * to nil with GC-aware assignment. */
N_NIMCALL(void, nullify_562858_839829468)(Ropeobj178006** arr0) {
{
NIM_CHAR i_563014_839829468;
NI res_563019_839829468;
i_563014_839829468 = (NIM_CHAR)0;
res_563019_839829468 = ((NI) 48);
{
while (1) {
if (!(res_563019_839829468 <= ((NI) 57))) goto LA3;
i_563014_839829468 = ((NIM_CHAR) (res_563019_839829468));
/* '- 48' rebases the char index into the 0-based array */
unsureAsgnRef((void**) (&arr0[(((NU8)(i_563014_839829468)))- 48]), NIM_NIL);
res_563019_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Generated Nim cgen code. Re-initializes a cgen module for reuse
 * (incremental compilation): resets header lists, caches, init procs,
 * type/proc seqs, temp names, codegen flag 0 (mirroring symbol flag
 * 13), clears all sections and extension loaders, and marks the
 * module as coming from the cache. */
N_NIMCALL(void, resetmodule_562763_839829468)(Tcgen529027* m0) {
initlinkedlist_147031_3771138726((&(*m0).headerfiles));
initintset_268885_2627731572((&(*m0).declaredprotos));
initidtable_296019_850551059((&(*m0).forwtypecache));
asgnRef((void**) (&(*m0).initproc), newproc_529206_3723162438(NIM_NIL, m0));
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
asgnRef((void**) (&(*m0).preinitproc), newpreinitproc_562625_839829468(m0));
asgnRef((void**) (&(*m0).postinitproc), newpostinitproc_562630_839829468(m0));
initnodetable_296085_850551059((&(*m0).datacache));
/* replace seqs with fresh empty ones, releasing the old refs */
if ((*m0).typestack) nimGCunrefNoCycle((*m0).typestack);
(*m0).typestack = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
if ((*m0).forwardedprocs) nimGCunrefNoCycle((*m0).forwardedprocs);
(*m0).forwardedprocs = (Tsymseq292804*) newSeqRC1((&NTI292804), 0);
asgnRefNoCycle((void**) (&(*m0).typenodesname), gettempname_533598_839829468(m0));
asgnRefNoCycle((void**) (&(*m0).nimtypesname), gettempname_533598_839829468(m0));
{
/* codegen flag bit 0 tracks symbol flag bit 13 of the module */
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA3;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 0))%(sizeof(NU8)*8));
}
goto LA1;
LA3: ;
{
(*m0).flags &= ~(((NU8)1) << ((((Codegenflag529025) 0)) % (sizeof(NU8)*8)));
}
LA1: ;
nullify_562833_839829468((*m0).s);
(*m0).typenodes = ((NI) 0);
(*m0).nimtypes = ((NI) 0);
nullify_562858_839829468((*m0).extensionloaders);
(*m0).Sup.fromcache = NIM_TRUE;
}
/* Generated Nim cgen code. Calls resetmodule on every non-nil entry
 * of the global module list. */
N_NIMCALL(void, resetcgenmodules_563024_839829468)(void) {
{
Tcgen529027* m_563026_839829468;
m_563026_839829468 = (Tcgen529027*)0;
{
NI i_563031_839829468;
NI HEX3Atmp_563033_839829468;
NI res_563035_839829468;
i_563031_839829468 = (NI)0;
HEX3Atmp_563033_839829468 = (NI)0;
/* upper bound: len-1, or -1 when the seq is nil (loop then skips) */
HEX3Atmp_563033_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563035_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563035_839829468 <= HEX3Atmp_563033_839829468)) goto LA4;
i_563031_839829468 = res_563035_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563031_839829468] == NIM_NIL))) goto LA7;
m_563026_839829468 = gmodules_529170_3723162438->data[i_563031_839829468];
resetmodule_562763_839829468(m_563026_839829468);
}
LA7: ;
res_563035_839829468 += ((NI) 1);
} LA4: ;
}
}
}
}
/* Generated Nim cgen code. Module init for the compiler's cgen unit:
 * registers GC markers for the module's global roots and initializes
 * the thread-var bookkeeping globals. */
NIM_EXTERNC N_NOINLINE(void, compiler_cgenInit000)(void) {
nimRegisterGlobalMarker(T839829468_2);
nimRegisterGlobalMarker(T839829468_3);
nimRegisterGlobalMarker(T839829468_5);
nimRegisterGlobalMarker(T839829468_6);
nimRegisterGlobalMarker(T839829468_7);
nimRegisterGlobalMarker(T839829468_8);
asgnRefNoCycle((void**) (&indent_532655_839829468), rope_178277_2381377266(((NimStringDesc*) &T839829468_4)));
if (nimtvdeps_538674_839829468) nimGCunrefNoCycle(nimtvdeps_538674_839829468);
nimtvdeps_538674_839829468 = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
/* reset-and-init of the declared-thread-vars set */
chckNil((void*)(&nimtvdeclared_538675_839829468));
genericReset((void*)(&nimtvdeclared_538675_839829468), (&NTI268030));
initintset_268885_2627731572((&nimtvdeclared_538675_839829468));
breakpointid_548860_839829468 = ((NI) 0);
}
/* Generated Nim cgen code. Data-section init hook; intentionally empty
 * for this module. */
NIM_EXTERNC N_NOINLINE(void, compiler_cgenDatInit000)(void) {
}
|
smooth.multi.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <R.h>
#include <Rmath.h>
#include <Rinternals.h>
#if defined _OPENMP
#include <omp.h>
#endif
// find the maximum value
/* Return the largest element of A[0..n-1]; -INFINITY for n == 0. */
static inline double maxDouble( double * A, const size_t n) {
    double best = -INFINITY;
    for( size_t idx = 0; idx < n; idx++ ) {
        if( A[idx] > best ) {
            best = A[idx];
        }
    }
    return best;
}
// find the minimum value
/* Return the smallest element of A[0..n-1]; INFINITY for n == 0. */
static inline double minDouble( double * A, const size_t n) {
    double best = INFINITY;
    for( size_t idx = 0; idx < n; idx++ ) {
        if( A[idx] < best ) {
            best = A[idx];
        }
    }
    return best;
}
/* swap function */
/* Exchange A[b] and A[c] in place. */
static inline void swap( double * A, const size_t b, const size_t c) {
    const double held = A[b];
    A[b] = A[c];
    A[c] = held;
}
/* partition function */
/* Lomuto partition of A[left..right] around the value at A[pivot].
 * Elements strictly smaller than the pivot value end up to its left;
 * returns the pivot's final index. The tiny swap helper is inlined
 * here so the routine is self-contained. */
static inline size_t partition( double * A, const size_t left, const size_t right, const size_t pivot) {
    const double pivotValue = A[pivot];
    double hold;
    /* park the pivot element at the right end */
    hold = A[pivot]; A[pivot] = A[right]; A[right] = hold;
    size_t store = left; /* next slot for a below-pivot element */
    for( size_t scan = left; scan < right; scan++ ) {
        if( A[scan] < pivotValue ) {
            hold = A[scan]; A[scan] = A[store]; A[store] = hold;
            store++;
        }
    }
    /* move the pivot into its final position */
    hold = A[right]; A[right] = A[store]; A[store] = hold;
    return store;
}
//Quickselect
// right should be equal to n-1 and left should be 0 when searching all of A
/* Quickselect: return the k-th smallest element of A[left..right].
 * A is reordered in place. Call with left = 0 and right = n-1 to
 * search all of A. */
static inline double quantile_quickSelect( double * A, size_t left, size_t right, const size_t k) {
    for(;;) {
        /* a single-element range is its own answer */
        if( left == right ) return A[left];
        /* partition around the middle element of the current range */
        const size_t split = partition( A, left, right, (left+right)/2 );
        if( split == k ) {
            return A[k];
        }
        /* recurse iteratively into the half that contains index k */
        if( k < split ) {
            right = split - 1;
        } else {
            left = split + 1;
        }
    }
}
/* integer max */
/* Return the larger of two ints. */
int intMax ( int x, int y) {
    if( x > y ) {
        return x;
    }
    return y;
}
/* integer min */
/* Return the smaller of two ints. */
int intMin ( int x, int y) {
    if( x < y ) {
        return x;
    }
    return y;
}
/* modal kernel */
/* modal kernel: weighted mode of the non-negative values of x in a
 * dRow x dCol window centered at (i,j), with ties broken at random.
 *
 * x     raster image (row-major, nRow x nCol), integer categories
 * W     precomputed spatial weights (dRow x dCol)
 * i, j  current row/column location
 * dRow, dCol  window dimensions
 * nRow, nCol  image dimensions
 *
 * Returns the category with the largest summed weight, or -1 when the
 * window holds no non-negative value (or on allocation failure).
 * Negative cells (which include R's NA_INTEGER) are skipped by the
 * `>= 0` filter below.
 *
 * NOTE(review): runif() uses R's global RNG, which is not thread-safe;
 * callers invoke this from OpenMP parallel loops — confirm acceptable.
 */
double modalKernel(
    int * x, /* raster image */
    double * W, /* pre computed spatial weights */
    size_t i, /* current location in columns */
    size_t j, /* current location in rows */
    size_t dRow,
    size_t dCol,
    size_t nRow, /* number of Rows */
    size_t nCol /* number of Columns */
) {
    /* adjustment that must be applied for edge effects */
    size_t k, l;
    size_t M = 0; /* number of distinct categories seen so far */
    size_t m = 0;
    double maxValue = -INFINITY; /* used to determine max weighted value */
    int mu = 0;
    size_t k_start;
    size_t k_stop;
    size_t l_start;
    size_t l_stop;
    size_t k_local;
    size_t l_local;
    /* distinct categories and their accumulated weights */
    int * maxArray = (int *) calloc( dRow * dCol, sizeof(int) );
    double * maxArrayValue = (double *) calloc( dRow * dCol, sizeof(double) );
    /* handle tie breaks */
    double tieBreak;
    double maxTie = runif(0.0,1.0);
    if( maxArray == NULL || maxArrayValue == NULL ) {
        /* allocation failure: behave like the all-NA case */
        free(maxArray);
        free(maxArrayValue);
        return( -1 );
    }
    /* the starts (clamped at the image edges) */
    if( i < dRow/2 ) {
        k_start = 0;
    } else {
        k_start = i - dRow/2 ;
    }
    if( j < dCol/2 ) {
        l_start = 0;
    } else {
        l_start = j - dCol/2 ;
    }
    /* the stops (clamped at the image edges) */
    if( i + dRow/2 + 1 > nRow ) {
        k_stop = nRow;
    } else {
        k_stop = i + dRow/2 + 1;
    }
    if( j + dCol/2 + 1 > nCol ) {
        l_stop = nCol;
    } else {
        l_stop = j + dCol/2 + 1;
    }
    /* k indexes the image, k_local the corresponding weight cell */
    for(
        k=k_start,
        k_local=k_start - i + (dRow/2);
        k < k_stop;
        k++, k_local++
    ) {
        for(
            l=l_start,
            l_local=l_start -j + (dCol/2);
            l < l_stop;
            l++, l_local++
        ) {
            /* (previous `x[..] == NAN` check removed: an int never
             * compares equal to NAN, so it was dead code; R integer
             * NAs are negative and already excluded below) */
            if( x[k*nCol + l] >= 0 ) { /* only run over non-negative values */
                for(m=0; m < M; m++) {
                    /* increment found values */
                    if( maxArray[m] == x[k*nCol + l] ) {
                        maxArrayValue[m] += W[ k_local*dCol + l_local];
                        break;
                    }
                }
                /* if the value is not found add it */
                if( m == M) {
                    maxArray[m] = x[k*nCol + l ];
                    maxArrayValue[m] = W[ k_local*dCol + l_local];
                    M++;
                }
            }
        }
    }
    /* handle the all NA case */
    if( M == 0 ) {
        free(maxArray);
        free(maxArrayValue);
        return( -1 ) ;
    }
    /* determine max value; equal weights win with random probability */
    for(m=0; m < M ; m++) {
        if( maxArrayValue[m] > maxValue ) {
            maxValue = maxArrayValue[m];
            mu = maxArray[m];
            // handle ties
        } else if( maxArrayValue[m] == maxValue ) {
            tieBreak = runif(0.0, 1.0);
            if( tieBreak > maxTie ) {
                maxValue = maxArrayValue[m];
                mu = maxArray[m];
                maxTie = tieBreak;
            }
        }
    }
    free(maxArray);
    free(maxArrayValue);
    return( mu ) ;
}
/* quantile kernel */
/* quantile kernel: Type-1 sample quantile of the values of x within a
 * dRow x dCol window centered at (i,j), restricted to cells whose
 * weight is positive.
 *
 * x        image (row-major, nRow x nCol)
 * W        precomputed spatial weights (dRow x dCol)
 * quantile requested quantile in [0,1]
 * i, j     current row/column location
 *
 * Returns the quantile, or NAN when no usable cell is in the window
 * (or on allocation failure).
 *
 * Bug fix: the NaN filter used `x[..] == NAN`, which is always false
 * (NaN compares unequal to everything), so NaN cells leaked into the
 * quantile computation. It now uses isnan(). */
double quantileKernel(
    double * x, /* naip image */
    double * W, /* pre computed spatial weights */
    double quantile, /* quantile */
    size_t i, /* current location in columns */
    size_t j, /* current location in rows */
    size_t dRow,
    size_t dCol,
    size_t nRow, /* number of Rows */
    size_t nCol /* number of Columns */
) {
    /* adjustment that must be applied for edge effects */
    size_t k, l;
    size_t quantile_t; /* size_t quantile */
    size_t k_start;
    size_t k_stop;
    size_t l_start;
    size_t l_stop;
    double tmp;
    // create a copy of the data, it will be mutated by the quantile call
    double * tmpArray = (double *) calloc( dRow * dCol, sizeof(double) );
    double mu;
    int m = 0; /* number of values gathered into tmpArray */
    size_t k_local;
    size_t l_local;
    if( tmpArray == NULL ) return( NAN );
    /* the starts (clamped at the image edges) */
    if( i < dRow/2 ) {
        k_start = 0;
    } else {
        k_start = i - dRow/2 ;
    }
    if( j < dCol/2 ) {
        l_start = 0;
    } else {
        l_start = j - dCol/2 ;
    }
    /* the stops (clamped at the image edges) */
    if( i + dRow/2 + 1 > nRow ) {
        k_stop = nRow;
    } else {
        k_stop = i + dRow/2 + 1;
    }
    if( j + dCol/2 + 1 > nCol ) {
        l_stop = nCol;
    } else {
        l_stop = j + dCol/2 + 1;
    }
    /* k indexes the image, k_local the corresponding weight cell */
    for(
        k=k_start,
        k_local=k_start - i + (dRow/2);
        k < k_stop;
        k++, k_local++
    ) {
        for(
            l=l_start,
            l_local=l_start -j + (dCol/2);
            l < l_stop;
            l++, l_local++
        ) {
            /* was `== NAN` (always false); isnan() actually skips NaNs */
            if( isnan( x[k*nCol + l] ) ) continue;
            // only consider elements with positive valued weights
            if( W[ k_local*dCol + l_local] > 0 ) {
                tmpArray[m] = x[k*nCol + l];
                m++;
            }
        }
    }
    if ( m > 0) {
        /* get the index corresponding to the quantile */
        /* first take care of edge cases */
        if( quantile == 0.0 ) {
            mu = minDouble( tmpArray, m);
        } else if (quantile == 1.0) {
            mu = maxDouble( tmpArray, m);
        } else {
            /* per Type 1 definition */
            tmp = ((double) m) * quantile;
            if( fabs( tmp - floor( tmp ) ) == 0.0 ) { // does R use machine epsilon?
                /* m * quantile is an integer: take that order statistic */
                quantile_t = ((size_t) tmp) -1;
            } else {
                quantile_t = ((size_t) tmp) ;
            }
            mu = quantile_quickSelect( tmpArray, 0, m-1, quantile_t);
        }
    } else {
        mu = NAN;
    }
    free(tmpArray);
    return( mu ) ;
}
/* generic kernel */
/* mean kernel: weighted mean of x within a dRow x dCol window centered
 * at (i,j), normalized by the sum of the weights actually used.
 *
 * x    image (row-major, nRow x nCol)
 * var  unused here; kept for a uniform kernel signature
 * W    precomputed spatial weights (dRow x dCol)
 *
 * If the center cell is +/-inf or NaN, that value is returned
 * unchanged; otherwise non-finite neighbors are skipped.
 *
 * Bug fix: the NaN tests used `== NAN`, which is always false (NaN
 * never compares equal), so NaN cells poisoned the mean. They now use
 * isnan(). */
double meanKernel(
    double * x, /* naip image */
    double * var, /* */
    double * W, /* pre computed spatial weights */
    size_t i, /* current location in columns */
    size_t j, /* current location in rows */
    size_t dRow,
    size_t dCol,
    size_t nRow, /* number of Rows */
    size_t nCol /* number of Columns */
) {
    /* adjustment that must be applied for edge effects */
    size_t k, l;
    size_t k_start;
    size_t k_stop;
    size_t l_start;
    size_t l_stop;
    double w = 0; /* total weight, used to make weight adjustments */
    double mu = 0;
    size_t k_local;
    size_t l_local;
    /* the starts (clamped at the image edges) */
    if( i < dRow/2 ) {
        k_start = 0;
    } else {
        k_start = i - dRow/2 ;
    }
    if( j < dCol/2 ) {
        l_start = 0;
    } else {
        l_start = j - dCol/2 ;
    }
    /* the stops (clamped at the image edges) */
    if( i + dRow/2 + 1 > nRow ) {
        k_stop = nRow;
    } else {
        k_stop = i + dRow/2 + 1;
    }
    if( j + dCol/2 + 1 > nCol ) {
        l_stop = nCol;
    } else {
        l_stop = j + dCol/2 + 1;
    }
    /* pass non-finite center values straight through */
    if( x[i*nCol + j] == INFINITY ) return( INFINITY);
    if( x[i*nCol + j] == -INFINITY ) return( -INFINITY);
    if( isnan( x[i*nCol + j] ) ) return( NAN); /* was `== NAN`: dead code */
    /* weighted sum over the window; k_local/l_local index the weights */
    for(
        k=k_start,
        k_local=k_start - i + (dRow/2);
        k < k_stop;
        k++, k_local++
    ) {
        for(
            l=l_start,
            l_local=l_start -j + (dCol/2);
            l < l_stop;
            l++, l_local++
        ) {
            if( x[k * nCol + l] == INFINITY ) continue;
            if( x[k * nCol + l] == -INFINITY ) continue;
            if( isnan( x[k * nCol + l] ) ) continue; /* was `== NAN` */
            mu += x[k * nCol + l] * W[ k_local*dCol + l_local];
            w += W[ k_local*dCol + l_local];
        }
    }
    return( mu/w ) ;
}
/* generic kernel */
double gaussianKernel(
double * x, /* naip image */
double hInv, /* pre computed spatial weights */
size_t i, /* current location in columns */
size_t j, /* current location in rows */
size_t dRow,
size_t dCol,
size_t nRow, /* number of Rows */
size_t nCol /* number of Columns */
) {
/* adjustment that must be applied for edge effects */
size_t k, l;
size_t k_start;
size_t k_stop;
size_t l_start;
size_t l_stop;
double w = 0; /* total weight, used to make weight adjustments */
double w2 = 0;
double mu = 0;
/* the starts */
if( i < dRow/2 ) {
k_start = 0;
} else {
k_start = i - dRow/2 ;
}
if( j < dCol/2 ) {
l_start = 0;
} else {
l_start = j - dCol/2 ;
}
/* the stops */
if( i + dRow/2 + 1 > nRow ) {
k_stop = nRow;
} else {
k_stop = i + dRow/2 + 1;
}
if( j + dCol/2 + 1 > nCol ) {
l_stop = nCol;
} else {
l_stop = j + dCol/2 + 1;
}
if( x[i*nCol + j] == INFINITY ) return( INFINITY);
if( x[i*nCol + j] == -INFINITY ) return( -INFINITY);
if( x[i*nCol + j] == NAN ) return( NAN);
/* first pass variance */
for( k=k_start; k < k_stop; k++) {
for( l=l_start; l < l_stop; l++) {
if( x[k * nCol + l] == INFINITY ) continue;
if( x[k * nCol + l] == -INFINITY ) continue;
if( x[k * nCol + l] == NAN ) continue;
w = (x[k * nCol + l] - x[i * nCol + j]) *hInv;
w *= w;
mu += exp( -0.5 * w ) * 0.3989423 * hInv;
w2 += 1.0;
}
}
if( w2 > 0) mu = mu/w2;
return( mu ) ;
}
/* variance kernel */
/* variance kernel: weighted variance of x about the smoothed mean at
 * the center cell, over a dRow x dCol window centered at (i,j).
 *
 * x   image (row-major, nRow x nCol)
 * mu  smoothed means (same layout); only mu[i,j] enters the deviation
 *     — neighbors' mu values are only screened for finiteness.
 *     NOTE(review): the screen tests mu[k,l] while the deviation uses
 *     mu[i,j]; looks intentional ("not mathematically correct, but
 *     good enough" per the original comment) — confirm.
 * W   precomputed spatial weights (dRow x dCol)
 *
 * If the center cell is +/-inf or NaN, that value is returned
 * unchanged; non-finite neighbors are skipped.
 *
 * Bug fix: all NaN tests used `== NAN`, which is always false (NaN
 * never compares equal), so NaN cells poisoned the variance. They now
 * use isnan(). */
double varKernel(
    double * x, /* naip image */
    double * mu, /* */
    double * W, /* pre computed spatial weights */
    size_t i, /* current location in columns */
    size_t j, /* current location in rows */
    size_t dRow,
    size_t dCol,
    size_t nRow, /* number of Rows */
    size_t nCol /* number of Columns */
) {
    /* adjustment that must be applied for edge effects */
    size_t k, l;
    size_t k_start;
    size_t k_stop;
    size_t l_start;
    size_t l_stop;
    double w = 0; /* total weight, used to make weight adjustments */
    double var = 0; /* smoothed x value we are goinng to return */
    double varTmp;
    size_t k_local;
    size_t l_local;
    /* the starts (clamped at the image edges) */
    if( i < dRow/2 ) {
        k_start = 0;
    } else {
        k_start = i - dRow/2 ;
    }
    if( j < dCol/2 ) {
        l_start = 0;
    } else {
        l_start = j - dCol/2 ;
    }
    /* the stops (clamped at the image edges) */
    if( i + dRow/2 + 1 > nRow ) {
        k_stop = nRow;
    } else {
        k_stop = i + dRow/2 + 1;
    }
    if( j + dCol/2 + 1 > nCol ) {
        l_stop = nCol;
    } else {
        l_stop = j + dCol/2 + 1;
    }
    /* correctly handle NAN and INF cases */
    if( x[i*nCol + j] == INFINITY ) return( INFINITY);
    if( x[i*nCol + j] == -INFINITY ) return( -INFINITY);
    if( isnan( x[i*nCol + j] ) ) return( NAN); /* was `== NAN`: dead code */
    /*
     * k_start creates a link to the original data
     * k_local creates a link to the weights
     */
    // second pass for variance
    for(
        k=k_start,
        k_local=k_start - i + (dRow/2);
        k < k_stop;
        k++, k_local++
    ) {
        for(
            l=l_start,
            l_local=l_start - j + (dCol/2);
            l < l_stop;
            l++, l_local++
        ) {
            /* not mathematically correct, but good enough */
            if( x[k * nCol + l] == INFINITY ) continue;
            if( x[k * nCol + l] == -INFINITY ) continue;
            if( isnan( x[k * nCol + l] ) ) continue; /* was `== NAN` */
            if( mu[k * nCol + l] == INFINITY ) continue;
            if( mu[k * nCol + l] == -INFINITY ) continue;
            if( isnan( mu[k * nCol + l] ) ) continue; /* was `== NAN` */
            varTmp = x[k * nCol + l] - mu[i* nCol + j];
            var += varTmp * varTmp * W[ k_local*dCol + l_local];
            w += W[ k_local*dCol + l_local] ;
        }
    }
    return( var/w ) ;
}
/* Smooth the first (and, when *momentsPtr > 1, second) local moments
 * of x over the whole image. mu and var receive the results in place.
 * Scalars arrive as pointers per R's .C calling convention.
 * NOTE(review): WVar is accepted but never used — the variance pass is
 * run with WMu, matching the original implementation; confirm intent. */
void rSmoothLocalMoments(
    double * x, /* this is the multi year naip images */
    double * mu, /* this is the input/returned mu */
    double * var, /* this is the input/returned Var */
    double * WMu, /* weight */
    double * WVar, /* weight */
    int * nRowPtr,
    int * nColPtr,
    int * dRowPtr,
    int * dColPtr,
    int * momentsPtr
) {
    /* widen the R integer scalars to size_t */
    const size_t dRow = (size_t) *dRowPtr;
    const size_t dCol = (size_t) *dColPtr;
    const size_t nRow = (size_t) *nRowPtr;
    const size_t nCol = (size_t) *nColPtr;
    size_t row, col;
    /* first moment: one mean-kernel evaluation per pixel */
    #pragma omp parallel for private(col)
    for( row = 0; row < nRow; row++ ) {
        for( col = 0; col < nCol; col++ ) {
            mu[row*nCol + col] = meanKernel( x, var, WMu, row, col, dRow, dCol, nRow, nCol );
        }
    }
    #pragma omp barrier
    /* second moment, only when requested */
    if( *momentsPtr > 1 ) {
        #pragma omp parallel for private(col)
        for( row = 0; row < nRow; row++ ) {
            for( col = 0; col < nCol; col++ ) {
                var[row*nCol + col] = varKernel( x, mu, WMu, row, col, dRow, dCol, nRow, nCol );
            }
        }
        #pragma omp barrier
    }
}
/* Smooth a categorical (integer) raster with the weighted modal
 * kernel. Negative cells (which include R's NA coding) are copied
 * through unchanged. Scalars arrive as pointers per R's .C calling
 * convention. */
void rSmoothCategorical(
    int * x, /* this is the multi year naip images */
    int * mu, /* this is the input/returned mu */
    double * WMu, /* weight */
    int * nRowPtr,
    int * nColPtr,
    int * dRowPtr,
    int * dColPtr
) {
    /* widen the R integer scalars to size_t */
    const size_t dRow = (size_t) *dRowPtr;
    const size_t dCol = (size_t) *dColPtr;
    const size_t nRow = (size_t) *nRowPtr;
    const size_t nCol = (size_t) *nColPtr;
    size_t row, col;
    #pragma omp parallel for private(col)
    for( row = 0; row < nRow; row++ ) {
        for( col = 0; col < nCol; col++ ) {
            const size_t at = row*nCol + col;
            if( x[at] >= 0 ) {
                mu[at] = modalKernel( x, WMu, row, col, dRow, dCol, nRow, nCol );
            } else {
                /* negative codes pass through untouched */
                mu[at] = x[at];
            }
        }
    }
    #pragma omp barrier
}
/* Smooth an image with the local weighted quantile kernel at the
 * requested quantile. Scalars arrive as pointers per R's .C calling
 * convention. */
void rSmoothLocalQuantile(
    double * x, /* this is the multi year naip images */
    double * mu, /* this is the input/returned mu */
    double * WMu, /* weight */
    double * quantile, /* quantile */
    int * nRowPtr,
    int * nColPtr,
    int * dRowPtr,
    int * dColPtr
) {
    /* widen the R integer scalars to size_t */
    const size_t dRow = (size_t) *dRowPtr;
    const size_t dCol = (size_t) *dColPtr;
    const size_t nRow = (size_t) *nRowPtr;
    const size_t nCol = (size_t) *nColPtr;
    size_t row, col;
    #pragma omp parallel for private(col)
    for( row = 0; row < nRow; row++ ) {
        for( col = 0; col < nCol; col++ ) {
            mu[row*nCol + col] = quantileKernel( x, WMu, *quantile, row, col, dRow, dCol, nRow, nCol );
        }
    }
    #pragma omp barrier
}
|
GB_unaryop__identity_int8_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int8_fp32
// op(A') function: GB_tran__identity_int8_fp32
// C type: int8_t
// A type: float
// cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
int8_t z ; GB_CAST_SIGNED(z,aij,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int8_fp32
(
    int8_t *Cx,         // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,        // number of entries in Ax (and Cx)
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (int8_t) Ax [p], via GB_CAST_SIGNED (see GB_CASTING above)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_int8_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The loop body is supplied by the shared transpose template below;
    // GB_PHASE_2_OF_2 selects the second of its two phases.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
struct OMPTraitProperty;
struct OMPTraitSelector;
struct OMPTraitSet;
class OMPTraitInfo;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
mutable IdentifierInfo *Ident_abstract;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector"
/// and "bool" fast comparison. Only present if AltiVec or ZVector are
/// enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
IdentifierInfo *Ident_Bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> MSFenvAccess;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFenvAccessHandler;
std::unique_ptr<PragmaHandler> STDCFenvRoundHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// Current kind of OpenMP clause
OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
  /// Factory object for creating ParsedAttr objects.
  AttributeFactory AttrFactory;

  /// Gathers and cleans up TemplateIdAnnotations when parsing of a
  /// top-level declaration is finished.
  SmallVector<TemplateIdAnnotation *, 16> TemplateIds;

  /// Destroy the cached TemplateIdAnnotations, but only when it is safe:
  /// either all input has been consumed (EOF) or the preprocessor cannot
  /// still be holding annotation tokens that point into TemplateIds.
  void MaybeDestroyTemplateIds() {
    if (!TemplateIds.empty() &&
        (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
      DestroyTemplateIds();
  }
  void DestroyTemplateIds();
  /// RAII object to destroy TemplateIdAnnotations where possible, from a
  /// likely-good position during parsing.
  struct DestroyTemplateIdAnnotationsRAIIObj {
    Parser &Self;

    DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}

    // On scope exit, free the cached template-id annotations if it is safe;
    // the safety check itself lives in MaybeDestroyTemplateIds.
    ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
  };
  /// Identifiers which have been declared within a tentative parse.
  SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;

  /// Tracker for '<' tokens that might have been intended to be treated as an
  /// angle bracket instead of a less-than comparison.
  ///
  /// This happens when the user intends to form a template-id, but typoes the
  /// template-name or forgets a 'template' keyword for a dependent template
  /// name.
  ///
  /// We track these locations from the point where we see a '<' with a
  /// name-like expression on its left until we see a '>' or '>>' that might
  /// match it.
  struct AngleBracketTracker {
    /// Flags used to rank candidate template names when there is more than one
    /// '<' in a scope.
    enum Priority : unsigned short {
      /// A non-dependent name that is a potential typo for a template name.
      PotentialTypo = 0x0,
      /// A dependent name that might instantiate to a template-name.
      DependentName = 0x2,

      /// A space appears before the '<' token.
      SpaceBeforeLess = 0x0,
      /// No space before the '<' token
      NoSpaceBeforeLess = 0x1,

      LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
    };

    /// One tracked '<' candidate, together with the bracket nesting counts at
    /// the time it was seen.
    struct Loc {
      Expr *TemplateName;
      SourceLocation LessLoc;
      AngleBracketTracker::Priority Priority;
      unsigned short ParenCount, BracketCount, BraceCount;

      /// True when the parser is at the same paren/bracket/brace nesting
      /// level as when this '<' was recorded.
      bool isActive(Parser &P) const {
        return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
               P.BraceCount == BraceCount;
      }

      /// True when at the same nesting level, or when the parser has opened
      /// additional parens/brackets/braces since this '<' was recorded.
      bool isActiveOrNested(Parser &P) const {
        return isActive(P) || P.ParenCount > ParenCount ||
               P.BracketCount > BracketCount || P.BraceCount > BraceCount;
      }
    };

    SmallVector<Loc, 8> Locs;

    /// Add an expression that might have been intended to be a template name.
    /// In the case of ambiguity, we arbitrarily select the innermost such
    /// expression, for example in 'foo < bar < baz', 'bar' is the current
    /// candidate. No attempt is made to track that 'foo' is also a candidate
    /// for the case where we see a second suspicious '>' token.
    void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
             Priority Prio) {
      if (!Locs.empty() && Locs.back().isActive(P)) {
        // Same bracket scope as the last candidate: keep the newer candidate
        // unless the recorded one has strictly higher priority.
        if (Locs.back().Priority <= Prio) {
          Locs.back().TemplateName = TemplateName;
          Locs.back().LessLoc = LessLoc;
          Locs.back().Priority = Prio;
        }
      } else {
        Locs.push_back({TemplateName, LessLoc, Prio,
                        P.ParenCount, P.BracketCount, P.BraceCount});
      }
    }

    /// Mark the current potential missing template location as having been
    /// handled (this happens if we pass a "corresponding" '>' or '>>' token
    /// or leave a bracket scope).
    void clear(Parser &P) {
      while (!Locs.empty() && Locs.back().isActiveOrNested(P))
        Locs.pop_back();
    }

    /// Get the current enclosing expression that might have been intended to
    /// be a template name.
    Loc *getCurrent(Parser &P) {
      if (!Locs.empty() && Locs.back().isActive(P))
        return &Locs.back();
      return nullptr;
    }
  };

  AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
  // Trivial accessors. Language options and target information come from the
  // preprocessor; the current scope and Objective-C context are owned by Sema.
  const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
  const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
  Preprocessor &getPreprocessor() const { return PP; }
  Sema &getActions() const { return Actions; }
  AttributeFactory &getAttrFactory() { return AttrFactory; }

  const Token &getCurToken() const { return Tok; }
  Scope *getCurScope() const { return Actions.getCurScope(); }
  void incrementMSManglingNumber() const {
    return Actions.incrementMSManglingNumber();
  }

  Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
  // Parsing methods.

  /// Initialize - Warm up the parser.
  ///
  void Initialize();

  /// Parse the first top-level declaration in a translation unit.
  bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);

  /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
  /// the EOF was encountered.
  bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);

  /// Convenience overload that discards the parsed declaration group.
  bool ParseTopLevelDecl() {
    DeclGroupPtrTy Result;
    return ParseTopLevelDecl(Result);
  }
  /// ConsumeToken - Consume the current 'peek token' and lex the next one.
  /// This does not work with special tokens: string literals, code completion,
  /// annotation tokens and balanced tokens must be handled using the specific
  /// consume methods.
  /// Returns the location of the consumed token.
  SourceLocation ConsumeToken() {
    assert(!isTokenSpecial() &&
           "Should consume special tokens with Consume*Token");
    // Remember the consumed token's location before lexing past it, so that
    // diagnostics can refer to the position after the previous token.
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
  /// ConsumeAnyToken - Dispatch to the right Consume* method based on the
  /// current token type. This should only be used in cases where the type of
  /// the token really isn't known, e.g. in error recovery.
  ///
  /// If \p ConsumeCodeCompletionTok is true, a code-completion token is
  /// consumed directly; otherwise it is routed through
  /// handleUnexpectedCodeCompletionToken().
  SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
    if (isTokenParen())
      return ConsumeParen();
    if (isTokenBracket())
      return ConsumeBracket();
    if (isTokenBrace())
      return ConsumeBrace();
    if (isTokenStringLiteral())
      return ConsumeStringToken();
    if (Tok.is(tok::code_completion))
      return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                      : handleUnexpectedCodeCompletionToken();
    if (Tok.isAnnotation())
      return ConsumeAnnotationToken();
    return ConsumeToken();
  }
  /// Return the location just past the end of the previously consumed token,
  /// e.g. the position where a missing token would need to be inserted.
  SourceLocation getEndOfPreviousToken() {
    return PP.getLocForEndOfToken(PrevTokLocation);
  }

  /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
  /// to the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
    return Actions.getNullabilityKeyword(nullability);
  }
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
  /// Return the current token to the token stream and make the given
  /// token the current token.
  void UnconsumeToken(Token &Consumed) {
    // Push the current token back behind Consumed, then re-lex so that
    // Consumed becomes the current token and the old current token follows.
    Token Next = Tok;
    PP.EnterToken(Consumed, /*IsReinject*/true);
    PP.Lex(Tok);
    PP.EnterToken(Next, /*IsReinject*/true);
  }

  /// Consume an annotation token and lex the next token. Returns the start
  /// location of the annotation; PrevTokLocation is set to the annotation's
  /// end location rather than its start.
  SourceLocation ConsumeAnnotationToken() {
    assert(Tok.isAnnotation() && "wrong consume method");
    SourceLocation Loc = Tok.getLocation();
    PrevTokLocation = Tok.getAnnotationEndLoc();
    PP.Lex(Tok);
    return Loc;
  }
  /// ConsumeParen - This consume method keeps the paren count up-to-date.
  ///
  SourceLocation ConsumeParen() {
    assert(isTokenParen() && "wrong consume method");
    if (Tok.getKind() == tok::l_paren)
      ++ParenCount;
    else if (ParenCount) {
      // Leaving a paren scope: any tracked '<' candidates opened inside it
      // can no longer be matched.
      AngleBrackets.clear(*this);
      --ParenCount;       // Don't let unbalanced )'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBracket - This consume method keeps the bracket count up-to-date.
  ///
  SourceLocation ConsumeBracket() {
    assert(isTokenBracket() && "wrong consume method");
    if (Tok.getKind() == tok::l_square)
      ++BracketCount;
    else if (BracketCount) {
      AngleBrackets.clear(*this);
      --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBrace - This consume method keeps the brace count up-to-date.
  ///
  SourceLocation ConsumeBrace() {
    assert(isTokenBrace() && "wrong consume method");
    if (Tok.getKind() == tok::l_brace)
      ++BraceCount;
    else if (BraceCount) {
      AngleBrackets.clear(*this);
      --BraceCount;     // Don't let unbalanced }'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }
  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the token kind. This method is specific to strings, as it
  /// handles string literal concatenation, as per C99 5.1.1.2, translation
  /// phase #6.
  /// Returns the location of the consumed string token.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }
  /// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ROUND...
void HandlePragmaFEnvRound();
/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
  /// GetLookAheadToken - This peeks ahead N tokens and returns that token
  /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
  /// returns the token after Tok, etc.
  ///
  /// Note that this differs from the Preprocessor's LookAhead method, because
  /// the Parser always has one token lexed that the preprocessor doesn't.
  ///
  const Token &GetLookAheadToken(unsigned N) {
    // At end-of-file there is nothing further to peek at; keep returning EOF.
    if (N == 0 || Tok.is(tok::eof)) return Tok;
    return PP.LookAhead(N-1);
  }

public:
  /// NextToken - This peeks ahead one token and returns it without
  /// consuming it.
  const Token &NextToken() {
    return PP.LookAhead(0);
  }
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static TypeResult getTypeAnnotation(const Token &Tok) {
if (!Tok.getAnnotationValue())
return TypeError();
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
  /// Attach a parsed type to an annotation token. An invalid TypeResult is
  /// stored as a null annotation value.
  static void setTypeAnnotation(Token &Tok, TypeResult T) {
    assert((T.isInvalid() || T.get()) &&
           "produced a valid-but-null type annotation?");
    Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
  }

  /// Read a non-type declaration out of an annotation token.
  static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
    return static_cast<NamedDecl*>(Tok.getAnnotationValue());
  }
  static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
    Tok.setAnnotationValue(ND);
  }

  /// Read an identifier out of an annotation token.
  static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
    return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
  }
  static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
    Tok.setAnnotationValue(ND);
  }

  /// Read an already-translated primary expression out of an annotation
  /// token.
  static ExprResult getExprAnnotation(const Token &Tok) {
    return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
  }

  /// Set the primary expression corresponding to the given annotation
  /// token.
  static void setExprAnnotation(Token &Tok, ExprResult ER) {
    Tok.setAnnotationValue(ER.getAsOpaquePointer());
  }
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
bool MightBeCXXScopeToken() {
return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
(Tok.is(tok::annot_template_id) &&
NextToken().is(tok::coloncolon)) ||
Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}
private:
  /// The ways in which TryAnnotateName can dispose of an identifier.
  enum AnnotatedNameKind {
    /// Annotation has failed and emitted an error.
    ANK_Error,
    /// The identifier is a tentatively-declared name.
    ANK_TentativeDecl,
    /// The identifier is a template name. FIXME: Add an annotation for that.
    ANK_TemplateName,
    /// The identifier can't be resolved.
    ANK_Unresolved,
    /// Annotation was successful.
    ANK_Success
  };

  AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
Tok.getIdentifierInfo() != Ident_Bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
  /// Returns true if the current token is the identifier 'instancetype'.
  ///
  /// Should only be used in Objective-C language modes.
  bool isObjCInstancetype() {
    assert(getLangOpts().ObjC);
    if (Tok.isAnnotation())
      return false;
    // Lazily look up the IdentifierInfo the first time it is needed.
    if (!Ident_instancetype)
      Ident_instancetype = PP.getIdentifierInfo("instancetype");
    return Tok.getIdentifierInfo() == Ident_instancetype;
  }
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
  /// TentativeParsingAction - An object that is used as a kind of "tentative
  /// parsing transaction". It gets instantiated to mark the token position and
  /// after the token consumption is done, Commit() or Revert() is called to
  /// either "commit the consumed tokens" or revert to the previously marked
  /// token position. Example:
  ///
  ///   TentativeParsingAction TPA(*this);
  ///   ConsumeToken();
  ///   ....
  ///   TPA.Revert();
  ///
  class TentativeParsingAction {
    Parser &P;
    PreferredTypeBuilder PrevPreferredType;
    Token PrevTok;
    size_t PrevTentativelyDeclaredIdentifierCount;
    unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
    bool isActive;

  public:
    explicit TentativeParsingAction(Parser &p)
        : P(p), PrevPreferredType(P.PreferredType) {
      // Snapshot every piece of parser state that token consumption can
      // change, then ask the preprocessor to start recording tokens so we
      // can backtrack later.
      PrevTok = P.Tok;
      PrevTentativelyDeclaredIdentifierCount =
          P.TentativelyDeclaredIdentifiers.size();
      PrevParenCount = P.ParenCount;
      PrevBracketCount = P.BracketCount;
      PrevBraceCount = P.BraceCount;
      P.PP.EnableBacktrackAtThisPos();
      isActive = true;
    }
    /// Keep the tokens consumed since construction; identifiers tentatively
    /// declared during the transaction are still discarded.
    void Commit() {
      assert(isActive && "Parsing action was finished!");
      P.TentativelyDeclaredIdentifiers.resize(
          PrevTentativelyDeclaredIdentifierCount);
      P.PP.CommitBacktrackedTokens();
      isActive = false;
    }
    /// Rewind the token stream and restore all saved parser state.
    void Revert() {
      assert(isActive && "Parsing action was finished!");
      P.PP.Backtrack();
      P.PreferredType = PrevPreferredType;
      P.Tok = PrevTok;
      P.TentativelyDeclaredIdentifiers.resize(
          PrevTentativelyDeclaredIdentifierCount);
      P.ParenCount = PrevParenCount;
      P.BracketCount = PrevBracketCount;
      P.BraceCount = PrevBraceCount;
      isActive = false;
    }
    ~TentativeParsingAction() {
      assert(!isActive && "Forgot to call Commit or Revert!");
    }
  };
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  // Unconditionally undo any tokens consumed while this object was alive.
  ~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC; // The ObjC container we temporarily stepped out of, if any.
  SaveAndRestore<bool> WithinObjCContainer;

public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    // Only switch when the parser is actually inside an ObjC container.
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    // Re-enter the container we left in the constructor.
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
/// A '(' '{' beginning a statement-expression.
StmtExprBegin,
/// A '}' ')' ending a statement-expression.
StmtExprEnd,
/// A '[' '[' beginning a C++11 or C2x attribute.
AttrBegin,
/// A ']' ']' ending a C++11 or C2x attribute.
AttrEnd,
/// A '::' '*' forming a C++ pointer-to-member declaration.
MemberPtr,
};
/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
tok::TokenKind FirstTokKind, CompoundToken Op);
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self; // Null once the scope has been exited (or was never entered).
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      // No scope is entered; in the compound-statement case only the MS
      // mangling number is bumped.
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      this->Self = nullptr;
    }
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (Self) {
      Self->ExitScope();
      Self = nullptr; // Make Exit() idempotent and disarm the destructor.
    }
  }

  ~ParseScope() {
    Exit();
  }
};
/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
class MultiParseScope {
  Parser &Self;
  unsigned NumScopes = 0; // How many scopes Enter() has pushed so far.

  MultiParseScope(const MultiParseScope &) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}

  /// Push one more scope, with the given flags, onto the parser.
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++NumScopes;
  }

  /// Pop every scope this object has entered so far; safe to call more
  /// than once.
  void Exit() {
    for (; NumScopes != 0; --NumScopes)
      Self.ExitScope();
  }

  ~MultiParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;   // The scope whose flags were changed, if any.
  unsigned OldFlags; // Saved flags, so they can be restored on destruction.
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  // Both defined out of line; ManageFlags selects whether any change is made.
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Convenience overload: emit DiagID at the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
  return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
  /// Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};

/// Allow SkipUntilFlags values to be combined with '|'.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  // Single-token convenience form; forwards to the ArrayRef overload.
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
/// Two-token convenience form: stop at whichever of T1/T2 comes first.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
/// Three-token convenience form: stop at whichever of T1/T2/T3 comes first.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
// The general form, defined out of line.
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  // One hook per category of late-parsed entity; subclasses below override
  // only the hooks relevant to what they cache.
  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
  virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  // Every hook is overridden (defined out of line) so nested classes
  // participate in each late-parsing phase.
  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;
  void ParseLexedPragmas() override;

private:
  Parser *Self;
  ParsingClass *Class; // The nested class whose members are parsed.
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;           // The attribute's argument tokens, saved for later.
  IdentifierInfo &AttrName;    // Name of the delayed attribute.
  IdentifierInfo *MacroII = nullptr;
  SourceLocation AttrNameLoc;
  SmallVector<Decl*, 2> Decls; // Declarations this attribute applies to.

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  /// Record another declaration the attribute should be attached to.
  void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
  Parser *Self = nullptr;
  AccessSpecifier AS = AS_none; // Access specifier in effect at the pragma.
  CachedTokens Toks;            // The saved pragma tokens.

public:
  explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
      : Self(P), AS(AS) {}

  /// Take ownership of the cached tokens (swaps with the argument).
  void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
  const CachedTokens &toks() const { return Toks; }
  AccessSpecifier getAccessSpecifier() const { return AS; }
  void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList : public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) {}

  /// Returns true if these attributes are planned to be parsed shortly
  /// after creation. Const-qualified so it can be queried through const
  /// references (the accessor does not modify the list).
  bool parseSoon() const { return ParseSoon; }

private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;           // The method whose body is cached below.
  CachedTokens Toks; // The not-yet-parsed body tokens.

  explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

  void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(Decl *P,
                                     std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser *Self;

  /// Method - The method declaration.
  Decl *Method;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
        TagOrTemplate(TagOrTemplate) {}

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Returns the innermost class currently being parsed (top of ClassStack).
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  bool Popped; // Whether Pop() was already called explicitly.
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class off the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    // Pop automatically unless it was done explicitly via Pop().
    if (!Popped)
      P.PopParsingClass(State);
  }
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  /// Default constructor: not a template at all.
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

  /// Constructor for a template declaration or explicit specialization.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  /// Constructor for an explicit instantiation ('extern' location may be
  /// invalid when the 'extern' keyword is absent).
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};
// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Single-target convenience overload: consume and store tokens into Toks
/// until T1 is found, by forwarding to the two-target form with both
/// targets set to T1.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII state for parsing an Objective-C @implementation: registers itself
/// as the parser's current implementation on construction and collects
/// methods whose bodies are stashed for later parsing.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;          // The @implementation declaration being parsed.
  bool HasCFunction;  // Set while parsing if a C function is encountered.
  typedef SmallVector<LexedMethod *, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished; // Set by finish(); queried via isFinished().
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0,
UnaryExprOnly,
PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
/// Convenience overload: only meaningful while a potential angle-bracket
/// pair is being tracked; otherwise reports no delimiter.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  auto *Info = AngleBrackets.getCurrent(*this);
  return Info && checkPotentialAngleBracketDelimiter(*Info, OpToken);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseSYCLUniqueStableNameExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
// C++ id-expressions. The 'tryParse' form is used during tentative/
// error-recovery parsing and hands back a \p Replacement token rather than
// committing; see the non-try form for the normal path.
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
                                   Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
// NOTE(review): presumably true when A and B abut with no intervening
// whitespace; used by the digraph check below -- confirm against the
// implementation.
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                bool EnteringContext, IdentifierInfo &II,
                                CXXScopeSpec &SS);
/// Parse an optional C++ scope specifier (nested-name-specifier) into \p SS.
/// \p LastII, if non-null, receives the last identifier that formed part of
/// the scope specifier.
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
                                    ParsedType ObjectType,
                                    bool ObjectHasErrors,
                                    bool EnteringContext,
                                    bool *MayBePseudoDestructor = nullptr,
                                    bool IsTypename = false,
                                    IdentifierInfo **LastII = nullptr,
                                    bool OnlyNamespace = false,
                                    bool InUsingDeclaration = false);

//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions

/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
  /// This appears to be a lambda-introducer, which has been fully parsed.
  Success,
  /// This is a lambda-introducer, but has not been fully parsed, and this
  /// function needs to be called again to parse it.
  Incomplete,
  /// This is definitely an Objective-C message send expression, rather than
  /// a lambda-introducer, attribute-specifier, or array designator.
  MessageSend,
  /// This is not a lambda-introducer.
  Invalid,
};

// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
/// \returns true on error; on success, \p Intro holds the parsed introducer.
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
                      LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();

/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();

//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();

//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                    tok::TokenKind OpKind,
                                    CXXScopeSpec &SS,
                                    ParsedType ObjectType);

//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();

//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();

ExceptionSpecificationType tryParseExceptionSpecification(
    bool Delayed,
    SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &DynamicExceptions,
    SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
    ExprResult &NoexceptExpr,
    CachedTokens *&ExceptionSpecTokens);

// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
    SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &Exceptions,
    SmallVectorImpl<SourceRange> &Ranges);

//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
                                   bool MayBeFollowedByDirectInit);

//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();

//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);

/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
                                 Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
                                    SourceLocation Start);

//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                        SourceLocation Loc,
                                        Sema::ConditionKind CK,
                                        ForRangeInfo *FRI = nullptr,
                                        bool EnterForConditionScope = false);
DeclGroupPtrTy
ParseAliasDeclarationInInitStatement(DeclaratorContext Context,
                                     ParsedAttributesWithRange &Attrs);

//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();

//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);

//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
///       initializer: [C99 6.7.8]
///         assignment-expression
///         '{' ...
///
/// Dispatches on the current token: a '{' starts a braced initializer,
/// anything else is parsed as an assignment-expression.
ExprResult ParseInitializer() {
  return Tok.is(tok::l_brace) ? ParseBraceInitializer()
                              : ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();

/// Context passed to designator parsing to enable code completion of
/// field designators.
struct DesignatorCompletionInfo {
  // Initializer expressions parsed so far for the aggregate being completed.
  SmallVectorImpl<Expr *> &InitExprs;
  // The type whose fields are candidates for the next designator.
  QualType PreferredBaseType;
};
ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo);

//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression();  // ^{...}

//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                          SourceLocation SuperLoc,
                                          ParsedType ReceiverType,
                                          Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
    SourceLocation LBracloc, SourceLocation SuperLoc,
    ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.

/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;

StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
               ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
    StmtVector &Stmts, ParsedStmtContext StmtCtx,
    SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
    StmtVector &Stmts,
    ParsedStmtContext StmtCtx,
    SourceLocation *TrailingElseLoc,
    ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                 ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                              bool MissingCase = false,
                              ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
                                  unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
                               Sema::ConditionResult &CondResult,
                               SourceLocation Loc, Sema::ConditionKind CK,
                               SourceLocation *LParenLoc = nullptr,
                               SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
                               ParsedStmtContext StmtCtx,
                               SourceLocation *TrailingElseLoc,
                               ParsedAttributesWithRange &Attrs);

/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
  /// Parse the block; this code is always used.
  IEB_Parse,
  /// Skip the block entirely; this code is never used.
  IEB_Skip,
  /// Parse the block as a dependent block, which may be used in
  /// some template instantiations but not others.
  IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// The location of the initial keyword.
  SourceLocation KeywordLoc;
  /// Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;
  /// Nested-name-specifier preceding the name.
  CXXScopeSpec SS;
  /// The name we're looking for.
  UnqualifiedId Name;
  /// The behavior that this __if_exists or __if_not_exists block
  /// should follow.
  IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                            ParsedAttributes &AccessAttrs,
                                            AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                            bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                         SmallVectorImpl<Expr *> &Constraints,
                         SmallVectorImpl<Expr *> &Exprs);

//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);

//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();

//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.

/// A context for parsing declaration specifiers.  TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
  DSC_normal,         // normal context
  DSC_class,          // class context, enables 'friend'
  DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
  DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
  DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
  DSC_top_level,         // top-level/namespace declaration context
  DSC_template_param,    // template parameter context
  DSC_template_type_arg, // template type argument context
  DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
  DSC_condition           // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  // Pure type-specifier contexts.
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_template_type_arg:
    return true;
  // Full declaration-specifier contexts.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Whether a defining-type-specifier is permitted in a given context.
enum class AllowDefiningTypeSpec {
  /// The grammar doesn't allow a defining-type-specifier here, and we must
  /// not parse one (eg, because a '{' could mean something else).
  No,
  /// The grammar doesn't allow a defining-type-specifier here, but we permit
  /// one for error recovery purposes. Sema will reject.
  NoButErrorRecovery,
  /// The grammar allows a defining-type-specifier here, even though it's
  /// always invalid. Sema will reject.
  YesButInvalid,
  /// The grammar allows a defining-type-specifier here, and one can be valid.
  Yes
};
/// Is this a context in which we are parsing defining-type-specifiers (and
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
  switch (DSC) {
  // Definitions are forbidden outright in a trailing return type.
  case DeclSpecContext::DSC_trailing:
    return AllowDefiningTypeSpec::No;
  // Forbidden, but parsed anyway for error recovery.
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
    return AllowDefiningTypeSpec::NoButErrorRecovery;
  // Grammatically allowed but always semantically invalid.
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
    return AllowDefiningTypeSpec::YesButInvalid;
  // Fully allowed.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
    return AllowDefiningTypeSpec::Yes;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which an opaque-enum-declaration can appear?
static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
  switch (DSC) {
  // Type-specifier-only and restricted contexts cannot contain one.
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
    return false;
  // Ordinary declaration contexts can.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  // Contexts where a placeholder for a deduced class type is not permitted.
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  // Everything else permits CTAD.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc;
  ExprResult RangeExpr;

  // A valid ColonLoc means a ':' was seen, i.e. this was a for-range decl.
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar;
};

DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                ParsedAttributesWithRange &attrs,
                                SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                       ParsedAttributesWithRange &attrs, bool RequireSemi,
                       ForRangeInit *FRI = nullptr,
                       SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                              SourceLocation *DeclEnd = nullptr,
                              ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
             const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();

bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                      const ParsedTemplateInfo &TemplateInfo,
                      AccessSpecifier AS, DeclSpecContext DSC,
                      ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
    DeclSpec &DS,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal,
    LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
    DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
    LateParsedAttrList *LateAttrs = nullptr);

void ParseSpecifierQualifierList(
    DeclSpec &DS, AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal);

void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
                                DeclaratorContext Context);

void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                          RecordDecl *TagDecl);
// \p FieldsCallback is invoked for each field declarator parsed.
void ParseStructDeclaration(
    ParsingDeclSpec &DS,
    llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();

/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier.  Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXDeclarationSpecifier() == TPResult::True;
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  return getLangOpts().CPlusPlus ? isCXXDeclarationStatement()
                                 : isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // Notify Sema that we are about to (potentially) parse an OpenMP loop
  // init-statement; this runs regardless of the disambiguation outcome.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  if (getLangOpts().CPlusPlus)
    // 'using' can only start an alias-declaration init-statement here.
    return Tok.is(tok::kw_using) ||
           isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
  return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();

/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();

/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,
  TypeIdUnambiguous,
  TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // C never has the type-id/expression ambiguity.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}

/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool AmbiguityIgnored;
  return isTypeIdInParens(AmbiguityIgnored);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool AmbiguityIgnored;
  return isCXXTypeId(TypeIdUnambiguous, AmbiguityIgnored);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

struct ConditionDeclarationOrInitStatementState;

/// What a parenthesized condition-or-init-statement was disambiguated as.
enum class ConditionOrInitStatement {
  Expression,    ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
  ForRangeDecl,  ///< Disambiguated as a for-range declaration.
  Error          ///< Can't be any of the above!
};

/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
                                         bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Convenience overload of the above that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool AmbiguityIgnored;
  return isCXXTypeId(Context, AmbiguityIgnored);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
  True, False, Ambiguous, Error
};

/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);

/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                          bool *InvalidAsDeclSpec = nullptr);

/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();

/// Determine whether the current token sequence might be
///   '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);

/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();

/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);

// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
                            bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
                                   bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();

/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();

public:
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
              DeclaratorContext Context = DeclaratorContext::TypeName,
              AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
              ParsedAttributes *Attrs = nullptr);

private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed. The caller guarantees we are at a '['; a second
// '[' confirms a C++11 attribute-specifier.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();

/// If the upcoming tokens form a C++11 attribute-specifier ('[[' or
/// 'alignas') in a misplaced position, diagnose it, suggesting
/// \p CorrectLocation as the right spot.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  bool AtDoubleSquare =
      Tok.is(tok::l_square) && NextToken().is(tok::l_square);
  if (AtDoubleSquare || Tok.is(tok::kw_alignas))
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                     SourceLocation CorrectLocation);

// Moves type attributes from \p Attrs onto \p DS as appropriate for \p TUK.
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                    DeclSpec &DS, Sema::TagUseKind TUK);
// Diagnose and drop any attributes held in \p Attrs.
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;  // Nothing was parsed; nothing to diagnose.
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clear();
}
/// Overload for attribute views: diagnoses and then clears only the list
/// (the view does not own the attribute storage).
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                  SourceLocation FixItLoc);

// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which the standard permits but we don't support yet; for example,
// attributes that appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                             unsigned DiagID,
                             bool DiagnoseEmptyAttrs = false);

/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();

/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();

/// Emit warnings for C++11 and C2x attributes that are in a position that
/// clang accepts as an extension.
void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs);

/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
                         ParsedAttributes &Attrs, SourceLocation *EndLoc,
                         IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                         ParsedAttr::Syntax Syntax);

/// Bitmask of attribute syntaxes ParseAttributes() should accept.
enum ParseAttrKindMask {
  PAKM_GNU = 1 << 0,
  PAKM_Declspec = 1 << 1,
  PAKM_CXX11 = 1 << 2,
};

/// \brief Parse attributes based on what syntaxes are desired, allowing for
/// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
/// __attribute__((...)) __declspec(...) __attribute__((...)))
/// Note that Microsoft attributes (spelled with single square brackets) are
/// not supported by this because of parsing ambiguities with other
/// constructs.
///
/// There are some attribute parse orderings that should not be allowed in
/// arbitrary order. e.g.,
///
///   [[]] __attribute__(()) int i; // OK
///   __attribute__(()) [[]] int i; // Not OK
///
/// Such situations should use the specific attribute parsing functionality.
void ParseAttributes(unsigned WhichAttrKinds,
                     ParsedAttributesWithRange &Attrs,
                     SourceLocation *End = nullptr,
                     LateParsedAttrList *LateAttrs = nullptr);
void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
ParsedAttributesWithRange AttrsWithRange(AttrFactory);
ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs);
Attrs.takeAllFrom(AttrsWithRange);
}
/// \brief Possibly parse attributes based on what syntaxes are desired,
/// allowing for the order to vary.
/// \returns true if any attributes were parsed.
bool MaybeParseAttributes(unsigned WhichAttrKinds,
                          ParsedAttributesWithRange &Attrs,
                          SourceLocation *End = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr) {
  // Note: keep the short-circuit -- isCXX11AttributeSpecifier() must only
  // run when no GNU/declspec keyword is present.
  bool AtAttribute =
      Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
      (standardAttributesAllowed() && isCXX11AttributeSpecifier());
  if (!AtAttribute)
    return false;
  ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
  return true;
}
/// Range-discarding counterpart of the overload above.
/// \returns true if any attributes were parsed.
bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                          SourceLocation *End = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr) {
  bool CanParse =
      Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
      (standardAttributesAllowed() && isCXX11AttributeSpecifier());
  if (!CanParse)
    return false;
  ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
  return true;
}
/// If the current token starts a GNU __attribute__ list, parse it and
/// attach the attributes to declarator \p D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return;
  ParsedAttributes GNUAttrs(AttrFactory);
  SourceLocation EndLoc;
  ParseGNUAttributes(GNUAttrs, &EndLoc, LateAttrs, &D);
  D.takeAttributes(GNUAttrs, EndLoc);
}
/// Parses GNU-style attributes, if present, and returns them without
/// source range information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
///
/// \returns true if any attributes were parsed.
bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
                             SourceLocation *EndLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute)) {
    // The ParsedAttributes overload of ParseGNUAttributes already stages
    // through a ParsedAttributesWithRange internally, so no local staging
    // object is needed here. (The previous code constructed an
    // AttrsWithRange that was never populated and then took from it, which
    // was a no-op.)
    ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
    return true;
  }
  return false;
}
/// Parses GNU-style attributes (with range information), if present.
/// \returns true if any attributes were parsed.
bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs,
                             SourceLocation *EndLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return false;
  ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
  return true;
}
/// Parses GNU-style attributes and returns them without source range
/// information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
void ParseGNUAttributes(ParsedAttributes &Attrs,
                        SourceLocation *EndLoc = nullptr,
                        LateParsedAttrList *LateAttrs = nullptr,
                        Declarator *D = nullptr) {
  // Parse into a ranged staging list, then strip the range by moving the
  // attributes into the caller's plain list.
  ParsedAttributesWithRange Staging(AttrFactory);
  ParseGNUAttributes(Staging, EndLoc, LateAttrs, D);
  Attrs.takeAllFrom(Staging);
}
void ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation *EndLoc = nullptr,
                        LateParsedAttrList *LateAttrs = nullptr,
                        Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
                           SourceLocation AttrNameLoc,
                           ParsedAttributes &Attrs, SourceLocation *EndLoc,
                           IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                           ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();

// Parses arguments of clang-namespaced attributes; returns the number of
// arguments parsed.
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
                        ParsedAttributes &Attrs, SourceLocation *EndLoc,
                        IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                        ParsedAttr::Syntax Syntax);
/// Replays cached OpenMP directive tokens collected during attribute parsing
/// back into the token stream so they are parsed as if written in place.
void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) {
// If parsing the attributes found an OpenMP directive, emit those tokens
// to the parse stream now.
if (!OpenMPTokens.empty()) {
// Re-inject the current token before pushing the cached stream.
// NOTE(review): this relies on the stream entered last being consumed
// first, so the OpenMP tokens precede the re-injected token -- confirm
// against Preprocessor::EnterTokenStream semantics.
PP.EnterToken(Tok, /*IsReinject*/ true);
PP.EnterTokenStream(OpenMPTokens, /*DisableMacroExpansion*/ true,
/*IsReinject*/ true);
// Prime Tok with the first token of the newly pushed stream.
ConsumeAnyToken(/*ConsumeCodeCompletionTok*/ true);
}
}
/// If a C++11 attribute-specifier follows (and standard attributes are
/// allowed here), parse it and attach the attributes to the declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!(standardAttributesAllowed() && isCXX11AttributeSpecifier()))
    return;
  ParsedAttributesWithRange CXX11Attrs(AttrFactory);
  SourceLocation AttrEndLoc;
  ParseCXX11Attributes(CXX11Attrs, &AttrEndLoc);
  D.takeAttributes(CXX11Attrs, AttrEndLoc);
}
/// Parses a C++11 attribute-specifier, if one follows, returning the
/// attributes without source range information.
/// \returns true if attributes were parsed.
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!(standardAttributesAllowed() && isCXX11AttributeSpecifier()))
    return false;
  // Parse into a ranged list, then move over, dropping the range info.
  ParsedAttributesWithRange RangedAttrs(AttrFactory);
  ParseCXX11Attributes(RangedAttrs, endLoc);
  attrs.takeAllFrom(RangedAttrs);
  return true;
}
/// Parses a C++11 attribute-specifier with range information, if one
/// follows. \p OuterMightBeMessageSend disambiguates against an ObjC
/// message send. \returns true if attributes were parsed.
bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!standardAttributesAllowed() ||
      !isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    return false;
  ParseCXX11Attributes(attrs, endLoc);
  return true;
}
void ParseOpenMPAttributeArgs(IdentifierInfo *AttrName,
CachedTokens &OpenMPTokens);
void ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
CachedTokens &OpenMPTokens,
SourceLocation *EndLoc = nullptr);
/// Parses a single C++11 attribute-specifier, immediately replaying any
/// OpenMP directive tokens the attributes produced.
void ParseCXX11AttributeSpecifier(ParsedAttributes &Attrs,
                                  SourceLocation *EndLoc = nullptr) {
  CachedTokens CollectedOMPTokens;
  ParseCXX11AttributeSpecifierInternal(Attrs, CollectedOMPTokens, EndLoc);
  ReplayOpenMPAttributeTokens(CollectedOMPTokens);
}
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
CachedTokens &OpenMPTokens);
IdentifierInfo *TryParseCXX11AttributeIdentifier(
SourceLocation &Loc,
Sema::AttributeCompletion Completion = Sema::AttributeCompletion::None,
const IdentifierInfo *EnclosingScope = nullptr);
/// If Microsoft extensions are enabled and the current token is '[',
/// parse a Microsoft-style attribute block into \p attrs.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt || !Tok.is(tok::l_square))
    return;
  ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// If the __declspec keyword is enabled and present, parse the declspec
/// attributes into \p Attrs. \returns true if declspecs were parsed.
bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (!getLangOpts().DeclSpecKeyword || !Tok.is(tok::kw___declspec))
    return false;
  ParseMicrosoftDeclSpecs(Attrs, End);
  return true;
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
void ParsePtrauthQualifier(ParsedAttributes &Attrs);
/// Classify \p Tok as a C++11 virt-specifier (e.g. 'override'/'final');
/// defined out of line.
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Convenience overload that classifies the current token.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
bool isClassCompatibleKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS; // Nested-name-specifier whose scope may be entered.
bool EnteredScope; // True once Sema was told we entered the declarator scope.
bool CreatedScope; // True once a parser Scope was pushed (needs ExitScope()).
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
// Push a parser scope and ask Sema to enter the declarator scope named by
// SS. Safe to call at most once per object (asserted below).
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
// NOTE(review): a false return from ActOnCXXEnterDeclaratorScope appears
// to signal success here; only then is the matching Exit issued in the
// destructor -- confirm against Sema's documented contract.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
// Undo in reverse order: Sema exit first (if entered), then pop the
// parser scope (if created).
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
/// Bitmask describing which attribute syntaxes are permitted (and whether
/// they are accepted or diagnosed) in the current parsing position.
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1, ///< GNU attributes are parsed (no diagnostic).
AR_CXX11AttributesParsed = 1 << 2, ///< C++11 attributes are parsed.
AR_DeclspecAttributesParsed = 1 << 3, ///< __declspec attributes are parsed.
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
/// One level of a (possibly nested) namespace definition, recorded while
/// parsing; consumed by ParseInnerNamespace below.
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc; // Location of 'namespace', if written at this level.
SourceLocation InlineLoc; // Location of 'inline'; invalid when not inline.
SourceLocation IdentLoc; // Location of the namespace name.
IdentifierInfo *Ident; // The namespace name itself.
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// The parsed pieces of a single using-declarator:
/// 'typename'[opt] nested-name-specifier unqualified-id '...'[opt].
struct UsingDeclarator {
SourceLocation TypenameLoc; // Location of 'typename'; invalid if absent.
CXXScopeSpec SS; // Nested-name-specifier qualifying the name.
UnqualifiedId Name; // The unqualified name being introduced.
SourceLocation EllipsisLoc; // Location of '...'; invalid if not a pack.
// Reset every component so the object can be reused for the next
// declarator in a comma-separated list.
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &Attrs,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
OMPTraitInfo *ParentTI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse 'omp [begin] assume[s]' directive.
void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parse 'omp end assumes' directive.
void ParseOpenMPEndAssumesDirective(SourceLocation Loc);
/// Parse clauses for '#pragma omp [begin] declare target'.
void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI);
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
OpenMPDirectiveKind EndDKind,
SourceLocation Loc);
/// Skip tokens until an `annot_pragma_openmp_end` token is found. Emit a
/// warning if it is not the current token.
void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);
/// Check the \p FoundKind against the \p ExpectedKind, if not issue an error
/// that the "end" matching the "begin" directive of kind \p BeginKind was not
/// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd
/// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`.
void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
OpenMPDirectiveKind ExpectedKind,
OpenMPDirectiveKind FoundKind,
SourceLocation MatchingLoc,
SourceLocation FoundLoc,
bool SkipUntilOpenMPEnd);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Tries to parse cast part of OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart();
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses the 'sizes' clause of a '#pragma omp tile' directive.
OMPClause *ParseOpenMPSizesClause();
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
/// Parses and creates OpenMP 5.0 iterators expression:
/// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
/// <range-specification> }+ ')'
ExprResult ParseOpenMPIteratorsExpr();
/// Parses allocators and traits in the context of the uses_allocator clause.
/// Expected format:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
/// Parses clause with an interop variable of kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *DepModOrTailExpr = nullptr; ///< Dependence modifier or tail
///< expression; which one is clause-dependent.
SourceLocation ColonLoc; ///< Location of the ':' separator, if seen.
SourceLocation RLoc; ///< Location of the right paren closing the clause.
CXXScopeSpec ReductionOrMapperIdScopeSpec; ///< Scope qualifying the
///< reduction/mapper identifier.
DeclarationNameInfo ReductionOrMapperId; ///< Reduction identifier or mapper
///< name, if the clause has one.
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
MapTypeModifiers; ///< map-type-modifiers (always, close, mapper(...)).
SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
MapTypeModifiersLoc; ///< Locations parallel to MapTypeModifiers.
SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
MotionModifiers; ///< Motion modifiers (on to/from clauses, presumably).
SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc; ///< Locations parallel to MotionModifiers.
bool IsMapTypeImplicit = false; ///< True when no explicit map-type was written.
SourceLocation ExtraModifierLoc; ///< Location of ExtraModifier, when set.
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
/// Parse the given string as a type.
///
/// This is a dangerous utility function currently employed only by API notes.
/// It is not a general entry-point for safely parsing types from strings.
///
/// \param typeStr The string to be parsed as a type.
/// \param context The name of the context in which this string is being
/// parsed, which will be used in diagnostics.
/// \param includeLoc The location at which this parse was triggered.
TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
SourceLocation includeLoc);
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
ExprResult ParseBuiltinPtrauthTypeDiscriminator();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
/// Accumulates the qualifiers of a GNU inline-asm statement
/// (volatile, inline, goto) as a bitmask.
class GNUAsmQualifiers {
  unsigned Qualifiers = AQ_unspecified; ///< Bitwise OR of AQ flags.

public:
  /// Individual qualifier flags; combined via bitwise OR in Qualifiers.
  enum AQ {
    AQ_unspecified = 0,
    AQ_volatile = 1,
    AQ_inline = 2,
    AQ_goto = 4,
  };
  /// Returns the printable name of a single qualifier (defined out of line).
  static const char *getQualifierName(AQ Qualifier);
  /// Records \p Qualifier in the mask; the boolean result is defined out of
  /// line -- NOTE(review): presumably reports duplicates, confirm there.
  bool setAsmQualifier(AQ Qualifier);
  // Accessors below previously carried stray ';' after their bodies,
  // inconsistent with isGoto(); removed.
  inline bool isVolatile() const { return Qualifiers & AQ_volatile; }
  inline bool isInline() const { return Qualifiers & AQ_inline; }
  inline bool isGoto() const { return Qualifiers & AQ_goto; }
};
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
deprecate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE %
% D D E P P R R E C A A T E %
% D D EEE PPPPP RRRR EEE C AAAAA T EEE %
% D D E P R R E C A A T E %
% DDDD EEEEE P R R EEEEE CCCC A A T EEEEE %
% %
% %
% MagickCore Deprecated Methods %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
#define WIN32_LEAN_AND_MEAN
#define VC_EXTRALEAN
#include <windows.h>
#endif
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/geometry.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/semaphore-private.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/threshold.h"
#include "magick/thread_.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
#if !defined(MAGICKCORE_EXCLUDE_DEPRECATED)
/*
Global declarations.
*/
/*
  File-scope storage for a progress-monitor callback; NULL until one is
  registered (presumably via the deprecated SetMonitorHandler() API defined
  elsewhere in this file -- confirm against the full source).
*/
static MonitorHandler
monitor_handler = (MonitorHandler) NULL;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e C a c h e V i e w I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireCacheViewIndexes() returns the indexes associated with the specified
% view.
%
% Deprecated, replace with:
%
% GetCacheViewVirtualIndexQueue(cache_view);
%
% The format of the AcquireCacheViewIndexes method is:
%
% const IndexPacket *AcquireCacheViewIndexes(const CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
MagickExport const IndexPacket *AcquireCacheViewIndexes(
  const CacheView *cache_view)
{
  const IndexPacket
    *indexes;

  /* Deprecated shim: forwards unchanged to GetCacheViewVirtualIndexQueue(). */
  indexes=GetCacheViewVirtualIndexQueue(cache_view);
  return(indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireCacheViewPixels() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception);
%
% The format of the AcquireCacheViewPixels method is:
%
% const PixelPacket *AcquireCacheViewPixels(const CacheView *cache_view,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *AcquireCacheViewPixels(
  const CacheView *cache_view,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  const PixelPacket
    *pixels;

  /* Deprecated shim: identical semantics to GetCacheViewVirtualPixels(). */
  pixels=GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImagePixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in RAM, or in a memory-mapped file. The
% returned pointer should *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the AcquireImagePixels() and GetAuthenticPixels() methods are not
% thread-safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% Deprecated, replace with:
%
% GetVirtualPixels(image,x,y,columns,rows,exception);
%
% The format of the AcquireImagePixels() method is:
%
% const PixelPacket *AcquireImagePixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *AcquireImagePixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  const PixelPacket
    *region;

  /* Deprecated shim: delegate the virtual-pixel read to GetVirtualPixels(). */
  region=GetVirtualPixels(image,x,y,columns,rows,exception);
  return(region);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireIndexes() returns the black channel or the colormap indexes
% associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% Deprecated, replace with:
%
% GetVirtualIndexQueue(image);
%
% The format of the AcquireIndexes() method is:
%
% const IndexPacket *AcquireIndexes(const Image *image)
%
% A description of each parameter follows:
%
% o indexes: AcquireIndexes() returns the indexes associated with the last
% call to QueueAuthenticPixels() or GetVirtualPixels().
%
% o image: the image.
%
*/
MagickExport const IndexPacket *AcquireIndexes(const Image *image)
{
  const IndexPacket
    *index_queue;

  /* Deprecated shim: forwards unchanged to GetVirtualIndexQueue(). */
  index_queue=GetVirtualIndexQueue(image);
  return(index_queue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMemory() returns a pointer to a block of memory at least size bytes
% suitably aligned for any use.
%
% The format of the AcquireMemory method is:
%
% void *AcquireMemory(const size_t size)
%
% A description of each parameter follows:
%
% o size: the size of the memory in bytes to allocate.
%
*/
MagickExport void *AcquireMemory(const size_t size)
{
  /*
    Deprecated raw allocator: callers must check for a NULL return; a zero
    size is a precondition violation (caught by the assert in debug builds).
  */
  assert(size != 0);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  return(malloc(size));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e C a c h e V i e w P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneCacheViewPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead.
%
% Deprecated, replace with:
%
% GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception);
%
% The format of the AcquireOneCacheViewPixel method is:
%
% MagickBooleanType AcquireOneCacheViewPixel(const CacheView *cache_view,
% const ssize_t x,const ssize_t y,PixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y: These values define the offset of the pixel.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AcquireOneCacheViewPixel(
  const CacheView *cache_view,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /* Deprecated shim: identical semantics to GetOneCacheViewVirtualPixel(). */
  status=GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e C a c h e V i e w V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneCacheViewVirtualPixel() returns a single pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead.
%
% Deprecated, replace with:
%
% GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method,
% x,y,pixel,exception);
%
% The format of the AcquireOneCacheViewVirtualPixel method is:
%
% MagickBooleanType AcquireOneCacheViewVirtualPixel(
% const CacheView *cache_view,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the offset of the pixel.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AcquireOneCacheViewVirtualPixel(
  const CacheView *cache_view,const VirtualPixelMethod virtual_pixel_method,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  /*
    Deprecated shim: forwards unchanged to
    GetOneCacheViewVirtualMethodPixel().
  */
  return(GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method,x,
    y,pixel,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOnePixel() instead.
%
% Deprecated, replace with:
%
% MagickPixelPacket pixel;
% GetOneVirtualMagickPixel(image,x,y,&pixel,exception);
%
% The format of the AcquireOneMagickPixel() method is:
%
% MagickPixelPacket AcquireOneMagickPixel(const Image image,const ssize_t x,
% const ssize_t y,ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickPixelPacket AcquireOneMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  MagickPixelPacket
    one_pixel;

  /*
    Deprecated shim: delegate to GetOneVirtualMagickPixel() and hand the
    pixel back by value, discarding the status just as the legacy API did.
  */
  (void) GetOneVirtualMagickPixel(image,x,y,&one_pixel,exception);
  return(one_pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOnePixel() returns a single pixel at the specified (x,y) location.
% The image background color is returned if an error occurs. If you plan to
% modify the pixel, use GetOnePixel() instead.
%
% Deprecated, replace with:
%
% PixelPacket pixel;
% GetOneVirtualPixel(image,x,y,&pixel,exception);
%
% The format of the AcquireOnePixel() method is:
%
% PixelPacket AcquireOnePixel(const Image image,const ssize_t x,
% const ssize_t y,ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket AcquireOnePixel(const Image *image,const ssize_t x,
  const ssize_t y,ExceptionInfo *exception)
{
  PixelPacket
    one_pixel;

  /*
    Deprecated shim: delegate to GetOneVirtualPixel() and hand the pixel
    back by value, discarding the status just as the legacy API did.
  */
  (void) GetOneVirtualPixel(image,x,y,&one_pixel,exception);
  return(one_pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneVirtualPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOnePixel() instead.
%
% Deprecated, replace with:
%
% PixelPacket pixel;
% GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&pixel,exception);
%
% The format of the AcquireOneVirtualPixel() method is:
%
% PixelPacket AcquireOneVirtualPixel(const Image image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o virtual_pixel_method: the virtual pixel method.
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket AcquireOneVirtualPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  ExceptionInfo *exception)
{
  PixelPacket
    one_pixel;

  /*
    Deprecated shim: delegate to GetOneVirtualMethodPixel() and hand the
    pixel back by value, discarding the status just as the legacy API did.
  */
  (void) GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&one_pixel,
    exception);
  return(one_pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixels() returns the pixels associated with the last call to
% QueueAuthenticPixels() or GetVirtualPixels().
%
% Deprecated, replace with:
%
% GetVirtualPixelQueue(image);
%
% The format of the AcquirePixels() method is:
%
% const PixelPacket *AcquirePixels(const Image image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *AcquirePixels(const Image *image)
{
  const PixelPacket
    *pixel_queue;

  /* Deprecated shim: forwards unchanged to GetVirtualPixelQueue(). */
  pixel_queue=GetVirtualPixelQueue(image);
  return(pixel_queue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e S e m a p h o r e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireSemaphoreInfo() acquires a semaphore.
%
% The format of the AcquireSemaphoreInfo method is:
%
% void AcquireSemaphoreInfo(SemaphoreInfo **semaphore_info)
%
% A description of each parameter follows:
%
% o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
MagickExport void AcquireSemaphoreInfo(SemaphoreInfo **semaphore_info)
{
assert(semaphore_info != (SemaphoreInfo **) NULL);
/*
  Double-checked allocation: the first (unlocked) test skips the mutex on
  the common already-initialized path.  NOTE(review): the unlocked read of
  *semaphore_info has no explicit memory-ordering guarantee here; presumably
  the platform mutex primitives make this safe in practice -- confirm.
*/
if (*semaphore_info == (SemaphoreInfo *) NULL)
{
InitializeMagickMutex();
LockMagickMutex();
/*
  Re-check under the global mutex so that only one thread allocates the
  semaphore when several race past the first test.
*/
if (*semaphore_info == (SemaphoreInfo *) NULL)
*semaphore_info=AllocateSemaphoreInfo();
UnlockMagickMutex();
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffinityImage() replaces the colors of an image with the closest color from
% a reference image.
%
% Deprecated, replace with:
%
% RemapImage(quantize_info,image,affinity_image);
%
% The format of the AffinityImage method is:
%
% MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *affinity_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o affinity_image: the reference image.
%
*/
MagickExport MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *affinity_image)
{
  MagickBooleanType
    status;

  /* Deprecated shim: identical semantics to RemapImage(). */
  status=RemapImage(quantize_info,image,affinity_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n i t y I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffinityImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% Deprecated, replace with:
%
% RemapImages(quantize_info,images,affinity_image);
%
% The format of the AffinityImages method is:
%
% MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info,
% Image *images,Image *affinity_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o affinity_image: the reference image.
%
*/
MagickExport MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *affinity_image)
{
  MagickBooleanType
    status;

  /* Deprecated shim: identical semantics to RemapImages(). */
  status=RemapImages(quantize_info,images,affinity_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateImage() returns a pointer to an image structure initialized to
% default values.
%
% Deprecated, replace with:
%
% AcquireImage(image_info);
%
% The format of the AllocateImage method is:
%
% Image *AllocateImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
MagickExport Image *AllocateImage(const ImageInfo *image_info)
{
  Image
    *image;

  /* Deprecated shim: forwards unchanged to AcquireImage(). */
  image=AcquireImage(image_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AllocateImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% Deprecated, replace with:
%
% AcquireImageColormap(image,colors);
%
% The format of the AllocateImageColormap method is:
%
% MagickBooleanType AllocateImageColormap(Image *image,
% const size_t colors)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
*/
MagickExport MagickBooleanType AllocateImageColormap(Image *image,
  const size_t colors)
{
  MagickBooleanType
    status;

  /* Deprecated shim: identical semantics to AcquireImageColormap(). */
  status=AcquireImageColormap(image,colors);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% Deprecated, replace with:
%
% AcquireNextImage(image_info,image);
%
% The format of the AllocateNextImage method is:
%
% void AllocateNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AllocateNextImage(const ImageInfo *image_info,Image *image)
{
  /* Deprecated shim: forwards unchanged to AcquireNextImage(). */
  AcquireNextImage(image_info,image);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e S t r i n g %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateString() allocates memory for a string and copies the source string
% to that memory location (and returns it).
%
% The format of the AllocateString method is:
%
% char *AllocateString(const char *source)
%
% A description of each parameter follows:
%
% o source: A character string.
%
*/
MagickExport char *AllocateString(const char *source)
{
char
*destination;
size_t
length;
assert(source != (const char *) NULL);
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
/*
  Deliberately over-allocate by MaxTextExtent bytes beyond the copy's needs:
  legacy callers of this deprecated API append into the returned buffer in
  place, so do not "fix" this to strlen(source)+1.
*/
length=strlen(source)+MaxTextExtent+1;
destination=(char *) AcquireQuantumMemory(length,sizeof(*destination));
if (destination == (char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/* Terminate first so the buffer is a valid string even mid-copy. */
*destination='\0';
(void) CopyMagickString(destination,source,length);
return(destination);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A v e r a g e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AverageImages() takes a set of images and averages them together. Each
% image in the set must have the same width and height. AverageImages()
% returns a single image with each corresponding pixel component of each
% image averaged. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% Deprecated, replace with:
%
% EvaluateImages(images,MeanEvaluateOperator,exception);
%
% The format of the AverageImages method is:
%
% Image *AverageImages(Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AverageImages(const Image *images,ExceptionInfo *exception)
{
  Image
    *average_image;

  /* Deprecated shim: averaging is EvaluateImages() with the mean operator. */
  average_image=EvaluateImages(images,MeanEvaluateOperator,exception);
  return(average_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Extract a channel from the image. A channel is a particular color component
% of each pixel in the image.
%
% Deprecated, replace with:
%
% SeparateImageChannel(image,channel);
%
% The format of the ChannelImage method is:
%
% unsigned int ChannelImage(Image *image,const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channel to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, YellowChannel,
% or BlackChannel.
%
*/
MagickExport unsigned int ChannelImage(Image *image,const ChannelType channel)
{
  unsigned int
    status;

  /* Deprecated shim: identical semantics to SeparateImageChannel(). */
  status=SeparateImageChannel(image,channel);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChannelThresholdImage() changes the value of individual pixels based on
% the intensity of each pixel channel. The result is a high-contrast image.
%
% The format of the ChannelThresholdImage method is:
%
% unsigned int ChannelThresholdImage(Image *image,const char *level)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o level: define the threshold values.
%
*/
MagickExport unsigned int ChannelThresholdImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    threshold;

  unsigned int
    flags,
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (level == (char *) NULL)
    return(MagickFalse);
  /*
    Parse "rho[,sigma[,xi]]"; a channel whose value is omitted falls back to
    the red (rho) threshold.
  */
  flags=ParseGeometry(level,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=((flags & SigmaValue) != 0) ? geometry_info.sigma :
    geometry_info.rho;
  threshold.blue=((flags & XiValue) != 0) ? geometry_info.xi :
    geometry_info.rho;
  /*
    Bilevel each channel independently; the combined status is the bitwise
    AND of the per-channel results, as in the original implementation.
  */
  status=BilevelImageChannel(image,RedChannel,threshold.red);
  status&=BilevelImageChannel(image,GreenChannel,threshold.green);
  status&=BilevelImageChannel(image,BlueChannel,threshold.blue);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p P a t h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPathImage() sets the image clip mask based any clipping path information
% if it exists.
%
% Deprecated, replace with:
%
% ClipImagePath(image,pathname,inside);
%
% The format of the ClipPathImage method is:
%
% MagickBooleanType ClipPathImage(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
MagickExport MagickBooleanType ClipPathImage(Image *image,const char *pathname,
  const MagickBooleanType inside)
{
  MagickBooleanType
    status;

  /* Deprecated shim: identical semantics to ClipImagePath(). */
  status=ClipImagePath(image,pathname,inside);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e A t t r i b u t e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageAttributes() clones one or more image attributes.
%
% Deprecated, replace with:
%
% CloneImageProperties(image,clone_image);
%
% The format of the CloneImageAttributes method is:
%
% MagickBooleanType CloneImageAttributes(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageAttributes(Image *image,
  const Image *clone_image)
{
  MagickBooleanType
    status;

  /* Deprecated shim: attributes are now "properties". */
  status=CloneImageProperties(image,clone_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneMemory() copies size bytes from memory area source to the destination.
% Copying between objects that overlap will take place correctly. It returns
% destination.
%
% The format of the CloneMemory method is:
%
% void *CloneMemory(void *destination,const void *source,
% const size_t size)
%
% A description of each parameter follows:
%
% o destination: the destination.
%
% o source: the source.
%
% o size: the size of the memory in bytes to allocate.
%
*/
MagickExport void *CloneMemory(void *destination,const void *source,
  const size_t size)
{
  /*
    Copy size bytes from source to destination and return destination.  The
    documented contract promises correct copying between overlapping
    objects.

    BUG FIX: the previous overlap test,

      if ((p <= q) || ((p+size) >= q))
        return(memcpy(destination,source,size));

    was wrong in both directions.  When p > q, (p+size) >= q always holds,
    so memcpy() was taken on EVERY call and the backward-copy loop below it
    was dead code.  And when p <= q with the regions overlapping -- exactly
    the case that requires a backward copy -- memcpy() was still used, which
    is undefined behavior on overlapping buffers.  memmove() implements the
    documented overlap-safe semantics directly.
  */
  assert(destination != (void *) NULL);
  assert(source != (const void *) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  return(memmove(destination,source,size));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o s e C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloseCacheView() closes the specified view returned by a previous call to
% OpenCacheView().
%
% Deprecated, replace with:
%
% DestroyCacheView(view_info);
%
% The format of the CloseCacheView method is:
%
% CacheView *CloseCacheView(CacheView *view_info)
%
% A description of each parameter follows:
%
% o view_info: the address of a structure of type CacheView.
%
*/
MagickExport CacheView *CloseCacheView(CacheView *view_info)
{
  CacheView
    *view;

  /* Deprecated shim: forwards unchanged to DestroyCacheView(). */
  view=DestroyCacheView(view_info);
  return(view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorFloodfill() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now
% interpreted as the same color for the purposes of the floodfill.
%
% The format of the ColorFloodfillImage method is:
%
% MagickBooleanType ColorFloodfillImage(Image *image,
% const DrawInfo *draw_info,const PixelPacket target,
% const ssize_t x_offset,const ssize_t y_offset,const PaintMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x,y: the starting location of the operation.
%
% o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
/* Maximum number of pending scan-line segments on the flood-fill stack. */
#define MaxStacksize (1UL << 15)
/*
  Push the scan-line segment [left,right] of row `up', with vertical search
  direction `delta', onto the segment stack `s'.  Segments whose target row
  (up+delta) falls outside the image are silently dropped; pushing past
  MaxStacksize raises a SegmentStackOverflow draw error.  NOTE: y2 stores
  the direction delta, not a coordinate -- the pop site adds it back to y1.
*/
#define PushSegmentStack(up,left,right,delta) \
{ \
if (s >= (segment_stack+MaxStacksize)) \
ThrowBinaryImageException(DrawError,"SegmentStackOverflow",image->filename) \
else \
{ \
if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
{ \
s->x1=(double) (left); \
s->y1=(double) (up); \
s->x2=(double) (right); \
s->y2=(double) (delta); \
s++; \
} \
} \
}
MagickExport MagickBooleanType ColorFloodfillImage(Image *image,
const DrawInfo *draw_info,const PixelPacket target,const ssize_t x_offset,
const ssize_t y_offset,const PaintMethod method)
{
Image
*floodplane_image;
MagickBooleanType
skip;
PixelPacket
fill_color;
register SegmentInfo
*s;
SegmentInfo
*segment_stack;
ssize_t
offset,
start,
x,
x1,
x2,
y;
/*
Check boundary conditions.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
return(MagickFalse);
if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
/*
Phase one marks the fill region on a clone ("floodplane") by setting
marked pixels transparent; the source image is untouched until phase two.
*/
floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (floodplane_image == (Image *) NULL)
return(MagickFalse);
(void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
/*
Set floodfill color.
*/
segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
sizeof(*segment_stack));
if (segment_stack == (SegmentInfo *) NULL)
{
floodplane_image=DestroyImage(floodplane_image);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Push initial segment on stack.
*/
x=x_offset;
y=y_offset;
start=0;
s=segment_stack;
PushSegmentStack(y,x,x,1);
PushSegmentStack(y+1,x,x,-1);
while (s > segment_stack)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Pop segment off stack.
*/
s--;
x1=(ssize_t) s->x1;
x2=(ssize_t) s->x2;
offset=(ssize_t) s->y2;
y=(ssize_t) s->y1+offset;
/*
Recolor neighboring pixels.
*/
/*
Scan left from x1: mark matching, not-yet-marked pixels transparent on
the floodplane.  A transparent opacity doubles as the "visited" flag.
*/
p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception);
q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1,
&image->exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
p+=x1;
q+=x1;
for (x=x1; x >= 0; x--)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
if (method == FloodfillMethod)
{
if (IsColorSimilar(image,p,&target) == MagickFalse)
break;
}
else
if (IsColorSimilar(image,p,&target) != MagickFalse)
break;
q->opacity=(Quantum) TransparentOpacity;
p--;
q--;
}
if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
break;
/*
skip is set when the leftward scan stopped immediately (nothing filled
left of x1); otherwise push the leaked span above/below.
*/
skip=x >= x1 ? MagickTrue : MagickFalse;
if (skip == MagickFalse)
{
start=x+1;
if (start < x1)
PushSegmentStack(y,start,x1-1,-offset);
x=x1+1;
}
do
{
if (skip == MagickFalse)
{
/*
Scan right from x, marking matching pixels, then push the filled
span in the search direction (and any overhang back the other way).
*/
if (x < (ssize_t) image->columns)
{
p=GetVirtualPixels(image,x,y,image->columns-x,1,
&image->exception);
q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1,
&image->exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for ( ; x < (ssize_t) image->columns; x++)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
if (method == FloodfillMethod)
{
if (IsColorSimilar(image,p,&target) == MagickFalse)
break;
}
else
if (IsColorSimilar(image,p,&target) != MagickFalse)
break;
q->opacity=(Quantum) TransparentOpacity;
p++;
q++;
}
if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
break;
}
PushSegmentStack(y,start,x-1,offset);
if (x > (x2+1))
PushSegmentStack(y,x2+1,x-1,-offset);
}
skip=MagickFalse;
x++;
/*
Advance past the non-matching gap to find the next fillable run
within [x, x2].
*/
if (x <= x2)
{
p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1,
&image->exception);
q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1,
&image->exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for ( ; x <= x2; x++)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
if (method == FloodfillMethod)
{
if (IsColorSimilar(image,p,&target) != MagickFalse)
break;
}
else
if (IsColorSimilar(image,p,&target) == MagickFalse)
break;
p++;
q++;
}
}
start=x;
} while (x <= x2);
}
/*
Phase two: composite the fill color onto every source pixel the
floodplane marked transparent.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Tile fill color onto floodplane.
*/
p=GetVirtualPixels(floodplane_image,0,y,image->columns,1,
&image->exception);
q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(p) != OpaqueOpacity)
{
(void) GetFillColor(draw_info,x,y,&fill_color);
MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
(MagickRealType) q->opacity,q);
}
p++;
q++;
}
if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
break;
}
segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
floodplane_image=DestroyImage(floodplane_image);
/* Success only if phase two completed every row without a cache error. */
return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s t i t u t e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteComponentGenesis() instantiates the constitute component.
%
% The format of the ConstituteComponentGenesis method is:
%
% MagickBooleanType ConstituteComponentGenesis(void)
%
*/
MagickExport MagickBooleanType ConstituteComponentGenesis(void)
{
  /* Nothing to initialize here; report success unconditionally. */
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s t i t u t e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteComponentTerminus() destroys the constitute component.
%
% The format of the ConstituteComponentTerminus method is:
%
% void ConstituteComponentTerminus(void)
%
*/
MagickExport void ConstituteComponentTerminus(void)
{
/* Intentionally empty: ConstituteComponentGenesis() allocates nothing. */
}
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o H B i t m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToHBITMAP() extracts a specified region of the image and returns
% it as a Windows HBITMAP. While the same functionality can be accomplished by
% invoking CropImage() followed by ImageToHBITMAP(), this method is more
% efficient since it copies pixels directly to the HBITMAP.
%
% The format of the CropImageToHBITMAP method is:
%
% HBITMAP CropImageToHBITMAP(Image* image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *CropImageToHBITMAP(Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define CropImageTag  "Crop/Image"

  BITMAP
    bitmap;

  HBITMAP
    bitmapH;

  HANDLE
    bitmap_bitsH;

  MagickBooleanType
    proceed;

  RectangleInfo
    page;

  register const PixelPacket
    *p;

  register RGBQUAD
    *q;

  RGBQUAD
    *bitmap_bits;

  ssize_t
    y;

  /*
    Check crop geometry: the requested region must intersect the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (((geometry->x+(ssize_t) geometry->width) < 0) ||
      ((geometry->y+(ssize_t) geometry->height) < 0) ||
      (geometry->x >= (ssize_t) image->columns) ||
      (geometry->y >= (ssize_t) image->rows))
    ThrowImageException(OptionError,"GeometryDoesNotContainImage");
  /*
    Clip the crop rectangle to the image bounds.
  */
  page=(*geometry);
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if (page.x < 0)
    {
      page.width+=page.x;
      page.x=0;
    }
  if (page.y < 0)
    {
      page.height+=page.y;
      page.y=0;
    }
  if ((page.width == 0) || (page.height == 0))
    ThrowImageException(OptionError,"GeometryDimensionsAreZero");
  /*
    Initialize 32-bit BGRA bitmap attributes.
  */
  bitmap.bmType = 0;
  bitmap.bmWidth = (LONG) page.width;
  bitmap.bmHeight = (LONG) page.height;
  bitmap.bmWidthBytes = bitmap.bmWidth * 4;
  bitmap.bmPlanes = 1;
  bitmap.bmBitsPixel = 32;
  bitmap.bmBits = NULL;
  /*
    Allocate one RGBQUAD per pixel.  bmBitsPixel counts *bits* per pixel,
    so divide by 8 for the byte count; the previous code multiplied by the
    bit count and over-allocated the buffer by a factor of eight.
  */
  bitmap_bitsH=(HANDLE) GlobalAlloc(GMEM_MOVEABLE | GMEM_DDESHARE,page.width*
    page.height*(bitmap.bmBitsPixel >> 3));
  if (bitmap_bitsH == NULL)
    return(NULL);
  bitmap_bits=(RGBQUAD *) GlobalLock((HGLOBAL) bitmap_bitsH);
  if (bitmap_bits == (RGBQUAD *) NULL)
    {
      /* GlobalLock() can fail; don't dereference a NULL pixel buffer. */
      GlobalFree((HGLOBAL) bitmap_bitsH);
      return(NULL);
    }
  bitmap.bmBits=bitmap_bits;
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    SetImageColorspace(image,sRGBColorspace);
  /*
    Extract crop image: copy the region row by row into the DIB buffer.
  */
  q=bitmap_bits;
  for (y=0; y < (ssize_t) page.height; y++)
  {
    register ssize_t
      x;

    p=GetVirtualPixels(image,page.x,page.y+y,page.width,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /* Transfer pixels, scaling each quantum down to an 8-bit channel. */
    for( x=(ssize_t) page.width ; x> 0 ; x-- )
    {
      q->rgbRed = ScaleQuantumToChar(GetPixelRed(p));
      q->rgbGreen = ScaleQuantumToChar(GetPixelGreen(p));
      q->rgbBlue = ScaleQuantumToChar(GetPixelBlue(p));
      q->rgbReserved = 0;
      p++;
      q++;
    }
    proceed=SetImageProgress(image,CropImageTag,y,page.height);
    if (proceed == MagickFalse)
      break;
  }
  if (y < (ssize_t) page.height)
    {
      /* The copy was aborted or a row read failed; release the buffer. */
      GlobalUnlock((HGLOBAL) bitmap_bitsH);
      GlobalFree((HGLOBAL) bitmap_bitsH);
      return((void *) NULL);
    }
  bitmap.bmBits=bitmap_bits;
  bitmapH=CreateBitmapIndirect(&bitmap);
  GlobalUnlock((HGLOBAL) bitmap_bitsH);
  GlobalFree((HGLOBAL) bitmap_bitsH);
  return((void *) bitmapH);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageAttribute() deletes an attribute from the image.
%
% Deprecated, replace with:
%
% DeleteImageProperty(image,key);
%
% The format of the DeleteImageAttribute method is:
%
% MagickBooleanType DeleteImageAttribute(Image *image,const char *key)
%
% A description of each parameter follows:
%
% o image: the image info.
%
% o key: the image key.
%
*/
MagickExport MagickBooleanType DeleteImageAttribute(Image *image,
  const char *key)
{
  /*
    Deprecated: forward to DeleteImageProperty().
  */
  MagickBooleanType
    status;

  status=DeleteImageProperty(image,key);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageList() deletes an image at the specified position in the list.
%
% The format of the DeleteImageList method is:
%
% unsigned int DeleteImageList(Image *images,const ssize_t offset)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o offset: the position within the list.
%
*/
MagickExport unsigned int DeleteImageList(Image *images,const ssize_t offset)
{
  /*
    Deprecated: delete the image at position `offset' in the list.
    Returns MagickFalse when the list has fewer than `offset'+1 images.
  */
  register ssize_t
    i;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  /* Rewind to the head of the list before counting forward. */
  while (GetPreviousImageInList(images) != (Image *) NULL)
    images=GetPreviousImageInList(images);
  i=0;
  while (i < offset)
  {
    if (GetNextImageInList(images) == (Image *) NULL)
      return(MagickFalse);
    images=GetNextImageInList(images);
    i++;
  }
  DeleteImageFromList(&images);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteMagickRegistry() deletes an entry in the registry as defined by the id.
% It returns MagickTrue if the entry is deleted otherwise MagickFalse if no
% entry is found in the registry that matches the id.
%
% Deprecated, replace with:
%
% char key[MaxTextExtent];
% FormatLocaleString(key,MaxTextExtent,"%ld\n",id);
% DeleteImageRegistry(key);
%
% The format of the DeleteMagickRegistry method is:
%
% MagickBooleanType DeleteMagickRegistry(const ssize_t id)
%
% A description of each parameter follows:
%
% o id: the registry id.
%
*/
MagickExport MagickBooleanType DeleteMagickRegistry(const ssize_t id)
{
  /*
    Deprecated: the registry is keyed by strings now, so render the numeric
    id as text and delegate to DeleteImageRegistry().
  */
  char
    key[MaxTextExtent];

  MagickBooleanType
    status;

  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  status=DeleteImageRegistry(key);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C o n s t i t u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyConstitute() destroys the constitute component.
%
% The format of the DestroyConstitute method is:
%
% DestroyConstitute(void)
%
*/
MagickExport void DestroyConstitute(void)
{
  /* Deprecated no-op; the constitute component owns no state to destroy. */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMagickRegistry() deallocates memory associated the magick registry.
%
% Deprecated, replace with:
%
% RegistryComponentTerminus();
%
% The format of the DestroyMagickRegistry method is:
%
% void DestroyMagickRegistry(void)
%
*/
MagickExport void DestroyMagickRegistry(void)
{
  /* Deprecated: delegate teardown to RegistryComponentTerminus(). */
  RegistryComponentTerminus();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s c r i b e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DescribeImage() describes an image by printing its attributes to the file.
% Attributes include the image width, height, size, and others.
%
% Deprecated, replace with:
%
% IdentifyImage(image,file,verbose);
%
% The format of the DescribeImage method is:
%
% MagickBooleanType DescribeImage(Image *image,FILE *file,
% const MagickBooleanType verbose)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o file: the file, typically stdout.
%
% o verbose: A value other than zero prints more detailed information
% about the image.
%
*/
MagickExport MagickBooleanType DescribeImage(Image *image,FILE *file,
  const MagickBooleanType verbose)
{
  /*
    Deprecated: forward to IdentifyImage().
  */
  MagickBooleanType
    status;

  status=IdentifyImage(image,file,verbose);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e A t t r i b u t e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageAttributes() deallocates memory associated with the image
% attribute list.
%
% The format of the DestroyImageAttributes method is:
%
% DestroyImageAttributes(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageAttributes(Image *image)
{
  /*
    Release the image's attribute splay-tree, if one was ever allocated.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->attributes == (void *) NULL)
    return;
  image->attributes=(void *) DestroySplayTree((SplayTreeInfo *)
    image->attributes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImages() destroys an image list.
%
% Deprecated, replace with:
%
% DestroyImageList(image);
%
% The format of the DestroyImages method is:
%
% void DestroyImages(Image *image)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
*/
MagickExport void DestroyImages(Image *image)
{
  /*
    Deprecated: destroy the whole image list via DestroyImageList().
  */
  if (image != (Image *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),
          "last use: v5.4.3");
      image=DestroyImageList(image);
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a g i c k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMagick() destroys the ImageMagick environment.
%
% Deprecated, replace with:
%
% MagickCoreTerminus();
%
% The format of the DestroyMagick function is:
%
% DestroyMagick(void)
%
*/
MagickExport void DestroyMagick(void)
{
  /* Deprecated: delegate environment teardown to MagickCoreTerminus(). */
  MagickCoreTerminus();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s p a t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DispatchImage() extracts pixel data from an image and returns it to you.
% The method returns MagickFalse on success otherwise MagickTrue if an error is
% encountered. The data is returned as char, short int, int, ssize_t, float,
% or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% DispatchImage(image,0,0,640,1,"RGB",CharPixel,pixels,exception);
%
% Deprecated, replace with:
%
% ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,pixels,
% exception);
%
% The format of the DispatchImage method is:
%
% unsigned int DispatchImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,const size_t columns,
% const size_t rows,const char *map,const StorageType type,
% void *pixels,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset, y_offset, columns, rows: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha, C = cyan, Y = yellow, M = magenta, K = black, or
% I = intensity (for grayscale).
%
% o type: Define the data type of the pixels. Float and double types are
% normalized to [0..1] otherwise [0..QuantumRange]. Choose from these
% types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel, or
% DoublePixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int DispatchImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const char *map,const StorageType type,void *pixels,ExceptionInfo *exception)
{
  /*
    Deprecated: forward directly to ExportImagePixels().
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  return(ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,
    pixels,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t r a c t S u b i m a g e F r o m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtractSubimageFromImage() extracts a region of the image that most
% closely resembles the reference.
%
% The format of the ExtractSubimageFromImage method is:
%
% Image *ExtractSubimageFromImage(const Image *image,
% const Image *reference,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetSimilarityMetric() returns a normalized root-sum-of-squares pixel
  distance between `reference' and the region of `image' at
  (x_offset,y_offset).  Scanning stops early once the running metric
  exceeds `similarity_threshold'.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const ssize_t x_offset,const ssize_t y_offset,
  const double similarity_threshold,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reference_view;
  double
    channels,
    normalized_similarity,
    similarity;
  ssize_t
    y;
  /*
    Compute the similarity in pixels between two images.
  */
  normalized_similarity=1.0;
  similarity=0.0;
  channels=3;
  /* Opacity and black (CMYK) channels only count when both images have them. */
  if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
    channels++;
  if ((image->colorspace == CMYKColorspace) &&
      (reference->colorspace == CMYKColorspace))
    channels++;
  image_view=AcquireVirtualCacheView(image,exception);
  reference_view=AcquireVirtualCacheView(reference,exception);
  for (y=0; y < (ssize_t) reference->rows; y++)
  {
    register const IndexPacket
      *indexes,
      *reference_indexes;
    register const PixelPacket
      *p,
      *q;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset+y,
      reference->columns,1,exception);
    q=GetCacheViewVirtualPixels(reference_view,0,y,reference->columns,1,
      exception);
    /* A failed row read is skipped rather than aborting the whole metric. */
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reference_indexes=GetCacheViewVirtualIndexQueue(reference_view);
    for (x=0; x < (ssize_t) reference->columns; x++)
    {
      MagickRealType
        pixel;
      /* Accumulate squared, quantum-normalized channel differences. */
      pixel=QuantumScale*(GetPixelRed(p)-(double)
        GetPixelRed(q));
      similarity+=pixel*pixel;
      pixel=QuantumScale*(GetPixelGreen(p)-(double)
        GetPixelGreen(q));
      similarity+=pixel*pixel;
      pixel=QuantumScale*(GetPixelBlue(p)-(double)
        GetPixelBlue(q));
      similarity+=pixel*pixel;
      if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
        {
          pixel=QuantumScale*(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          similarity+=pixel*pixel;
        }
      if ((image->colorspace == CMYKColorspace) &&
          (reference->colorspace == CMYKColorspace))
        {
          pixel=QuantumScale*(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reference_indexes+x));
          similarity+=pixel*pixel;
        }
      p++;
      q++;
    }
    /*
      Renormalize after every row so the scan can bail out as soon as this
      candidate position is already worse than the best seen so far.
    */
    normalized_similarity=sqrt(similarity)/reference->columns/reference->rows/
      channels;
    if (normalized_similarity > similarity_threshold)
      break;
  }
  reference_view=DestroyCacheView(reference_view);
  image_view=DestroyCacheView(image_view);
  return(normalized_similarity);
}
MagickExport Image *ExtractSubimageFromImage(Image *image,
  const Image *reference,ExceptionInfo *exception)
{
  double
    similarity_threshold;

  RectangleInfo
    offset;

  ssize_t
    y;

  /*
    Extract reference from image: scan every candidate offset, keep the one
    with the lowest similarity metric, and crop it out.
  */
  if ((reference->columns > image->columns) || (reference->rows > image->rows))
    return((Image *) NULL);
  /* Seed the threshold with a value larger than any possible metric. */
  similarity_threshold=(double) image->columns*image->rows;
  SetGeometry(reference,&offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows); y++)
  {
    double
      similarity;
    register ssize_t
      x;
    for (x=0; x < (ssize_t) (image->columns-reference->columns); x++)
    {
      /*
        NOTE(review): similarity_threshold is read here outside the critical
        section while other threads may update it below — looks like a benign
        data race used only as an early-exit hint; confirm intent.
      */
      similarity=GetSimilarityMetric(image,reference,x,y,similarity_threshold,
        exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ExtractSubimageFromImage)
#endif
      /* Track the best (lowest-metric) offset found so far. */
      if (similarity < similarity_threshold)
        {
          similarity_threshold=similarity;
          offset.x=x;
          offset.y=y;
        }
    }
  }
  /* Reject the match when the best metric exceeds the fuzz tolerance. */
  if (similarity_threshold > (QuantumScale*reference->fuzz/100.0))
    return((Image *) NULL);
  return(CropImage(image,&offset,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l a t t e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlattenImages() Obsolete Function: Use MergeImageLayers() instead.
%
% Deprecated, replace with:
%
% MergeImageLayers(image,FlattenLayer,exception);
%
% The format of the FlattenImages method is:
%
%      Image *FlattenImages(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlattenImages(Image *image,ExceptionInfo *exception)
{
  /*
    Deprecated: equivalent to MergeImageLayers() with FlattenLayer.
  */
  Image
    *flatten_image;

  flatten_image=MergeImageLayers(image,FlattenLayer,exception);
  return(flatten_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatImageAttribute() permits formatted key/value pairs to be saved as an
% image attribute.
%
% The format of the FormatImageAttribute method is:
%
% MagickBooleanType FormatImageAttribute(Image *image,const char *key,
% const char *format,...)
%
% A description of each parameter follows.
%
% o image: The image.
%
% o key: The attribute key.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport MagickBooleanType FormatImageAttributeList(Image *image,
  const char *key,const char *format,va_list operands)
{
  /*
    Format `operands' per `format' and store the result as image property
    `key'.
  */
  char
    value[MaxTextExtent];

  int
    count;

#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  count=vsnprintf(value,MaxTextExtent,format,operands);
#else
  count=vsprintf(value,format,operands);
#endif
  if (count < 0)
    value[MaxTextExtent-1]='\0';  /* formatting failed; force termination */
  return(SetImageProperty(image,key,value));
}
MagickExport MagickBooleanType FormatImagePropertyList(Image *image,
  const char *property,const char *format,va_list operands)
{
  /*
    Format `operands' per `format' and store the result as the named image
    property.
  */
  char
    value[MaxTextExtent];

  int
    count;

#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  count=vsnprintf(value,MaxTextExtent,format,operands);
#else
  count=vsprintf(value,format,operands);
#endif
  if (count < 0)
    value[MaxTextExtent-1]='\0';  /* formatting failed; force termination */
  return(SetImageProperty(image,property,value));
}
MagickExport MagickBooleanType FormatImageAttribute(Image *image,
  const char *key,const char *format,...)
{
  /*
    Variadic front end: format the arguments into a buffer and store it as
    image property `key'.
  */
  char
    value[MaxTextExtent];

  va_list
    operands;

  va_start(operands,format);
  (void) FormatLocaleStringList(value,MaxTextExtent,format,operands);
  va_end(operands);
  return(SetImageProperty(image,key,value));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t M a g i c k S t r i n g %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatMagickString() prints formatted output of a variable argument list.
%
% The format of the FormatMagickString method is:
%
% ssize_t FormatMagickString(char *string,const size_t length,
% const char *format,...)
%
% A description of each parameter follows.
%
% o string: FormatMagickString() returns the formatted string in this
% character buffer.
%
% o length: the maximum length of the string.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport ssize_t FormatMagickStringList(char *string,const size_t length,
  const char *format,va_list operands)
{
  /*
    Print a formatted variable-argument list into `string' (at most `length'
    bytes) and return the count reported by the underlying printf.
  */
  int
    count;

#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  count=vsnprintf(string,length,format,operands);
#else
  count=vsprintf(string,format,operands);
#endif
  if (count < 0)
    string[length-1]='\0';  /* formatting failed; force termination */
  return((ssize_t) count);
}
MagickExport ssize_t FormatMagickString(char *string,const size_t length,
  const char *format,...)
{
  /*
    Variadic front end for FormatMagickStringList().
  */
  ssize_t
    count;

  va_list
    operands;

  va_start(operands,format);
  count=(ssize_t) FormatMagickStringList(string,length,format,operands);
  va_end(operands);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t S t r i n g %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatString() prints formatted output of a variable argument list.
%
% The format of the FormatString method is:
%
% void FormatString(char *string,const char *format,...)
%
% A description of each parameter follows.
%
% o string: Method FormatString returns the formatted string in this
% character buffer.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport void FormatStringList(char *string,const char *format,
  va_list operands)
{
  /*
    Deprecated: format `operands' into a MaxTextExtent-sized buffer.
  */
  int
    count;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  count=vsnprintf(string,MaxTextExtent,format,operands);
#else
  count=vsprintf(string,format,operands);
#endif
  if (count < 0)
    string[MaxTextExtent-1]='\0';  /* formatting failed; force termination */
}
MagickExport void FormatString(char *string,const char *format,...)
{
  /*
    Deprecated variadic front end; the result is always written into a
    MaxTextExtent-sized buffer.
  */
  va_list
    operands;

  va_start(operands,format);
  (void) FormatLocaleStringList(string,MaxTextExtent,format,operands);
  va_end(operands);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F u z z y C o l o r M a t c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FuzzyColorMatch() returns true if two pixels are identical in color.
%
% The format of the FuzzyColorMatch method is:
%
%      unsigned int FuzzyColorMatch(const PixelPacket *p,const PixelPacket *q,
%        const double fuzz)
%
% A description of each parameter follows:
%
% o p: Pixel p.
%
% o q: Pixel q.
%
%    o fuzz: Define how much tolerance is acceptable to consider
%      two colors as the same.
%
*/
MagickExport unsigned int FuzzyColorMatch(const PixelPacket *p,
  const PixelPacket *q,const double fuzz)
{
  /*
    Return MagickTrue when the RGB distance between two pixels is within
    the squared `fuzz' tolerance.
  */
  MagickPixelPacket
    pixel;

  register MagickRealType
    distance;

  /* Exact comparison short-circuits when no tolerance was requested. */
  if ((fuzz == 0.0) && (GetPixelRed(p) == GetPixelRed(q)) &&
      (GetPixelGreen(p) == GetPixelGreen(q)) &&
      (GetPixelBlue(p) == GetPixelBlue(q)))
    return(MagickTrue);
  /*
    Accumulate squared channel differences, bailing out as soon as the
    running distance exceeds the squared tolerance.
  */
  pixel.red=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
  distance=pixel.red*pixel.red;
  if (distance > (fuzz*fuzz))
    return(MagickFalse);
  pixel.green=GetPixelGreen(p)-(MagickRealType) GetPixelGreen(q);
  distance+=pixel.green*pixel.green;
  if (distance > (fuzz*fuzz))
    return(MagickFalse);
  pixel.blue=GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q);
  distance+=pixel.blue*pixel.blue;
  if (distance > (fuzz*fuzz))
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F u z z y C o l o r C o m p a r e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FuzzyColorCompare() returns MagickTrue if the distance between two colors is
% less than the specified distance in a linear three dimensional color space.
% This method is used by ColorFloodFill() and other algorithms which
% compare two colors.
%
% The format of the FuzzyColorCompare method is:
%
% void FuzzyColorCompare(const Image *image,const PixelPacket *p,
% const PixelPacket *q)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o p: Pixel p.
%
% o q: Pixel q.
%
*/
MagickExport MagickBooleanType FuzzyColorCompare(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  /*
    Deprecated: forward to IsColorSimilar().
  */
  MagickBooleanType
    status;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5");
  status=IsColorSimilar(image,p,q);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F u z z y O p a c i t y C o m p a r e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FuzzyOpacityCompare() returns true if the distance between two opacity
% values is less than the specified distance in a linear color space. This
% method is used by MatteFloodFill() and other algorithms which compare
% two opacity values.
%
% Deprecated, replace with:
%
% IsOpacitySimilar(image,p,q);
%
% The format of the FuzzyOpacityCompare method is:
%
% void FuzzyOpacityCompare(const Image *image,const PixelPacket *p,
% const PixelPacket *q)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o p: Pixel p.
%
% o q: Pixel q.
%
*/
MagickExport MagickBooleanType FuzzyOpacityCompare(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  /*
    Deprecated: forward to IsOpacitySimilar().
  */
  MagickBooleanType
    status;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5");
  status=IsOpacitySimilar(image,p,q);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C o n f i g u r e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetConfigureBlob() returns the specified configure file as a blob.
%
% The format of the GetConfigureBlob method is:
%
% void *GetConfigureBlob(const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o filename: the configure file name.
%
% o path: return the full path information of the configure file.
%
% o length: This pointer to a size_t integer sets the initial length of the
% blob. On return, it reflects the actual length of the blob.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetConfigureBlob(const char *filename,char *path,
  size_t *length,ExceptionInfo *exception)
{
  void
    *blob;

  assert(filename != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",filename);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  assert(path != (char *) NULL);
  assert(length != (size_t *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  blob=(void *) NULL;
  /* `path' doubles as an out-parameter: it holds the last path probed. */
  (void) CopyMagickString(path,filename,MaxTextExtent);
#if defined(MAGICKCORE_INSTALLED_SUPPORT)
#if defined(MAGICKCORE_LIBRARY_PATH)
  if (blob == (void *) NULL)
    {
      /*
        Search hard coded paths.
      */
      (void) FormatLocaleString(path,MaxTextExtent,"%s%s",
        MAGICKCORE_LIBRARY_PATH,filename);
      if (IsPathAccessible(path) != MagickFalse)
        blob=FileToBlob(path,~0UL,length,exception);
    }
#endif
#if defined(MAGICKCORE_WINDOWS_SUPPORT) && !(defined(MAGICKCORE_CONFIGURE_PATH) || defined(MAGICKCORE_SHARE_PATH))
  if (blob == (void *) NULL)
    {
      unsigned char
        *key_value;
      /*
        Locate file via registry key.
      */
      key_value=NTRegistryKeyLookup("ConfigurePath");
      if (key_value != (unsigned char *) NULL)
        {
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",(char *)
            key_value,DirectorySeparator,filename);
          if (IsPathAccessible(path) != MagickFalse)
            blob=FileToBlob(path,~0UL,length,exception);
        }
    }
#endif
#else
  /* Uninstalled build: probe environment, home, and client directories. */
  if (blob == (void *) NULL)
    {
      char
        *home;
      home=GetEnvironmentValue("MAGICK_HOME");
      if (home != (char *) NULL)
        {
          /*
            Search MAGICK_HOME.
          */
#if !defined(MAGICKCORE_POSIX_SUPPORT)
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",home,
            DirectorySeparator,filename);
#else
          (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",home,
            MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
          if (IsPathAccessible(path) != MagickFalse)
            blob=FileToBlob(path,~0UL,length,exception);
          home=DestroyString(home);
        }
      home=GetEnvironmentValue("HOME");
      if (home == (char *) NULL)
        home=GetEnvironmentValue("USERPROFILE");
      if (home != (char *) NULL)
        {
          /*
            Search $HOME/.magick.
          */
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s.magick%s%s",home,
            DirectorySeparator,DirectorySeparator,filename);
          if ((IsPathAccessible(path) != MagickFalse) && (blob == (void *) NULL))
            blob=FileToBlob(path,~0UL,length,exception);
          home=DestroyString(home);
        }
    }
  if ((blob == (void *) NULL) && (*GetClientPath() != '\0'))
    {
#if !defined(MAGICKCORE_POSIX_SUPPORT)
      (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",GetClientPath(),
        DirectorySeparator,filename);
#else
      char
        prefix[MaxTextExtent];
      /*
        Search based on executable directory if directory is known.
      */
      (void) CopyMagickString(prefix,GetClientPath(),
        MaxTextExtent);
      ChopPathComponents(prefix,1);
      (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",prefix,
        MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
      if (IsPathAccessible(path) != MagickFalse)
        blob=FileToBlob(path,~0UL,length,exception);
    }
  /*
    Search current directory.
  */
  if ((blob == (void *) NULL) && (IsPathAccessible(path) != MagickFalse))
    blob=FileToBlob(path,~0UL,length,exception);
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
  /*
    Search Windows registry.
  */
  if (blob == (void *) NULL)
    blob=NTResourceToBlob(filename);
#endif
#endif
  /* Every candidate location failed; report a configure warning. */
  if (blob == (void *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),ConfigureWarning,
      "UnableToOpenConfigureFile","`%s'",path);
  return(blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCacheView() gets pixels from the in-memory or disk pixel cache as
% defined by the geometry parameters. A pointer to the pixels is returned if
% the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
% GetCacheViewException(cache_view));
%
% The format of the GetCacheView method is:
%
% PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o cache_view: the address of a structure of type CacheView.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  /*
    Deprecated: forward to GetCacheViewAuthenticPixels().
  */
  return(GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C a c h e V i e w I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCacheViewIndexes() returns the indexes associated with the specified
% view.
%
% Deprecated, replace with:
%
% GetCacheViewAuthenticIndexQueue(cache_view);
%
% The format of the GetCacheViewIndexes method is:
%
% IndexPacket *GetCacheViewIndexes(CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
MagickExport IndexPacket *GetCacheViewIndexes(CacheView *cache_view)
{
  /*
    Deprecated: forward to GetCacheViewAuthenticIndexQueue().
  */
  IndexPacket
    *indexes;

  indexes=GetCacheViewAuthenticIndexQueue(cache_view);
  return(indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as
% defined by the geometry parameters. A pointer to the pixels is returned if
% the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
% GetCacheViewException(cache_view));
%
% The format of the GetCacheViewPixels method is:
%
% PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  /*
    Deprecated: forward to GetCacheViewAuthenticPixels().
  */
  return(GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t E x c e p t i o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetExceptionInfo() initializes an exception to default values.
%
% The format of the GetExceptionInfo method is:
%
% GetExceptionInfo(ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o exception: the exception info.
%
*/
MagickExport void GetExceptionInfo(ExceptionInfo *exception)
{
  /*
    Deprecated: reset the exception structure to a pristine, fully
    initialized state (zeroed, with a fresh exception list and semaphore).
  */
  assert(exception != (ExceptionInfo *) NULL);
  (void) memset(exception,0,sizeof(*exception));
  exception->signature=MagickCoreSignature;
  exception->severity=UndefinedException;
  exception->semaphore=AllocateSemaphoreInfo();
  exception->exceptions=(void *) NewLinkedList(0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAttribute() searches the list of image attributes and returns
% a pointer to the attribute if it exists otherwise NULL.
%
% The format of the GetImageAttribute method is:
%
% const ImageAttribute *GetImageAttribute(const Image *image,
% const char *key)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o key: These character strings are the name of an image attribute to
% return.
%
*/
static void *DestroyAttribute(void *attribute)
{
register ImageAttribute
*p;
p=(ImageAttribute *) attribute;
if (p->value != (char *) NULL)
p->value=DestroyString(p->value);
return(RelinquishMagickMemory(p));
}
MagickExport const ImageAttribute *GetImageAttribute(const Image *image,
  const char *key)
{
  const char
    *value;

  ImageAttribute
    *attribute;

  /*
    Deprecated (last use: v6.3.1): look a property up through the modern
    GetImageProperty() interface and expose it as a legacy ImageAttribute,
    caching the wrapper in a splay-tree hung off the image.

    Fix: the cached-lookup branch previously declared a second variable
    named `attribute`, shadowing the outer one (-Wshadow hazard); renamed
    to `cached_attribute`.  Behavior is unchanged.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
  value=GetImageProperty(image,key);
  if (value == (const char *) NULL)
    return((const ImageAttribute *) NULL);
  if (image->attributes == (void *) NULL)
    ((Image *) image)->attributes=NewSplayTree(CompareSplayTreeString,
      RelinquishMagickMemory,DestroyAttribute);
  else
    {
      const ImageAttribute
        *cached_attribute;

      cached_attribute=(const ImageAttribute *) GetValueFromSplayTree(
        (SplayTreeInfo *) image->attributes,key);
      if (cached_attribute != (const ImageAttribute *) NULL)
        return(cached_attribute);
    }
  attribute=(ImageAttribute *) AcquireMagickMemory(sizeof(*attribute));
  if (attribute == (ImageAttribute *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(attribute,0,sizeof(*attribute));
  attribute->key=ConstantString(key);
  attribute->value=ConstantString(value);
  (void) AddValueToSplayTree((SplayTreeInfo *) ((Image *) image)->attributes,
    attribute->key,attribute);
  return((const ImageAttribute *) attribute);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p p i n g P a t h A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClippingPathAttribute() searches the list of image attributes and
% returns a pointer to a clipping path if it exists otherwise NULL.
%
% Deprecated, replace with:
%
% GetImageAttribute(image,"8BIM:1999,2998");
%
% The format of the GetImageClippingPathAttribute method is:
%
% const ImageAttribute *GetImageClippingPathAttribute(Image *image)
%
% A description of each parameter follows:
%
% o attribute: Method GetImageClippingPathAttribute returns the clipping
% path if it exists otherwise NULL.
%
% o image: the image.
%
*/
MagickExport const ImageAttribute *GetImageClippingPathAttribute(Image *image)
{
  const ImageAttribute
    *clip_path;

  /* Deprecated: the clipping path lives under the 8BIM profile key. */
  clip_path=GetImageAttribute(image,"8BIM:1999,2998");
  return(clip_path);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e F r o m M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageFromMagickRegistry() gets an image from the registry as defined by
% its name. If the image is not found, a NULL image is returned.
%
% Deprecated, replace with:
%
% GetImageRegistry(ImageRegistryType,name,exception);
%
% The format of the GetImageFromMagickRegistry method is:
%
% Image *GetImageFromMagickRegistry(const char *name,ssize_t *id,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o name: the name of the image to retrieve from the registry.
%
% o id: the registry id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GetImageFromMagickRegistry(const char *name,ssize_t *id,
  ExceptionInfo *exception)
{
  Image
    *image;

  /* Deprecated: registry ids are no longer meaningful; always report 0. */
  *id=0L;
  image=(Image *) GetImageRegistry(ImageRegistryType,name,exception);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMagickRegistry() gets a blob from the registry as defined by the id. If
% the blob that matches the id is not found, NULL is returned.
%
% The format of the GetMagickRegistry method is:
%
% const void *GetMagickRegistry(const ssize_t id,RegistryType *type,
% size_t *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o id: the registry id.
%
% o type: the registry type.
%
% o length: the blob length in number of bytes.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetMagickRegistry(const ssize_t id,RegistryType *type,
  size_t *length,ExceptionInfo *exception)
{
  char
    key[MaxTextExtent];

  void
    *blob;

  /*
    Deprecated: the modern registry is keyed by name, so synthesize a key
    from the numeric id and probe each legacy registry type in turn.
    NOTE(review): the key format string embeds a trailing '\n'; this matches
    how ids were registered historically — do not "clean it up".
  */
  *type=UndefinedRegistryType;
  *length=0;
  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  blob=(void *) GetImageRegistry(ImageRegistryType,key,exception);
  if (blob == (void *) NULL)
    blob=(void *) GetImageRegistry(ImageInfoRegistryType,key,exception);
  if (blob == (void *) NULL)
    blob=(void *) GetImageRegistry(UndefinedRegistryType,key,exception);
  return(blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   G e t M a g i c k T o k e n                                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMagickToken() gets a token from the token stream. A token is defined as
% a sequence of characters delimited by whitespace (e.g. clip-path), a
% sequence delimited with quotes (e.g. "Quote me"), or a sequence enclosed in
% parenthesis (e.g. rgb(0,0,0)). GetMagickToken() also recognizes these
% separator characters: ':', '=', ',', and ';'.
%
% The format of the GetMagickToken method is:
%
% void GetMagickToken(const char *start,const char **end,char *token)
%
% A description of each parameter follows:
%
% o start: the start of the token sequence.
%
% o end: point to the end of the token sequence.
%
% o token: copy the token to this buffer.
%
*/
MagickExport void GetMagickToken(const char *start,const char **end,char *token)
{
  /* Deprecated: delegate to GetNextToken(); ~0UL is presumably an unbounded
     buffer extent — confirm against the GetNextToken() declaration. */
  GetNextToken(start,end,~0UL,token);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageGeometry() returns a region as defined by the geometry string with
% respect to the image and its gravity.
%
% Deprecated, replace with:
%
% if (size_to_fit != MagickFalse)
% ParseRegionGeometry(image,geometry,region_info,&image->exception); else
% ParsePageGeometry(image,geometry,region_info,&image->exception);
%
% The format of the GetImageGeometry method is:
%
% int GetImageGeometry(Image *image,const char *geometry,
%      const unsigned int size_to_fit,RectangleInfo *region_info)
%
% A description of each parameter follows:
%
% o flags: Method GetImageGeometry returns a bitmask that indicates
% which of the four values were located in the geometry string.
%
% o geometry: The geometry (e.g. 100x100+10+10).
%
% o size_to_fit: A value other than 0 means to scale the region so it
% fits within the specified width and height.
%
% o region_info: the region as defined by the geometry string with
% respect to the image and its gravity.
%
*/
MagickExport int GetImageGeometry(Image *image,const char *geometry,
  const unsigned int size_to_fit,RectangleInfo *region_info)
{
  /*
    Deprecated (last use: v5.5.4): resolve the geometry string against the
    image, scaling the region to fit when size_to_fit is set.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.4");
  if (size_to_fit == MagickFalse)
    return((int) ParsePageGeometry(image,geometry,region_info,
      &image->exception));
  return((int) ParseRegionGeometry(image,geometry,region_info,
    &image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageList() returns an image at the specified position in the list.
%
% Deprecated, replace with:
%
% CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue,
% exception);
%
% The format of the GetImageList method is:
%
% Image *GetImageList(const Image *images,const ssize_t offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o offset: the position within the list.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GetImageList(const Image *images,const ssize_t offset,
  ExceptionInfo *exception)
{
  /*
    Deprecated (last use: v5.5.2): return a deep clone of the image found at
    the given offset in the list.
  */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  return(CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e L i s t I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageListIndex() returns the position in the list of the specified
% image.
%
% Deprecated, replace with:
%
% GetImageIndexInList(images);
%
% The format of the GetImageListIndex method is:
%
% ssize_t GetImageListIndex(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport ssize_t GetImageListIndex(const Image *images)
{
  ssize_t
    index;

  /* Deprecated (last use: v5.5.2): position of `images` within its list. */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  index=GetImageIndexInList(images);
  return(index);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e L i s t S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageListSize() returns the number of images in the list.
%
% Deprecated, replace with:
%
% GetImageListLength(images);
%
% The format of the GetImageListSize method is:
%
% size_t GetImageListSize(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport size_t GetImageListSize(const Image *images)
{
  size_t
    length;

  /* Deprecated (last use: v5.5.2): number of images in the list. */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  length=GetImageListLength(images);
  return(length);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in RAM, or in a memory-mapped file. The returned pointer
% should *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
% PseudoClass, call GetAuthenticIndexQueue() after invoking GetImagePixels()
% to obtain the black color component or colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% Deprecated, replace with:
%
% GetAuthenticPixels(image,x,y,columns,rows,&image->exception);
%
% The format of the GetImagePixels() method is:
%
% PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows)
{
  PixelPacket
    *pixels;

  /* Deprecated: authentic (read/write) pixel access for the region; any
     error is recorded in image->exception. */
  pixels=GetAuthenticPixels(image,x,y,columns,rows,&image->exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetIndexes() returns the black channel or the colormap indexes associated
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the black channel or colormap indexes are not available.
%
% Deprecated, replace with:
%
% GetAuthenticIndexQueue(image);
%
% The format of the GetIndexes() method is:
%
% IndexPacket *GetIndexes(const Image *image)
%
% A description of each parameter follows:
%
% o indexes: GetIndexes() returns the indexes associated with the last
% call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% o image: the image.
%
*/
MagickExport IndexPacket *GetIndexes(const Image *image)
{
  IndexPacket
    *indexes;

  /* Deprecated: indexes associated with the last authentic pixel request. */
  indexes=GetAuthenticIndexQueue(image);
  return(indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   G e t M a g i c k G e o m e t r y                                         %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMagickGeometry() is similar to GetGeometry() except the returned
% geometry is modified as determined by the meta characters: %, !, <, >,
% and ~.
%
% Deprecated, replace with:
%
% ParseMetaGeometry(geometry,x,y,width,height);
%
% The format of the GetMagickGeometry method is:
%
% unsigned int GetMagickGeometry(const char *geometry,ssize_t *x,ssize_t *y,
% size_t *width,size_t *height)
%
% A description of each parameter follows:
%
% o geometry: Specifies a character string representing the geometry
% specification.
%
% o x,y: A pointer to an integer. The x and y offset as determined by
% the geometry specification is returned here.
%
% o width,height: A pointer to an unsigned integer. The width and height
% as determined by the geometry specification is returned here.
%
*/
MagickExport unsigned int GetMagickGeometry(const char *geometry,ssize_t *x,
  ssize_t *y,size_t *width,size_t *height)
{
  unsigned int
    flags;

  /* Deprecated (last use: v5.5.3): ParseMetaGeometry() supersedes this. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3");
  flags=ParseMetaGeometry(geometry,x,y,width,height);
  return(flags);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImage() returns the next image in a list.
%
% Deprecated, replace with:
%
% GetNextImageInList(images);
%
% The format of the GetNextImage method is:
%
% Image *GetNextImage(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *GetNextImage(const Image *images)
{
  Image
    *next;

  /* Deprecated (last use: v5.5.2): step forward in the image list. */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  next=GetNextImageInList(images);
  return(next);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageAttribute() gets the next image attribute.
%
% Deprecated, replace with:
%
% const char *property;
% property=GetNextImageProperty(image);
% if (property != (const char *) NULL)
% GetImageAttribute(image,property);
%
% The format of the GetNextImageAttribute method is:
%
% const ImageAttribute *GetNextImageAttribute(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const ImageAttribute *GetNextImageAttribute(const Image *image)
{
  const char
    *property;

  /* Deprecated: advance the property iterator and wrap the result in the
     legacy ImageAttribute interface; NULL once the iterator is exhausted. */
  property=GetNextImageProperty(image);
  if (property != (const char *) NULL)
    return(GetImageAttribute(image,property));
  return((const ImageAttribute *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N u m b e r S c e n e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNumberScenes() returns the number of images in the list.
%
% Deprecated, replace with:
%
% GetImageListLength(image);
%
% The format of the GetNumberScenes method is:
%
% unsigned int GetNumberScenes(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport unsigned int GetNumberScenes(const Image *image)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
return((unsigned int) GetImageListLength(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOnePixel() returns a single pixel at the specified (x,y) location.
% The image background color is returned if an error occurs.
%
% Deprecated, replace with:
%
% GetOneAuthenticPixel(image,x,y,&pixel,&image->exception);
%
% The format of the GetOnePixel() method is:
%
%      PixelPacket GetOnePixel(Image *image,const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
*/
MagickExport PixelPacket GetOnePixel(Image *image,const ssize_t x,const ssize_t y)
{
  PixelPacket
    target;

  /* Deprecated: fetch one authentic pixel; on failure the error lands in
     image->exception and the returned pixel is whatever the callee set. */
  (void) GetOneAuthenticPixel(image,x,y,&target,&image->exception);
  return(target);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixels() returns the pixels associated with the last call to
% QueueAuthenticPixels() or GetAuthenticPixels().
%
% Deprecated, replace with:
%
% GetAuthenticPixelQueue(image);
%
% The format of the GetPixels() method is:
%
%      PixelPacket *GetPixels(const Image *image)
%
% A description of each parameter follows:
%
% o pixels: GetPixels() returns the pixels associated with the last call
% to QueueAuthenticPixels() or GetAuthenticPixels().
%
% o image: the image.
%
*/
MagickExport PixelPacket *GetPixels(const Image *image)
{
  PixelPacket
    *pixels;

  /* Deprecated: pixels from the last authentic queue/get request. */
  pixels=GetAuthenticPixelQueue(image);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P r e v i o u s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPreviousImage() returns the previous image in a list.
%
% Deprecated, replace with:
%
%      GetPreviousImageInList(images);
%
% The format of the GetPreviousImage method is:
%
% Image *GetPreviousImage(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *GetPreviousImage(const Image *images)
{
  Image
    *previous;

  /* Deprecated (last use: v5.5.2): step backward in the image list. */
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  previous=GetPreviousImageInList(images);
  return(previous);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H S L T r a n s f o r m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HSLTransform() converts a (hue, saturation, lightness) to a (red, green,
% blue) triple.
%
% The format of the HSLTransformImage method is:
%
% void HSLTransform(const double hue,const double saturation,
% const double lightness,Quantum *red,Quantum *green,Quantum *blue)
%
% A description of each parameter follows:
%
% o hue, saturation, lightness: A double value representing a
% component of the HSL color space.
%
% o red, green, blue: A pointer to a pixel component of type Quantum.
%
*/
static inline MagickRealType HueToRGB(MagickRealType m1,MagickRealType m2,
  MagickRealType hue)
{
  MagickRealType
    value;

  /*
    Helper for HSLTransform(): evaluate the piecewise-linear HSL hue ramp
    bounded by m1 (low) and m2 (high) at the given hue fraction.
  */
  if (hue < 0.0)
    hue+=1.0;
  if (hue > 1.0)
    hue-=1.0;
  if ((6.0*hue) < 1.0)
    value=m1+6.0*(m2-m1)*hue;
  else
    if ((2.0*hue) < 1.0)
      value=m2;
    else
      if ((3.0*hue) < 2.0)
        value=m1+6.0*(m2-m1)*(2.0/3.0-hue);
      else
        value=m1;
  return(value);
}
MagickExport void HSLTransform(const double hue,const double saturation,
  const double lightness,Quantum *red,Quantum *green,Quantum *blue)
{
  MagickRealType
    m1,
    m2;

  /*
    Convert an HSL triple to RGB: derive the two chroma bounds m1/m2 from
    lightness and saturation, then sample the hue ramp once per channel
    (red shifted +1/3, blue shifted -1/3).
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  if (lightness <= 0.5)
    m2=lightness*(saturation+1.0);
  else
    m2=lightness+saturation-lightness*saturation;
  m1=2.0*lightness-m2;
  *red=ClampToQuantum((MagickRealType) QuantumRange*
    HueToRGB(m1,m2,hue+1.0/3.0));
  *green=ClampToQuantum((MagickRealType) QuantumRange*HueToRGB(m1,m2,hue));
  *blue=ClampToQuantum((MagickRealType) QuantumRange*
    HueToRGB(m1,m2,hue-1.0/3.0));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i t y A f f i n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentityAffine() initializes the affine transform to the identity matrix.
%
% The format of the IdentityAffine method is:
%
% IdentityAffine(AffineMatrix *affine)
%
% A description of each parameter follows:
%
% o affine: A pointer the affine transform of type AffineMatrix.
%
*/
MagickExport void IdentityAffine(AffineMatrix *affine)
{
  /*
    Deprecated (last use: v5.5.7): zero the whole matrix, then set unit
    scale — rx/ry/tx/ty stay zero, yielding the identity transform.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  assert(affine != (AffineMatrix *) NULL);
  (void) memset(affine,0,sizeof(AffineMatrix));
  affine->sy=1.0;
  affine->sx=1.0;
}
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m a g e T o H B i t m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImageToHBITMAP() creates a Windows HBITMAP from an image.
%
% The format of the ImageToHBITMAP method is:
%
%      HBITMAP ImageToHBITMAP(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to convert.
%
*/
MagickExport void *ImageToHBITMAP(Image *image,ExceptionInfo *exception)
{
  BITMAP
    bitmap;

  HANDLE
    bitmap_bitsH;

  HBITMAP
    bitmapH;

  register ssize_t
    x;

  register const PixelPacket
    *p;

  register RGBQUAD
    *q;

  RGBQUAD
    *bitmap_bits;

  size_t
    length;

  ssize_t
    y;

  /*
    Describe a 32-bit (RGBQUAD) device-independent bitmap the size of the
    image, then copy the pixels row by row.
  */
  (void) memset(&bitmap,0,sizeof(bitmap));
  bitmap.bmType=0;
  bitmap.bmWidth=(LONG) image->columns;
  bitmap.bmHeight=(LONG) image->rows;
  bitmap.bmWidthBytes=4*bitmap.bmWidth;
  bitmap.bmPlanes=1;
  bitmap.bmBitsPixel=32;
  bitmap.bmBits=NULL;
  length=bitmap.bmWidthBytes*bitmap.bmHeight;
  bitmap_bitsH=(HANDLE) GlobalAlloc(GMEM_MOVEABLE | GMEM_DDESHARE,length);
  if (bitmap_bitsH == NULL)
    {
      char
        *message;

      message=GetExceptionMessage(errno);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",message);
      message=DestroyString(message);
      return(NULL);
    }
  bitmap_bits=(RGBQUAD *) GlobalLock((HGLOBAL) bitmap_bitsH);
  if (bitmap_bits == (RGBQUAD *) NULL)
    {
      /*
        Fix: GlobalLock() can fail and return NULL; the original code wrote
        through the pointer unconditionally.  Report and release the handle.
      */
      char
        *message;

      message=GetExceptionMessage(errno);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",message);
      message=DestroyString(message);
      GlobalFree((HGLOBAL) bitmap_bitsH);
      return(NULL);
    }
  q=bitmap_bits;
  if (bitmap.bmBits == NULL)
    bitmap.bmBits=bitmap_bits;
  (void) SetImageColorspace(image,sRGBColorspace);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q->rgbRed=ScaleQuantumToChar(GetPixelRed(p));
      q->rgbGreen=ScaleQuantumToChar(GetPixelGreen(p));
      q->rgbBlue=ScaleQuantumToChar(GetPixelBlue(p));
      q->rgbReserved=0;
      p++;
      q++;
    }
  }
  bitmap.bmBits=bitmap_bits;
  bitmapH=CreateBitmapIndirect(&bitmap);
  if (bitmapH == NULL)
    {
      char
        *message;

      message=GetExceptionMessage(errno);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",message);
      message=DestroyString(message);
    }
  GlobalUnlock((HGLOBAL) bitmap_bitsH);
  GlobalFree((HGLOBAL) bitmap_bitsH);
  return((void *) bitmapH);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n i t i a l i z e M a g i c k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeMagick() initializes the ImageMagick environment.
%
% Deprecated, replace with:
%
% MagickCoreGenesis(path,MagickFalse);
%
% The format of the InitializeMagick function is:
%
% InitializeMagick(const char *path)
%
% A description of each parameter follows:
%
% o path: the execution path of the current ImageMagick client.
%
*/
MagickExport void InitializeMagick(const char *path)
{
  /* Deprecated: forward to MagickCoreGenesis() with a MagickFalse second
     argument — presumably "do not install signal handlers"; confirm against
     the MagickCoreGenesis() documentation. */
  MagickCoreGenesis(path,MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolatePixelColor() applies bi-linear or tri-linear interpolation
% between a pixel and it's neighbors.
%
% The format of the InterpolatePixelColor method is:
%
% MagickPixelPacket InterpolatePixelColor(const Image *image,
% CacheView *view_info,InterpolatePixelMethod method,const double x,
% const double y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o image_view: the image cache view.
%
% o type: the type of pixel color interpolation.
%
% o x,y: A double representing the current (x,y) position of the pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void BicubicInterpolate(const MagickPixelPacket *pixels,const double dx,
MagickPixelPacket *pixel)
{
MagickRealType
dx2,
p,
q,
r,
s;
dx2=dx*dx;
p=(pixels[3].red-pixels[2].red)-(pixels[0].red-pixels[1].red);
q=(pixels[0].red-pixels[1].red)-p;
r=pixels[2].red-pixels[0].red;
s=pixels[1].red;
pixel->red=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
p=(pixels[3].green-pixels[2].green)-(pixels[0].green-pixels[1].green);
q=(pixels[0].green-pixels[1].green)-p;
r=pixels[2].green-pixels[0].green;
s=pixels[1].green;
pixel->green=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
p=(pixels[3].blue-pixels[2].blue)-(pixels[0].blue-pixels[1].blue);
q=(pixels[0].blue-pixels[1].blue)-p;
r=pixels[2].blue-pixels[0].blue;
s=pixels[1].blue;
pixel->blue=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
p=(pixels[3].opacity-pixels[2].opacity)-(pixels[0].opacity-pixels[1].opacity);
q=(pixels[0].opacity-pixels[1].opacity)-p;
r=pixels[2].opacity-pixels[0].opacity;
s=pixels[1].opacity;
pixel->opacity=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
if (pixel->colorspace == CMYKColorspace)
{
p=(pixels[3].index-pixels[2].index)-(pixels[0].index-pixels[1].index);
q=(pixels[0].index-pixels[1].index)-p;
r=pixels[2].index-pixels[0].index;
s=pixels[1].index;
pixel->index=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
}
}
static inline MagickRealType CubicWeightingFunction(const MagickRealType x)
{
  MagickRealType
    clipped,
    weight;

  /*
    Cubic B-spline weight: sum of clipped-and-cubed offsets of x scaled by
    the coefficients 1, -4, 6, -4 and normalized by 6.
  */
  clipped=MagickMax(x+2.0,0.0);
  weight=1.0*clipped*clipped*clipped;
  clipped=MagickMax(x+1.0,0.0);
  weight-=4.0*clipped*clipped*clipped;
  clipped=MagickMax(x+0.0,0.0);
  weight+=6.0*clipped*clipped*clipped;
  clipped=MagickMax(x-1.0,0.0);
  weight-=4.0*clipped*clipped*clipped;
  return(weight/6.0);
}
static inline double MeshInterpolate(const PointInfo *delta,const double p,
  const double x,const double y)
{
  double
    residual;

  /* Barycentric blend: weight samples x and y by delta, the apex sample p
     by the remaining weight. */
  residual=1.0-delta->x-delta->y;
  return(delta->x*x+delta->y*y+residual*p);
}
static inline ssize_t NearestNeighbor(MagickRealType x)
{
  /* Round half away from zero to the nearest integer coordinate. */
  return((ssize_t) (x >= 0.0 ? x+0.5 : x-0.5));
}
MagickExport MagickPixelPacket InterpolatePixelColor(const Image *image,
  CacheView *image_view,const InterpolatePixelMethod method,const double x,
  const double y,ExceptionInfo *exception)
{
  MagickPixelPacket
    pixel;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i;

  /*
    Interpolate the pixel color at floating-point coordinate (x,y) with the
    requested method, reading the neighborhood through the given cache view.
    On a cache-view read failure the switch breaks early and the value
    initialized by GetMagickPixelPacket() is returned.  Where the image has
    a matte channel, samples are alpha-premultiplied before blending and
    un-premultiplied (via PerceptibleReciprocal of the blended alpha) after.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image_view != (CacheView *) NULL);
  GetMagickPixelPacket(image,&pixel);
  switch (method)
  {
    case AverageInterpolatePixel:
    {
      double
        gamma;

      MagickPixelPacket
        pixels[16];

      MagickRealType
        alpha[16];

      /*
        Uniform average of the 4x4 neighborhood: each sample contributes
        1/16 (0.0625).  Opacity is blended without the gamma correction.
      */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
        floor(y)-1,4,4,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 16L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        gamma=alpha[i];
        gamma=PerceptibleReciprocal(gamma);
        pixel.red+=gamma*0.0625*pixels[i].red;
        pixel.green+=gamma*0.0625*pixels[i].green;
        pixel.blue+=gamma*0.0625*pixels[i].blue;
        pixel.opacity+=0.0625*pixels[i].opacity;
        if (image->colorspace == CMYKColorspace)
          pixel.index+=gamma*0.0625*pixels[i].index;
        p++;
      }
      break;
    }
    case BicubicInterpolatePixel:
    {
      MagickPixelPacket
        pixels[16],
        u[4];

      MagickRealType
        alpha[16];

      PointInfo
        delta;

      /*
        Separable bicubic: interpolate each of the four rows horizontally
        into u[0..3], then interpolate u vertically into the result.
      */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
        floor(y)-1,4,4,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 4L; i++)
        GetMagickPixelPacket(image,u+i);
      for (i=0; i < 16L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        p++;
      }
      delta.x=x-floor(x);
      /*
        Fix: do NOT call GetMagickPixelPacket(image,pixels+4*i) here -- it
        zeroes the first sample of each row after the samples were populated
        above, corrupting the row interpolation.
      */
      for (i=0; i < 4L; i++)
        BicubicInterpolate(pixels+4*i,delta.x,u+i);
      delta.y=y-floor(y);
      BicubicInterpolate(u,delta.y,&pixel);
      break;
    }
    case BilinearInterpolatePixel:
    default:
    {
      double
        gamma;

      MagickPixelPacket
        pixels[16];

      MagickRealType
        alpha[16];

      PointInfo
        delta;

      /*
        Bilinear blend of the 2x2 neighborhood with fractional weights
        (delta.x, delta.y); gamma un-premultiplies the blended alpha.
      */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
        floor(y),2,2,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 4L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        p++;
      }
      delta.x=x-floor(x);
      delta.y=y-floor(y);
      gamma=(((1.0-delta.y)*((1.0-delta.x)*alpha[0]+delta.x*alpha[1])+delta.y*
        ((1.0-delta.x)*alpha[2]+delta.x*alpha[3])));
      gamma=PerceptibleReciprocal(gamma);
      pixel.red=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].red+delta.x*
        pixels[1].red)+delta.y*((1.0-delta.x)*pixels[2].red+delta.x*
        pixels[3].red));
      pixel.green=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].green+delta.x*
        pixels[1].green)+delta.y*((1.0-delta.x)*pixels[2].green+
        delta.x*pixels[3].green));
      pixel.blue=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].blue+delta.x*
        pixels[1].blue)+delta.y*((1.0-delta.x)*pixels[2].blue+delta.x*
        pixels[3].blue));
      pixel.opacity=((1.0-delta.y)*((1.0-delta.x)*pixels[0].opacity+delta.x*
        pixels[1].opacity)+delta.y*((1.0-delta.x)*pixels[2].opacity+delta.x*
        pixels[3].opacity));
      if (image->colorspace == CMYKColorspace)
        pixel.index=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].index+delta.x*
          pixels[1].index)+delta.y*((1.0-delta.x)*pixels[2].index+delta.x*
          pixels[3].index));
      break;
    }
    case FilterInterpolatePixel:
    {
      Image
        *excerpt_image,
        *filter_image;

      MagickPixelPacket
        pixels[1];

      RectangleInfo
        geometry;

      /*
        Excerpt the 4x4 neighborhood and resize it to a single pixel with
        the image's resize filter; that pixel is the result.
      */
      geometry.width=4L;
      geometry.height=4L;
      geometry.x=(ssize_t) floor(x)-1L;
      geometry.y=(ssize_t) floor(y)-1L;
      excerpt_image=ExcerptImage(image,&geometry,exception);
      if (excerpt_image == (Image *) NULL)
        break;
      filter_image=ResizeImage(excerpt_image,1,1,image->filter,image->blur,
        exception);
      excerpt_image=DestroyImage(excerpt_image);
      if (filter_image == (Image *) NULL)
        break;
      p=GetVirtualPixels(filter_image,0,0,1,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          filter_image=DestroyImage(filter_image);
          break;
        }
      indexes=GetVirtualIndexQueue(filter_image);
      GetMagickPixelPacket(image,pixels);
      SetMagickPixelPacket(image,p,indexes,&pixel);
      filter_image=DestroyImage(filter_image);
      break;
    }
    case IntegerInterpolatePixel:
    {
      MagickPixelPacket
        pixels[1];

      /*
        Truncate (x,y) to integer coordinates and return that pixel as-is.
      */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
        floor(y),1,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      GetMagickPixelPacket(image,pixels);
      SetMagickPixelPacket(image,p,indexes,&pixel);
      break;
    }
    case MeshInterpolatePixel:
    {
      double
        gamma;

      MagickPixelPacket
        pixels[4];

      MagickRealType
        alpha[4];

      PointInfo
        delta,
        luminance;

      /*
        Split the 2x2 cell into two triangles along the diagonal with the
        smaller luma difference, then barycentrically interpolate within
        whichever triangle contains (delta.x,delta.y).
      */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
        floor(y),2,2,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (i=0; i < 4L; i++)
      {
        GetMagickPixelPacket(image,pixels+i);
        SetMagickPixelPacket(image,p,indexes+i,pixels+i);
        alpha[i]=1.0;
        if (image->matte != MagickFalse)
          {
            alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
            pixels[i].red*=alpha[i];
            pixels[i].green*=alpha[i];
            pixels[i].blue*=alpha[i];
            if (image->colorspace == CMYKColorspace)
              pixels[i].index*=alpha[i];
          }
        p++;
      }
      delta.x=x-floor(x);
      delta.y=y-floor(y);
      luminance.x=MagickPixelLuma(pixels+0)-MagickPixelLuma(pixels+3);
      luminance.y=MagickPixelLuma(pixels+1)-MagickPixelLuma(pixels+2);
      if (fabs(luminance.x) < fabs(luminance.y))
        {
          /*
            Diagonal 0-3 NW-SE.
          */
          if (delta.x <= delta.y)
            {
              /*
                Bottom-left triangle (pixel:2, diagonal: 0-3).
              */
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[2].red,
                pixels[3].red,pixels[0].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[2].green,
                pixels[3].green,pixels[0].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[2].blue,
                pixels[3].blue,pixels[0].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[2].opacity,
                pixels[3].opacity,pixels[0].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[2].index,
                  pixels[3].index,pixels[0].index);
            }
          else
            {
              /*
                Top-right triangle (pixel:1, diagonal: 0-3).
              */
              delta.x=1.0-delta.x;
              gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[1].red,
                pixels[0].red,pixels[3].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[1].green,
                pixels[0].green,pixels[3].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[1].blue,
                pixels[0].blue,pixels[3].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[1].opacity,
                pixels[0].opacity,pixels[3].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[1].index,
                  pixels[0].index,pixels[3].index);
            }
        }
      else
        {
          /*
            Diagonal 1-2 NE-SW.
          */
          if (delta.x <= (1.0-delta.y))
            {
              /*
                Top-left triangle (pixel 0, diagonal: 1-2).
              */
              gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[0].red,
                pixels[1].red,pixels[2].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[0].green,
                pixels[1].green,pixels[2].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[0].blue,
                pixels[1].blue,pixels[2].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[0].opacity,
                pixels[1].opacity,pixels[2].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[0].index,
                  pixels[1].index,pixels[2].index);
            }
          else
            {
              /*
                Bottom-right triangle (pixel: 3, diagonal: 1-2).
              */
              delta.x=1.0-delta.x;
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
              gamma=PerceptibleReciprocal(gamma);
              pixel.red=gamma*MeshInterpolate(&delta,pixels[3].red,
                pixels[2].red,pixels[1].red);
              pixel.green=gamma*MeshInterpolate(&delta,pixels[3].green,
                pixels[2].green,pixels[1].green);
              pixel.blue=gamma*MeshInterpolate(&delta,pixels[3].blue,
                pixels[2].blue,pixels[1].blue);
              pixel.opacity=gamma*MeshInterpolate(&delta,pixels[3].opacity,
                pixels[2].opacity,pixels[1].opacity);
              if (image->colorspace == CMYKColorspace)
                pixel.index=gamma*MeshInterpolate(&delta,pixels[3].index,
                  pixels[2].index,pixels[1].index);
            }
        }
      break;
    }
    case NearestNeighborInterpolatePixel:
    {
      MagickPixelPacket
        pixels[1];

      /*
        Round (x,y) to the nearest integer coordinate and return that pixel.
      */
      p=GetCacheViewVirtualPixels(image_view,NearestNeighbor(x),
        NearestNeighbor(y),1,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      GetMagickPixelPacket(image,pixels);
      SetMagickPixelPacket(image,p,indexes,&pixel);
      break;
    }
    case SplineInterpolatePixel:
    {
      double
        gamma;

      MagickPixelPacket
        pixels[16];

      MagickRealType
        alpha[16],
        dx,
        dy;

      PointInfo
        delta;

      ssize_t
        j,
        n;

      /*
        Cubic B-spline over the 4x4 neighborhood: each sample is weighted by
        the separable product CubicWeightingFunction(dx)*CWF(dy).
      */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
        floor(y)-1,4,4,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      n=0;
      delta.x=x-floor(x);
      delta.y=y-floor(y);
      for (i=(-1); i < 3L; i++)
      {
        dy=CubicWeightingFunction((MagickRealType) i-delta.y);
        for (j=(-1); j < 3L; j++)
        {
          GetMagickPixelPacket(image,pixels+n);
          SetMagickPixelPacket(image,p,indexes+n,pixels+n);
          alpha[n]=1.0;
          if (image->matte != MagickFalse)
            {
              alpha[n]=QuantumScale*((MagickRealType)
                GetPixelAlpha(p));
              pixels[n].red*=alpha[n];
              pixels[n].green*=alpha[n];
              pixels[n].blue*=alpha[n];
              if (image->colorspace == CMYKColorspace)
                pixels[n].index*=alpha[n];
            }
          dx=CubicWeightingFunction(delta.x-(MagickRealType) j);
          gamma=alpha[n];
          gamma=PerceptibleReciprocal(gamma);
          pixel.red+=gamma*dx*dy*pixels[n].red;
          pixel.green+=gamma*dx*dy*pixels[n].green;
          pixel.blue+=gamma*dx*dy*pixels[n].blue;
          if (image->matte != MagickFalse)
            pixel.opacity+=dx*dy*pixels[n].opacity;
          if (image->colorspace == CMYKColorspace)
            pixel.index+=gamma*dx*dy*pixels[n].index;
          n++;
          p++;
        }
      }
      break;
    }
  }
  return(pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e A t t r i b u t e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageAttributes() replaces any embedded formatting characters with
% the appropriate image attribute and returns the translated text.
%
% Deprecated, replace with:
%
% InterpretImageProperties(image_info,image,embed_text);
%
% The format of the InterpretImageAttributes method is:
%
% char *InterpretImageAttributes(const ImageInfo *image_info,Image *image,
% const char *embed_text)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o embed_text: the address of a character string containing the embedded
% formatting characters.
%
*/
MagickExport char *InterpretImageAttributes(const ImageInfo *image_info,
  Image *image,const char *embed_text)
{
  char
    *interpreted_text;

  /*
    Deprecated wrapper: log the deprecation then delegate to
    InterpretImageProperties().
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
  interpreted_text=InterpretImageProperties(image_info,image,embed_text);
  return(interpreted_text);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e s R G B C o m p a n d o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InversesRGBCompandor() removes the gamma function from a sRGB pixel.
%
% The format of the InversesRGBCompandor method is:
%
% MagickRealType InversesRGBCompandor(const MagickRealType pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel.
%
*/
MagickExport MagickRealType InversesRGBCompandor(const MagickRealType pixel)
{
  /*
    Remove the sRGB transfer function: values above the knee
    (0.0404482362771076 in normalized terms) take the 2.4-power segment,
    values at or below it take the linear /12.92 segment.
  */
  if (pixel > (0.0404482362771076*QuantumRange))
    return(QuantumRange*pow((QuantumScale*pixel+0.055)/1.055,2.4));
  return(pixel/12.92);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M a g i c k I n s t a n t i a t e d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMagickInstantiated() returns MagickTrue if the ImageMagick environment
% is currently instantiated: MagickCoreGenesis() has been called but
% MagickDestroy() has not.
%
% The format of the IsMagickInstantiated method is:
%
% MagickBooleanType IsMagickInstantiated(void)
%
*/
MagickExport MagickBooleanType IsMagickInstantiated(void)
{
  MagickBooleanType
    instantiated;

  /*
    Deprecated wrapper around IsMagickCoreInstantiated().
  */
  instantiated=IsMagickCoreInstantiated();
  return(instantiated);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I s S u b i m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsSubimage() returns MagickTrue if the geometry is a valid subimage
% specification (e.g. [1], [1-9], [1,7,4]).
%
% The format of the IsSubimage method is:
%
% unsigned int IsSubimage(const char *geometry,const unsigned int pedantic)
%
% A description of each parameter follows:
%
% o geometry: This string is the geometry specification.
%
% o pedantic: A value other than 0 invokes a more restrictive set of
% conditions for a valid specification (e.g. [1], [1-4], [4-1]).
%
*/
MagickExport unsigned int IsSubimage(const char *geometry,
  const unsigned int pedantic)
{
  /*
    A subimage specification must be non-NULL, contain no 'x'/'X' (which
    would make it a size geometry), and--when pedantic--no comma either.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((strchr(geometry,'x') == (char *) NULL) &&
      (strchr(geometry,'X') == (char *) NULL) &&
      ((pedantic == 0) || (strchr(geometry,',') == (char *) NULL)))
    return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColor() will map the given color to "black" and "white"
% values, linearly spreading out the colors, and level values on a channel by
% channel bases, as per LevelImage(). The given colors allows you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% Deprecated, replace with:
%
% LevelColorsImageChannel(image,channel,black_color,white_color,invert);
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,const ChannelType channel,
% const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
% const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_color: The color to map black to/from
%
%    o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
*/
MagickBooleanType LevelImageColors(Image *image,const ChannelType channel,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: delegate directly to LevelColorsImageChannel().
  */
  status=LevelColorsImageChannel(image,channel,black_color,white_color,invert);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i b e r a t e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiberateMemory() frees memory that has already been allocated, and NULL's
% the pointer to it.
%
% The format of the LiberateMemory method is:
%
% void LiberateMemory(void **memory)
%
% A description of each parameter follows:
%
% o memory: A pointer to a block of memory to free for reuse.
%
*/
MagickExport void LiberateMemory(void **memory)
{
  /*
    Deprecated: free *memory (if allocated) and NULL the caller's pointer
    so it cannot be double-freed.  The handle itself must be non-NULL.
  */
  assert(memory != (void **) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (*memory != (void *) NULL)
    {
      free(*memory);
      *memory=(void *) NULL;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i b e r a t e S e m a p h o r e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiberateSemaphoreInfo() relinquishes a semaphore.
%
% Deprecated, replace with:
%
% UnlockSemaphoreInfo(*semaphore_info);
%
% The format of the LiberateSemaphoreInfo method is:
%
% LiberateSemaphoreInfo(void **semaphore_info)
%
% A description of each parameter follows:
%
% o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
MagickExport void LiberateSemaphoreInfo(SemaphoreInfo **semaphore_info)
{
  /*
    Deprecated wrapper: unlock the semaphore via UnlockSemaphoreInfo().
    NOTE(review): *semaphore_info is dereferenced without a NULL check --
    callers must pass a handle to a valid semaphore.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  UnlockSemaphoreInfo(*semaphore_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k I n c a r n a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickIncarnate() initializes the ImageMagick environment.
%
% Deprecated, replace with:
%
% MagickCoreGenesis(path,MagickFalse);
%
% The format of the MagickIncarnate function is:
%
% MagickIncarnate(const char *path)
%
% A description of each parameter follows:
%
% o path: the execution path of the current ImageMagick client.
%
*/
MagickExport void MagickIncarnate(const char *path)
{
  /*
    Deprecated wrapper: initialize the MagickCore environment via
    MagickCoreGenesis() without installing signal handlers (MagickFalse).
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  MagickCoreGenesis(path,MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o n i t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMonitor() calls the monitor handler method with a text string that
% describes the task and a measure of completion. The method returns
% MagickTrue on success otherwise MagickFalse if an error is encountered, e.g.
% if there was a user interrupt.
%
% The format of the MagickMonitor method is:
%
% MagickBooleanType MagickMonitor(const char *text,
% const MagickOffsetType offset,const MagickSizeType span,
% void *client_data)
%
% A description of each parameter follows:
%
% o offset: the position relative to the span parameter which represents
% how much progress has been made toward completing a task.
%
% o span: the span relative to completing a task.
%
% o client_data: the client data.
%
*/
MagickExport MagickBooleanType MagickMonitor(const char *text,
  const MagickOffsetType offset,const MagickSizeType span,
  void *magick_unused(client_data))
{
  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  /*
    Invoke the registered monitor handler (if any) with the task text and
    progress offset/span.  With no handler installed the call trivially
    succeeds.  A scratch ExceptionInfo is supplied to the handler and
    destroyed afterwards.
  */
  magick_unreferenced(client_data);
  assert(text != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",text);
  ProcessPendingEvents(text);
  exception=AcquireExceptionInfo();
  status=(monitor_handler == (MonitorHandler) NULL) ? MagickTrue :
    (*monitor_handler)(text,offset,span,exception);
  exception=DestroyExceptionInfo(exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MapImage() replaces the colors of an image with the closest color from a
% reference image.
%
% Deprecated, replace with:
%
% QuantizeInfo quantize_info;
% GetQuantizeInfo(&quantize_info);
% quantize_info.dither=dither;
% RemapImage(&quantize_info,image,map_image);
%
% The format of the MapImage method is:
%
% MagickBooleanType MapImage(Image *image,const Image *map_image,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o map_image: the image. Reduce image to a set of colors represented by
% this image.
%
% o dither: Set this integer value to something other than zero to
% dither the mapped image.
%
*/
MagickExport MagickBooleanType MapImage(Image *image,const Image *map_image,
  const MagickBooleanType dither)
{
  QuantizeInfo
    remap_info;

  /*
    Deprecated: remap the image's colors to those of map_image by
    delegating to RemapImage() with a default quantize configuration and
    the caller's dither setting.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(map_image != (Image *) NULL);
  assert(map_image->signature == MagickCoreSignature);
  GetQuantizeInfo(&remap_info);
  remap_info.dither=dither;
  return(RemapImage(&remap_info,image,map_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MapImages() replaces the colors of a sequence of images with the closest
% color from a reference image.
%
% Deprecated, replace with:
%
% QuantizeInfo quantize_info;
% GetQuantizeInfo(&quantize_info);
% quantize_info.dither=dither;
% RemapImages(&quantize_info,images,map_image);
%
% The format of the MapImage method is:
%
% MagickBooleanType MapImages(Image *images,Image *map_image,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a set of Image structures.
%
% o map_image: the image. Reduce image to a set of colors represented by
% this image.
%
% o dither: Set this integer value to something other than zero to
% dither the quantized image.
%
*/
MagickExport MagickBooleanType MapImages(Image *images,const Image *map_image,
  const MagickBooleanType dither)
{
  QuantizeInfo
    remap_info;

  /*
    Deprecated: remap every image in the sequence to the colors of
    map_image via RemapImages() with a default quantize configuration.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  GetQuantizeInfo(&remap_info);
  remap_info.dither=dither;
  return(RemapImages(&remap_info,images,map_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t t e F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatteFloodfill() changes the transparency value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod
% is specified, the transparency value is changed for any neighbor pixel
% that does not match the bordercolor member of image.
%
% By default target must match a particular pixel transparency exactly.
% However, in many cases two transparency values may differ by a
% small amount. The fuzz member of image defines how much tolerance is
% acceptable to consider two transparency values as the same. For example,
% set fuzz to 10 and the opacity values of 100 and 102 respectively are
% now interpreted as the same value for the purposes of the floodfill.
%
% The format of the MatteFloodfillImage method is:
%
% MagickBooleanType MatteFloodfillImage(Image *image,
% const PixelPacket target,const Quantum opacity,const ssize_t x_offset,
% const ssize_t y_offset,const PaintMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
% o x,y: the starting location of the operation.
%
% o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
MagickExport MagickBooleanType MatteFloodfillImage(Image *image,
  const PixelPacket target,const Quantum opacity,const ssize_t x_offset,
  const ssize_t y_offset,const PaintMethod method)
{
  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Scan-line flood fill of the opacity channel, seeded at
    (x_offset,y_offset).  Filled pixels are first marked transparent in a
    scratch clone (floodplane_image); the marks are then applied to the
    real image's opacity in a final pass.  A SegmentInfo stack drives the
    classic scan-line algorithm: each popped segment is extended left and
    right along its row and child segments are pushed for the rows above
    and below.

    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  /* Scratch image starts fully opaque; TransparentOpacity marks "filled". */
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  /*
    Set floodfill color.
  */
  segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
    sizeof(*segment_stack));
  if (segment_stack == (SegmentInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Push initial segment on stack.
  */
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  while (s > segment_stack)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    /* y2 stores the parent-row direction; y1+offset is the row to fill. */
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels: walk left from x1 while pixels match
      (FloodfillMethod) or do not match the border (FillToBorderMethod) and
      are not already marked.
    */
    p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception);
    q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1,
      &image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      if (method == FloodfillMethod)
        {
          if (IsColorSimilar(image,p,&target) == MagickFalse)
            break;
        }
      else
        if (IsColorSimilar(image,p,&target) != MagickFalse)
          break;
      q->opacity=(Quantum) TransparentOpacity;
      q--;
      p--;
    }
    if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
      break;
    /* skip: the leftward walk made no progress past x1. */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          /* Leaked past the parent segment's left edge: push a "leak"
             segment back toward the parent row. */
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          /*
            Walk right from x, marking matching pixels, then push the
            filled run for the next row (and a leak segment if the run
            extends past the parent's right edge).
          */
          if (x < (ssize_t) image->columns)
            {
              p=GetVirtualPixels(image,x,y,image->columns-x,1,
                &image->exception);
              q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1,
                &image->exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                if (method == FloodfillMethod)
                  {
                    if (IsColorSimilar(image,p,&target) == MagickFalse)
                      break;
                  }
                else
                  if (IsColorSimilar(image,p,&target) != MagickFalse)
                    break;
                q->opacity=(Quantum) TransparentOpacity;
                q++;
                p++;
              }
              if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /*
            Scan forward to the next fillable pixel within [x,x2]; note the
            match conditions are inverted here (we are skipping
            non-fillable pixels).
          */
          p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            if (method == FloodfillMethod)
              {
                if (IsColorSimilar(image,p,&target) != MagickFalse)
                  break;
              }
            else
              if (IsColorSimilar(image,p,&target) == MagickFalse)
                break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Tile fill color onto floodplane: every pixel marked in the scratch
      image receives the requested opacity in the real image.
    */
    p=GetVirtualPixels(floodplane_image,0,y,image->columns,1,
      &image->exception);
    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        q->opacity=opacity;
      p++;
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
  }
  segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
  floodplane_image=DestroyImage(floodplane_image);
  /* Success iff the final application pass completed every row. */
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a x i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaximumImages() returns the maximum intensity of an image sequence.
%
% Deprecated, replace with:
%
%      EvaluateImages(images,MaxEvaluateOperator,exception);
%
% The format of the MaximumImages method is:
%
% Image *MaximumImages(Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MaximumImages(const Image *images,ExceptionInfo *exception)
{
  /*
    Deprecated: return the per-pixel maximum of the image sequence.
    Fix: use MaxEvaluateOperator -- the previous code passed
    MinEvaluateOperator and therefore returned the minimum (compare
    MinimumImages() below, which correctly uses MinEvaluateOperator).
  */
  return(EvaluateImages(images,MaxEvaluateOperator,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinimumImages() returns the minimum intensity of an image sequence.
%
% Deprecated, replace with:
%
% EvaluateImages(images,MinEvaluateOperator,exception);
%
% The format of the MinimumImages method is:
%
% Image *MinimumImages(Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinimumImages(const Image *images,ExceptionInfo *exception)
{
  Image
    *minimum_image;

  /*
    Deprecated: return the per-pixel minimum of the image sequence via
    EvaluateImages().
  */
  minimum_image=EvaluateImages(images,MinEvaluateOperator,exception);
  return(minimum_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e d i a n F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MedianFilterImage() applies a digital filter that improves the quality
% of a noisy image. Each pixel is replaced by the median in a set of
% neighboring pixels as defined by radius.
%
% The algorithm was contributed by Mike Edmonds and implements an insertion
% sort for selecting median color-channel values. For more on this algorithm
% see "Skip Lists: A probabilistic Alternative to Balanced Trees" by William
% Pugh in the June 1990 of Communications of the ACM.
%
% The format of the MedianFilterImage method is:
%
% Image *MedianFilterImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MedianFilterImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  /*
    Deprecated: equivalent to StatisticImage() with MedianStatistic over a
    radius x radius neighborhood.
  */
  return(StatisticImage(image,MedianStatistic,(size_t) radius,(size_t) radius,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModeImage() makes each pixel the 'predominant color' of the neighborhood
% of the specified radius.
%
% The format of the ModeImage method is:
%
% Image *ModeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ModeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  /*
    Deprecated: equivalent to StatisticImage() with ModeStatistic over a
    radius x radius neighborhood.
  */
  return(StatisticImage(image,ModeStatistic,(size_t) radius,(size_t) radius,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o s a i c I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MosaicImages() Obsolete Function: Use MergeImageLayers() instead.
%
% Deprecated, replace with:
%
% MergeImageLayers(image,MosaicLayer,exception);
%
% The format of the MosaicImage method is:
%
% Image *MosaicImages(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image list to be composited together
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MosaicImages(Image *image,ExceptionInfo *exception)
{
  Image
    *mosaic_image;

  /*
    Deprecated wrapper: composite the image list with MergeImageLayers()
    using the MosaicLayer method.
  */
  mosaic_image=MergeImageLayers(image,MosaicLayer,exception);
  return(mosaic_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the OpaqueImage method is:
%
% MagickBooleanType OpaqueImage(Image *image,
% const PixelPacket *target,const PixelPacket fill)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
*/
MagickExport MagickBooleanType OpaqueImage(Image *image,
  const PixelPacket target,const PixelPacket fill)
{
#define OpaqueImageTag  "Opaque/Image"

  MagickBooleanType
    proceed;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Make image color opaque: replace every pixel similar to target (within
    the image's fuzz) with fill.  DirectClass images are rewritten pixel by
    pixel; PseudoClass images are rewritten through the colormap, with an
    extra per-pixel pass only when the fill carries transparency.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Make DirectClass image opaque: replace matching pixels row by row.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
        if (q == (PixelPacket *) NULL)
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (IsColorSimilar(image,q,&target) != MagickFalse)
            *q=fill;
          q++;
        }
        if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
          break;
        proceed=SetImageProgress(image,OpaqueImageTag,(MagickOffsetType) y,
          image->rows);
        /* Progress callback may request cancellation. */
        if (proceed == MagickFalse)
          break;
      }
      break;
    }
    case PseudoClass:
    {
      /*
        Make PseudoClass image opaque: it suffices to rewrite matching
        colormap entries...
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if (IsColorSimilar(image,&image->colormap[i],&target) != MagickFalse)
          image->colormap[i]=fill;
      }
      if (fill.opacity != OpaqueOpacity)
        {
          /*
            ...except opacity, which lives per-pixel (not in the colormap)
            and so needs its own pass when the fill is not fully opaque.
          */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register ssize_t
              x;

            register PixelPacket
              *magick_restrict q;

            q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) != MagickFalse)
                q->opacity=fill.opacity;
              q++;
            }
            if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
              break;
          }
        }
      /* Re-synchronize pixels with the updated colormap. */
      (void) SyncImage(image);
      break;
    }
  }
  /* A transparent fill introduces a matte channel. */
  if (fill.opacity != OpaqueOpacity)
    image->matte=MagickTrue;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p e n C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenCacheView() opens a view into the pixel cache, using the
% VirtualPixelMethod that is defined within the given image itself.
%
% Deprecated, replace with:
%
% AcquireVirtualCacheView(image,&image->exception);
%
% The format of the OpenCacheView method is:
%
% CacheView *OpenCacheView(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheView *OpenCacheView(const Image *image)
{
  CacheView
    *cache_view;

  /*
    Deprecated: the const cast exposes the image's mutable exception member
    to AcquireVirtualCacheView().
  */
  cache_view=AcquireVirtualCacheView(image,&((Image *) image)->exception);
  return(cache_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p e n M a g i c k S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenMagickStream() opens the file at the specified path and return the
% associated stream.
%
% The format of the OpenMagickStream method is:
%
% FILE *OpenMagickStream(const char *path,const char *mode)
%
% A description of each parameter follows.
%
% o path: the file path.
%
% o mode: the file mode.
%
*/
#if defined(MAGICKCORE_HAVE__WFOPEN)
/*
  UTF8ToUTF16() converts a UTF-8 byte sequence to UTF-16 code units.

  If utf16 is non-NULL the converted, NUL-terminated string is written there
  and the number of wchar_t units written (including the terminator) is
  returned.  If utf16 is NULL, the routine validates the UTF-8 input and
  returns its byte length, an upper bound on the number of UTF-16 units
  required (callers use it to size the destination buffer).

  A return of 0 signals malformed UTF-8 (including 4-byte sequences, which
  this legacy converter does not support).
*/
static size_t UTF8ToUTF16(const unsigned char *utf8,wchar_t *utf16)
{
  const unsigned char
    *p;
  if (utf16 != (wchar_t *) NULL)
    {
      wchar_t
        *q;
      wchar_t
        c;
      /*
        Convert UTF-8 to UTF-16.
      */
      q=utf16;
      for (p=utf8; *p != '\0'; p++)
      {
        if ((*p & 0x80) == 0)
          *q=(*p);  /* 1-byte (ASCII) sequence */
        else
          if ((*p & 0xE0) == 0xC0)
            {
              /* 2-byte sequence: 110xxxxx 10xxxxxx. */
              c=(*p);
              *q=(c & 0x1F) << 6;
              p++;
              if ((*p & 0xC0) != 0x80)
                return(0);
              *q|=(*p & 0x3F);
            }
          else
            if ((*p & 0xF0) == 0xE0)
              {
                /*
                  3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx.  Mask off the
                  lead byte's length-marker bits; the previous code shifted
                  the full byte and relied on 16-bit wchar_t truncation to
                  discard them, which is wrong when wchar_t is wider than 16
                  bits.
                */
                c=(*p);
                *q=(c & 0x0F) << 12;
                p++;
                if ((*p & 0xC0) != 0x80)
                  return(0);
                c=(*p);
                *q|=(c & 0x3F) << 6;
                p++;
                if ((*p & 0xC0) != 0x80)
                  return(0);
                *q|=(*p & 0x3F);
              }
            else
              return(0);  /* 4-byte and invalid lead bytes are unsupported */
        q++;
      }
      *q++='\0';
      return(q-utf16);
    }
  /*
    Validate the UTF-8 and compute an upper bound on the UTF-16 length.
  */
  for (p=utf8; *p != '\0'; p++)
  {
    if ((*p & 0x80) == 0)
      ;
    else
      if ((*p & 0xE0) == 0xC0)
        {
          p++;
          if ((*p & 0xC0) != 0x80)
            return(0);
        }
      else
        if ((*p & 0xF0) == 0xE0)
          {
            p++;
            if ((*p & 0xC0) != 0x80)
              return(0);
            p++;
            if ((*p & 0xC0) != 0x80)
              return(0);
          }
        else
          return(0);
  }
  return(p-utf8);
}
static wchar_t *ConvertUTF8ToUTF16(const unsigned char *source)
{
  size_t
    length;
  wchar_t
    *utf16;
  /*
    Size the destination with a validating pre-pass; a zero length means the
    input is not well-formed UTF-8.
  */
  length=UTF8ToUTF16(source,(wchar_t *) NULL);
  if (length != 0)
    {
      utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16));
      if (utf16 == (wchar_t *) NULL)
        return((wchar_t *) NULL);
      length=UTF8ToUTF16(source,utf16);
      return(utf16);
    }
  {
    ssize_t
      i;
    /*
      Not UTF-8: widen each byte verbatim, including the terminating NUL.
    */
    length=strlen((const char *) source);
    utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16));
    if (utf16 == (wchar_t *) NULL)
      return((wchar_t *) NULL);
    for (i=0; i <= (ssize_t) length; i++)
      utf16[i]=source[i];
  }
  return(utf16);
}
#endif
MagickExport FILE *OpenMagickStream(const char *path,const char *mode)
{
  FILE
    *stream;

  /* Reject NULL arguments up front with fopen()-style EINVAL semantics. */
  if ((path == (const char *) NULL) || (mode == (const char *) NULL))
    {
      errno=EINVAL;
      return((FILE *) NULL);
    }
  stream=(FILE *) NULL;
#if defined(MAGICKCORE_HAVE__WFOPEN)
  {
    wchar_t
      *wide_mode,
      *wide_path;

    /* Prefer _wfopen() so UTF-8 paths survive on Windows. */
    wide_path=ConvertUTF8ToUTF16((const unsigned char *) path);
    if (wide_path == (wchar_t *) NULL)
      return((FILE *) NULL);
    wide_mode=ConvertUTF8ToUTF16((const unsigned char *) mode);
    if (wide_mode == (wchar_t *) NULL)
      {
        wide_path=(wchar_t *) RelinquishMagickMemory(wide_path);
        return((FILE *) NULL);
      }
    stream=_wfopen(wide_path,wide_mode);
    wide_mode=(wchar_t *) RelinquishMagickMemory(wide_mode);
    wide_path=(wchar_t *) RelinquishMagickMemory(wide_path);
  }
#endif
  /* Fall back to the narrow-character fopen() path. */
  if (stream == (FILE *) NULL)
    stream=fopen(path,mode);
  return(stream);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a i n t F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaintFloodfill() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now
% interpreted as the same color for the purposes of the floodfill.
%
% Deprecated, replace with:
%
% FloodfillPaintImage(image,channel,draw_info,target,x,y,
% method == FloodfillMethod ? MagickFalse : MagickTrue);
%
% The format of the PaintFloodfillImage method is:
%
% MagickBooleanType PaintFloodfillImage(Image *image,
% const ChannelType channel,const MagickPixelPacket target,
% const ssize_t x,const ssize_t y,const DrawInfo *draw_info,
% const PaintMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o target: the RGB value of the target color.
%
% o x,y: the starting location of the operation.
%
% o draw_info: the draw info.
%
% o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
MagickExport MagickBooleanType PaintFloodfillImage(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,const ssize_t x,
  const ssize_t y,const DrawInfo *draw_info,const PaintMethod method)
{
  /*
    Deprecated: forward to FloodfillPaintImage(); FillToBorderMethod maps to
    the inverted flavor of the modern API.
  */
  return(FloodfillPaintImage(image,channel,draw_info,target,x,y,
    method == FloodfillMethod ? MagickFalse : MagickTrue));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% P a i n t O p a q u e I m a g e %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaintOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% Deprecated, replace with:
%
% OpaquePaintImageChannel(image,DefaultChannels,target,fill,MagickFalse);
% OpaquePaintImageChannel(image,channel,target,fill,MagickFalse);
%
% The format of the PaintOpaqueImage method is:
%
% MagickBooleanType PaintOpaqueImage(Image *image,
% const PixelPacket *target,const PixelPacket *fill)
% MagickBooleanType PaintOpaqueImageChannel(Image *image,
% const ChannelType channel,const PixelPacket *target,
% const PixelPacket *fill)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
*/
MagickExport MagickBooleanType PaintOpaqueImage(Image *image,
  const MagickPixelPacket *target,const MagickPixelPacket *fill)
{
  /* Deprecated: equivalent to OpaquePaintImageChannel() on DefaultChannels. */
  return(OpaquePaintImageChannel(image,DefaultChannels,target,fill,
    MagickFalse));
}
MagickExport MagickBooleanType PaintOpaqueImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,
  const MagickPixelPacket *fill)
{
  MagickBooleanType
    status;

  /* Deprecated: thin shim over OpaquePaintImageChannel(). */
  status=OpaquePaintImageChannel(image,channel,target,fill,MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a i n t T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaintTransparentImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% Deprecated, replace with:
%
% TransparentPaintImage(image,target,opacity,MagickFalse);
%
% The format of the PaintTransparentImage method is:
%
% MagickBooleanType PaintTransparentImage(Image *image,
% const MagickPixelPacket *target,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o opacity: the replacement opacity value.
%
*/
MagickExport MagickBooleanType PaintTransparentImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity)
{
  MagickBooleanType
    status;

  /* Deprecated: equivalent to TransparentPaintImage() without inversion. */
  status=TransparentPaintImage(image,target,opacity,MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P a r s e I m a g e G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ParseImageGeometry() is similar to GetGeometry() except the returned
% geometry is modified as determined by the meta characters: %, !, <,
% and >.
%
% Deprecated, replace with:
%
% ParseMetaGeometry(geometry,x,y,width,height);
%
% The format of the ParseImageGeometry method is:
%
% int ParseImageGeometry(char *geometry,ssize_t *x,ssize_t *y,
% size_t *width,size_t *height)
%
% A description of each parameter follows:
%
% o flags: Method ParseImageGeometry returns a bitmask that indicates
% which of the four values were located in the geometry string.
%
% o image_geometry: Specifies a character string representing the geometry
% specification.
%
% o x,y: A pointer to an integer. The x and y offset as determined by
% the geometry specification is returned here.
%
% o width,height: A pointer to an unsigned integer. The width and height
% as determined by the geometry specification is returned here.
%
*/
MagickExport int ParseImageGeometry(const char *geometry,ssize_t *x,ssize_t *y,
  size_t *width,size_t *height)
{
  int
    flags;

  /* Deprecated since v5.5.1: ParseMetaGeometry() supersedes this routine. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  flags=(int) ParseMetaGeometry(geometry,x,y,width,height);
  return(flags);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a r s e S i z e G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ParseSizeGeometry() returns a region as defined by the geometry string with
% respect to the image dimensions and aspect ratio.
%
% Deprecated, replace with:
%
%    ParseMetaGeometry(geometry,&region_info->x,&region_info->y,
%      &region_info->width,&region_info->height);
%
% The format of the ParseSizeGeometry method is:
%
% MagickStatusType ParseSizeGeometry(const Image *image,
%      const char *geometry,RectangleInfo *region_info)
%
% A description of each parameter follows:
%
% o geometry: The geometry (e.g. 100x100+10+10).
%
% o region_info: the region as defined by the geometry string.
%
*/
/*
  ParseSizeGeometry(): deprecated (last use v6.4.7).  Resolves `geometry'
  against the image dimensions (honoring aspect-ratio meta characters) and
  returns the resulting region in *region_info along with the
  ParseMetaGeometry() flags.

  Fix: the address-of operators had been corrupted by a character-encoding
  mangle (`&reg' rendered as the registered-trademark sign), which does not
  compile; restore `&region_info->...'.
*/
MagickExport MagickStatusType ParseSizeGeometry(const Image *image,
  const char *geometry,RectangleInfo *region_info)
{
  MagickStatusType
    flags;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.4.7");
  /* Seed the region with the image's own geometry before applying the spec. */
  SetGeometry(image,region_info);
  flags=ParseMetaGeometry(geometry,&region_info->x,&region_info->y,
    &region_info->width,&region_info->height);
  return(flags);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o p I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PopImageList() removes the last image in the list.
%
% Deprecated, replace with:
%
% RemoveLastImageFromList(images);
%
% The format of the PopImageList method is:
%
% Image *PopImageList(Image **images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *PopImageList(Image **images)
{
  Image
    *image;

  /* Deprecated since v5.5.2: use RemoveLastImageFromList(). */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  image=RemoveLastImageFromList(images);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o p I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PopImagePixels() transfers one or more pixel components from the image pixel
% cache to a user supplied buffer. The pixels are returned in network byte
% order. MagickTrue is returned if the pixels are successfully transferred,
% otherwise MagickFalse.
%
% The format of the PopImagePixels method is:
%
%      size_t PopImagePixels(Image *image,const QuantumType quantum,
% unsigned char *destination)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o quantum: Declare which pixel components to transfer (RGB, RGBA, etc).
%
% o destination: The components are transferred to this buffer.
%
*/
MagickExport size_t PopImagePixels(Image *image,const QuantumType quantum,
  unsigned char *destination)
{
  QuantumInfo
    *quantum_info;

  size_t
    extent;

  /*
    Deprecated: export pixels in network byte order through the quantum API.
    Returns the number of bytes transferred, or 0 on failure.
  */
  quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  extent=ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info,
    quantum,destination,&image->exception);
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t s c r i p t G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PostscriptGeometry() replaces any page mnemonic with the equivalent size in
% picas.
%
% Deprecated, replace with:
%
% GetPageGeometry(page);
%
% The format of the PostscriptGeometry method is:
%
% char *PostscriptGeometry(const char *page)
%
% A description of each parameter follows.
%
% o page: Specifies a pointer to an array of characters.
% The string is either a Postscript page name (e.g. A4) or a postscript
% page geometry (e.g. 612x792+36+36).
%
*/
MagickExport char *PostscriptGeometry(const char *page)
{
  char
    *geometry;

  /* Deprecated since v5.5.1: use GetPageGeometry(). */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  geometry=GetPageGeometry(page);
  return(geometry);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P u s h I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PushImageList() adds an image to the end of the list.
%
% Deprecated, replace with:
%
% AppendImageToList(images,CloneImageList(image,exception));
%
% The format of the PushImageList method is:
%
% unsigned int PushImageList(Image *images,const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int PushImageList(Image **images,const Image *image,
  ExceptionInfo *exception)
{
  Image
    *clone_list;

  /* Deprecated since v5.5.2: clone `image' and append it to the list. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone_list=CloneImageList(image,exception);
  AppendImageToList(images,clone_list);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P u s h I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PushImagePixels() transfers one or more pixel components from a user
% supplied buffer into the image pixel cache of an image. The pixels are
% expected in network byte order. It returns MagickTrue if the pixels are
% successfully transferred, otherwise MagickFalse.
%
% The format of the PushImagePixels method is:
%
% size_t PushImagePixels(Image *image,const QuantumType quantum,
% const unsigned char *source)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o quantum: Declare which pixel components to transfer (red, green, blue,
% opacity, RGB, or RGBA).
%
% o source: The pixel components are transferred from this buffer.
%
*/
MagickExport size_t PushImagePixels(Image *image,const QuantumType quantum,
  const unsigned char *source)
{
  QuantumInfo
    *quantum_info;

  size_t
    extent;

  /*
    Deprecated: import network-byte-order pixels through the quantum API.
    Returns the number of bytes transferred, or 0 on failure.
  */
  quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  extent=ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,quantum,
    source,&image->exception);
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z a t i o n E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizationError() measures the difference between the original and
% quantized images. This difference is the total quantization error. The
% error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% Deprecated, replace with:
%
% GetImageQuantizeError(image);
%
% The format of the QuantizationError method is:
%
% unsigned int QuantizationError(Image *image)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
*/
MagickExport unsigned int QuantizationError(Image *image)
{
  unsigned int
    status;

  /* Deprecated since v5.5.3: use GetImageQuantizeError(). */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3");
  status=(unsigned int) GetImageQuantizeError(image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a d i a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RadialBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RadialBlurImage method is:
%
% Image *RadialBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
% Image *RadialBlurImageChannel(const Image *image,const ChannelType channel,
% const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o angle: the angle of the radial blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RadialBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  Image
    *blur_image;

  /* Deprecated: this effect was renamed RotationalBlurImage(). */
  blur_image=RotationalBlurImage(image,angle,exception);
  return(blur_image);
}
MagickExport Image *RadialBlurImageChannel(const Image *image,
  const ChannelType channel,const double angle,ExceptionInfo *exception)
{
  Image
    *blur_image;

  /* Deprecated: this effect was renamed RotationalBlurImageChannel(). */
  blur_image=RotationalBlurImageChannel(image,channel,angle,exception);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% R a n d o m C h a n n e l T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomChannelThresholdImage() changes the value of individual pixels based
% on the intensity of each pixel compared to a random threshold. The result
% is a low-contrast, two color image.
%
% The format of the RandomChannelThresholdImage method is:
%
% unsigned int RandomChannelThresholdImage(Image *image,
% const char *channel, const char *thresholds,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o thresholds: a geometry string containing LOWxHIGH thresholds.
% If the string contains 2x2, 3x3, or 4x4, then an ordered
% dither of order 2, 3, or 4 will be performed instead.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  RandomChannelThresholdImage(): deprecated (last use v5.5.7).  Thresholds
  the selected channel(s) against either a random threshold inside a
  LOWxHIGH band (optionally given as percentages) or a fixed ordered-dither
  matrix when thresholds is "2x2", "3x3", or "4x4", producing a two-level
  result.

  channel: "all", "intensity", "opacity", or "matte" (others are rejected).
  thresholds: "LOWxHIGH" band or an ordered-dither order; NULL is a no-op.
  exception: receives any errors or warnings.

  Returns MagickTrue on success.
*/
MagickExport unsigned int RandomChannelThresholdImage(Image *image,
    const char *channel,const char *thresholds,ExceptionInfo *exception)
{
#define RandomChannelThresholdImageText " RandomChannelThreshold image... "
  double
    lower_threshold,
    upper_threshold;
  RandomInfo
    *random_info;
  ssize_t
    count,
    y;
  /* Ordered-dither matrices for orders 2, 3, and 4 (threshold fractions). */
  static MagickRealType
    o2[4]={0.2f, 0.6f, 0.8f, 0.4f},
    o3[9]={0.1f, 0.6f, 0.3f, 0.7f, 0.5f, 0.8f, 0.4f, 0.9f, 0.2f},
    o4[16]={0.1f, 0.7f, 1.1f, 0.3f, 1.0f, 0.5f, 1.5f, 0.8f, 1.4f, 1.6f, 0.6f,
    1.2f, 0.4f, 0.9f, 1.3f, 0.2f},
    threshold=128;
  size_t
    order;
  /*
    Threshold image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  /* A NULL threshold specification is a successful no-op. */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  lower_threshold=0;
  upper_threshold=0;
  /* order > 1 selects a fixed ordered dither; order 1 means random band. */
  if (LocaleCompare(thresholds,"2x2") == 0)
    order=2;
  else
    if (LocaleCompare(thresholds,"3x3") == 0)
      order=3;
    else
      if (LocaleCompare(thresholds,"4x4") == 0)
        order=4;
      else
        {
          order=1;
          count=(ssize_t) sscanf(thresholds,"%lf[/x%%]%lf",&lower_threshold,
            &upper_threshold);
          /* Percent thresholds scale into the quantum range. */
          if (strchr(thresholds,'%') != (char *) NULL)
            {
              upper_threshold*=(.01*QuantumRange);
              lower_threshold*=(.01*QuantumRange);
            }
          /* A single value implies the band [low, QuantumRange-low]. */
          if (count == 1)
            upper_threshold=(MagickRealType) QuantumRange-lower_threshold;
        }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      " RandomChannelThresholdImage: channel type=%s",channel);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      " Thresholds: %s (%fx%f)",thresholds,lower_threshold,upper_threshold);
  /* Intensity thresholding produces a 2-color palette image. */
  if (LocaleCompare(channel,"all") == 0 ||
      LocaleCompare(channel,"intensity") == 0)
    if (AcquireImageColormap(image,2) == MagickFalse)
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
  random_info=AcquireRandomInfo();
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;
    register IndexPacket
      index,
      *magick_restrict indexes;
    register PixelPacket
      *magick_restrict q;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    if (LocaleCompare(channel,"all") == 0 ||
        LocaleCompare(channel,"intensity") == 0)
      {
        indexes=GetAuthenticIndexQueue(image);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            intensity;
          intensity=GetPixelIntensity(image,q);
          if (order == 1)
            {
              /* Random threshold, clamped to the [lower,upper] band. */
              if (intensity < lower_threshold)
                threshold=lower_threshold;
              else if (intensity > upper_threshold)
                threshold=upper_threshold;
              else
                threshold=(MagickRealType) (QuantumRange*
                  GetPseudoRandomValue(random_info));
            }
          else if (order == 2)
            threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)];
          else if (order == 3)
            threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)];
          else if (order == 4)
            threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)];
          index=(IndexPacket) (intensity <= threshold ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
      }
    /*
      NOTE(review): when channel is "all", q was already advanced past the
      row by the intensity pass above, so this opacity pass continues from
      that position rather than restarting at the row head -- looks like a
      latent bug inherited from the original; confirm against upstream
      before changing.
    */
    if (LocaleCompare(channel,"opacity") == 0 ||
        LocaleCompare(channel,"all") == 0 ||
        LocaleCompare(channel,"matte") == 0)
      {
        if (image->matte != MagickFalse)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            if (order == 1)
              {
                if ((MagickRealType) q->opacity < lower_threshold)
                  threshold=lower_threshold;
                else if ((MagickRealType) q->opacity > upper_threshold)
                  threshold=upper_threshold;
                else
                  threshold=(MagickRealType) (QuantumRange*
                    GetPseudoRandomValue(random_info));
              }
            else if (order == 2)
              threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)];
            else if (order == 3)
              threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)];
            else if (order == 4)
              /* NOTE(review): the /1.7 divisor is unexplained upstream. */
              threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)]/1.7;
            SetPixelOpacity(q,(MagickRealType) q->opacity <=
              threshold ? 0 : QuantumRange);
            q++;
          }
      }
    else
      {
        /* To Do: red, green, blue, cyan, magenta, yellow, black */
        if (LocaleCompare(channel,"intensity") != 0)
          ThrowBinaryException(OptionError,"UnrecognizedChannelType",
            image->filename);
      }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  random_info=DestroyRandomInfo(random_info);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a c q u i r e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReacquireMemory() changes the size of the memory and returns a pointer to
% the (possibly moved) block. The contents will be unchanged up to the
% lesser of the new and old sizes.
%
% The format of the ReacquireMemory method is:
%
% void ReacquireMemory(void **memory,const size_t size)
%
% A description of each parameter follows:
%
% o memory: A pointer to a memory allocation. On return the pointer
% may change but the contents of the original allocation will not.
%
% o size: the new size of the allocated memory.
%
*/
MagickExport void ReacquireMemory(void **memory,const size_t size)
{
  void
    *block;

  assert(memory != (void **) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (*memory == (void *) NULL)
    {
      /* Nothing to resize: behave like a plain allocation. */
      *memory=AcquireMagickMemory(size);
      return;
    }
  /*
    Unlike plain realloc(), a failed resize releases the original block and
    leaves *memory NULL -- the historical semantics of this deprecated
    routine.
  */
  block=realloc(*memory,size);
  if (block == (void *) NULL)
    *memory=RelinquishMagickMemory(*memory);
  *memory=block;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e c o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RecolorImage() apply color transformation to an image. The method permits
% saturation changes, hue rotation, luminance to alpha, and various other
% effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the RecolorImage method is:
%
% Image *RecolorImage(const Image *image,const size_t order,
% const double *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o order: the number of columns and rows in the recolor matrix.
%
% o color_matrix: An array of double representing the recolor matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RecolorImage(const Image *image,const size_t order,
  const double *color_matrix,ExceptionInfo *exception)
{
  Image
    *recolor_image;

  KernelInfo
    *kernel_info;

  /*
    Deprecated: wrap the caller's matrix in a KernelInfo and delegate to
    ColorMatrixImage().  The kernel only borrows color_matrix, so the values
    pointer is detached again before the kernel is destroyed.
  */
  kernel_info=AcquireKernelInfo("1");
  if (kernel_info == (KernelInfo *) NULL)
    return((Image *) NULL);
  kernel_info->width=order;
  kernel_info->height=order;
  kernel_info->values=(double *) color_matrix;
  recolor_image=ColorMatrixImage(image,kernel_info,exception);
  kernel_info->values=(double *) NULL;
  kernel_info=DestroyKernelInfo(kernel_info);
  return(recolor_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e d u c e N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceNoiseImage() smooths the contours of an image while still preserving
% edge information. The algorithm works by replacing each pixel with its
% neighbor closest in value. A neighbor is defined by radius. Use a radius
% of 0 and ReduceNoise() selects a suitable radius for you.
%
% The format of the ReduceNoiseImage method is:
%
% Image *ReduceNoiseImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReduceNoiseImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  size_t
    width;

  /*
    Deprecated: delegate to StatisticImage() with a square NonpeakStatistic
    neighborhood; the radius is truncated to an integral width/height.
  */
  width=(size_t) radius;
  return(StatisticImage(image,NonpeakStatistic,width,width,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   R e l i n q u i s h   S e m a p h o r e I n f o                           %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishSemaphoreInfo() relinquishes a semaphore.
%
% The format of the RelinquishSemaphoreInfo method is:
%
% RelinquishSemaphoreInfo(SemaphoreInfo *semaphore_info)
%
% A description of each parameter follows:
%
% o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
/*
  RelinquishSemaphoreInfo(): deprecated.  Despite its name, this routine only
  releases (unlocks) the semaphore via UnlockSemaphoreInfo(); it does not
  destroy it.
*/
MagickExport void RelinquishSemaphoreInfo(SemaphoreInfo *semaphore_info)
{
  assert(semaphore_info != (SemaphoreInfo *) NULL);
  UnlockSemaphoreInfo(semaphore_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e A t t r i b u t e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageAttributeIterator() resets the image attributes iterator. Use it
% in conjunction with GetNextImageAttribute() to iterate over all the values
% associated with an image.
%
% Deprecated, replace with:
%
% ResetImagePropertyIterator(image);
%
% The format of the ResetImageAttributeIterator method is:
%
% ResetImageAttributeIterator(const ImageInfo *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  ResetImageAttributeIterator(): deprecated.  Image attributes are now image
  properties; forward to ResetImagePropertyIterator() so
  GetNextImageAttribute()-style iteration restarts at the beginning.
*/
MagickExport void ResetImageAttributeIterator(const Image *image)
{
  ResetImagePropertyIterator(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as
% defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
% GetCacheViewException(cache_view));
%
% The format of the SetCacheViewPixels method is:
%
% PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  /* Deprecated: forward to QueueCacheViewAuthenticPixels(). */
  return(QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   S e t C a c h e T h r e s h o l d                                         %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetCacheThreshold() sets the amount of free memory allocated for the pixel
% cache. Once this threshold is exceeded, all subsequent pixels cache
% operations are to/from disk.
%
% The format of the SetCacheThreshold() method is:
%
% void SetCacheThreshold(const size_t threshold)
%
% A description of each parameter follows:
%
% o threshold: the number of megabytes of memory available to the pixel
% cache.
%
*/
/*
  Deprecated: converts the megabyte threshold to bytes and installs it as the
  pixel-cache memory limit; the memory-map limit is set to twice that value.
*/
MagickExport void SetCacheThreshold(const size_t size)
{
  size_t
    limit;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  limit=size*1024*1024;  /* megabytes -> bytes */
  (void) SetMagickResourceLimit(MemoryResource,limit);
  (void) SetMagickResourceLimit(MapResource,2*limit);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t E x c e p t i o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetExceptionInfo() sets the exception severity.
%
% The format of the SetExceptionInfo method is:
%
% MagickBooleanType SetExceptionInfo(ExceptionInfo *exception,
% ExceptionType severity)
%
% A description of each parameter follows:
%
% o exception: the exception info.
%
% o severity: the exception severity.
%
*/
MagickExport MagickBooleanType SetExceptionInfo(ExceptionInfo *exception,
  ExceptionType severity)
{
  assert(exception != (ExceptionInfo *) NULL);
  /* Drop any previously queued exception state before installing the new
     severity. */
  ClearMagickException(exception);
  exception->severity=severity;
  return(MagickTrue);  /* always reports success */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImage() sets the red, green, and blue components of each pixel to
% the image background color and the opacity component to the specified
% level of transparency. The background color is defined by the
% background_color member of the image.
%
% The format of the SetImage method is:
%
% void SetImage(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: Set each pixel to this level of transparency.
%
*/
MagickExport void SetImage(Image *image,const Quantum opacity)
{
  PixelPacket
    background_color;

  ssize_t
    y;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.0");
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Start from the image's background color; override its opacity only when
     the caller asked for something other than fully opaque. */
  background_color=image->background_color;
  if (opacity != OpaqueOpacity)
    background_color.opacity=opacity;
  if (background_color.opacity != OpaqueOpacity)
    {
      /* A transparent fill needs DirectClass storage and an active matte
         (alpha) channel. */
      (void) SetImageStorageClass(image,DirectClass);
      image->matte=MagickTrue;
    }
  if ((image->storage_class == PseudoClass) ||
      (image->colorspace == CMYKColorspace))
    {
      /*
        Set colormapped or CMYK image.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *magick_restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception);
        if (q == (PixelPacket *) NULL)
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRGBO(q,&background_color);
          q++;
        }
        /* Reset the index channel (colormap index, or black for CMYK) to 0
           for the whole row. */
        indexes=GetAuthenticIndexQueue(image);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,0);
        if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
          break;
      }
      return;
    }
  /*
    Set DirectClass image.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelRGBO(q,&background_color);
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAttribute() searches the list of image attributes and replaces the
% attribute value. If it is not found in the list, the attribute name
% and value is added to the list.
%
% Deprecated, replace with:
%
% SetImageProperty(image,key,value);
%
% The format of the SetImageAttribute method is:
%
% MagickBooleanType SetImageAttribute(Image *image,const char *key,
% const char *value)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o key: the key.
%
% o value: the value.
%
*/
/*
  Deprecated: logs the deprecation event, then delegates to the modern
  SetImageProperty() and returns its status.
*/
MagickExport MagickBooleanType SetImageAttribute(Image *image,const char *key,
  const char *value)
{
  MagickBooleanType
    status;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
  status=SetImageProperty(image,key,value);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageList() inserts an image into the list at the specified position.
%
% The format of the SetImageList method is:
%
% unsigned int SetImageList(Image *images,const Image *image,
% const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o image: the image.
%
% o offset: the position within the list.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Deprecated: clones the given list and inserts the clone into *images at the
  given offset.  Returns MagickTrue on success, MagickFalse if the clone
  fails or offset is beyond the end of the list.
*/
MagickExport unsigned int SetImageList(Image **images,const Image *image,
  const ssize_t offset,ExceptionInfo *exception)
{
  Image
    *clone;

  register ssize_t
    i;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone=CloneImageList(image,exception);
  if (clone == (Image *) NULL)
    return(MagickFalse);  /* clone failed; details already in exception */
  /* Rewind to the head of the list before seeking forward by offset. */
  while (GetPreviousImageInList(*images) != (Image *) NULL)
    (*images)=GetPreviousImageInList(*images);
  for (i=0; i < offset; i++)
  {
    if (GetNextImageInList(*images) == (Image *) NULL)
      {
        /* Offset is past the end of the list: destroy the clone so it is not
           leaked (the previous code returned without freeing it). */
        clone=DestroyImageList(clone);
        return(MagickFalse);
      }
    (*images)=GetNextImageInList(*images);
  }
  InsertImageInList(images,clone);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImagePixels() queues a mutable pixel region.
% If the region is successfully initialized a pointer to a PixelPacket
% array representing the region is returned, otherwise NULL is returned.
% The returned pointer may point to a temporary working buffer for the
% pixels or it may point to the final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This useful while the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% SetImagePixels() any way it pleases. SetImagePixels() does not initialize
% the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in RAM, or in a
% memory-mapped file. The returned pointer should *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% Deprecated, replace with:
%
% QueueAuthenticPixels(image,x,y,columns,rows,&image->exception);
%
% The format of the SetImagePixels() method is:
%
% PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o pixels: SetImagePixels returns a pointer to the pixels if they are
% transferred, otherwise a NULL is returned.
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows)
{
  /* Deprecated: forwards to QueueAuthenticPixels(), reporting errors into
     the image's own exception structure. */
  return(QueueAuthenticPixels(image,x,y,columns,rows,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMagickRegistry() sets a blob into the registry and returns a unique ID.
% If an error occurs, -1 is returned.
%
% The format of the SetMagickRegistry method is:
%
% ssize_t SetMagickRegistry(const RegistryType type,const void *blob,
% const size_t length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o type: the registry type.
%
% o blob: the address of a Binary Large OBject.
%
% o length: For a registry type of ImageRegistryType use sizeof(Image)
%      otherwise the blob length in number of bytes.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Deprecated: stores blob in the image registry under a key generated from a
  monotonically increasing static counter.  Returns the assigned id, or -1 if
  the registry update fails.  The length parameter is unused.
*/
MagickExport ssize_t SetMagickRegistry(const RegistryType type,const void *blob,
  const size_t magick_unused(length),ExceptionInfo *exception)
{
  char
    key[MaxTextExtent];

  static ssize_t
    id = 0;

  magick_unreferenced(length);

  /* Key text intentionally includes the trailing newline (legacy format). */
  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  if (SetImageRegistry(type,key,blob,exception) == MagickFalse)
    return(-1);
  return(id++);  /* id only advances on success */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M o n i t o r H a n d l e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMonitorHandler() sets the monitor handler to the specified method
% and returns the previous monitor handler.
%
% The format of the SetMonitorHandler method is:
%
% MonitorHandler SetMonitorHandler(MonitorHandler handler)
%
% A description of each parameter follows:
%
% o handler: Specifies a pointer to a method to handle monitors.
%
*/
/* Returns the currently installed monitor handler (stored in the file-scope
   monitor_handler variable declared earlier in this file). */
MagickExport MonitorHandler GetMonitorHandler(void)
{
  return(monitor_handler);
}
/*
  Installs handler as the new monitor handler and returns the handler that
  was previously installed.
*/
MagickExport MonitorHandler SetMonitorHandler(MonitorHandler handler)
{
  MonitorHandler
    prior;

  prior=monitor_handler;
  monitor_handler=handler;
  return(prior);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h i f t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShiftImageList() removes an image from the beginning of the list.
%
% Deprecated, replace with:
%
% RemoveFirstImageFromList(images);
%
% The format of the ShiftImageList method is:
%
% Image *ShiftImageList(Image **images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *ShiftImageList(Image **images)
{
  /* Deprecated: pops and returns the first image of the list. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  return(RemoveFirstImageFromList(images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S i z e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SizeBlob() returns the current length of the image file or blob.
%
% Deprecated, replace with:
%
% GetBlobSize(image);
%
% The format of the SizeBlob method is:
%
%      MagickOffsetType SizeBlob(Image *image)
%
% A description of each parameter follows:
%
% o size: Method SizeBlob returns the current length of the image file
% or blob.
%
% o image: the image.
%
*/
MagickExport MagickOffsetType SizeBlob(Image *image)
{
  /* Deprecated: forwards to GetBlobSize(); deprecation is only logged when
     image debugging is enabled. */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3");
  return((MagickOffsetType) GetBlobSize(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImageList() removes the images designated by offset and length from
% the list and replaces them with the specified list.
%
% The format of the SpliceImageList method is:
%
% Image *SpliceImageList(Image *images,const ssize_t offset,
% const size_t length,const Image *splices,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o offset: the position within the list.
%
% o length: the length of the image list to remove.
%
% o splice: Replace the removed image list with this list.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Deprecated: removes length images starting at offset and replaces them with
  a clone of splices.  Returns the (possibly new) head of the list, or NULL
  if the clone fails or offset is beyond the end of the list.
*/
MagickExport Image *SpliceImageList(Image *images,const ssize_t offset,
  const size_t length,const Image *splices,ExceptionInfo *exception)
{
  Image
    *clone;

  register ssize_t
    i;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone=CloneImageList(splices,exception);
  if (clone == (Image *) NULL)
    return((Image *) NULL);  /* clone failed; details already in exception */
  /* Rewind to the head of the list before seeking forward by offset. */
  while (GetPreviousImageInList(images) != (Image *) NULL)
    images=GetPreviousImageInList(images);
  for (i=0; i < offset; i++)
  {
    if (GetNextImageInList(images) == (Image *) NULL)
      {
        /* Offset is past the end of the list: destroy the clone so it is not
           leaked (the previous code returned without freeing it). */
        clone=DestroyImageList(clone);
        return((Image *) NULL);
      }
    images=GetNextImageInList(images);
  }
  (void) SpliceImageIntoList(&images,length,clone);
  return(images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% s R G B C o m p a n d o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sRGBCompandor() adds the gamma function to a sRGB pixel.
%
% The format of the sRGBCompandor method is:
%
% MagickRealType sRGBCompandor(const MagickRealType pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel.
%
*/
/*
  Applies the sRGB encoding (gamma) function to a linear pixel value: values
  above the linear cutoff use the 1/2.4-power curve, values at or below it
  use the linear 12.92 segment.
*/
MagickExport MagickRealType sRGBCompandor(const MagickRealType pixel)
{
  if (pixel > (0.0031306684425005883*QuantumRange))
    return(QuantumRange*(1.055*pow(QuantumScale*pixel,1.0/2.4)-0.055));
  return(12.92*pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Strip() strips any whitespace or quotes from the beginning and end of a
% string of characters.
%
% The format of the Strip method is:
%
% void Strip(char *message)
%
% A description of each parameter follows:
%
% o message: Specifies an array of characters.
%
*/
MagickExport void Strip(char *message)
{
  register char
    *p,
    *q;

  assert(message != (char *) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  /* Nothing to do for empty or single-character strings. */
  if (*message == '\0')
    return;
  if (strlen(message) == 1)
    return;
  /* p: first kept character.  Skip leading whitespace, then at most one
     leading quote. */
  p=message;
  while (isspace((int) ((unsigned char) *p)) != 0)
    p++;
  if ((*p == '\'') || (*p == '"'))
    p++;
  /* q: last kept character.  Skip trailing whitespace, then at most one
     trailing quote; the q > p guards keep the range from collapsing past the
     start. */
  q=message+strlen(message)-1;
  while ((isspace((int) ((unsigned char) *q)) != 0) && (q > p))
    q--;
  if (q > p)
    if ((*q == '\'') || (*q == '"'))
      q--;
  /* Shift the kept span to the front in place and terminate.  memcpy is safe
     here only because the destination starts at or before the source. */
  (void) memcpy(message,p,(size_t) (q-p+1));
  message[q-p+1]='\0';
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncCacheView() saves the cache view pixels to the in-memory or disk
% cache. It returns MagickTrue if the pixel region is synced, otherwise
% MagickFalse.
%
% Deprecated, replace with:
%
% SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view));
%
% The format of the SyncCacheView method is:
%
% MagickBooleanType SyncCacheView(CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
/*
  Deprecated: flushes the cache view's pixels via
  SyncCacheViewAuthenticPixels(), reporting errors into the view's own
  exception structure.
*/
MagickExport MagickBooleanType SyncCacheView(CacheView *cache_view)
{
  return(SyncCacheViewAuthenticPixels(cache_view,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncCacheViewPixels() saves the cache view pixels to the in-memory
% or disk cache. It returns MagickTrue if the pixel region is flushed,
% otherwise MagickFalse.
%
% Deprecated, replace with:
%
% SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view));
%
% The format of the SyncCacheViewPixels method is:
%
% MagickBooleanType SyncCacheViewPixels(CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Deprecated: identical to SyncCacheView(); flushes the cache view's pixels
  through SyncCacheViewAuthenticPixels().
*/
MagickExport MagickBooleanType SyncCacheViewPixels(CacheView *cache_view)
{
  return(SyncCacheViewAuthenticPixels(cache_view,
    GetCacheViewException(cache_view)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is synced, otherwise
% MagickFalse.
%
% Deprecated, replace with:
%
% SyncAuthenticPixels(image,&image->exception);
%
% The format of the SyncImagePixels() method is:
%
% MagickBooleanType SyncImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagePixels(Image *image)
{
  /* Deprecated: forwards to SyncAuthenticPixels() with the image's own
     exception structure. */
  return(SyncAuthenticPixels(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y s t e m C o m m a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SystemCommand() executes the specified command and waits until it
% terminates. The returned value is the exit status of the command.
%
% The format of the SystemCommand method is:
%
% int SystemCommand(const MagickBooleanType asynchronous,
% const MagickBooleanType verbose,const char *command,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o asynchronous: a value other than 0 executes the parent program
% concurrently with the new child process.
%
% o verbose: a value other than 0 prints the executed command before it is
% invoked.
%
% o command: this string is the command to execute.
%
% o exception: return any errors here.
%
*/
/*
  Deprecated: runs command through ExternalDelegateCommand() with no
  sanitization message and returns the command's exit status.
*/
MagickExport int SystemCommand(const MagickBooleanType asynchronous,
  const MagickBooleanType verbose,const char *command,ExceptionInfo *exception)
{
  return(ExternalDelegateCommand(asynchronous,verbose,command,(char *) NULL,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e m p o r a r y F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TemporaryFilename() replaces the contents of path by a unique path name.
%
% The format of the TemporaryFilename method is:
%
% void TemporaryFilename(char *path)
%
% A description of each parameter follows.
%
% o path: Specifies a pointer to an array of characters. The unique path
% name is returned in this array.
%
*/
MagickExport void TemporaryFilename(char *path)
{
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  /* Reserve a unique filename, then immediately release the underlying file
     resource so only the generated name remains in path.  NOTE(review): the
     name is therefore not held reserved after this call — presumably callers
     create the file promptly; confirm there is no TOCTOU concern. */
  (void) AcquireUniqueFilename(path);
  (void) RelinquishUniqueFileResource(path);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThresholdImage() changes the value of individual pixels based on
% the intensity of each pixel compared to threshold. The result is a
% high-contrast, two color image.
%
% The format of the ThresholdImage method is:
%
% unsigned int ThresholdImage(Image *image,const double threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the threshold value
%
*/
MagickExport unsigned int ThresholdImage(Image *image,const double threshold)
{
#define ThresholdImageTag  "Threshold/Image"

  IndexPacket
    index;

  ssize_t
    y;

  /*
    Threshold image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  /* Convert the image to a two-entry colormap (bilevel) representation. */
  if (!AcquireImageColormap(image,2))
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      "UnableToThresholdImage");
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetAuthenticIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Colormap entry 0 for intensity at or below threshold, 1 above. */
      index=(IndexPacket) (GetPixelIntensity(image,q) <=
        threshold ? 0 : 1);
      SetPixelIndex(indexes+x,index);
      SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (!SyncAuthenticPixels(image,&image->exception))
      break;
  }
  /* Always reports success, even if a row failed to sync. */
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h r e s h o l d I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThresholdImageChannel() changes the value of individual pixels based on
% the intensity of each pixel channel. The result is a high-contrast image.
%
% The format of the ThresholdImageChannel method is:
%
% unsigned int ThresholdImageChannel(Image *image,const char *threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
*/
MagickExport unsigned int ThresholdImageChannel(Image *image,
  const char *threshold)
{
#define ThresholdImageTag  "Threshold/Image"

  MagickPixelPacket
    pixel;

  GeometryInfo
    geometry_info;

  IndexPacket
    index;

  ssize_t
    y;

  unsigned int
    flags;

  /*
    Threshold image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A NULL threshold string is treated as a no-op success. */
  if (threshold == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* Parse the threshold geometry: rho/sigma/xi/psi map to R/G/B/opacity;
     missing channel values default to the red (rho) value, missing opacity
     defaults to opaque. */
  GetMagickPixelPacket(image,&pixel);
  flags=ParseGeometry(threshold,&geometry_info);
  pixel.red=geometry_info.rho;
  if (flags & SigmaValue)
    pixel.green=geometry_info.sigma;
  else
    pixel.green=pixel.red;
  if (flags & XiValue)
    pixel.blue=geometry_info.xi;
  else
    pixel.blue=pixel.red;
  if (flags & PsiValue)
    pixel.opacity=geometry_info.psi;
  else
    pixel.opacity=(MagickRealType) OpaqueOpacity;
  /* A trailing '%' scales all channel thresholds from percent to quantum. */
  if (flags & PercentValue)
    {
      pixel.red*=QuantumRange/100.0f;
      pixel.green*=QuantumRange/100.0f;
      pixel.blue*=QuantumRange/100.0f;
      pixel.opacity*=QuantumRange/100.0f;
    }
  /* With a single threshold value the image becomes bilevel; a zero
     threshold triggers automatic (dynamic) threshold selection. */
  if (!(flags & SigmaValue))
    {
      if (!AcquireImageColormap(image,2))
        ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
          "UnableToThresholdImage");
      if (pixel.red == 0)
        (void) GetImageDynamicThreshold(image,2.0,2.0,&pixel,&image->exception);
    }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetAuthenticIndexQueue(image);
    if (IsMagickGray(&pixel) != MagickFalse)
      /* Gray threshold: compare overall intensity, write colormap colors. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        index=(IndexPacket) (GetPixelIntensity(image,q) <= pixel.red ? 0 : 1);
        SetPixelIndex(indexes+x,index);
        SetPixelRed(q,image->colormap[(ssize_t) index].red);
        SetPixelGreen(q,image->colormap[(ssize_t) index].green);
        SetPixelBlue(q,image->colormap[(ssize_t) index].blue);
        q++;
      }
    else
      /* Per-channel threshold: each channel snaps to 0 or QuantumRange
         independently. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelRed(q,(MagickRealType) q->red <= pixel.red
          ? 0 : QuantumRange);
        SetPixelGreen(q,(MagickRealType) q->green <= pixel.green
          ? 0 : QuantumRange);
        SetPixelBlue(q,(MagickRealType) q->blue <= pixel.blue
          ? 0 : QuantumRange);
        SetPixelOpacity(q,(MagickRealType) q->opacity <= pixel.opacity
          ? 0 : QuantumRange);
        q++;
      }
    if (!SyncAuthenticPixels(image,&image->exception))
      break;
  }
  /* Always reports success, even if a row failed to sync. */
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformColorspace() converts the image to a specified colorspace.
% If the image is already in the requested colorspace, no work is performed.
% Note that the current colorspace is stored in the image colorspace member.
% The transformation matrices are not necessarily the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% Deprecated, replace with:
%
% TransformImageColorspace(image,colorspace);
%
% The format of the TransformColorspace method is:
%
% unsigned int (void) TransformColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image to transform
%
% o colorspace: the desired colorspace.
%
*/
MagickExport unsigned int TransformColorspace(Image *image,
  const ColorspaceType colorspace)
{
  /* Deprecated: forwards to TransformImageColorspace(); deprecation is only
     logged when image debugging is enabled. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  return(TransformImageColorspace(image,colorspace));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m H S L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformHSL() converts a (red, green, blue) to a (hue, saturation,
% lightness) triple.
%
% The format of the TransformHSL method is:
%
% void TransformHSL(const Quantum red,const Quantum green,
% const Quantum blue,double *hue,double *saturation,double *lightness)
%
% A description of each parameter follows:
%
% o red, green, blue: A Quantum value representing the red, green, and
% blue component of a pixel..
%
% o hue, saturation, lightness: A pointer to a double value representing a
% component of the HSL color space.
%
*/
MagickExport void TransformHSL(const Quantum red,const Quantum green,
  const Quantum blue,double *hue,double *saturation,double *lightness)
{
  MagickRealType
    b,
    delta,
    g,
    max,
    min,
    r;

  /*
    Convert RGB to HSL colorspace.
  */
  assert(hue != (double *) NULL);
  assert(saturation != (double *) NULL);
  assert(lightness != (double *) NULL);
  /* Normalize channels to [0,1]. */
  r=QuantumScale*red;
  g=QuantumScale*green;
  b=QuantumScale*blue;
  max=MagickMax(r,MagickMax(g,b));
  min=MagickMin(r,MagickMin(g,b));
  *hue=0.0;
  *saturation=0.0;
  *lightness=(double) ((min+max)/2.0);
  delta=max-min;
  /* Achromatic (gray) pixel: hue and saturation stay 0. */
  if (delta == 0.0)
    return;
  *saturation=(double) (delta/((*lightness < 0.5) ? (min+max) :
    (2.0-max-min)));
  /* Hue sector depends on which channel is the maximum; the exact float
     equality tests rely on max/min being copies of r, g, or b. */
  if (r == max)
    *hue=(double) (g == min ? 5.0+(max-b)/delta : 1.0-(max-g)/delta);
  else
    if (g == max)
      *hue=(double) (b == min ? 1.0+(max-r)/delta : 3.0-(max-b)/delta);
    else
      *hue=(double) (r == min ? 3.0+(max-g)/delta : 5.0-(max-r)/delta);
  /* Scale hue from sector units (0..6) to [0,1). */
  *hue/=6.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s l a t e T e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TranslateText() replaces any embedded formatting characters with the
% appropriate image attribute and returns the translated text.
%
% Deprecated, replace with:
%
% InterpretImageProperties(image_info,image,embed_text);
%
% The format of the TranslateText method is:
%
% char *TranslateText(const ImageInfo *image_info,Image *image,
% const char *embed_text)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o embed_text: the address of a character string containing the embedded
% formatting characters.
%
*/
MagickExport char *TranslateText(const ImageInfo *image_info,Image *image,
  const char *embed_text)
{
  /* Deprecated: forwards to InterpretImageProperties(); the caller owns the
     returned string. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.6");
  return(InterpretImageProperties(image_info,image,embed_text));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the TransparentImage method is:
%
% MagickBooleanType TransparentImage(Image *image,
% const PixelPacket target,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o opacity: the replacement opacity value.
%
*/
MagickExport MagickBooleanType TransparentImage(Image *image,
  const PixelPacket target,const Quantum opacity)
{
#define TransparentImageTag  "Transparent/Image"

  MagickBooleanType
    proceed;

  ssize_t
    y;

  /*
    Make image color transparent.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Ensure an alpha channel exists before writing opacity values. */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Fuzz-aware color match; matching pixels get the new opacity. */
      if (IsColorSimilar(image,q,&target) != MagickFalse)
        q->opacity=opacity;
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
    /* Progress callback may request cancellation. */
    proceed=SetImageProgress(image,TransparentImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /* Always reports success, even if interrupted or a row failed to sync. */
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h i f t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnshiftImageList() adds the image to the beginning of the list.
%
% Deprecated, replace with:
%
% PrependImageToList(images,CloneImageList(image,exception));
%
% The format of the UnshiftImageList method is:
%
% unsigned int UnshiftImageList(Image *images,const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Deprecated: clones the given image list and prepends the clone to *images.
  Always reports success.
*/
MagickExport unsigned int UnshiftImageList(Image **images,const Image *image,
  ExceptionInfo *exception)
{
  Image
    *clone;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone=CloneImageList(image,exception);
  PrependImageToList(images,clone);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ V a l i d a t e C o l o r m a p I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ValidateColormapIndex() validates the colormap index. If the index does
% not range from 0 to the number of colors in the colormap an exception
% issued and 0 is returned.
%
% Deprecated, replace with:
%
% ConstrainColormapIndex(image,index);
%
% The format of the ValidateColormapIndex method is:
%
% IndexPacket ValidateColormapIndex(Image *image,const unsigned int index)
%
% A description of each parameter follows:
%
% o index: Method ValidateColormapIndex returns colormap index if it is
% valid other an exception issued and 0 is returned.
%
% o image: the image.
%
% o index: This integer is the colormap index.
%
*/
MagickExport IndexPacket ValidateColormapIndex(Image *image,
  const size_t index)
{
  /* Deprecated: forwards to ConstrainColormapIndex(); deprecation is only
     logged when image debugging is enabled. */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.4");
  return(ConstrainColormapIndex(image,index));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z o o m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZoomImage() creates a new image that is a scaled size of an existing one.
% It allocates the memory necessary for the new Image structure and returns a
% pointer to the new image. The Point filter gives fast pixel replication,
% Triangle is equivalent to bi-linear interpolation, and Mitchell gives slower,
% very high-quality results. See Graphic Gems III for details on this
% algorithm.
%
% The filter member of the Image structure specifies which image filter to
% use. Blur specifies the blur factor where > 1 is blurry, < 1 is sharp.
%
% The format of the ZoomImage method is:
%
% Image *ZoomImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: An integer that specifies the number of columns in the zoom
% image.
%
% o rows: An integer that specifies the number of rows in the scaled
% image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Deprecated wrapper around ResizeImage(): scale `image' to columns x rows
  using the filter and blur factor already stored in the Image structure.
  Returns the new image, or NULL on failure (details in `exception').
*/
MagickExport Image *ZoomImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  Image
    *zoom_image;

  /* Validate the public API contract before delegating. */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  zoom_image=ResizeImage(image,columns,rows,image->filter,image->blur,
    exception);
  return(zoom_image);
}
#endif
|
main.c | #include <stdlib.h>
#include <stdio.h>
#include <getopt.h>
#include <time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
/*
 * 2D heat-equation benchmark (5-point stencil, Jacobi-style sweeps with
 * double buffering and pointer swap).
 *
 * Options:
 *   -h height      grid rows                     (default 8000)
 *   -w width       grid columns                  (default 8000)
 *   -t iterations  number of stencil sweeps      (default 1000)
 *   -a alpha       diffusion coefficient         (default 0.1)
 *
 * Returns EXIT_SUCCESS on completion, EXIT_FAILURE on bad usage or
 * allocation failure.
 */
int main (int argc, char** argv)
{
  // Initialization of variables
  int i, j, k, height = 8000, width = 8000, num_iterations = 1000, opt;
  float alpha = 0.1;
#ifndef _OPENMP
  clock_t before, after;   /* CPU-time clock when OpenMP is unavailable */
#else
  double before, after;    /* wall-clock time from omp_get_wtime() */
#endif
  double time_used;
  // Parsing command-line options
  while ((opt = getopt(argc, argv, "h:w:t:a:")) != -1) {
    switch (opt) {
      case 'h':
        height = atoi(optarg);
        break;
      case 'w':
        width = atoi(optarg);
        break;
      case 't':
        num_iterations = atoi(optarg);
        break;
      case 'a':
        alpha = atof(optarg);
        break;
      default:
        fprintf(stderr, "Usage: %s [-h height] [-w width] [-t no. iterations] [-a alpha value for heat eq.]\n", argv[0]);
        exit(EXIT_FAILURE);
    }
  }
  // beta reduces the stencil operation to only require 6 flops (instead of 7)
  float beta = (1 - 4*alpha);
  // Allocate matrices (one malloc per row); fail fast on exhaustion.
  float **tmp; // temporary pointer to perform pointer swaps
  float **a = (float**) malloc(height*sizeof(float*));
  float **b = (float**) malloc(height*sizeof(float*));
  if (a == NULL || b == NULL) {
    fprintf(stderr, "allocation failure\n");
    exit(EXIT_FAILURE);
  }
  for (i = 0; i < height; ++i) {
    a[i] = (float*) malloc(width*sizeof(float));
    b[i] = (float*) malloc(width*sizeof(float));
    if (a[i] == NULL || b[i] == NULL) {
      fprintf(stderr, "allocation failure\n");
      exit(EXIT_FAILURE);
    }
  }
  // Instantiate random values in matrices.  rand() is not required to be
  // thread-safe, so each row uses its own rand_r() state instead of racing
  // on the global PRNG from multiple threads.
#pragma omp parallel for private(j)
  for (i = 0; i < height; ++i) {
    unsigned int seed = (unsigned int) i;
    for (j = 0; j < width; ++j) {
      a[i][j] = (float) rand_r(&seed) / (float) (RAND_MAX);
      b[i][j] = a[i][j];
    }
  }
  // Start timer
#ifndef _OPENMP
  before = clock();
#else
  before = omp_get_wtime();
#endif
  // Perform computations
#pragma omp parallel private(i,j,k)
  {
#ifdef _OPENMP
#pragma omp single
    {
      printf("Using %d OpenMP threads to parallelize heat equation\n", omp_get_num_threads());
      fflush(NULL);
    }
#endif
    // Perform heat equation: interior update into b, then swap buffers.
    // The implicit barriers of `omp for` and `omp single` keep the swap
    // ordered with respect to the sweeps.
    for (k = 0; k < num_iterations; ++k) {
#pragma omp for
      for (i = 1; i < height - 1; ++i)
        for (j = 1; j < width - 1; ++j)
          b[i][j] = beta*a[i][j] + alpha*(a[i-1][j] + a[i][j-1] + a[i][j+1] + a[i+1][j]);
#pragma omp single
      {
        // pointer swap
        tmp = b;
        b = a;
        a = tmp;
      }
    }
  }
  // End timer and evaluate time used
#ifndef _OPENMP
  after = clock();
  time_used = (float) (after - before) / (float) CLOCKS_PER_SEC;
#else
  after = omp_get_wtime();
  time_used = after - before;
#endif
  // deallocate matrices
  for (i = 0; i < height; ++i) {
    free(a[i]);
    free(b[i]);
  }
  free(a);
  free(b);
  // Report parameters and results
  printf("2D Grid : %d x %d\n", height, width);
  printf("Iterations : %d\n", num_iterations);
  printf("alpha : %g\n", alpha);
  printf("Time : %f s\n", time_used);
  printf("Throughput : %f GFLOPS\n", 1e-9*(float)num_iterations*(float)(height-2)*(float)(width-2)*6/time_used);
  printf("Minimal Bandwidth : %f GB/s\n", 1e-9*sizeof(float)*(float)num_iterations*(float)height*(float)width*2.0/time_used);
  return EXIT_SUCCESS;
}
stencil3d_mdev.c.c | //
// Created by Yonghong Yan on 3/8/16.
//
#if 0
void stencil3d_omp_mdev(long n, long m, long k, REAL *u, int radius, REAL *coeff, int num_its) {
long it; /* iteration */
long u_dimX = n + 2 * radius;
long u_dimY = m + 2 * radius;
long u_dimZ = k + 2 * radius;
int coeff_dimX = 2 * radius + 1;
coeff = coeff + (2 * radius + 1) * radius + radius; /* let coeff point to the center element */
int count = 4*radius+1;
#ifdef SQUARE_SETNCIL
count = coeff_dimX * coeff_dimX * coeff_dimX;
#endif
/* uold should be simpliy allocated on the dev and then copy data from u, here we simplified the initialization */
REAL *uold = (REAL *) malloc(sizeof(REAL) * u_dimX * u_dimY * u_dimZ);
memcpy(uold, u, sizeof(REAL)*u_dimX * u_dimY * u_dimZ);
#pragma omp target data device(*) map(to:n, m, u_dimX, u_dimY, radius, coeff_center, coeff[coeff_dimX][coeff_dimX]) \
map(tofrom:u[u_dimX][u_dimY] dist_data(BLOCK,DUPLICATE) halo(radius,)) map(to:uold[u_dimX][u_dimY] dist_data(BLOCK,DUPLICATE) halo(radius,))
#pragma omp parallel shared(n, m, radius, coeff, num_its, u_dimX, u_dimY, coeff_dimX) private(it) firstprivate(u, uold) //num_threads(/* num of devices + number of cores */)
{
int ix, iy, iz, ir;
/*
#pragma omp target device(*) dist_iteration(BLOCK)
#pragma omp for
for (ix = 0; ix < u_dimX; ix++) {
for (iy = 0; iy < u_dimY; iy++) {
uold[ix * u_dimY + iy] = u[ix * u_dimY + iy];
}
}
*/
for (it = 0; it < num_its; it++) {
#pragma omp target device(*) dist_iteration(BLOCK)
#pragma omp for
for (ix = 0; ix < n; ix++) {
REAL *temp_u = &u[(ix + radius) * u_dimY+radius];
REAL *temp_uold = &uold[(ix + radius) * u_dimY+radius];
for (iy = 0; iy < m; iy++) {
for (iz = 0; iz < k; iz++) {
REAL result = temp_uold[0] * coeff[0];
/* 2/4 way loop unrolling */
for (ir = 1; ir <= radius; ir++) {
result += coeff[ir] * temp_uold[ir]; //horizontal right
result += coeff[-ir] * temp_uold[-ir]; // horizontal left
result += coeff[-ir * coeff_dimX] * temp_uold[-ir * u_dimY]; //vertical up
result += coeff[ir * coeff_dimX] * temp_uold[ir * u_dimY]; // vertical bottom
result += coeff[-ir * coeff_dimX] * temp_uold[-ir * u_dimZ]; //vertical up - Z
result += coeff[ir * coeff_dimX] * temp_uold[ir * u_dimZ]; // vertical bottom - Z
#ifdef SQUARE_SETNCIL
result += coeff[-ir*coeff_dimX-ir] * temp_uold[-ir * u_dimY-ir] // left upper corner
result += coeff[-ir*coeff_dimX+ir] * temp_uold[-ir * u_dimY+ir] // right upper corner
result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimY]-ir] // left bottom corner
result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimY]+ir] // right bottom corner
result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimZ]-ir] // left bottom corner - Z
result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimZ]+ir] // right bottom corner - Z
#endif
}
*temp_u = result/count;
temp_u++;
temp_uold++;
}//z end
}//y end
}
#pragma omp halo_exchange(u);
REAL *tmp = uold;
uold = u;
u = tmp;
} /* End iteration loop */
}
free(uold);
}
#endif
/*
 * Offload num_its iterations of a radius-`radius' 3D stencil over an
 * (n+2r) x (m+2r) x (k+2r) grid `u' across all active devices, using the
 * project's offloading runtime.  Returns total offload time in ms
 * (init + copy-to + kernel + copy-back).
 *
 * NOTE(review): several omp_data_map_init_info() calls below pass 2 as the
 * ndims argument while the dims are set with *_set_dims_3d() -- confirm
 * against the runtime API whether this should be 3 for 3-D maps.
 */
double stencil3d_omp_mdev(long n, long m, long k, REAL *u, int radius, REAL *coeff, int num_its) {
    long u_dimX = n + 2 * radius;
    long u_dimY = m + 2 * radius;
    long u_dimZ = k + 2 * radius;
    int coeff_dimX = 2*radius+1;  /* side length of the coefficient cube */
    REAL * coeff_center = coeff + (2*radius+1) * radius + radius; /* let coeff point to the center element */
    /* uold mirrors u so the kernel can read the previous iterate. */
    REAL *uold = (REAL *) omp_unified_malloc(sizeof(REAL) * u_dimX * u_dimY* u_dimZ);
    memcpy(uold, u, sizeof(REAL)*u_dimX * u_dimY* u_dimZ);
    //print_array("Before offloading", "u", u, u_dimX, u_dimY);
    double off_init_time = read_timer_ms();
    int __top_ndims__;
    /**************************************** dist-specific *****************************************/
    if (dist_dim == 1 || dist_dim == 2) __top_ndims__ = 1;
    else /* dist == 3 */__top_ndims__ = 2;
    /************************************************************************************************/
    /* use all the devices */
    int __num_targets__ = omp_get_num_active_devices(); /*XXX: = runtime or compiler generated code */
    omp_grid_topology_t * __top__ = omp_grid_topology_init_simple(__num_targets__, __top_ndims__);
    /* init other infos (dims, periodic, idmaps) of top if needed */
    int __num_maps__ = 3; /* u, uold and the coeff */ /* XXX: need compiler output */
    /* data copy offloading */
    omp_offloading_info_t *__copy_data_off_info__ =
            omp_offloading_init_info("data copy", __top__, 1, OMP_OFFLOADING_DATA, __num_maps__, NULL, NULL, 0);
    /* stencil kernel offloading */
    struct stencil3d_off_args off_args;
    off_args.n = n; off_args.m = m; off_args.u = u; off_args.radius = radius; off_args.coeff = coeff; off_args.num_its = num_its;
    off_args.uold = uold; off_args.coeff_center = coeff_center; off_args.coeff_dimX = coeff_dimX; off_args.u_dimX = u_dimX; off_args.u_dimY = u_dimY; off_args.u_dimZ = u_dimZ;
    omp_offloading_info_t * __off_info__ =
            omp_offloading_init_info("stencil3d kernel", __top__, 1, OMP_OFFLOADING_CODE, 0,
                                     stencil3d_omp_mdev_off_launcher, &off_args, 1);
    omp_offloading_append_profile_per_iteration(__off_info__, 13*u_dimY*u_dimZ, 7, 1);//NOTE: how to handle this for z?
    //printf("data copy off: %X, stencil3d off: %X\n", __copy_data_off_info__, __off_info__);
    /* u map info */
    omp_data_map_info_t *__u_map_info__ = &__copy_data_off_info__->data_map_info[0];
    omp_data_map_init_info("u", __u_map_info__, __copy_data_off_info__, u, 2, sizeof(REAL), OMP_DATA_MAP_TOFROM, OMP_DATA_MAP_AUTO);
    omp_data_map_info_set_dims_3d(__u_map_info__, u_dimX, u_dimY, u_dimZ);
    /* uold map info */
    omp_data_map_info_t *__uold_map_info__ = &__copy_data_off_info__->data_map_info[1];
    omp_data_map_init_info("uold", __uold_map_info__, __copy_data_off_info__, uold, 2, sizeof(REAL), OMP_DATA_MAP_TO, OMP_DATA_MAP_AUTO);
    omp_data_map_info_set_dims_3d(__uold_map_info__, u_dimX, u_dimY, u_dimZ);
    /* coeff map info: with __num_maps__ == 3 the valid slots of
     * data_map_info[] are 0..2; the previous code wrote to slot 3, one past
     * the end (out-of-bounds).  Slot 2 was left unused, so use it. */
    omp_data_map_info_t *__coeff_map_info__ = &__copy_data_off_info__->data_map_info[2];
    omp_data_map_init_info("coeff", __coeff_map_info__, __copy_data_off_info__, coeff, 2, sizeof(REAL), OMP_DATA_MAP_TO, OMP_DATA_MAP_AUTO);
    omp_data_map_info_set_dims_3d(__coeff_map_info__, coeff_dimX, coeff_dimX, coeff_dimX);
    /* coeff is small; replicate it whole on every device */
    omp_data_map_dist_init_info(__coeff_map_info__, 0, OMP_DIST_POLICY_DUPLICATE, 0, coeff_dimX, 0);
    omp_data_map_dist_init_info(__coeff_map_info__, 1, OMP_DIST_POLICY_DUPLICATE, 0, coeff_dimX, 0);
    omp_data_map_dist_init_info(__coeff_map_info__, 2, OMP_DIST_POLICY_DUPLICATE, 0, coeff_dimX, 0);
    /**************************************** dist-specific *****************************************/
    if (dist_dim == 1) {
        if (dist_policy == 1) { /* BLOCK_BLOCK */
            omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_BLOCK, radius, n, 0);
            omp_loop_dist_init_info(__off_info__, 0, OMP_DIST_POLICY_BLOCK, 0, n, 0);
            //printf("BLOCK dist policy for arrays and loop dist\n");
        } else if (dist_policy == 2) { /* BLOCK_ALIGN */
            omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_BLOCK, radius, n, 0);
            omp_loop_dist_align_with_data_map(__off_info__, 0, 0, __u_map_info__, 0);
            //printf("BLOCK dist policy for arrays, and loop dist align with array A row dist\n");
        } else if (dist_policy == 3) { /* AUTO_ALIGN */
            omp_loop_dist_init_info(__off_info__, 0, OMP_DIST_POLICY_AUTO, 0, n, 0);
            omp_data_map_dist_align_with_loop(__u_map_info__, 0, radius, __off_info__, 0);
            //printf("AUTO dist policy for loop dist and array align with loops\n");
        }
        omp_data_map_dist_init_info(__u_map_info__, 1, OMP_DIST_POLICY_DUPLICATE, 0, u_dimY, 0);
        omp_map_add_halo_region(__u_map_info__, 0, radius, radius, OMP_DIST_HALO_EDGING_REFLECTING);
        omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, OMP_ALIGNEE_START, __u_map_info__, OMP_ALL_DIMENSIONS);
        omp_data_map_dist_init_info(__u_map_info__, 2, OMP_DIST_POLICY_DUPLICATE, 0, u_dimZ, 0);
        /* NOTE(review): the halo registration and uold alignment below repeat
         * the two calls above verbatim -- confirm whether re-registration is
         * intentional (idempotent) or leftover copy-paste. */
        omp_map_add_halo_region(__u_map_info__, 0, radius, radius, OMP_DIST_HALO_EDGING_REFLECTING);
        omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, OMP_ALIGNEE_START, __u_map_info__, OMP_ALL_DIMENSIONS);
    } else if (dist_dim == 2) {
        omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_DUPLICATE, radius, n, 0);
        omp_data_map_dist_init_info(__u_map_info__, 1, OMP_DIST_POLICY_BLOCK, radius, n, 0);
        omp_data_map_dist_init_info(__u_map_info__, 2, OMP_DIST_POLICY_BLOCK, radius, n, 0);
        omp_map_add_halo_region(__u_map_info__, 0, radius, radius, OMP_DIST_HALO_EDGING_REFLECTING);
        omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, 0, __u_map_info__, OMP_ALL_DIMENSIONS);
        omp_loop_dist_init_info(__off_info__, 1, OMP_DIST_POLICY_BLOCK, 0, m, 0);
        omp_loop_dist_init_info(__off_info__, 2, OMP_DIST_POLICY_BLOCK, 0, k, 0);
    } else /* dist == 3 */{
        omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_BLOCK, radius, n, 0);
        omp_data_map_dist_init_info(__u_map_info__, 1, OMP_DIST_POLICY_BLOCK, radius, n, 1);
        omp_data_map_dist_init_info(__u_map_info__, 2, OMP_DIST_POLICY_BLOCK, radius, n, 1);
        omp_map_add_halo_region(__u_map_info__, 0, radius, radius, OMP_DIST_HALO_EDGING_REFLECTING);
        omp_map_add_halo_region(__u_map_info__, 1, radius, radius, OMP_DIST_HALO_EDGING_REFLECTING);
        omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, 0, __u_map_info__, OMP_ALL_DIMENSIONS);
        omp_loop_dist_init_info(__off_info__, 0, OMP_DIST_POLICY_BLOCK, 0, n, 0);
        omp_loop_dist_init_info(__off_info__, 1, OMP_DIST_POLICY_BLOCK, 0, m, 1);
        omp_loop_dist_init_info(__off_info__, 2, OMP_DIST_POLICY_BLOCK, 0, k, 1);
    }
    /************************************************************************************************/
    off_init_time = read_timer_ms() - off_init_time;
    /*********** NOW notifying helper thread to work on this offload ******************/
#if DEBUG_MSG
    /* fixed: this printf previously referenced the undeclared name
     * __num_target_devices__; the declared variable is __num_targets__. */
    printf("=========================================== offloading to %d targets ==========================================\n", __num_targets__);
#endif
    double off_copyto_time = read_timer_ms();
    double start_time = off_copyto_time;
    omp_offloading_start(__copy_data_off_info__, 0);
    omp_print_map_info(__u_map_info__);
    omp_print_map_info(__uold_map_info__);
    omp_print_map_info(__coeff_map_info__);
    off_copyto_time = read_timer_ms() - off_copyto_time;
    // printf("offloading from stencil now\n");
    double off_kernel_time = read_timer_ms();
    int it;
    /* run the kernel num_runs times; the final run releases resources */
    for (it=0; it< num_runs; it++) omp_offloading_start(__off_info__, it== num_runs -1);
    off_kernel_time = (read_timer_ms() - off_kernel_time)/ num_runs;
    /* copy back u from each device and free others */
    double off_copyfrom_time = read_timer_ms();
    omp_offloading_start(__copy_data_off_info__, 1);
    off_copyfrom_time = read_timer_ms() - off_copyfrom_time;
    double off_total = off_init_time + off_copyto_time + off_copyfrom_time + off_kernel_time;
#if defined (OMP_BREAKDOWN_TIMING)
    omp_offloading_info_report_profile(__copy_data_off_info__);
    omp_offloading_info_report_profile(__off_info__);
    omp_offloading_info_t *infos[2];
    infos[0] = __copy_data_off_info__;
    infos[1] = __off_info__;
    omp_offloading_info_sum_profile(infos, 2, start_time, start_time+off_total);
    omp_offloading_info_report_profile(__copy_data_off_info__);
#endif
    omp_offloading_fini_info(__copy_data_off_info__);
    omp_offloading_fini_info(__off_info__);
    omp_grid_topology_fini(__top__);
    omp_unified_free(uold);
    return off_total;
}
blake2bp-ref.c | /*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 4
/* Initialize one leaf-node state of the 2-level blake2bp tree.  `offset'
   selects which of the PARALLELISM_DEGREE lanes this leaf is (stored in the
   low 32 bits of node_offset).  Returns blake2b_init_param()'s result:
   0 on success, <0 on error. */
static int blake2bp_init_leaf( blake2b_state *S, size_t outlen, size_t keylen, uint64_t offset )
{
  blake2b_param P[1];
  P->digest_length = (uint8_t)outlen;
  P->key_length = (uint8_t)keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  store32( &P->leaf_length, 0 );
  store32( &P->node_offset, offset );  /* lane index */
  store32( &P->xof_length, 0 );
  P->node_depth = 0;                   /* leaves are at depth 0 */
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2b_init_param( S, P );
}
/* Initialize the root-node state of the blake2bp tree (node_depth 1,
   node_offset 0); the root hashes the concatenated leaf digests.
   Returns blake2b_init_param()'s result: 0 on success, <0 on error. */
static int blake2bp_init_root( blake2b_state *S, size_t outlen, size_t keylen )
{
  blake2b_param P[1];
  P->digest_length = (uint8_t)outlen;
  P->key_length = (uint8_t)keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  store32( &P->leaf_length, 0 );
  store32( &P->node_offset, 0 );
  store32( &P->xof_length, 0 );
  P->node_depth = 1;                   /* root sits above the leaves */
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2b_init_param( S, P );
}
/* Initialize an unkeyed blake2bp state producing an `outlen'-byte digest
   (1..BLAKE2B_OUTBYTES).  Sets up the root plus PARALLELISM_DEGREE leaves
   and marks the last node of each level.  Returns 0 on success, -1 on
   invalid outlen or sub-init failure. */
int blake2bp_init( blake2bp_state *S, size_t outlen )
{
  size_t i;
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;
  if( blake2bp_init_root( S->R, outlen, 0 ) < 0 )
    return -1;
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;
  /* last-node flags finalize the tree structure per the BLAKE2 spec */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}
/* Initialize a keyed blake2bp state.  Like blake2bp_init(), then feeds one
   zero-padded key block to every leaf.  keylen must be 1..BLAKE2B_KEYBYTES.
   Returns 0 on success, -1 on invalid parameters or sub-init failure. */
int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen )
{
  size_t i;
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
  if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;
  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;
  if( blake2bp_init_root( S->R, outlen, keylen ) < 0 )
    return -1;
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  {
    /* each leaf absorbs the same zero-padded key block first */
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );
    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}
/* Absorb `inlen' bytes into the blake2bp state.  Input is striped across
   the PARALLELISM_DEGREE leaves in BLAKE2B_BLOCKBYTES units: leaf i takes
   blocks i, i+P, i+2P, ...  A partial stripe is staged in S->buf until a
   full PARALLELISM_DEGREE*BLAKE2B_BLOCKBYTES group is available.
   Returns 0. */
int blake2bp_update( blake2bp_state *S, const void *pin, size_t inlen )
{
  const unsigned char * in = (const unsigned char *)pin;
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;
  size_t i;
  /* First drain the staging buffer if this call completes it. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );
    in += fill;
    inlen -= fill;
    left = 0;
  }
  /* Bulk phase: each lane (OpenMP thread, or loop iteration when built
     without OpenMP) consumes its own stride of blocks. */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num();  /* lane index = thread id */
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2B_BLOCKBYTES;
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S->S[i], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
  }
  /* Stage whatever is left of the final partial stripe. */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );
  S->buflen = left + inlen;
  return 0;
}
/* Finalize: flush each leaf's share of the staging buffer, finalize the
   leaves, then hash the leaf digests through the root node into `out'.
   Requires out != NULL and outlen >= S->outlen; returns the root
   blake2b_final() result, or -1 on bad arguments. */
int blake2bp_final( blake2bp_state *S, void *out, size_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  size_t i;
  if(out == NULL || outlen < S->outlen) {
    return -1;
  }
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    /* leaf i owns the i-th block of the staging buffer, if present */
    if( S->buflen > i * BLAKE2B_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;
      if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
    }
    blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
  }
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );
  return blake2b_final( S->R, out, S->outlen );
}
/* One-shot blake2bp: hash `inlen' bytes of `in' (optionally keyed) into an
   `outlen'-byte digest.  Input is striped across PARALLELISM_DEGREE leaf
   states in BLAKE2B_BLOCKBYTES units (parallelized with OpenMP when
   available), then the leaf digests are combined through a root node.
   Returns 0 on success, -1 on invalid parameters.
   Fix: removed a stray empty statement (double semicolon) after the final
   return. */
int blake2bp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];
  size_t i;
  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;
  if ( NULL == out ) return -1;
  if( NULL == key && keylen > 0 ) return -1;
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
  if( keylen > BLAKE2B_KEYBYTES ) return -1;
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;
  S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */
  if( keylen > 0 )
  {
    /* every leaf absorbs the same zero-padded key block first */
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );
    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  /* Each lane (thread or loop iteration) hashes its own stride of blocks,
     including any trailing partial block that belongs to it. */
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2B_BLOCKBYTES;
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[i], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
    if( inlen__ > i * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - i * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[i], in__, len );
    }
    blake2b_final( S[i], hash[i], BLAKE2B_OUTBYTES );
  }
  /* Combine leaf digests through the root node. */
  if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
    return -1;
  FS->last_node = 1; /* Mark as last node */
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );
  return blake2b_final( FS, out, outlen );
}
#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Self-test driver (compiled only with BLAKE2BP_SELFTEST): checks both the
   one-shot and the streaming blake2bp API against the keyed known-answer
   vectors in blake2-kat.h.  Prints "ok" and returns 0 on success, prints
   "error" and returns -1 on any mismatch or API failure. */
int main( void )
{
  uint8_t key[BLAKE2B_KEYBYTES];
  uint8_t buf[BLAKE2_KAT_LENGTH];
  size_t i, step;
  /* Deterministic key and message bytes: 0, 1, 2, ... */
  for( i = 0; i < BLAKE2B_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;
  /* Test simple API: one-shot hash of every prefix length of buf */
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2B_OUTBYTES];
    blake2bp( hash, BLAKE2B_OUTBYTES, buf, i, key, BLAKE2B_KEYBYTES );
    if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) )
    {
      goto fail;
    }
  }
  /* Test streaming API: same digests must result from feeding the message
     in chunks of every size from 1 to BLAKE2B_BLOCKBYTES-1 */
  for(step = 1; step < BLAKE2B_BLOCKBYTES; ++step) {
    for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
      uint8_t hash[BLAKE2B_OUTBYTES];
      blake2bp_state S;
      uint8_t * p = buf;
      size_t mlen = i;
      int err = 0;
      if( (err = blake2bp_init_key(&S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES)) < 0 ) {
        goto fail;
      }
      while (mlen >= step) {
        if ( (err = blake2bp_update(&S, p, step)) < 0 ) {
          goto fail;
        }
        mlen -= step;
        p += step;
      }
      /* feed the final sub-step remainder, then finalize */
      if ( (err = blake2bp_update(&S, p, mlen)) < 0) {
        goto fail;
      }
      if ( (err = blake2bp_final(&S, hash, BLAKE2B_OUTBYTES)) < 0) {
        goto fail;
      }
      if (0 != memcmp(hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES)) {
        goto fail;
      }
    }
  }
  puts( "ok" );
  return 0;
fail:
  puts("error");
  return -1;
}
#endif
|
im2col.h | #ifndef IM2COL_H_
#define IM2COL_H_
#include <omp.h>
#include <torch/extension.h>
#include "nn/common/im2col.h"
namespace mapped_conv {
namespace nn {
namespace cpu {
template <typename T>
void Im2Col2D(const int64_t num_kernels, torch::Tensor data_im,
const int64_t height_im, const int64_t width_im,
const int64_t width_out, const int64_t width_col,
const int64_t kernel_h, const int64_t kernel_w,
const int64_t pad_h, const int64_t pad_w, const int64_t stride_h,
const int64_t stride_w, const int64_t dilation_h,
const int64_t dilation_w, torch::Tensor data_col) {
T *data_col_ptr = data_col.data<T>();
const T *data_im_ptr = data_im.data<T>();
int64_t index;
#pragma omp parallel for shared(data_col_ptr, data_im_ptr) private(index) \
schedule(static)
for (index = 0; index < num_kernels; index++) {
common::Im2Col2D(index, data_im_ptr, height_im, width_im, width_out,
width_col, kernel_h, kernel_w, pad_h, pad_w, stride_h,
stride_w, dilation_h, dilation_w, data_col_ptr);
}
}
template <typename T>
void Col2Im2D(const int64_t num_kernels, torch::Tensor data_col,
const int64_t height, const int64_t width,
const int64_t output_height, const int64_t output_width,
const int64_t kernel_h, const int64_t kernel_w,
const int64_t pad_h, const int64_t pad_w, const int64_t stride_h,
const int64_t stride_w, const int64_t dilation_h,
const int64_t dilation_w, torch::Tensor data_im) {
const T *data_col_ptr = data_col.data<T>();
T *data_im_ptr = data_im.data<T>();
int64_t index;
#pragma omp parallel for shared(data_col_ptr, data_im_ptr) private(index) \
schedule(static)
for (index = 0; index < num_kernels; index++) {
common::Col2Im2D(index, data_col_ptr, height, width, output_height,
output_width, kernel_h, kernel_w, pad_h, pad_w, stride_h,
stride_w, dilation_h, dilation_w, data_im_ptr);
}
}
} // namespace cpu
} // namespace nn
} // namespace mapped_conv
#endif |
omp_get_num_threads.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
/* Verify that omp_get_num_threads() agrees with a manual count of the
   threads executing a parallel region.  Returns 1 on agreement, 0 on
   mismatch. */
int test_omp_get_num_threads()
{
  int reported = -1;  /* value returned by the runtime */
  int counted = 0;    /* threads counted one by one */
#pragma omp parallel
  {
    /* serialize the increment so every thread is counted exactly once */
#pragma omp critical
    {
      counted++;
    }
    /* one thread asks the runtime for the team size */
#pragma omp single
    {
      reported = omp_get_num_threads ();
    }
  }
  return (counted == reported);
}
/* Run the check REPETITIONS times; the exit status is the number of
   failing repetitions (0 == all passed). */
int main()
{
  int rep;
  int failures = 0;
  for (rep = 0; rep < REPETITIONS; rep++) {
    failures += !test_omp_get_num_threads();
  }
  return failures;
}
|
core_sormlq.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zunmlq.c, normal z -> s, Fri Sep 28 17:38:25 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
/***************************************************************************//**
*
* @ingroup core_unmlq
*
* Overwrites the general complex m-by-n tile C with
*
* side = PlasmaLeft side = PlasmaRight
* trans = PlasmaNoTrans Q * C C * Q
* trans = PlasmaTrans Q^T * C C * Q^T
*
* where Q is a orthogonal matrix defined as the product of k
* elementary reflectors
* \f[
* Q = H(k) . . . H(2) H(1)
* \f]
* as returned by plasma_core_sgelqt. Q is of order m if side = PlasmaLeft
* and of order n if side = PlasmaRight.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft : apply Q or Q^T from the Left;
* - PlasmaRight : apply Q or Q^T from the Right.
*
* @param[in] trans
* - PlasmaNoTrans : No transpose, apply Q;
* - PlasmaTrans : Transpose, apply Q^T.
*
* @param[in] m
* The number of rows of the tile C. m >= 0.
*
* @param[in] n
* The number of columns of the tile C. n >= 0.
*
* @param[in] k
* The number of elementary reflectors whose product defines
* the matrix Q.
* If side = PlasmaLeft, m >= k >= 0;
* if side = PlasmaRight, n >= k >= 0.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in] A
* Dimension: (lda,m) if SIDE = PlasmaLeft,
* (lda,n) if SIDE = PlasmaRight,
* The i-th row must contain the vector which defines the
* elementary reflector H(i), for i = 1,2,...,k, as returned by
* plasma_core_sgelqt in the first k rows of its array argument A.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,k).
*
* @param[in] T
* The ib-by-k triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param[in,out] C
* On entry, the m-by-n tile C.
* On exit, C is overwritten by Q*C or Q^T*C or C*Q^T or C*Q.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
* @param work
* Auxiliary workspace array of length
* ldwork-by-m if side == PlasmaLeft
* ldwork-by-ib if side == PlasmaRight
*
* @param[in] ldwork
* The leading dimension of the array work.
* ldwork >= max(1,ib) if side == PlasmaLeft
* ldwork >= max(1,n) if side == PlasmaRight
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
/* Sequential kernel: apply Q (or Q^T), given as ib-blocked LQ reflectors in
   (A, T), to the m-by-n tile C from the left or right.  Arguments are
   validated in order; see the Doxygen block above for the full contract.
   Returns PlasmaSuccess or -i when the i-th argument is illegal. */
__attribute__((weak))
int plasma_core_sormlq(plasma_enum_t side, plasma_enum_t trans,
                       int m, int n, int k, int ib,
                       const float *A, int lda,
                       const float *T, int ldt,
                       float *C, int ldc,
                       float *work, int ldwork)
{
    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_coreblas_error("illegal value of side");
        return -1;
    }
    int nq; // order of Q
    int nw; // dimension of work
    if (side == PlasmaLeft) {
        nq = m;
        nw = n;
    }
    else {
        nq = n;
        nw = m;
    }
    if (trans != PlasmaNoTrans && trans != PlasmaTrans) {
        plasma_coreblas_error("illegal value of trans");
        return -2;
    }
    if (m < 0) {
        plasma_coreblas_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_coreblas_error("illegal value of n");
        return -4;
    }
    if (k < 0 || k > nq) {
        plasma_coreblas_error("illegal value of k");
        return -5;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -6;
    }
    if (A == NULL) {
        plasma_coreblas_error("NULL A");
        return -7;
    }
    if ((lda < imax(1, k)) && (k > 0)) {
        plasma_coreblas_error("illegal value of lda");
        return -8;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -9;
    }
    if (ldt < imax(1, ib)) {
        plasma_coreblas_error("illegal value of ldt");
        return -10;
    }
    if (C == NULL) {
        plasma_coreblas_error("NULL C");
        return -11;
    }
    if ((ldc < imax(1, m)) && (m > 0)) {
        plasma_coreblas_error("illegal value of ldc");
        return -12;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -13;
    }
    if ((ldwork < imax(1, nw)) && (nw > 0)) {
        plasma_coreblas_error("illegal value of ldwork");
        return -14;
    }
    // quick return
    if (m == 0 || n == 0 || k == 0)
        return PlasmaSuccess;
    // Choose the sweep direction over the ib-wide reflector blocks:
    // forward (i1 = 0, step +ib) or backward (i1 = last block, step -ib),
    // depending on the side/trans combination.
    int i1, i3;
    if ((side == PlasmaLeft && trans == PlasmaNoTrans) ||
        (side == PlasmaRight && trans != PlasmaNoTrans)) {
        i1 = 0;
        i3 = ib;
    }
    else {
        i1 = ((k-1)/ib)*ib;
        i3 = -ib;
    }
    // LQ reflectors are stored row-wise, so the per-block trans passed to
    // slarfb is the opposite of the requested operation.
    if (trans == PlasmaNoTrans)
        trans = PlasmaTrans;
    else
        trans = PlasmaNoTrans;
    for (int i = i1; i > -1 && i < k; i += i3) {
        int kb = imin(ib, k-i);  // width of this reflector block (last may be short)
        int ic = 0;
        int jc = 0;
        int ni = n;
        int mi = m;
        if (side == PlasmaLeft) {
            // H or H^T is applied to C(i:m,1:n).
            mi = m - i;
            ic = i;
        }
        else {
            // H or H^T is applied to C(1:m,i:n).
            ni = n - i;
            jc = i;
        }
        // Apply H or H^T.
        LAPACKE_slarfb_work(LAPACK_COL_MAJOR,
                            lapack_const(side),
                            lapack_const(trans),
                            lapack_const(PlasmaForward),
                            lapack_const(PlasmaRowwise),
                            mi, ni, kb,
                            &A[lda*i+i], lda,
                            &T[ldt*i], ldt,
                            &C[ldc*jc+ic], ldc,
                            work, ldwork);
    }
    return PlasmaSuccess;
}
/******************************************************************************/
// OpenMP-task wrapper around plasma_core_sormlq(): applies Q (or Q^T) from
// an LQ factorization to the tile C as a single task, with dependences on
// the A, T (in) and C (inout) tiles. Kernel failures are reported through
// the sequence/request pair instead of a return value.
void plasma_core_omp_sormlq(plasma_enum_t side, plasma_enum_t trans,
                            int m, int n, int k, int ib,
                            const float *A, int lda,
                            const float *T, int ldt,
                            float *C, int ldc,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Extent of A consumed by the task: rows of C when applying from the
    // left, columns of C when applying from the right.
    const int ak = (side == PlasmaLeft) ? m : n;

    #pragma omp task depend(in:A[0:lda*ak]) \
                     depend(in:T[0:ib*k]) \
                     depend(inout:C[0:ldc*n])
    {
        // Skip the kernel entirely if an earlier task already failed.
        if (sequence->status == PlasmaSuccess) {
            // Per-thread scratch buffer from the shared workspace pool.
            const int thread_id = omp_get_thread_num();
            float *scratch = (float*)work.spaces[thread_id];
            const int ldwork = side == PlasmaLeft ? n : m; // TODO: float check

            // Call the kernel.
            const int kernel_status = plasma_core_sormlq(side, trans,
                                                         m, n, k, ib,
                                                         A, lda,
                                                         T, ldt,
                                                         C, ldc,
                                                         scratch, ldwork);
            if (kernel_status != PlasmaSuccess) {
                plasma_error("core_sormlq() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
DRB033-truedeplinear-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A linear expression is used as array subscription.
Data race pair: a[2*i+1]@64:5 vs. a[i]@64:14
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
// DataRaceBench case: initializes a[0..1999], then runs a pass whose linear
// subscript (a[2*i+1] = a[i] + 1) carries a true dependence, e.g. i=1 reads
// a[1], which i=0 wrote. Do NOT "fix" the benchmark's race behavior: it is
// the intended test subject.
//
// NOTE(review): the documented race pair (a[2*i+1] vs a[i]) targets the
// second loop, but as listed here the "parallel for" pragma sits on the
// race-free initialization loop and the second loop runs sequentially, so
// the advertised race cannot occur -- verify against upstream
// DRB033-truedeplinear-orig-yes.c, where the pragma belongs on the second loop.
int main(int argc,char *argv[])
{
  int i;
  int a[2000];
  // Parallel initialization: each iteration writes a distinct a[i], no race.
#pragma omp parallel for private (i)
  for (i = 0; i <= 1999; i += 1) {
    a[i] = i;
  }
  // Loop with a loop-carried true dependence through the linear subscript;
  // must not be naively parallelized.
  for (i = 0; i <= 999; i += 1) {
    a[2 * i + 1] = a[i] + 1;
  }
  printf("a[1001]=%d\n",a[1001]);
  return 0;
}
|
scatter.c | #include "../../shared.h"
#include "../hale_data.h"
#include "hale.h"
#include <float.h>
#include <stdio.h>
// Scatter the subcell energy and mass quantities back to the cell centers.
// NOTE(review): this prototype names the 8th pointer `kinetic_energy` while
// the definition below names the same slot `ke_mass` -- legal in C, but
// confirm the intended name.
void scatter_energy_and_mass(
    const int ncells, const double* nodes_x, const double* nodes_y,
    const double* nodes_z, double* cell_volume, double* energy, double* density,
    double* kinetic_energy, double* velocity_x, double* velocity_y,
    double* velocity_z, double* cell_mass, double* subcell_mass,
    double* subcell_ie_mass, double* subcell_ke_mass, int* faces_to_nodes,
    int* faces_to_nodes_offsets, int* cells_to_faces_offsets,
    int* cells_to_faces, int* cells_to_nodes, int* cells_to_nodes_offsets,
    double initial_mass, double initial_ie_mass, double initial_ke_mass);

// Scatter the subcell momentum to the node centered velocities.
void scatter_momentum(const int nnodes, vec_t* initial_momentum,
                      int* nodes_to_cells_offsets, int* nodes_to_cells,
                      int* cells_to_nodes_offsets, int* cells_to_nodes,
                      double* velocity_x, double* velocity_y,
                      double* velocity_z, double* nodal_mass,
                      double* subcell_mass, double* subcell_momentum_x,
                      double* subcell_momentum_y, double* subcell_momentum_z);
// Perform the scatter step of the ALE remapping algorithm.
//
// Runs the three scatter sub-steps in order: (1) recompute cell/subcell
// volumes and subcell centroids on the current node positions, (2) scatter
// subcell momentum to node-centered velocities (which also fills nodal
// mass), and (3) scatter subcell energy/mass back to cell centers. Step (3)
// reads the velocities produced by step (2), so the order matters.
void scatter_phase(UnstructuredMesh* umesh, HaleData* hale_data,
                   vec_t* initial_momentum, double initial_mass,
                   double initial_ie_mass, double initial_ke_mass) {

  // Calculates the cell volume, subcell volume and the subcell centroids
  calc_volumes_centroids(
      umesh->ncells, umesh->nnodes, hale_data->nnodes_by_subcell,
      umesh->cells_to_nodes_offsets, umesh->cells_to_nodes,
      hale_data->subcells_to_faces_offsets, hale_data->subcells_to_faces,
      umesh->faces_to_nodes, umesh->faces_to_nodes_offsets,
      umesh->faces_cclockwise_cell, umesh->nodes_x0, umesh->nodes_y0,
      umesh->nodes_z0, hale_data->subcell_centroids_x,
      hale_data->subcell_centroids_y, hale_data->subcell_centroids_z,
      hale_data->subcell_volume, hale_data->cell_volume,
      hale_data->nodal_volumes, umesh->nodes_to_cells_offsets, umesh->nodes_to_cells);

  // Scatter the subcell momentum to the node centered velocities
  scatter_momentum(umesh->nnodes, initial_momentum, umesh->nodes_to_cells_offsets,
                   umesh->nodes_to_cells, umesh->cells_to_nodes_offsets,
                   umesh->cells_to_nodes, hale_data->velocity_x0,
                   hale_data->velocity_y0, hale_data->velocity_z0,
                   hale_data->nodal_mass, hale_data->subcell_mass,
                   hale_data->subcell_momentum_x, hale_data->subcell_momentum_y,
                   hale_data->subcell_momentum_z);

  // Scatter the subcell energy and mass quantities back to the cell centers
  scatter_energy_and_mass(
      umesh->ncells, umesh->nodes_x0, umesh->nodes_y0, umesh->nodes_z0,
      hale_data->cell_volume, hale_data->energy0, hale_data->density0,
      hale_data->ke_mass, hale_data->velocity_x0, hale_data->velocity_y0,
      hale_data->velocity_z0, hale_data->cell_mass, hale_data->subcell_mass,
      hale_data->subcell_ie_mass, hale_data->subcell_ke_mass,
      umesh->faces_to_nodes, umesh->faces_to_nodes_offsets,
      umesh->cells_to_faces_offsets, umesh->cells_to_faces,
      umesh->cells_to_nodes, umesh->cells_to_nodes_offsets,
      initial_mass, initial_ie_mass, initial_ke_mass);
}
// Scatter the subcell energy and mass quantities back to the cell centers.
//
// Per cell: sums the subcell masses and internal/kinetic energy masses,
// recomputes the cell volume on the rezoned mesh, then derives cell density
// and specific energy. Global mass/energy totals are accumulated (OpenMP
// reduction) and printed against the initial values as a conservation check.
// NOTE(review): parameter `ke_mass` is never read or written in this body --
// confirm whether it is vestigial.
void scatter_energy_and_mass(
    const int ncells, const double* nodes_x, const double* nodes_y,
    const double* nodes_z, double* cell_volume, double* energy, double* density,
    double* ke_mass, double* velocity_x, double* velocity_y, double* velocity_z,
    double* cell_mass, double* subcell_mass, double* subcell_ie_mass,
    double* subcell_ke_mass, int* faces_to_nodes, int* faces_to_nodes_offsets,
    int* cells_to_faces_offsets, int* cells_to_faces, int* cells_to_nodes,
    int* cells_to_nodes_offsets, double initial_mass,
    double initial_ie_mass, double initial_ke_mass) {

  // Scatter energy and density, and print the conservation of mass
  double rz_total_mass = 0.0;
  double rz_total_e_mass = 0.0;
#pragma omp parallel for reduction(+ : rz_total_mass, rz_total_e_mass)
  for (int cc = 0; cc < ncells; ++cc) {
    // CSR-style lookups for the nodes and faces attached to this cell.
    const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
    const int nnodes_by_cell = cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
    const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
    const int nfaces_by_cell =
        cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;

    double total_mass = 0.0;
    double new_ke_mass = 0.0;
    double total_ie_mass = 0.0;
    double total_ke_mass = 0.0;
    // One subcell per (cell, node) pair, indexed by the same offset as the
    // cell's node list.
    for (int nn = 0; nn < nnodes_by_cell; ++nn) {
      const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
      const int subcell_index = cell_to_nodes_off + nn;
      total_mass += subcell_mass[(subcell_index)];
      total_ie_mass += subcell_ie_mass[(subcell_index)];
      total_ke_mass += subcell_ke_mass[(subcell_index)];
      // Kinetic energy implied by the already-scattered nodal velocities
      // (0.5 * m * |v|^2 per subcell).
      new_ke_mass += subcell_mass[(subcell_index)] * 0.5 *
                     (velocity_x[(node_index)] * velocity_x[(node_index)] +
                      velocity_y[(node_index)] * velocity_y[(node_index)] +
                      velocity_z[(node_index)] * velocity_z[(node_index)]);
    }

    // Update the volume of the cell to the new rezoned mesh
    vec_t cell_c = {0.0, 0.0, 0.0};
    calc_centroid(nnodes_by_cell, nodes_x, nodes_y, nodes_z, cells_to_nodes,
                  cell_to_nodes_off, &cell_c);
    calc_volume(cell_to_faces_off, nfaces_by_cell, cells_to_faces,
                faces_to_nodes, faces_to_nodes_offsets, nodes_x, nodes_y,
                nodes_z, &cell_c, &cell_volume[(cc)]);

    // Scatter the energy and density
    cell_mass[(cc)] = total_mass;
    density[(cc)] = cell_mass[(cc)] / cell_volume[(cc)];
    // Fold the kinetic-energy discrepancy into the cell's energy mass.
    const double total_e_mass = total_ie_mass + (total_ke_mass - new_ke_mass);
    energy[(cc)] = total_e_mass / cell_mass[(cc)];

    // Calculate the conservation data
    rz_total_mass += total_mass;
    rz_total_e_mass += total_e_mass;
  }

  printf("Initial Total Mass %.12f\n", initial_mass);
  printf("Rezoned Total Mass %.12f\n", rz_total_mass);
  printf("Difference %.12f\n\n", rz_total_mass - initial_mass);
  printf("Initial Total Energy %.12f\n",
         (initial_ie_mass + initial_ke_mass));
  // NOTE(review): label says "Internal Energy" but the accumulated value
  // also carries the kinetic-energy correction and is compared against the
  // initial TOTAL energy -- confirm the intended label.
  printf("Rezoned Total Internal Energy %.12f\n", rz_total_e_mass);
  printf("Difference %.12f\n\n",
         rz_total_e_mass - (initial_ie_mass + initial_ke_mass));
}
// Scatter the subcell momentum to the node centered velocities.
//
// Per node: sums momentum and mass over the one subcell that each
// neighbouring cell associates with this node, stores the nodal mass, and
// sets the nodal velocity to momentum / mass. Global momentum components are
// reduced and printed against the initial momentum as a conservation check.
void scatter_momentum(const int nnodes, vec_t* initial_momentum,
                      int* nodes_to_cells_offsets, int* nodes_to_cells,
                      int* cells_to_nodes_offsets, int* cells_to_nodes,
                      double* velocity_x, double* velocity_y,
                      double* velocity_z, double* nodal_mass,
                      double* subcell_mass, double* subcell_momentum_x,
                      double* subcell_momentum_y, double* subcell_momentum_z) {

  double total_momentum_x = 0.0;
  double total_momentum_y = 0.0;
  double total_momentum_z = 0.0;
#pragma omp parallel for reduction(+ : total_momentum_x, total_momentum_y, \
                                   total_momentum_z)
  for (int nn = 0; nn < nnodes; ++nn) {
    // Cells incident on this node (CSR layout).
    const int node_to_cells_off = nodes_to_cells_offsets[(nn)];
    const int ncells_by_node =
        nodes_to_cells_offsets[(nn + 1)] - node_to_cells_off;

    double mass_at_node = 0.0;
    double node_momentum_x = 0.0;
    double node_momentum_y = 0.0;
    double node_momentum_z = 0.0;
    for (int cc = 0; cc < ncells_by_node; ++cc) {
      const int cell_index = nodes_to_cells[(node_to_cells_off + cc)];
      const int cell_to_nodes_off = cells_to_nodes_offsets[(cell_index)];
      const int nnodes_by_cell =
          cells_to_nodes_offsets[(cell_index + 1)] - cell_to_nodes_off;

      // Determine the position of the node in the cell
      int nn2;
      for (nn2 = 0; nn2 < nnodes_by_cell; ++nn2) {
        if (cells_to_nodes[(cell_to_nodes_off + nn2)] == nn) {
          break;
        }
      }

      // The subcell for (cell, node) shares the node's slot in the cell's
      // node list.
      const int subcell_index = cell_to_nodes_off + nn2;
      node_momentum_x += subcell_momentum_x[(subcell_index)];
      node_momentum_y += subcell_momentum_y[(subcell_index)];
      node_momentum_z += subcell_momentum_z[(subcell_index)];
      mass_at_node += subcell_mass[(subcell_index)];
    }

    nodal_mass[(nn)] = mass_at_node;

    total_momentum_x += node_momentum_x;
    total_momentum_y += node_momentum_y;
    total_momentum_z += node_momentum_z;

    // NOTE(review): divides with no zero guard -- a massless node would
    // produce inf/nan velocities; confirm nodal mass is always positive.
    velocity_x[(nn)] = node_momentum_x / nodal_mass[(nn)];
    velocity_y[(nn)] = node_momentum_y / nodal_mass[(nn)];
    velocity_z[(nn)] = node_momentum_z / nodal_mass[(nn)];
  }

  printf("Initial total momentum %.12f %.12f %.12f\n", initial_momentum->x,
         initial_momentum->y, initial_momentum->z);
  printf("Rezoned total momentum %.12f %.12f %.12f\n", total_momentum_x,
         total_momentum_y, total_momentum_z);
  printf("Difference %.12f %.12f %.12f\n\n",
         initial_momentum->x - total_momentum_x,
         initial_momentum->y - total_momentum_y,
         initial_momentum->z - total_momentum_z);
}
|
Renderer.h | #pragma once
#include <Utils.h>
#include <Player.h>
#include <Raycaster.h>
// Renders the player's view into an RGBA8 pixel buffer by casting one ray
// per pixel through the Raycaster. Supports optional OpenMP parallelism
// (useMP) and checkerboard interlacing (skipPixels), which renders half of
// the pixels per frame and alternates the skipped parity every frame.
class Renderer
{
public:
    // Non-owning pointers: the caller keeps player/field/raycaster alive for
    // the lifetime of this Renderer.
    Renderer(Player* player, Field* field, Raycaster* raycaster, bool useMP, bool skipPixels)
        : player(player), field(field), raycaster(raycaster), useMP(useMP), skipPixels(skipPixels)
    {}

    // Casts ray `v` from `pos` and writes the resulting color at pixel
    // `index` (4 bytes per pixel: R, G, B, alpha forced to 255).
    void FillPixel(glm::vec4& pos, glm::vec4& v, uint8_t* buffer, int index)
    {
        glm::u8vec3 pixel(250, 250, 250);
        float dist;
        raycaster->FindPixel(pos, v, pixel, dist);

        //// distance fog
        //static const int FOG_RANGE = 30;
        //static const int FOG_STRENGH = 120; // 0 - 255
        //int t = std::min(int(dist * FOG_RANGE), FOG_STRENGH);
        //pixel.x = std::min(t + pixel.x, 255);
        //pixel.y = std::min(t + pixel.y, 255);
        //pixel.z = std::min(t + pixel.z, 255);

        buffer[index * 4    ] = pixel.x;
        buffer[index * 4 + 1] = pixel.y;
        buffer[index * 4 + 2] = pixel.z;
        buffer[index * 4 + 3] = 255;
    }

    // Computes the view ray for screen coordinate (x, y) and fills that
    // pixel. With skipPixels enabled, a checkerboard half of the pixels is
    // skipped: even rows skip columns of parity `skipEven`, odd rows skip
    // the opposite parity.
    void FillPixelAtXY(uint8_t* buffer, const int x, const int y,
        const int viewWidth, const int viewHeight, const int skipEven)
    {
        int index = y*viewWidth + x;
        int W2 = viewWidth / 2;
        int H2 = viewHeight / 2;

        // BUGFIX: braces added to restore the intended pairing. Previously
        // the `else` bound to the innermost `if` (dangling-else), so on even
        // rows BOTH parities returned early -- even rows were never rendered
        // at all when skipPixels was set.
        if (skipPixels)
        {
            if (y % 2 == 0)
            {
                if (x % 2 == skipEven) return;
            }
            else
            {
                if (x % 2 == (skipEven == 0 ? 1 : 0)) return;
            }
        }

        // Normalized screen offsets; both axes are divided by W2 so the
        // horizontal field of view fixes the aspect ratio.
        float dY = (float(y - H2) / W2);
        float dX = (float(x - W2) / W2);
        glm::vec4 rayDy = player->vy * dY;
        glm::vec4 rayDx = player->vz * dX;
        glm::vec4 raycastVec = player->vx + rayDy + rayDx;
        FillPixel(player->pos, raycastVec, buffer, index);
    }

    // Fills the whole buffer, parallelizing over columns with OpenMP.
    void ThreadedCycle(uint8_t* buffer, const int viewWidth, const int viewHeight, const int skipEven)
    {
        #pragma omp parallel for
        for (int x = 0; x < viewWidth; x++)
            for (int y = 0; y < viewHeight; y++)
            {
                FillPixelAtXY(buffer, x, y, viewWidth, viewHeight, skipEven);
            }
    }

    // Single-threaded equivalent of ThreadedCycle.
    void SimpleCycle(uint8_t* buffer, const int viewWidth, const int viewHeight, const int skipEven)
    {
        for (int x = 0; x < viewWidth; x++)
            for (int y = 0; y < viewHeight; y++)
            {
                FillPixelAtXY(buffer, x, y, viewWidth, viewHeight, skipEven);
            }
    }

    // Renders one frame into `buffer`, toggling the skipped checkerboard
    // parity between calls so interlaced frames alternate coverage.
    void FillTexData(uint8_t* buffer, const int viewWidth, const int viewHeight)
    {
        static int skipEven = 0;
        if (useMP)
            ThreadedCycle(buffer, viewWidth, viewHeight, skipEven);
        else
            SimpleCycle(buffer, viewWidth, viewHeight, skipEven);
        skipEven = skipEven == 0 ? 1 : 0;
    }

    bool useMP;
    bool skipPixels;
    Player* player = nullptr;
    Field* field = nullptr;
    Raycaster* raycaster = nullptr;
};
conv3x3s1_winograd64_neon5_permute.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
// Permute step of the F(6x6,3x3) Winograd forward transform: reorders the
// transformed input from per-channel [64 * tiles] rows into per-position
// [64][tiles][inch] blocks packed in groups of 8, then 4, then single tiles,
// so the following GEMM-like kernel reads contiguously.
// NOTE(review): top_blob / bottom_blob are reused as the destination /
// source transform buffers (see the commented-out create() calls) -- the
// caller is assumed to have sized them accordingly. `outch` is unused here.
static void conv3x3s1_winograd64_neon5_permute(const Mat& bottom_blob, Mat& top_blob, const Option& opt,
                                               int inch, int outw, int outh, int outch)
{
    {
        // Transform-domain dimensions: each 6x6 output tile corresponds to
        // an 8x8 tile after the input transform.
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = w_tm/8 * h_tm/8;

        // permute
        // bottom_blob_tm.create(1, 64 * tiles, inch);
        // Mat bottom_blob_tm2(inch, tiles, 64);
        Mat bottom_blob_tm2 = top_blob;
        Mat bottom_blob_tm = bottom_blob;

        // One of the 64 transform-domain positions per iteration.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r=0; r<64; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i=0;
            // Pack tiles in groups of 8.
            for (; i+7<tiles; i+=8)
            {
                float* tm2p = tm2.row(i/8);

                const float* r0 = bottom_blob_tm;
                r0 += r*tiles + i;

                for (int q=0; q<inch; q++)
                {
#if __ARM_NEON
                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _r0n = vld1q_f32(r0+4);
                    vst1q_f32(tm2p, _r0);
                    vst1q_f32(tm2p+4, _r0n);
#else
                    tm2p[0] = r0[0];
                    tm2p[1] = r0[1];
                    tm2p[2] = r0[2];
                    tm2p[3] = r0[3];
                    tm2p[4] = r0[4];
                    tm2p[5] = r0[5];
                    tm2p[6] = r0[6];
                    tm2p[7] = r0[7];
#endif // __ARM_NEON

                    // Same tile positions in the next input channel.
                    r0 += bottom_blob_tm.cstep;
                    tm2p += 8;
                }
            }
            // Remaining groups of 4; the row index accounts for the 8-wide
            // rows already emitted.
            for (; i+3<tiles; i+=4)
            {
                float* tm2p = tm2.row(i/8+(i%8)/4);

                const float* r0 = bottom_blob_tm;
                r0 += r*tiles + i;

                for (int q=0; q<inch; q++)
                {
#if __ARM_NEON
                    float32x4_t _r0 = vld1q_f32(r0);
                    vst1q_f32(tm2p, _r0);
#else
                    tm2p[0] = r0[0];
                    tm2p[1] = r0[1];
                    tm2p[2] = r0[2];
                    tm2p[3] = r0[3];
#endif // __ARM_NEON

                    r0 += bottom_blob_tm.cstep;
                    tm2p += 4;
                }
            }
            // Leftover single tiles.
            for (; i<tiles; i++)
            {
                float* tm2p = tm2.row(i/8+(i%8)/4+i%4);

                const float* r0 = bottom_blob_tm;
                r0 += r*tiles + i;

                for (int q=0; q<inch; q++)
                {
                    tm2p[0] = r0[0];

                    r0 += bottom_blob_tm.cstep;
                    tm2p += 1;
                }
            }
        }
    }
}
}
|
GB_unaryop__abs_fp32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp32_int32
// op(A') function: GB_tran__abs_fp32_int32
// C type: float
// A type: int32_t
// cast: float cij = (float) aij
// unaryop: cij = fabsf (aij)
// type of the A (input) matrix entries
#define GB_ATYPE \
    int32_t

// type of the C (output) matrix entries
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabsf (x) ;

// casting
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = fabsf ((float) Ax [p]) for p = 0..anz-1 with a static
// OpenMP schedule. Returns GrB_NO_VALUE when this kernel is compiled out
// (GB_DISABLE), telling the caller to fall back to the generic worker.
// This file is auto-generated: do not hand-edit the logic.
GrB_Info GB_unop__abs_fp32_int32
(
    float *Cx,              // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,            // number of entries to apply the operator to
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Applies the same abs/cast operator while transposing A into C. The loop
// itself lives in the shared template GB_unaryop_transpose.c, specialized
// here through the GB_* macros; Rowcounts/Iter/A_slice/naslice describe how
// A is sliced across threads for phase 2 of the transpose.
GrB_Info GB_tran__abs_fp32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Parameters.h | //
// smarties
// Copyright (c) 2018 CSE-Lab, ETH Zurich, Switzerland. All rights reserved.
// Distributed under the terms of the MIT license.
//
// Created by Guido Novati (novatig@ethz.ch).
//
#ifndef smarties_Parameters_h
#define smarties_Parameters_h
#include "Functions.h"
#include "../../Utils/MPIUtilities.h"
namespace smarties
{
struct Parameters;
using ParametersPtr_t = std::shared_ptr<Parameters>;
// Allocates a parameter buffer for `size` entries, padded so that each of
// the `mpiSize` per-rank slices stays SIMD-aligned for distributed ops.
static inline nnReal* allocate_param(const Uint size, const Real mpiSize)
{
  const Uint paddedSize = Utilities::roundUpSimd( std::ceil(size/mpiSize)) * mpiSize;
  return Utilities::allocate_ptr<nnReal>(paddedSize);
}
struct Parameters
{
  // Per-layer counts of biases and weights, as requested by each layer.
  const std::vector<Uint> nBiases, nWeights;
  // Per-layer starting offsets into the contiguous `params` array.
  std::vector<Uint> indBiases, indWeights;
  const Uint nParams, nLayers, mpiSize;
  // Set by gradient producers; reset by clear().
  mutable bool written = false;
  // array containing all parameters of network contiguously
  //(used by optimizer and for MPI reductions)
  nnReal* const params;

  // Creates a new Parameters object with an identical layout.
  ParametersPtr_t allocateEmptyAlike() const
  {
    return std::make_shared<Parameters>(nWeights, nBiases, mpiSize);
  }

  // Broadcasts rank 0's parameter values to all ranks in `comm`.
  void broadcast(const MPI_Comm comm) const
  {
    MPI_Bcast(params, nParams, SMARTIES_MPI_NNVALUE_TYPE, 0, comm);
  }

  // Copies all parameter values from `tgt`; layouts must match.
  void copy(const ParametersPtr_t& tgt) const
  {
    assert(nParams == tgt->nParams);
    memcpy(params, tgt->params, nParams*sizeof(nnReal));
  }

  Parameters(const std::vector<Uint> _nWeights,
             const std::vector<Uint> _nBiases,
             const Uint _mpisize ) :
    nBiases(_nBiases), nWeights(_nWeights),
    nParams(_computeNParams(_nWeights, _nBiases)),
    nLayers(_nWeights.size()), mpiSize(_mpisize),
    params( allocate_param(nParams, _mpisize) ) { }

  // NOTE(review): releases with free(); this assumes Utilities::allocate_ptr
  // (via allocate_param) uses a malloc-compatible allocator -- confirm.
  ~Parameters() {
    if(params not_eq nullptr) free(params);
  }

  // Accumulates the per-thread gradient buffers `grads` into this object.
  // Each of the grads.size() OpenMP threads owns one source buffer; the
  // threads sweep the parameter range in rotated chunks separated by
  // barriers so that no two threads add into the same chunk of `params`
  // simultaneously. Buffers whose `written` flag is false contribute
  // nothing; every source buffer is cleared at the end.
  void reduceThreadsGrad(const std::vector<ParametersPtr_t>& grads) const
  {
    #pragma omp parallel num_threads(grads.size())
    {
      const Uint thrI = omp_get_thread_num(), thrN = omp_get_num_threads();
      assert( thrN == grads.size() && thrI < thrN );
      assert( nParams == grads[thrI]->nParams );
      // Chunk size, SIMD-rounded so the aligned simd loop below is valid.
      const Uint shift = Utilities::roundUpSimd( nParams/ (Real)thrN );
      assert( thrN * shift >= nParams ); // ensure coverage
      const nnReal *const src = grads[thrI]->params;
      nnReal *const dst = params;
      for(Uint i=0; i<thrN; ++i)
      {
        // Rotate which chunk this thread accumulates on each pass.
        const Uint turn = (thrI + i) % thrN;
        const Uint start = turn * shift;
        const Uint end = std::min(nParams, (turn+1)*shift);
        //#pragma omp critical
        //{ cout<<turn<<" "<<start<<" "<<end<<" "<<thrI<<" "
        //  <<thrN<<" "<<shift<<" "<<nParams<<endl; fflush(0); }
        if(grads[thrI]->written) {
          #pragma omp simd aligned(dst, src : VEC_WIDTH)
          for(Uint j=start; j<end; ++j) {
            assert( Utilities::isValidValue(src[j]) );
            dst[j] += src[j];
            #ifndef NDEBUG
            //gradMagn[thrI] += src[j]*src[j];
            #endif
          }
        }
        // Keep all threads in lockstep before moving to the next chunk.
        #pragma omp barrier
      }
      grads[thrI]->clear();
    }
    //cout<<endl;
    #ifndef NDEBUG
    //cout<<"Grad magnitudes:"<<print(gradMagn)<<endl;
    #endif
  }

  // L2 norm of the whole parameter vector.
  long double compute_weight_norm() const
  {
    long double sumWeights = 0;
    #pragma omp parallel for schedule(static) reduction(+:sumWeights)
    for (Uint w=0; w<nParams; ++w) sumWeights += std::pow(params[w],2);
    return std::sqrt(sumWeights);
  }

  // L1 norm of the whole parameter vector.
  long double compute_weight_L1norm() const
  {
    long double sumWeights = 0;
    #pragma omp parallel for schedule(static) reduction(+:sumWeights)
    for (Uint w=0; w<nParams; ++w) sumWeights += std::fabs(params[w]);
    return sumWeights;
  }

  // Euclidean distance between this parameter vector and TGT's.
  long double compute_weight_dist(const ParametersPtr_t& TGT) const
  {
    long double dist = 0;
    #pragma omp parallel for schedule(static) reduction(+ : dist)
    for(Uint w=0; w<nParams; ++w) dist += std::pow(params[w]-TGT->params[w], 2);
    return std::sqrt(dist);
  }

  // Zeroes all parameters and resets the `written` flag.
  void clear() const
  {
    std::memset(params, 0, nParams*sizeof(nnReal));
    written = false;
  }

  // Fills every parameter with `val`.
  void set(const nnReal val) const
  {
    #pragma omp parallel for schedule(static)
    for(Uint j=0; j<nParams; ++j) params[j] = val;
  }

  // Pointer to the first weight of layer `layerID`.
  nnReal* W(const Uint layerID) const {
    assert(layerID < nLayers);
    return params + indWeights[layerID];
  }
  // Pointer to the first bias of layer `layerID`.
  nnReal* B(const Uint layerID) const {
    assert(layerID < nLayers);
    return params + indBiases[layerID];
  }
  // Number of weights of layer `layerID`.
  Uint NW(const Uint layerID) const {
    assert(layerID < nLayers);
    return nWeights[layerID];
  }
  // Number of biases of layer `layerID`.
  Uint NB(const Uint layerID) const {
    assert(layerID < nLayers);
    return nBiases[layerID];
  }

private:
  //each layer requests a certain number of parameters, here compute contiguous
  //memory required such that each layer gets an aligned pointer to both
  //its first bias and first weight, allowing SIMD ops on all layers
  Uint _computeNParams(std::vector<Uint> _nWeights, std::vector<Uint> _nBiases)
  {
    assert(_nWeights.size() == _nBiases.size());
    const Uint _nLayers = _nWeights.size();
    Uint nTotPara = 0;
    indBiases = std::vector<Uint>(_nLayers, 0);
    indWeights = std::vector<Uint>(_nLayers, 0);
    for(Uint i=0; i<_nLayers; ++i) {
      indWeights[i] = nTotPara;
      nTotPara += Utilities::roundUpSimd(_nWeights[i]);
      indBiases[i] = nTotPara;
      nTotPara += Utilities::roundUpSimd( _nBiases[i]);
    }
    //printf("Weight sizes:[%s] inds:[%s] Bias sizes:[%s] inds[%s] Total:%u\n",
    //  print(_nWeights).c_str(), print(indWeights).c_str(),
    //  print(_nBiases).c_str(), print(indBiases).c_str(), nTotPara);
    return nTotPara;
  }
};
inline std::vector<ParametersPtr_t> allocManyParams(const ParametersPtr_t& W,
const Uint populationSize)
{
std::vector<ParametersPtr_t> ret(populationSize, nullptr);
// numa-aware allocation if OMP_PROC_BIND is TRUE:
#pragma omp parallel for schedule(static, 1)
for(Uint i=0; i<populationSize; ++i) ret[i] = W->allocateEmptyAlike();
return ret;
}
} // end namespace smarties
#endif // smarties_Parameters_h
|
CGOpenMPRuntime.h | //===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "CGValue.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
class StructType;
class Type;
class Value;
class OpenMPIRBuilder;
} // namespace llvm
namespace clang {
class Expr;
class OMPDependClause;
class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
class IdentifierInfo;
namespace CodeGen {
class Address;
class CodeGenFunction;
class CodeGenModule;
/// Hook pair invoked around the body of an OpenMP region during codegen.
/// Subclasses override Enter/Exit to inject code before/after the region
/// body is emitted; the defaults do nothing.
class PrePostActionTy {
public:
  explicit PrePostActionTy() = default;
  /// Invoked just before the region body is emitted.
  virtual void Enter(CodeGenFunction &CGF) {}
  /// Invoked just after the region body is emitted.
  virtual void Exit(CodeGenFunction &CGF) {}
  virtual ~PrePostActionTy() = default;
};
/// Class provides a way to call simple version of codegen for OpenMP region, or
/// an advanced with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
  /// Address of the user's callable, type-erased to an integer so any
  /// callable type can be stored and later recovered by CallbackFn.
  intptr_t CodeGen;
  typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  CodeGenTy Callback;
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
  /// Trampoline: restores the callable's static type and invokes it.
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  /// Wraps any callable taking (CodeGenFunction &, PrePostActionTy &).
  /// NOTE(review): only the ADDRESS of the callable is stored, so the
  /// callable must outlive this object -- confirm call sites keep both
  /// alive in the same full expression/scope.
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
                                     RegionCodeGenTy>::value> * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<std::remove_reference_t<Callable>>),
        PrePostAction(nullptr) {}
  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  void operator()(CodeGenFunction &CGF) const;
};
/// Aggregates the clause-derived data needed to emit an OpenMP task:
/// privatization lists, reductions, dependences, and scheduling knobs.
struct OMPTaskDataTy final {
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionOrigs;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals;
  /// One depend-clause item: its kind, optional iterator, and expressions.
  struct DependData {
    OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
    const Expr *IteratorExpr = nullptr;
    SmallVector<const Expr *, 4> DepExprs;
    explicit DependData() = default;
    DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
        : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
  };
  SmallVector<DependData, 4> Dependences;
  // NOTE(review): the bool packed into each PointerIntPair below appears to
  // flag presence/variant of the corresponding clause -- confirm at the
  // use sites before documenting as fact.
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  llvm::Value *Reductions = nullptr;
  unsigned NumberOfParts = 0;
  bool Tied = true;
  bool Nogroup = false;
  bool IsReductionWithTaskMod = false;
  bool IsWorksharingReduction = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
  /// Data required for codegen of reduction clauses.
  struct ReductionData {
    /// Reference to the item shared between tasks to reduce into.
    const Expr *Shared = nullptr;
    /// Reference to the original item.
    const Expr *Ref = nullptr;
    /// Helper expression for generation of private copy.
    const Expr *Private = nullptr;
    /// Helper expression for generation reduction operation.
    const Expr *ReductionOp = nullptr;
    ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
                  const Expr *ReductionOp)
        : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
    }
  };
  /// List of reduction-based clauses.
  SmallVector<ReductionData, 4> ClausesData;
  /// List of addresses of shared variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
  /// List of addresses of original variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
  /// Sizes of the reduction items in chars.
  SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
  /// Base declarations for the reduction items.
  SmallVector<const VarDecl *, 4> BaseDecls;

  /// Emits lvalue for shared expression.
  LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
  /// Emits upper bound for shared expression (if array section).
  LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
  /// Performs aggregate initialization.
  /// \param N Number of reduction item in the common list.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param SharedLVal Address of the original shared variable.
  /// \param DRD Declare reduction construct used for reduction item.
  void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
                                   Address PrivateAddr, LValue SharedLVal,
                                   const OMPDeclareReductionDecl *DRD);

public:
  /// The four argument arrays are parallel: element N of each describes
  /// reduction item N.
  ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
                   ArrayRef<const Expr *> Privates,
                   ArrayRef<const Expr *> ReductionOps);
  /// Emits lvalue for the shared and original reduction item.
  /// \param N Number of the reduction item.
  void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  /// \param Size Size of the type in chars.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
  /// Performs initialization of the private copy for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param DefaultInit Default initialization sequence that should be
  /// performed if no reduction specific initialization is found.
  /// \param SharedLVal Address of the original shared variable.
  void
  emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
                     LValue SharedLVal,
                     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
  /// Returns true if the private copy requires cleanups.
  bool needCleanups(unsigned N);
  /// Emits cleanup code for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
  /// Adjusts \p PrivatedAddr for using instead of the original variable
  /// address in normal operations.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                               Address PrivateAddr);
  /// Returns LValue for the reduction item.
  LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
  /// Returns LValue for the original reduction item.
  LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; }
  /// Returns the size of the reduction item (in chars and total number of
  /// elements in the item), or nullptr, if the size is a constant.
  std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
    return Sizes[N];
  }
  /// Returns the base declaration of the reduction item.
  const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
  /// Returns the base declaration of the reduction item.
  const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
  /// Returns true if the initialization of the reduction item uses initializer
  /// from declare reduction construct.
  bool usesReductionInitializer(unsigned N) const;
};
class CGOpenMPRuntime {
public:
/// Allows to disable automatic handling of functions used in target regions
/// as those marked as `omp declare target`.
///
/// RAII helper: the constructor saves and disables the runtime's
/// auto-declare-target handling; the destructor restores the saved state.
class DisableAutoDeclareTargetRAII {
  CodeGenModule &CGM;
  // Saved state restored by the destructor — presumably
  // CGOpenMPRuntime::ShouldMarkAsGlobal (ctor defined out of line; confirm).
  bool SavedShouldMarkAsGlobal;

public:
  /// Saves the current marking state and disables it.
  DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
  /// Restores the state saved by the constructor.
  ~DisableAutoDeclareTargetRAII();
};
/// Manages list of nontemporal decls for the specified directive.
///
/// RAII helper scoped to one loop directive: construction registers the
/// directive's nontemporal decls, destruction unregisters them.
class NontemporalDeclsRAII {
  CodeGenModule &CGM;
  // True if the constructor pushed a new entry that the destructor must pop.
  const bool NeedToPush;

public:
  NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
  ~NontemporalDeclsRAII();
};
/// Manages list of local vars (and their addresses) for untied tasks for the
/// specified directive.
class UntiedTaskLocalDeclsRAII {
  CodeGenModule &CGM;
  // True if the constructor pushed a new entry that the destructor must pop.
  const bool NeedToPush;

public:
  /// \param LocalVars Map from each untied-task local variable to its pair of
  /// addresses (original and task-local copy).
  UntiedTaskLocalDeclsRAII(
      CodeGenFunction &CGF,
      const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
                           std::pair<Address, Address>> &LocalVars);
  ~UntiedTaskLocalDeclsRAII();
};
/// Maps the expression for the lastprivate variable to the global copy used
/// to store new value because original variables are not mapped in inner
/// parallel regions. Only private copies are captured but we need also to
/// store private copy in shared address.
/// Also, stores the expression for the private loop counter and its
/// threadprivate name.
struct LastprivateConditionalData {
  /// Unique (threadprivate) global name generated for each tracked decl.
  llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
      DeclToUniqueName;
  /// LValue of the loop iteration variable for this construct.
  LValue IVLVal;
  /// Associated function; nullptr until assigned elsewhere.
  llvm::Function *Fn = nullptr;
  /// True if lastprivate-conditional analysis is disabled for this entry.
  bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
  /// What the RAII object registered on construction (and must undo on
  /// destruction).
  enum class ActionToDo {
    DoNotPush,
    PushAsLastprivateConditional,
    DisableLastprivateConditional,
  };
  CodeGenModule &CGM;
  ActionToDo Action = ActionToDo::DoNotPush;

  /// Check and try to disable analysis of inner regions for changes in
  /// lastprivate conditional.
  /// \param NeedToAddForLPCsAsDisabled Out-set of decls to mark as disabled.
  void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
                                 llvm::DenseSet<CanonicalDeclPtr<const Decl>>
                                     &NeedToAddForLPCsAsDisabled) const;

  // Private constructor — presumably used by the disable() factory below;
  // public clients must use the explicit constructor or disable().
  LastprivateConditionalRAII(CodeGenFunction &CGF,
                             const OMPExecutableDirective &S);

public:
  /// Registers \p S as a lastprivate conditional directive with loop
  /// iteration variable \p IVLVal.
  explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
                                      const OMPExecutableDirective &S,
                                      LValue IVLVal);
  /// Creates an RAII object that disables lastprivate conditional handling
  /// for the given directive.
  static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
                                            const OMPExecutableDirective &S);
  ~LastprivateConditionalRAII();
};
llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }
protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
/// An OpenMP-IR-Builder instance.
llvm::OpenMPIRBuilder OMPBuilder;
/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator);
/// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
uint64_t Size, int32_t Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Lambda codegen specific to an accelerator device.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = 0);
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
/// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// Get the function name of an outlined region.
// The name can be customized depending on the target.
//
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits address of the word in a memory where current thread id is
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
void setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint = false);
void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
/// Check if the default location must be constant.
/// Default is false to support OMPT/OMPD.
virtual bool isDefaultLocationConstant() const { return false; }
/// Returns additional flags that can be stored in reserved_2 field of the
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
/// Returns default flags for the barriers depending on the directive, for
/// which this barrier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
/// Get the LLVM type for the critical name.
llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;}
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<SourceLocation, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
/// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
  /// Cached ident_t debug-location value for the function.
  llvm::Value *DebugLoc;
  /// Cached thread-id value for the function.
  llvm::Value *ThreadID;
  /// Insert point for the service instructions.
  llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// Map of UDRs and corresponding combiner/initializer.
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
std::pair<llvm::Function *, llvm::Function *>>
UDRMapTy;
UDRMapTy UDRMap;
/// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareReductionDecl *, 4>>
FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
/// Map from the user-defined mapper declaration to its corresponding
/// functions.
llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
/// Map of functions and their local user-defined mappers.
using FunctionUDMMapTy =
llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
/// Maps local variables marked as lastprivate conditional to their internal
/// types.
llvm::DenseMap<llvm::Function *,
llvm::DenseMap<CanonicalDeclPtr<const Decl>,
std::tuple<QualType, const FieldDecl *,
const FieldDecl *, LValue>>>
LastprivateConditionalToTypes;
/// Maps function to the position of the untied task locals stack.
llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
/// Type typedef struct kmp_task {
/// void * shareds; /**< pointer to block of pointers to
/// shared vars */
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
/// executing task */
/// kmp_int32 part_id; /**< part id for the task */
/// kmp_routine_entry_t destructors; /* pointer to function to invoke
/// deconstructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
/// Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool in:1;
/// bool out:1;
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// Type typedef struct kmp_task_affinity_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool flag1 : 1;
/// bool flag2 : 1;
/// kmp_int32 reserved : 30;
/// } flags;
/// } kmp_task_affinity_info_t;
QualType KmpTaskAffinityInfoTy;
/// struct kmp_dim { // loop bounds info casted to kmp_int64
/// kmp_int64 lo; // lower
/// kmp_int64 up; // upper
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
/// Type struct __tgt_offload_entry{
/// void *addr; // Pointer to the offload entry info.
/// // (function or global)
/// char *name; // Name of the function or global.
/// size_t size; // Size of the entry info (0 if it is a function).
/// int32_t flags;
/// int32_t reserved;
/// };
QualType TgtOffloadEntryQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
  CodeGenModule &CGM;
  /// Number of entries registered so far.
  unsigned OffloadingEntriesNum = 0;

public:
  /// Base class of the entries info.
  class OffloadEntryInfo {
  public:
    /// Kind of a given entry.
    enum OffloadingEntryInfoKinds : unsigned {
      /// Entry is a target region.
      OffloadingEntryInfoTargetRegion = 0,
      /// Entry is a declare target variable.
      OffloadingEntryInfoDeviceGlobalVar = 1,
      /// Invalid entry info.
      OffloadingEntryInfoInvalid = ~0u
    };

  protected:
    // Abstract-ish base: only constructible by derived classes.
    OffloadEntryInfo() = delete;
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
                              uint32_t Flags)
        : Flags(Flags), Order(Order), Kind(Kind) {}
    // Non-virtual dtor is safe: protected, so no delete-through-base.
    ~OffloadEntryInfo() = default;

  public:
    /// True once an emission order has been assigned (Order != ~0u).
    bool isValid() const { return Order != ~0u; }
    unsigned getOrder() const { return Order; }
    OffloadingEntryInfoKinds getKind() const { return Kind; }
    uint32_t getFlags() const { return Flags; }
    void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
    llvm::Constant *getAddress() const {
      return cast_or_null<llvm::Constant>(Addr);
    }
    void setAddress(llvm::Constant *V) {
      assert(!Addr.pointsToAliveValue() && "Address has been set before!");
      Addr = V;
    }
    // LLVM-style RTTI root: every entry is-a OffloadEntryInfo.
    static bool classof(const OffloadEntryInfo *Info) { return true; }

  private:
    /// Address of the entity that has to be mapped for offloading.
    llvm::WeakTrackingVH Addr;
    /// Flags associated with the device global.
    uint32_t Flags = 0u;
    /// Order this entry was emitted.
    unsigned Order = ~0u;
    OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
  };

  /// Return true if there are no entries defined.
  bool empty() const;
  /// Return number of entries defined so far.
  unsigned size() const { return OffloadingEntriesNum; }

  OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}

  //
  // Target region entries related.
  //

  /// Kind of the target registry entry.
  enum OMPTargetRegionEntryKind : uint32_t {
    /// Mark the entry as target region.
    OMPTargetRegionEntryTargetRegion = 0x0,
    /// Mark the entry as a global constructor.
    OMPTargetRegionEntryCtor = 0x02,
    /// Mark the entry as a global destructor.
    OMPTargetRegionEntryDtor = 0x04,
  };

  /// Target region entries info.
  class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
    /// Address that can be used as the ID of the entry.
    llvm::Constant *ID = nullptr;

  public:
    OffloadEntryInfoTargetRegion()
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
    explicit OffloadEntryInfoTargetRegion(unsigned Order,
                                          llvm::Constant *Addr,
                                          llvm::Constant *ID,
                                          OMPTargetRegionEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
          ID(ID) {
      setAddress(Addr);
    }

    llvm::Constant *getID() const { return ID; }
    void setID(llvm::Constant *V) {
      assert(!ID && "ID has been set before!");
      ID = V;
    }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoTargetRegion;
    }
  };

  /// Initialize target region entry.
  void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                       StringRef ParentName, unsigned LineNum,
                                       unsigned Order);
  /// Register target region entry.
  void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                     StringRef ParentName, unsigned LineNum,
                                     llvm::Constant *Addr, llvm::Constant *ID,
                                     OMPTargetRegionEntryKind Flags);
  /// Return true if a target region entry with the provided information
  /// exists.
  bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                StringRef ParentName, unsigned LineNum,
                                bool IgnoreAddressId = false) const;
  /// Applies action \a Action on all registered entries.
  typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
                                  const OffloadEntryInfoTargetRegion &)>
      OffloadTargetRegionEntryInfoActTy;
  void actOnTargetRegionEntriesInfo(
      const OffloadTargetRegionEntryInfoActTy &Action);

  //
  // Device global variable entries related.
  //

  /// Kind of the global variable entry.
  enum OMPTargetGlobalVarEntryKind : uint32_t {
    /// Mark the entry as a to declare target.
    OMPTargetGlobalVarEntryTo = 0x0,
    /// Mark the entry as a to declare target link.
    OMPTargetGlobalVarEntryLink = 0x1,
  };

  /// Device global variable entries info.
  class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
    /// Type of the global variable.
    CharUnits VarSize;
    llvm::GlobalValue::LinkageTypes Linkage;

  public:
    OffloadEntryInfoDeviceGlobalVar()
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
    explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
                                             OMPTargetGlobalVarEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
    explicit OffloadEntryInfoDeviceGlobalVar(
        unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
        OMPTargetGlobalVarEntryKind Flags,
        llvm::GlobalValue::LinkageTypes Linkage)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
          VarSize(VarSize), Linkage(Linkage) {
      setAddress(Addr);
    }

    CharUnits getVarSize() const { return VarSize; }
    void setVarSize(CharUnits Size) { VarSize = Size; }
    llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
    void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
    }
  };

  /// Initialize device global variable entry.
  void initializeDeviceGlobalVarEntryInfo(StringRef Name,
                                          OMPTargetGlobalVarEntryKind Flags,
                                          unsigned Order);
  /// Register device global variable entry.
  void
  registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
                                   CharUnits VarSize,
                                   OMPTargetGlobalVarEntryKind Flags,
                                   llvm::GlobalValue::LinkageTypes Linkage);
  /// Checks if the variable with the given name has been registered already.
  bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
    return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
  }
  /// Applies action \a Action on all registered entries.
  typedef llvm::function_ref<void(StringRef,
                                  const OffloadEntryInfoDeviceGlobalVar &)>
      OffloadDeviceGlobalVarEntryInfoActTy;
  void actOnDeviceGlobalVarEntriesInfo(
      const OffloadDeviceGlobalVarEntryInfoActTy &Action);

private:
  // Storage for target region entries kind. The storage is to be indexed by
  // file ID, device ID, parent function name and line number.
  typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
      OffloadEntriesTargetRegionPerLine;
  typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
      OffloadEntriesTargetRegionPerParentName;
  typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
      OffloadEntriesTargetRegionPerFile;
  typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
      OffloadEntriesTargetRegionPerDevice;
  typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
  OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
  /// Storage for device global variable entries kind. The storage is to be
  /// indexed by mangled name.
  typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
      OffloadEntriesDeviceGlobalVarTy;
  OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
using UntiedLocalVarsAddressesMap =
llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
std::pair<Address, Address>>;
llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;
/// Flag for keeping track of whether a device routine has been emitted.
/// Device routines are specific to the target device.
bool HasEmittedDeclareTargetRegion = false;
/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();
/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();
/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
bool IVSigned);
/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
/// Gets (if a variable with the given name already exists) or creates an
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name,
unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
llvm::Value *Handle, llvm::Value *BasePtr,
llvm::Value *Ptr, llvm::Value *Size,
llvm::Value *MapType, llvm::Value *MapName,
CharUnits ElementSize,
llvm::BasicBlock *ExitBB, bool IsInit);
/// Aggregated artifacts produced while emitting task-creation code (see
/// emitTaskInit below).
struct TaskResultTy {
  /// Newly allocated task object (presumably the kmp_task_t * returned by
  /// __kmpc_omp_task_alloc — see emitTaskInit's step 1).
  llvm::Value *NewTask = nullptr;
  /// Outlined .omp_task_entry. function invoked by the runtime.
  llvm::Function *TaskEntry = nullptr;
  /// NewTask cast/viewed as the typed kmp_task_t structure.
  llvm::Value *NewTaskNewTaskTTy = nullptr;
  /// Base LValue of the task descriptor record.
  LValue TDBase;
  /// Record declaration of the kmp_task_t type used for this task.
  const RecordDecl *KmpTaskTQTyRD = nullptr;
  /// Task duplication function, if any (nullptr otherwise).
  llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
/// Returns default address space for the constant firstprivates, 0 by
/// default.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
void emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Value *DeviceID,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
StringRef UniqueDeclName, LValue LVal,
SourceLocation Loc);
/// Returns the number of the elements and the address of the depobj
/// dependency array.
/// \return Number of elements in depobj array and the pointer to the array of
/// dependencies.
std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
LValue DepobjLVal,
SourceLocation Loc);
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen);
/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at the compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
const OMPDeclareReductionDecl *D);
/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF = nullptr);
/// Get the function for the specified user-defined mapper. If it does not
/// exist, create one.
llvm::Function *
getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts);
/// Cleans up references to the objects in finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr);
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
/// Emits a masked region.
/// \param MaskedOpGen Generator for the statement associated with the given
/// masked region.
/// \param Filter Expression from the 'filter' clause, nullptr if the clause
/// was not specified.
virtual void emitMaskedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MaskedOpGen,
SourceLocation Loc,
const Expr *Filter = nullptr);
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads);
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true if a simple barrier call must be emitted,
/// false if the runtime class decides which one to emit (simple or with
/// cancellation checks).
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool EmitChecks = true,
bool ForceSimpleCall = false);
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// struct with the values to be passed to the dispatch runtime function
struct DispatchRTInput {
/// Loop lower bound
llvm::Value *LB = nullptr;
/// Loop upper bound
llvm::Value *UB = nullptr;
/// Chunk size specified using 'schedule' clause (nullptr if chunk
/// was not specified)
llvm::Value *Chunk = nullptr;
/// Default-constructed input: all values stay nullptr.
DispatchRTInput() = default;
/// Constructs the dispatch input from explicit bounds and chunk value.
DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
: LB(LB), UB(UB), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues);
/// Struct with the values to be passed to the static runtime function
struct StaticRTInput {
/// Size of the iteration variable in bits.
unsigned IVSize = 0;
/// Sign of the iteration variable.
bool IVSigned = false;
/// true if loop is ordered, false otherwise.
bool Ordered = false;
/// Address of the output variable in which the flag of the last iteration
/// is returned.
Address IL = Address::invalid();
/// Address of the output variable in which the lower iteration number is
/// returned.
Address LB = Address::invalid();
/// Address of the output variable in which the upper iteration number is
/// returned.
Address UB = Address::invalid();
/// Address of the output variable in which the stride value is returned
/// necessary to generate the static_chunked scheduled loop.
Address ST = Address::invalid();
/// Value of the chunk for the static_chunked scheduled loop. For the
/// default (nullptr) value, the chunk 1 will be used.
llvm::Value *Chunk = nullptr;
/// Constructs the static-schedule input; \p Chunk may be omitted, in which
/// case the runtime falls back to a chunk of 1.
StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
Address LB, Address UB, Address ST,
llvm::Value *Chunk = nullptr)
: IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
UB(UB), ST(ST), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to initialize it before start
/// of a statically scheduled distribute loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
Address IL, Address LB,
Address UB, Address ST);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc);
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
/// Emit code for initialization of declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit);
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name);
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO);
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen,
bool HasCancel = false);
/// Emits reduction function.
/// \param ArgsType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
llvm::Type *ArgsType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp,
const Expr *PrivateRef,
const DeclRefExpr *LHS,
const DeclRefExpr *RHS);
/// Options that control how the reduction code is emitted.
struct ReductionOptionsTy {
/// true if the parent directive also has a nowait clause.
bool WithNowait;
/// Emit only the reduction operation itself (used for omp simd on host).
bool SimpleReduction;
/// Kind of reduction to perform.
OpenMPDirectiveKind ReductionKind;
};
/// Emit code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options);
/// Emit code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction);
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N);
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal);
/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used and device modifier.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
virtual void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
/// Checks if the provided global decl \a GD is a declare target variable and
/// registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
/// Registers provided target firstprivate variable as global on the
/// target.
llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
const VarDecl *VD);
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
/// Creates and returns a registration function for when at least one
/// requires directives was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
/// Set to true if device pointer information have to be obtained.
bool RequiresDevicePointerInfo = false;
/// Set to true if Clang emits separate runtime calls for the beginning and
/// end of the region. These calls might have separate map type arrays.
bool SeparateBeginEndCalls = false;
public:
/// The array of base pointer passed to the runtime library.
llvm::Value *BasePointersArray = nullptr;
/// The array of section pointers passed to the runtime library.
llvm::Value *PointersArray = nullptr;
/// The array of sizes passed to the runtime library.
llvm::Value *SizesArray = nullptr;
/// The array of map types passed to the runtime library for the beginning
/// of the region or for the entire region if there are no separate map
/// types for the region end.
llvm::Value *MapTypesArray = nullptr;
/// The array of map types passed to the runtime library for the end of the
/// region, or nullptr if there are no separate map types for the region
/// end.
llvm::Value *MapTypesArrayEnd = nullptr;
/// The array of user-defined mappers passed to the runtime library.
llvm::Value *MappersArray = nullptr;
/// The array of original declaration names of mapped pointers sent to the
/// runtime library for debugging.
llvm::Value *MapNamesArray = nullptr;
/// Indicate whether any user-defined mapper exists.
bool HasMapper = false;
/// The total number of pointers passed to the runtime library.
unsigned NumberOfPtrs = 0u;
/// Map between a declaration of a capture and the corresponding base
/// pointer address where the runtime returns the device pointers.
llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
explicit TargetDataInfo() {}
explicit TargetDataInfo(bool RequiresDevicePointerInfo,
bool SeparateBeginEndCalls)
: RequiresDevicePointerInfo(RequiresDevicePointerInfo),
SeparateBeginEndCalls(SeparateBeginEndCalls) {}
/// Clear information about the data arrays.
void clearArrayInfo() {
BasePointersArray = nullptr;
PointersArray = nullptr;
SizesArray = nullptr;
MapTypesArray = nullptr;
MapTypesArrayEnd = nullptr;
MapNamesArray = nullptr;
MappersArray = nullptr;
HasMapper = false;
NumberOfPtrs = 0u;
}
/// Return true if the current target data information has valid arrays.
bool isValid() {
return BasePointersArray && PointersArray && SizesArray &&
MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs;
}
bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
};
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device);
/// Marks function \a Fn with properly mangled versions of vector functions.
/// \param FD Function marked as 'declare simd'.
/// \param Fn LLVM function that must be marked with 'declare simd'
/// attributes.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn);
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
virtual const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
return NativeParam;
}
/// Gets the address of the native argument based on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
virtual Address getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const;
/// Choose default schedule type and chunk value for the
/// dist_schedule clause.
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
llvm::Value *&Chunk) const {}
/// Choose default schedule type and chunk value for the
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
/// Marks the declaration as already emitted for the device code and returns
/// true, if it was marked already, and false, otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
/// Emit deferred declare target variables marked for deferred emission.
void emitDeferredTargetDecls() const;
/// Adjust some parameters for the target-based directives, like addresses of
/// the variables captured by reference in lambdas.
virtual void
adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
const OMPExecutableDirective &D) const;
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing.
virtual void processRequiresDirective(const OMPRequiresDecl *D);
/// Gets default memory ordering as specified in requires directive.
llvm::AtomicOrdering getDefaultMemoryOrdering() const;
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
/// Checks if the \p VD variable is marked as nontemporal declaration in
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
/// Create specialized alloca to handle lastprivate conditionals.
Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
const VarDecl *VD);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
/// Checks if the lastprivate conditional was updated in inner region and
/// writes the value.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;bool Fired = false;
/// #pragma omp ... shared(a)
/// {
/// lp_a = ...;
/// Fired = true;
/// }
/// if (Fired) {
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
/// Emits list of dependecies based on the provided data (array of
/// dependence/expression pairs).
/// \returns Pointer to the first element of the array casted to VoidPtr type.
std::pair<llvm::Value *, Address>
emitDependClause(CodeGenFunction &CGF,
ArrayRef<OMPTaskDataTy::DependData> Dependencies,
SourceLocation Loc);
/// Emits list of dependecies based on the provided data (array of
/// dependence/expression pairs) for depobj construct. In this case, the
/// variable is allocated in dynamically. \returns Pointer to the first
/// element of the array casted to VoidPtr type.
Address emitDepobjDependClause(CodeGenFunction &CGF,
const OMPTaskDataTy::DependData &Dependencies,
SourceLocation Loc);
/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
SourceLocation Loc);
/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
/// Initializes user defined allocators specified in the uses_allocators
/// clauses.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
const Expr *AllocatorTraits);
/// Destroys user defined allocators specified in the uses_allocators clause.
void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
/// Returns true if the variable is a local variable in untied task.
bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const;
};
/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
~CGOpenMPSIMDRuntime() override {}
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) override;
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr) override;
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) override;
/// Emits a masked region.
/// \param MaskedOpGen Generator for the statement associated with the given
/// masked region.
void emitMaskedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc,
const Expr *Filter = nullptr) override;
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) override;
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps) override;
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks = true,
bool ForceSimpleCall = false) override;
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify a ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values) override;
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned) override;
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind) override;
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned, Address IL,
Address LB, Address UB, Address ST) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
SourceLocation Loc) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc) override;
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
Address VDAddr, SourceLocation Loc) override;
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr) override;
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) override;
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D, llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) override;
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction) override;
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) override;
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) override;
/// Emit code for 'taskwait' directive.
void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) override;
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) override;
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device, and in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used and device modifier.
void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
bool emitTargetFunctions(GlobalDecl GD) override;
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
bool emitTargetGlobalVariable(GlobalDecl GD) override;
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
bool emitTargetGlobal(GlobalDecl GD) override;
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc) override;
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info) override;
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device) override;
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const override;
/// Gets the address of the native argument based on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
const VarDecl *TargetParam) const override;
/// Gets the OpenMP-specific address of the local variable.
/// SIMD-only mode has no device-specific storage, so no valid address exists.
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) override {
return Address::invalid();
}
};
} // namespace CodeGen
} // namespace clang
#endif
|
jacobi-ompacc-opt1.c | // Using target data to promote data allocation to higher level, enabling reusing in iterations
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Add timing support
#include <sys/time.h>
/* Return the current wall-clock time in seconds since the Unix epoch,
 * with microsecond resolution (POSIX gettimeofday). */
double time_stamp()
{
  struct timeval now;
  gettimeofday(&now, (struct timezone *)NULL);
  return (double)now.tv_sec + now.tv_usec * 1.0e-6;
}
// Wall-clock stamps taken around the solver (set in driver()).
double time1, time2;
// Forward declarations; definitions follow below in this file.
void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successive over-relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define MSIZE 512
// Grid dimensions (n x m) and maximum solver iterations; set in main().
int n,m,mits;
#define REAL float // flexible between float and double
// Reference error/residual values used by error_check(); depend on MSIZE!!
REAL error_ref= 9.212767E-04, resid_ref = 2.355429E-08;
// Solver controls: tol = convergence tolerance (set in main),
// relax = over-relaxation parameter, alpha = Helmholtz constant.
REAL tol,relax=1.0,alpha=0.0543;
// u = solution, f = right-hand side, uold = previous iterate.
REAL u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
// Grid spacing in the x and y directions (computed in initialize()).
REAL dx,dy;
/* Entry point: set the fixed benchmark parameters into the file-scope
 * globals and launch the solver via driver(). Always returns 0. */
int main (void)
{
  /* Interactive input of n, m, tol and mits was removed; the benchmark
   * runs with this hard-coded configuration (grid MSIZE x MSIZE). */
  mits = 5000;
  tol = 0.0000000001;
  n = MSIZE;
  m = MSIZE;
#if 0 // Not yet support concurrent CPU and GPU threads
#ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp single
    printf("Running using %d threads...\n",omp_get_num_threads());
  }
#endif
#endif
  driver();
  return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
/*************************************************************
 * driver(): initialize the arrays, run the Jacobi solver while
 * timing it (into the globals time1/time2), report the elapsed
 * time, and verify the solution against the exact one.
 *************************************************************/
void driver( )
{
  initialize();

  /* Time only the solve; initialization and checking are excluded. */
  time1 = time_stamp();
  jacobi();
  time2 = time_stamp();

  printf("------------------------\n");
  printf("Execution time = %f\n", time2 - time1);

  /* Compare u against the analytic solution (1-x^2)(1-y^2). */
  error_check();
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize( )
{
int i,j, xx,yy;
//double PI=3.1415926;
dx = 2.0 / (n-1);
dy = 2.0 / (m-1);
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(xx,yy,j,i)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
xx =(int)( -1.0 + dx * (i-1));
yy = (int)(-1.0 + dy * (j-1)) ;
u[i][j] = 0.0;
f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
- 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
}
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlet boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi( )
{
  REAL omega;
  int i,j,k;
  REAL error,resid,ax,ay,b;
  // double error_local;
  // float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
  // float te1,te2;
  // float second;
  omega=relax;
  /*
   * Initialize coefficients */
  ax = 1.0/(dx*dx); /* X-direction coef */
  ay = 1.0/(dy*dy); /* Y-direction coef */
  b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */
  /* Seed the error above tol so the while loop runs at least once. */
  error = 10.0 * tol;
  k = 1;
  // An optimization on top of naive coding: promoting data handling outside the while loop
  // data properties may change since the scope is bigger:
  /* The target data region keeps f, u and uold mapped on the device for the
   * whole iteration loop, so the two kernels below re-use device data rather
   * than re-transferring it every iteration. */
#pragma omp target data map(to:n, m, omega, ax, ay, b, f[0:n][0:m]) map(tofrom:u[0:n][0:m]) map(alloc:uold[0:n][0:m])
  while ((k<=mits)&&(error>tol))
  {
    error = 0.0;
    /* Copy new solution into old */
    //#pragma omp parallel
    // {
    /* Kernel 1: uold <- u. */
#pragma omp target map(to:n, m, u[0:n][0:m]) map(from:uold[0:n][0:m])
#pragma omp parallel for private(j,i)
    for(i=0;i<n;i++)
      for(j=0;j<m;j++)
        uold[i][j] = u[i][j];
    /* Kernel 2: 5-point stencil update of the interior points; the squared
     * residuals are accumulated into `error` via the reduction clause. */
#pragma omp target map(to:n, m, omega, ax, ay, b, f[0:n][0:m], uold[0:n][0:m]) map(from:u[0:n][0:m])
#pragma omp parallel for private(resid,j,i) reduction(+:error) // nowait
    for (i=1;i<(n-1);i++)
      for (j=1;j<(m-1);j++)
      {
        resid = (ax*(uold[i-1][j] + uold[i+1][j])\
          + ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;
        u[i][j] = uold[i][j] - omega * resid;
        error = error + resid*resid ;
      }
    // }
    /* omp end parallel */
    /* Error check */
    /* NOTE(review): this prints the raw sum of squared residuals, before the
     * sqrt/normalization below — confirm that is the intended progress value. */
    if (k%500==0)
      printf("Finished %d iteration with error =%f\n",k, error);
    error = sqrt(error)/(n*m);
    k = k + 1;
  } /* End iteration loop */
  printf("Total Number of Iterations:%d\n",k);
  printf("Residual:%E\n", error);
  printf("Residual_ref :%E\n", resid_ref);
  printf ("Diff ref=%E\n", fabs(error-resid_ref));
  /* Hard-fail if the residual drifts from the recorded reference (valid
   * only for MSIZE = 512, per the comment on resid_ref). */
  assert (fabs(error-resid_ref) < 1E-14);
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check ( )
{
int i,j;
REAL xx,yy,temp,error;
dx = 2.0 / (n-1);
dy = 2.0 / (m-1);
error = 0.0 ;
//#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
xx = -1.0 + dx * (i-1);
yy = -1.0 + dy * (j-1);
temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
error = error + temp*temp;
}
error = sqrt(error)/(n*m);
printf("Solution Error :%E \n",error);
printf("Solution Error Ref :%E \n",error_ref);
printf ("Diff ref=%E\n", fabs(error-error_ref));
assert (fabs(error-error_ref) < 1E-14);
}
|
nbody_tools.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <omp.h>
#include "ui.h"
#include "nbody.h"
#include "nbody_tools.h"
#include "nbody_alloc.h"
extern node_t* root;
/* draw recursively the content of a node */
/* Recursively draw a quadtree node: optionally its bounding box, the
 * particle it holds (if any), and all four children. Compiles to a
 * no-op unless DISPLAY is defined. */
void draw_node(node_t* n) {
#ifndef DISPLAY
  /* Rendering disabled at compile time. */
  return;
#else
  if(!n)
    return;
#if DRAW_BOXES
  /* Outline this node's bounding box in screen coordinates. */
  int x1 = POS_TO_SCREEN(n->x_min);
  int y1 = POS_TO_SCREEN(n->y_min);
  int x2 = POS_TO_SCREEN(n->x_max);
  int y2 = POS_TO_SCREEN(n->y_max);
  draw_rect(x1, y1, x2, y2);
#endif
  if(n->particle) {
    /* Leaf node: plot its single particle. */
    int x = POS_TO_SCREEN(n->particle->x_pos);
    int y = POS_TO_SCREEN(n->particle->y_pos);
    draw_point (x,y);
  }
  if(n->children) {
#if 0
    /* draw a red point that represents the center of the node */
    int x = POS_TO_SCREEN(n->x_center);
    int y = POS_TO_SCREEN(n->y_center);
    draw_red_point (x,y);
#endif
    /* Interior node: recurse into the 4 quadrant children. */
    int i;
    for(i=0; i<4; i++) {
      draw_node(&n->children[i]);
    }
  }
#endif
}
/* print recursively the particles of a node */
/* Recursively print every particle stored in the subtree rooted at n
 * to stream f, one "particle={...}" line per particle. */
void print_particles(FILE* f, node_t*n) {
  if(n == NULL)
    return;

  particle_t* p = n->particle;
  if(p != NULL) {
    fprintf(f, "particle={pos=(%f,%f), vel=(%f,%f)}\n", p->x_pos, p->y_pos, p->x_vel, p->y_vel);
  }

  if(n->children != NULL) {
    int child;
    for(child = 0; child < 4; child++)
      print_particles(f, &n->children[child]);
  }
}
/* Initialize a node */
/* Initialize `n` as an empty leaf covering [x_min,x_max] x [y_min,y_max],
 * attach it under `parent`, and refresh the depth recorded on ancestors. */
void init_node(node_t* n, node_t* parent, double x_min, double x_max, double y_min, double y_max) {
  n->parent = parent;
  n->children = NULL;
  n->n_particles = 0;
  n->particle = NULL;
  n->x_min = x_min;
  n->x_max = x_max;
  n->y_min = y_min;
  n->y_max = y_max;
  n->depth = 0;
  /* Walk up the ancestors so each records at least the depth of this new
   * subtree below it. NOTE(review): `depth` only increments when an
   * ancestor was actually updated — presumably this assumes that once an
   * ancestor is already deep enough, all higher ancestors are too;
   * confirm that invariant holds. */
  int depth=1;
  while(parent) {
    if(parent->depth < depth) {
      parent->depth = depth;
      depth++;
    }
    parent = parent->parent;
  }
  /* Mass and center of mass stay zero until the first particle is inserted. */
  n->mass= 0;
  n->x_center = 0;
  n->y_center = 0;
  /* A degenerate (zero-width/height) box could never be subdivided. */
  assert(x_min != x_max);
  assert(y_min != y_max);
}
/* Compute the position of a particle in a node and return
* the quadrant in which it should be placed
*/
/* Return the index (0..3) of the quadrant of `node` containing `particle`:
 * bit 0 set means the right half (x above center), bit 1 set means the
 * upper half (y above center). Points exactly on a center line go to the
 * lower-indexed side. */
int get_quadrant(particle_t* particle, node_t*node) {
  double mid_x = node->x_min + (node->x_max - node->x_min) / 2;
  double mid_y = node->y_min + (node->y_max - node->y_min) / 2;

  /* The particle must lie inside this node's bounding box. */
  assert(particle->x_pos >= node->x_min);
  assert(particle->x_pos <= node->x_max);
  assert(particle->y_pos >= node->y_min);
  assert(particle->y_pos <= node->y_max);

  int quadrant = 0;
  if (particle->x_pos > mid_x)
    quadrant += 1;   /* right column */
  if (particle->y_pos > mid_y)
    quadrant += 2;   /* upper row */
  return quadrant;
}
/* inserts a particle in a node (or one of its children) */
/* Insert `particle` into the subtree rooted at `node`.
 * A leaf holds exactly one particle; inserting into an occupied leaf first
 * splits it into 4 children and pushes the resident particle down. Mass
 * and center of mass are recomputed on the way out of the recursion. */
void insert_particle(particle_t* particle, node_t*node) {
#if 0
  assert(particle->x_pos >= node->x_min);
  assert(particle->x_pos <= node->x_max);
  assert(particle->y_pos >= node->y_min);
  assert(particle->y_pos <= node->y_max);
  assert(particle->node == NULL);
#endif
  if(node->n_particles == 0 &&
     node->children == NULL) {
    assert(node->children == NULL);
    /* there's no particle. insert directly */
    node->particle = particle;
    node->n_particles++;
    /* A single-particle leaf's center of mass is the particle itself. */
    node->x_center = particle->x_pos;
    node->y_center = particle->y_pos;
    node->mass = particle->mass;
    //particle->node = node;
    assert(node->children == NULL);
    return;
  } else {
    /* There's already a particle */
    if(! node->children) {
      /* there's no children yet */
      /* create 4 children and move the already-inserted particle to one of them */
      //assert(node->x_min != node->x_max);
      node->children = alloc_node();
      double x_min = node->x_min;
      double x_max = node->x_max;
      double x_center = x_min+(x_max-x_min)/2;
      double y_min = node->y_min;
      double y_max = node->y_max;
      double y_center = y_min+(y_max-y_min)/2;
      /* Child layout: 0=lower-left, 1=lower-right, 2=upper-left,
       * 3=upper-right — must agree with get_quadrant's numbering. */
      init_node(&node->children[0], node, x_min, x_center, y_min, y_center);
      init_node(&node->children[1], node, x_center, x_max, y_min, y_center);
      init_node(&node->children[2], node, x_min, x_center, y_center, y_max);
      init_node(&node->children[3], node, x_center, x_max, y_center, y_max);
      /* move the already-inserted particle to one of the children */
      particle_t*ptr = node->particle;
      //assert(ptr->node == node);
      int quadrant = get_quadrant(ptr, node);
      node->particle = NULL;
      //ptr->node = NULL;
      insert_particle(ptr, &node->children[quadrant]);
    }
    /* insert the particle to one of the children */
    int quadrant = get_quadrant(particle, node);
    node->n_particles++;
    //assert(particle->node == NULL);
    insert_particle(particle, &node->children[quadrant]);
    /* update the mass and center of the node */
    double total_mass = 0;
    double total_x = 0;
    double total_y = 0;
    int i;
    for(i=0; i<4; i++) {
      total_mass += node->children[i].mass;
      total_x += node->children[i].x_center*node->children[i].mass;
      total_y += node->children[i].y_center*node->children[i].mass;
    }
    node->mass = total_mass;
    /* Weighted average; total_mass > 0 here because this subtree holds
     * at least the particle just inserted. */
    node->x_center = total_x/total_mass;
    node->y_center = total_y/total_mass;
#if 0
    assert(node->particle == NULL);
    assert(node->n_particles > 0);
#endif
  }
}
/*
Place particles in their initial positions.
*/
/*
  Place particles in their initial positions: evenly spaced along the x
  axis in [-1, 1), at y = 0, with x_vel = 0, y_vel equal to the x position,
  and a mass in (1, 2] that grows with the particle index.

  Parameters:
    num_particles - number of entries in `particles` to initialize
    particles     - array of at least num_particles particle_t to fill
*/
void all_init_particles(int num_particles, particle_t *particles)
{
  int i;
  double total_particle = num_particles;
  /* Each iteration writes only particles[i], so the loop is trivially
   * parallel; dynamic scheduling matches the original code. */
#pragma omp parallel for private(i) schedule(dynamic)
  for (i = 0; i < num_particles; i++) {
    particle_t *particle = &particles[i];
    /* Fix: scale positions by the num_particles argument instead of the
     * extern global `nparticles` — the old code only worked when callers
     * passed the global as the argument. */
    particle->x_pos = i*2.0/num_particles - 1.0;
    particle->y_pos = 0.0;
    particle->x_vel = 0.0;
    particle->y_vel = particle->x_pos;
    particle->mass = 1.0 + (num_particles+i)/total_particle;
  }
}
struct memory_t mem_node;
/* Set up the node allocator pool: each allocation unit is a block of 4
 * nodes (the four children of one quadtree node). */
void init_alloc(int nb_blocks) {
  size_t block_bytes = sizeof(node_t) * 4;
  mem_init(&mem_node, block_bytes, nb_blocks);
}
/* allocate a block of 4 nodes */
node_t* alloc_node() {
node_t*ret = mem_alloc(&mem_node);
return ret;
}
/* Release an entire tree: first recursively return all descendant child
 * blocks to the pool, then return the root's own 4-node block. */
void free_root(node_t* tree) {
  free_node(tree);
  mem_free(&mem_node, tree);
}
/* Recursively return the child blocks of `n` to the node pool. `n`
 * itself is not freed here — its storage belongs to its parent's block
 * (or, for the root, is released by free_root). */
void free_node(node_t* n) {
  if(n == NULL)
    return;
  if(n->children == NULL)
    return;

  int child;
  for(child = 0; child < 4; child++)
    free_node(&n->children[child]);

  /* The 4 children were allocated together as one block. */
  mem_free(&mem_node, n->children);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.