source
stringlengths
3
92
c
stringlengths
26
2.25M
vla_crash.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ // RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp -x c -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1 // RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" int a; void foo() { int(*b)[a]; int *(**c)[a]; #pragma omp parallel if (0) b[0][0] = c[0][a][0][a]; } void bar(int n, int *a) { // expected-warning@+1 {{incompatible pointer types initializing 'int (*)[n]' with an expression of type 'int **'}} int(*p)[n] = &a; #pragma omp parallel if(0) // expected-warning@+1 {{comparison of distinct pointer types ('int (*)[n]' and 'int **')}} if (p == &a) { } } // CHECK1-LABEL: define {{[^@]+}}@foo // CHECK1-SAME: () #[[ATTR0:[0-9]+]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[B:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[C:%.*]] = alloca i32***, align 8 // CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* @a, align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* @a, align 4 // CHECK1-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 // CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4 // CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 // CHECK1-NEXT: call void @.omp_outlined.(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP2]], i32** [[B]], i64 [[TMP4]], 
i32**** [[C]]) #[[ATTR2:[0-9]+]] // CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: ret void // // // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i64 [[VLA1:%.*]], i32**** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1:[0-9]+]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8 // CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32****, align 8 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store i32**** [[C]], i32***** [[C_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load i32****, i32***** [[C_ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load i32***, i32**** [[TMP3]], align 8 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32**, i32*** [[TMP4]], i64 0 // CHECK1-NEXT: [[TMP5:%.*]] = load i32**, i32*** [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* @a, align 4 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 // CHECK1-NEXT: [[TMP7:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = 
getelementptr inbounds i32*, i32** [[TMP5]], i64 [[TMP7]] // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32*, i32** [[ARRAYIDX3]], i64 0 // CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[ARRAYIDX4]], align 8 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* @a, align 4 // CHECK1-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP9]] to i64 // CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i64 [[IDXPROM5]] // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4 // CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[TMP1]], align 8 // CHECK1-NEXT: [[TMP12:%.*]] = mul nsw i64 0, [[TMP0]] // CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i64 [[TMP12]] // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[ARRAYIDX7]], i64 0 // CHECK1-NEXT: store i32 [[TMP10]], i32* [[ARRAYIDX8]], align 4 // CHECK1-NEXT: ret void // // // CHECK1-LABEL: define {{[^@]+}}@bar // CHECK1-SAME: (i32 signext [[N:%.*]], i32* [[A:%.*]]) #[[ATTR0]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[P:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i32** [[A_ADDR]] to i32* // CHECK1-NEXT: store i32* [[TMP3]], i32** [[P]], align 8 // CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4 // CHECK1-NEXT: store i32 0, i32* 
[[DOTBOUND_ZERO_ADDR]], align 4 // CHECK1-NEXT: call void @.omp_outlined..1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP2]], i32** [[P]], i32** [[A_ADDR]]) #[[ATTR2]] // CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: ret void // // // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32** nonnull align 8 dereferenceable(8) [[P:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR1]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[P_ADDR:%.*]] = alloca i32**, align 8 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i32** [[P]], i32*** [[P_ADDR]], align 8 // CHECK1-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[P_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i32** [[TMP2]] to i32* // CHECK1-NEXT: [[CMP:%.*]] = icmp eq i32* [[TMP3]], [[TMP4]] // CHECK1-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]] // CHECK1: if.then: // CHECK1-NEXT: br label [[IF_END]] // CHECK1: if.end: // CHECK1-NEXT: ret void //
omp.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ #include <omp.h> //------------------------------------------------------------------------------------------------------------------------------ void defaultThreadingForLevel(level_type *level){ int omp_threads = 1; int omp_nested = 0; #pragma omp parallel { #pragma omp master { omp_threads = omp_get_num_threads(); omp_nested = omp_get_nested(); } } // set default parameters for threading... level->threads_per_box = omp_threads; level->concurrent_boxes = 1; } //------------------------------------------------------------------------------------------------------------------------------ void tuneThreadingForLevel(level_type *level){ int omp_threads = 1; int omp_nested = 0; #pragma omp parallel { #pragma omp master { omp_threads = omp_get_num_threads(); omp_nested = omp_get_nested(); } } // inspect omp_nested, omp_num_threads, the number of boxes, and the box size, and choose the optimal varlues for // threads_per_box // concurrent_boxes } //------------------------------------------------------------------------------------------------------------------------------
StressCPU.c
#include <stdio.h>
#include <omp.h>

// Count the primes in [1, limit] by naive trial division.
//
// Each candidate is divided by every integer starting at 2; the inner loop
// exits early at the first divisor, so `i == candidate` holds only when no
// divisor smaller than the candidate exists.  This also correctly rejects 1
// (the inner loop never runs, leaving i == 2 != 1).
//
// limit:   inclusive upper bound of the search range.
// returns: the number of primes found.
static int count_primes(int limit)
{
    int candidate;
    int total = 0;
    // schedule(dynamic): the per-iteration cost grows with the candidate's
    // value, so dynamic scheduling keeps the thread team load-balanced.
    // reduction(+:total) gives each thread a private counter that is summed
    // at the end of the loop.
    #pragma omp parallel for schedule(dynamic) reduction(+ : total)
    for (candidate = 1; candidate <= limit; candidate++) {
        int i = 2;
        while (i <= candidate) {
            if (candidate % i == 0)
                break;
            i++;
        }
        if (i == candidate)
            total++;
    }
    return total;
}

int main(void)
{
    int threshold = 1000000;
    // Bug fix: the original wrapped the computation in `while(1)` with no
    // break, so the program never terminated and the printf below was
    // unreachable dead code -- the result was computed over and over but
    // never reported.  Compute once and print.
    int totalPrimes = count_primes(threshold);
    printf("%d prime numbers under %d\n", totalPrimes, threshold);
    return 0;
}
GB_unop__identity_uint64_uint16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_uint16) // op(A') function: GB (_unop_tran__identity_uint64_uint16) // C type: uint64_t // A type: uint16_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = (uint64_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_uint16) ( uint64_t *Cx, // Cx and Ax may be aliased const uint16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) 
{ #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint16_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_uint16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
resize_bicubic.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_KERNELS_RESIZE_BICUBIC_H_ #define MACE_KERNELS_RESIZE_BICUBIC_H_ #include <algorithm> #include <cmath> #include <memory> #include <vector> #include "mace/core/future.h" #include "mace/core/tensor.h" #include "mace/kernels/kernel.h" #include "mace/utils/logging.h" namespace mace { namespace kernels { static const int64_t kTableSize = (1 << 10); inline const std::shared_ptr<float> InitCoeffsTable() { // Allocate and initialize coefficients table using Bicubic // convolution algorithm. 
// https://en.wikipedia.org/wiki/Bicubic_interpolation auto coeffs_tab = std::shared_ptr<float>(new float[(kTableSize + 1) * 2], std::default_delete<float[]>()); float *coeffs_tab_ptr = coeffs_tab.get(); static const double A = -0.75; for (int i = 0; i <= kTableSize; ++i) { float x = i * 1.0 / kTableSize; coeffs_tab_ptr[i * 2] = ((A + 2) * x - (A + 3)) * x * x + 1; x += 1.0; coeffs_tab_ptr[i * 2 + 1] = ((A * x - 5 * A) * x + 8 * A) * x - 4 * A; } return coeffs_tab; } inline const float *GetCoeffsTable() { // Static so that we initialize it on first use static const std::shared_ptr<float> coeffs_tab = InitCoeffsTable(); return coeffs_tab.get(); } inline int64_t Bound(int64_t val, int64_t limit) { return std::min<int64_t>(limit - 1ll, std::max<int64_t>(0ll, val)); } inline void GetWeightsAndIndices(float scale, int64_t out_loc, int64_t limit, std::vector<float> *weights, std::vector<int64_t> *indices) { const int64_t in_loc = scale * out_loc; const float delta = scale * out_loc - in_loc; const int64_t offset = lrintf(delta * kTableSize); const float *coeffs_tab = GetCoeffsTable(); *weights = {coeffs_tab[offset * 2 + 1], coeffs_tab[offset * 2], coeffs_tab[(kTableSize - offset) * 2], coeffs_tab[(kTableSize - offset) * 2 + 1]}; *indices = {Bound(in_loc - 1, limit), Bound(in_loc, limit), Bound(in_loc + 1, limit), Bound(in_loc + 2, limit)}; } inline float Interpolate1D(const std::vector<float> &weights, const std::vector<float> &values) { return values[0] * weights[0] + values[1] * weights[1] + values[2] * weights[2] + values[3] * weights[3]; } inline float CalculateResizeScale(index_t in_size, index_t out_size, bool align_corners) { return (align_corners && out_size > 1) ? 
(in_size - 1) / static_cast<float>(out_size - 1) : in_size / static_cast<float>(out_size); } inline void ResizeImage(const float *images, const index_t batch_size, const index_t in_height, const index_t in_width, const index_t out_height, const index_t out_width, const index_t channels, const float height_scale, const float width_scale, float *output) { #pragma omp parallel for collapse(2) for (index_t b = 0; b < batch_size; ++b) { for (index_t y = 0; y < out_height; ++y) { std::vector<float> y_weights; std::vector<index_t> y_indices; GetWeightsAndIndices(height_scale, y, in_height, &y_weights, &y_indices); for (index_t x = 0; x < out_width; ++x) { std::vector<float> x_weights; std::vector<index_t> x_indices; GetWeightsAndIndices(width_scale, x, in_width, &x_weights, &x_indices); for (index_t c = 0; c < channels; ++c) { // Use a 4x4 patch to compute the interpolated output value at // (b, y, x, c). const float *channel_input_ptr = images + (b * channels + c) * in_height * in_width; float *channel_output_ptr = output + (b * channels + c) * out_height * out_width; std::vector<float> coeff(4, 0.0); for (index_t i = 0; i < 4; ++i) { const std::vector<float> values = { static_cast<float>(channel_input_ptr [y_indices[i] * in_width + x_indices[0]]), static_cast<float>(channel_input_ptr [y_indices[i] * in_width + x_indices[1]]), static_cast<float>(channel_input_ptr [y_indices[i] * in_width + x_indices[2]]), static_cast<float>(channel_input_ptr [y_indices[i] * in_width + x_indices[3]])}; coeff[i] = Interpolate1D(x_weights, values); } channel_output_ptr[y * out_width + x] = Interpolate1D(y_weights, coeff); } } } } } template<DeviceType D, typename T> struct ResizeBicubicFunctor; template<> struct ResizeBicubicFunctor<DeviceType::CPU, float> : OpKernel { ResizeBicubicFunctor(OpKernelContext *context, const bool align_corners, const std::vector<index_t> &size) : OpKernel(context), align_corners_(align_corners) { MACE_CHECK(size.size() == 2); out_height_ = size[0]; out_width_ = 
size[1]; } MaceStatus operator()(const Tensor *input, Tensor *output, StatsFuture *future) { MACE_UNUSED(future); const index_t batch = input->dim(0); const index_t channels = input->dim(1); const index_t in_height = input->dim(2); const index_t in_width = input->dim(3); index_t out_height = out_height_; index_t out_width = out_width_; MACE_CHECK(out_height > 0 && out_width > 0); std::vector<index_t> out_shape{batch, channels, out_height, out_width}; MACE_RETURN_IF_ERROR(output->Resize(out_shape)); Tensor::MappingGuard input_mapper(input); Tensor::MappingGuard output_mapper(output); const float *input_data = input->data<float>(); float *output_data = output->mutable_data<float>(); if (out_height == in_height && out_width == in_width) { std::copy(input_data, input_data + batch * channels * in_height * in_width, output_data); return MACE_SUCCESS; } float height_scale = CalculateResizeScale(in_height, out_height, align_corners_); float width_scale = CalculateResizeScale(in_width, out_width, align_corners_); ResizeImage(input_data, batch, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, output_data); return MACE_SUCCESS; } bool align_corners_; index_t out_height_; index_t out_width_; }; #ifdef MACE_ENABLE_OPENCL class OpenCLResizeBicubicKernel { public: virtual MaceStatus Compute( OpKernelContext *context, const Tensor *input, Tensor *output, StatsFuture *future) = 0; MACE_VIRTUAL_EMPTY_DESTRUCTOR(OpenCLResizeBicubicKernel); }; template<typename T> struct ResizeBicubicFunctor<DeviceType::GPU, T> : OpKernel { ResizeBicubicFunctor(OpKernelContext *context, bool align_corners, const std::vector<index_t> &size); MaceStatus operator()(const Tensor *input, Tensor *output, StatsFuture *future); std::unique_ptr<OpenCLResizeBicubicKernel> kernel_; }; #endif // MACE_ENABLE_OPENCL } // namespace kernels } // namespace mace #endif // MACE_KERNELS_RESIZE_BICUBIC_H_
genome.c
/* ============================================================================= * * genome.c * * ============================================================================= * * Copyright (C) Stanford University, 2006. All Rights Reserved. * Author: Chi Cao Minh * * ============================================================================= * * For the license of bayes/sort.h and bayes/sort.c, please see the header * of the files. * * ------------------------------------------------------------------------ * * For the license of kmeans, please see kmeans/LICENSE.kmeans * * ------------------------------------------------------------------------ * * For the license of ssca2, please see ssca2/COPYRIGHT * * ------------------------------------------------------------------------ * * For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the * header of the files. * * ------------------------------------------------------------------------ * * For the license of lib/rbtree.h and lib/rbtree.c, please see * lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree * * ------------------------------------------------------------------------ * * Unless otherwise noted, the following license applies to STAMP files: * * Copyright (c) 2007, Stanford University * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of Stanford University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * ============================================================================= */ #include <assert.h> #include <getopt.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "gene.h" #include "random.h" #include "segments.h" #include "sequencer.h" #include "thread.h" #include "timer.h" #include "tm.h" #include "vector.h" enum param_types { PARAM_GENE = (unsigned char)'g', PARAM_NUMBER = (unsigned char)'n', PARAM_SEGMENT = (unsigned char)'s', PARAM_THREAD = (unsigned char)'t', }; #define PARAM_DEFAULT_GENE (1L << 14) #define PARAM_DEFAULT_NUMBER (1L << 22) #define PARAM_DEFAULT_SEGMENT (1L << 6) #define PARAM_DEFAULT_THREAD (1L) long global_params[256]; /* 256 = ascii limit */ /* ============================================================================= * displayUsage * ============================================================================= */ static void displayUsage (const char* appName) { printf("Usage: %s [options]\n", appName); puts("\nOptions: (defaults)\n"); printf(" g <UINT> Length of [g]ene (%li)\n", PARAM_DEFAULT_GENE); printf(" n <UINT> Min [n]umber of segments (%li)\n", PARAM_DEFAULT_NUMBER); printf(" s <UINT> Length of [s]egment (%li)\n", PARAM_DEFAULT_SEGMENT); printf(" t <UINT> Number of 
[t]hreads (%li)\n", PARAM_DEFAULT_THREAD); puts(""); puts("The actual number of segments created may be greater than -n"); puts("in order to completely cover the gene."); exit(1); } /* ============================================================================= * setDefaultParams * ============================================================================= */ static void setDefaultParams( void ) { global_params[PARAM_GENE] = PARAM_DEFAULT_GENE; global_params[PARAM_NUMBER] = PARAM_DEFAULT_NUMBER; global_params[PARAM_SEGMENT] = PARAM_DEFAULT_SEGMENT; global_params[PARAM_THREAD] = PARAM_DEFAULT_THREAD; } /* ============================================================================= * parseArgs * ============================================================================= */ static void parseArgs (long argc, char* const argv[]) { long i; long opt; opterr = 0; setDefaultParams(); while ((opt = getopt(argc, argv, "g:n:s:t:")) != -1) { switch (opt) { case 'g': case 'n': case 's': case 't': global_params[(unsigned char)opt] = atol(optarg); break; case '?': default: opterr++; break; } } for (i = optind; i < argc; i++) { fprintf(stderr, "Non-option argument: %s\n", argv[i]); opterr++; } if (opterr) { displayUsage(argv[0]); } } /* ============================================================================= * main * ============================================================================= */ MAIN (argc,argv) { TIMER_T start; TIMER_T stop; /* Initialization */ parseArgs(argc, (char** const)argv); SIM_GET_NUM_CPU(global_params[PARAM_THREAD]); printf("Creating gene and segments... 
"); fflush(stdout); long geneLength = global_params[PARAM_GENE]; long segmentLength = global_params[PARAM_SEGMENT]; long minNumSegment = global_params[PARAM_NUMBER]; long numThread = global_params[PARAM_THREAD]; random_t* randomPtr; gene_t* genePtr; char* gene; segments_t* segmentsPtr; sequencer_t* sequencerPtr; TM_STARTUP(numThread); P_MEMORY_STARTUP(numThread); TM_THREAD_ENTER(); // TM_BEGIN(); randomPtr= random_alloc(); assert(randomPtr != NULL); random_seed(randomPtr, 0); genePtr = gene_alloc(geneLength); assert( genePtr != NULL); gene_create(genePtr, randomPtr); gene = genePtr->contents; segmentsPtr = segments_alloc(segmentLength, minNumSegment); assert(segmentsPtr != NULL); segments_create(segmentsPtr, genePtr, randomPtr); sequencerPtr = sequencer_alloc(geneLength, segmentLength, segmentsPtr); assert(sequencerPtr != NULL); //TM_END(); thread_startup(numThread); puts("done."); printf("Gene length = %li\n", genePtr->length); printf("Segment length = %li\n", segmentsPtr->length); printf("Number segments = %li\n", vector_getSize(segmentsPtr->contentsPtr)); fflush(stdout); /* Benchmark */ printf("Sequencing gene... "); fflush(stdout); // NB: Since ASF/PTLSim "REAL" is native execution, and since we are using // wallclock time, we want to be sure we read time inside the // simulator, or else we report native cycles spent on the benchmark // instead of simulator cycles. GOTO_SIM(); TIMER_READ(start); #ifdef OTM #pragma omp parallel { sequencer_run(sequencerPtr); } #else thread_start(sequencer_run, (void*)sequencerPtr); #endif TIMER_READ(stop); // NB: As above, timer reads must be done inside of the simulated region // for PTLSim/ASF GOTO_REAL(); puts("done."); printf("Time = %lf\n", TIMER_DIFF_SECONDS(start, stop)); fflush(stdout); /* Check result */ { char* sequence; int result; //TM_BEGIN(); sequence= sequencerPtr->sequence; result = strcmp(gene, sequence); //TM_END(); printf("Sequence matches gene: %s\n", (result ? 
"no" : "yes")); if (result) { printf("gene = %s\n", gene); printf("sequence = %s\n", sequence); } fflush(stdout); assert(strlen(sequence) >= strlen(gene)); } /* Clean up */ printf("Deallocating memory... "); fflush(stdout); sequencer_free(sequencerPtr); segments_free(segmentsPtr); gene_free(genePtr); random_free(randomPtr); puts("done."); fflush(stdout); TM_SHUTDOWN(); P_MEMORY_SHUTDOWN(); thread_shutdown(); MAIN_RETURN(0); } /* ============================================================================= * * End of genome.c * * ============================================================================= */
GB_unop__identity_uint8_uint64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_uint8_uint64 // op(A') function: GB_unop_tran__identity_uint8_uint64 // C type: uint8_t // A type: uint64_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint8_t z = (uint8_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = (uint8_t) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_uint8_uint64 ( uint8_t *Cx, // Cx and Ax may be aliased const uint64_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if 
GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; uint8_t z = (uint8_t) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint64_t aij = Ax [p] ; uint8_t z = (uint8_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_uint8_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
tensor_cpu-inl.h
/*!
 * Copyright (c) 2014 by Contributors
 * \file tensor_cpu-inl.h
 * \brief implementation of CPU host code
 * \author Bing Xu, Tianqi Chen
 */
#ifndef MSHADOW_TENSOR_CPU_INL_H_
#define MSHADOW_TENSOR_CPU_INL_H_
#include <cstring>
#include <functional>
#include <utility>
#include <vector>
#include "./base.h"
#include "./tensor.h"
#include "./packet-inl.h"
#include "./dot_engine-inl.h"
namespace mshadow {
// The CPU backend needs no per-device engine setup, so these specializations
// are intentionally empty.
template<> inline void InitTensorEngine<cpu>(int dev_id) {
}
template<> inline void ShutdownTensorEngine<cpu>(void) {
}
template<> inline void SetDevice<cpu>(int devid) {
}
// Create a CPU stream; the BLAS/DNN handle flags are ignored on the host.
template<> inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle,
                                              bool create_dnn_handle,
                                              int dev_id) {
  return new Stream<cpu>();
}
template<> inline void DeleteStream<cpu>(Stream<cpu> *stream) {
  delete stream;
}
// Print a Shape as a Python-style tuple, e.g. (2,3) or (4,) for 1-D.
template<int ndim>
inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) { // NOLINT(*)
  os << '(';
  for (int i = 0; i < ndim; ++i) {
    if (i != 0) os << ',';
    os << shape[i];
  }
  // python style tuple
  if (ndim == 1) os << ',';
  os << ')';
  return os;
}
// Raw host-memory allocation helpers, specialized per device tag.
template<typename xpu>
inline void *AllocHost_(size_t size);
template<typename xpu>
inline void FreeHost_(void * dptr);
#ifdef __CUDACC__
// GPU flavor: pinned (page-locked) host memory via cudaMallocHost, so
// host<->device copies can run asynchronously.
template<>
inline void *AllocHost_<gpu>(size_t size) {
  void *dptr;
  MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable));
  return dptr;
}
template<>
inline void FreeHost_<gpu>(void *dptr) {
  MSHADOW_CUDA_CALL(cudaFreeHost(dptr));
}
#endif
// CPU flavor: plain aligned allocation (pitch of 1 row, no padding).
template<>
inline void *AllocHost_<cpu>(size_t size) {
  size_t pitch;
  return packet::AlignedMallocPitch(&pitch, size, 1);
}
template<>
inline void FreeHost_<cpu>(void *dptr) {
  packet::AlignedFree(dptr);
}
// Allocate unpadded host storage for a tensor; the tensor must end up
// contiguous (stride_ == innermost size).
template<typename xpu, int dim, typename DType>
inline void AllocHost(Tensor<cpu, dim, DType> *obj) {
  obj->stride_ = obj->size(dim - 1);
  CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost";
  void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType));
  obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
// Release host storage allocated by AllocHost; nulls dptr_ to catch reuse.
template<typename xpu, int dim, typename DType>
inline void FreeHost(Tensor<cpu, dim, DType> *obj) {
  if (obj->dptr_ == NULL) {
    LOG(FATAL) << "FreeHost:: double free";
  }
  FreeHost_<xpu>(obj->dptr_);
  obj->dptr_ = NULL;
}
// Allocate tensor storage.  With pad=true each 2-D row is padded to the
// aligned pitch returned by AlignedMallocPitch (stride_ may exceed the row
// length); otherwise the allocation is contiguous.
template<int dim, typename DType>
inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) {
  size_t pitch;
  void *dptr;
  if (pad) {
    dptr = packet::AlignedMallocPitch
        (&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]);
    obj->stride_ = static_cast<index_t>(pitch / sizeof(DType));
  } else {
    obj->stride_ = obj->size(dim - 1);
    dptr = packet::AlignedMallocPitch
        (&pitch, obj->shape_.Size() * sizeof(DType), 1);
  }
  obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
// Convenience constructor: allocate a tensor of `shape` and fill every
// element with `initv` via a scalar map expression.
template<typename Device, typename DType, int dim>
inline Tensor<Device, dim, DType> NewTensor(const Shape<dim> &shape,
                                            DType initv,
                                            bool pad,
                                            Stream<Device> *stream_) {
  Tensor<Device, dim, DType> obj(shape);
  obj.stream_ = stream_;
  AllocSpace(&obj, pad);
  MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv));
  return obj;
}
// Release storage allocated by AllocSpace.
template<int dim, typename DType>
inline void FreeSpace(Tensor<cpu, dim, DType> *obj) {
  packet::AlignedFree(obj->dptr_);
  obj->dptr_ = NULL;
}
// CPU->CPU copy.  Fast path: one memcpy when both tensors are contiguous;
// otherwise copy row by row through the flattened 2-D view (handles padded
// strides).
template<int dim, typename DType>
inline void Copy(Tensor<cpu, dim, DType> _dst,
                 const Tensor<cpu, dim, DType> &_src,
                 Stream<cpu> *stream) {
  CHECK_EQ(_dst.shape_, _src.shape_)
      << "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_;
  if (_dst.CheckContiguous() && _src.CheckContiguous()) {
    memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size());
  } else {
    Tensor<cpu, 2, DType> dst = _dst.FlatTo2D();
    Tensor<cpu, 2, DType> src = _src.FlatTo2D();
    for (index_t y = 0; y < dst.size(0); ++y) {
      memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1));
    }
  }
}
// Evaluate an expression plan element-by-element over the destination's 2-D
// view, combining old/new values through the Saver policy (e.g. assign, +=).
template<typename Saver, typename R, int dim, typename DType, typename E>
inline void MapPlan(TRValue<R, cpu, dim, DType> *dst,
                    const expr::Plan<E, DType> &plan) {
  Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D();
  expr::Plan<R, DType> dplan = expr::MakePlan(dst->self());
#if (MSHADOW_USE_CUDA == 0)
  #pragma omp parallel for
#endif
  // temp remove openmp, as default setting throttles CPU
  for (openmp_index_t y = 0; y < shape[0]; ++y) {
    for (index_t x = 0; x < shape[1]; ++x) {
      // trust your compiler! -_- they will optimize it
      Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x));
    }
  }
}
// code to handle SSE optimization
// Generic engine: pass_check=false, always take the scalar MapPlan path.
template<bool pass_check, typename Saver,
         typename R, int dim,
         typename DType, typename E, int etype>
struct MapExpCPUEngine {
  inline static void Map(TRValue<R, cpu, dim, DType> *dst,
                         const expr::Exp<E, DType, etype> &exp) {
    MapPlan<Saver>(dst, MakePlan(exp.self()));
  }
};
// Packet-capable engine: use the SIMD packet plan only when BOTH the
// expression and the destination pass runtime alignment checks; otherwise
// fall back to the scalar path.
template<typename SV, int dim, typename DType, typename E, int etype>
struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>,
                       dim, DType, E, etype> {
  inline static void Map(Tensor<cpu, dim, DType> *dst,
                         const expr::Exp<E, DType, etype> &exp) {
    if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) &&
        expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>, MSHADOW_DEFAULT_PACKET>::Check(*dst)) {
      expr::MapPacketPlan<SV>(dst->self(),
                              expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self()));
    } else {
      MapPlan<SV>(dst, MakePlan(exp.self()));
    }
  }
};
// Entry point for dst = exp (under Saver policy): compile-time type check,
// runtime shape check (eshape[0]==0 means a scalar expression with no shape),
// then dispatch through MapExpCPUEngine.
template<typename Saver, typename R, int dim,
         typename DType, typename E, int etype>
inline void MapExp(TRValue<R, cpu, dim, DType> *dst,
                   const expr::Exp<E, DType, etype> &exp) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass>
      ::Error_All_Tensor_in_Exp_Must_Have_Same_Type();
  Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self());
  Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self());
  CHECK(eshape[0] == 0 || eshape == dshape)
      << "Assignment: Shape of Tensors are not consistent with target, "
      << "eshape: " << eshape << " dshape:" << dshape;
  MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass,
                  Saver, R, dim, DType, E, etype>
      ::Map(dst->ptrself(), exp);
}
// Reduce the flattened-2-D expression over axis 0, keeping the lowest
// (innermost) dimension: dst[x] = Save(scale * Reduce_y exp(y, x)).
template<typename Saver, typename Reducer,
         typename R, typename DType, typename E, int etype>
inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst,
                                const expr::Exp<E, DType, etype> &exp,
                                DType scale) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass>
      ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
  Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
      ::Check(exp.self()).FlatTo2D();
  Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
  CHECK_EQ(eshape[1], dshape[0])
      << "MapReduceKeepLowest::reduction dimension do not match";
  CHECK_NE(eshape[0], 0U) << "can not reduce over empty tensor";
  // execution
  expr::Plan<R, DType> dplan = MakePlan(dst->self());
  expr::Plan<E, DType> splan = MakePlan(exp.self());
#if (MSHADOW_USE_CUDA == 0)
  #pragma omp parallel for
#endif
  for (openmp_index_t x = 0; x < eshape[1]; ++x) {
    // seed with the first row so Reducer needs no neutral element here
    DType res = splan.Eval(0, x);
    for (index_t y = 1; y < eshape[0]; ++y) {
      Reducer::Reduce(res, splan.Eval(y, x));
    }
    Saver::template Save<DType>(dplan.REval(0, x), res * scale);
  }
}
// Reduce over every axis except `dimkeep`.  The expression is viewed as the
// equivalent 4-D shape (before, keep, between, innermost) so the kept axis
// can be iterated directly.
template<typename Saver, typename Reducer, int dimkeep,
         typename R, typename DType, typename E, int etype>
inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst,
                                 const expr::Exp<E, DType, etype> &exp,
                                 DType scale) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass>
      ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
  typedef Shape<expr::ExpInfo<E>::kDim> EShape;
  EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
      ::Check(exp.self());
  Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
  CHECK_EQ(eshape[dimkeep], dshape[0])
      << "MapReduceKeepHighDim::reduction dimension do not match";
  // use equivalent form
  Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep),
                           eshape[dimkeep],
                           eshape.ProdShape(dimkeep + 1, EShape::kSubdim),
                           eshape[EShape::kSubdim]);
  // execution
  expr::Plan<R, DType> dplan = MakePlan(dst->self());
  expr::Plan<E, DType> splan = MakePlan(exp.self());
#if (MSHADOW_USE_CUDA == 0)
  #pragma omp parallel for
#endif
  for (openmp_index_t c = 0; c < pshape[1]; ++c) {
    DType res; Reducer::SetInitValue(res);
    for (index_t n = 0; n < pshape[0]; ++n) {
      // reduce each (y, x) tile separately, then fold into the total
      DType tres; Reducer::SetInitValue(tres);
      for (index_t y = 0; y < pshape[2]; ++y) {
        for (index_t x = 0; x < pshape[3]; ++x) {
          Reducer::Reduce(tres,
                          splan.Eval((n * pshape[1] + c) * pshape[2] + y, x));
        }
      }
      Reducer::Reduce(res, tres);
    }
    Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale));
  }
}
// Numerically stable 1-D softmax: subtract the max before exponentiating.
template<typename DType>
inline void Softmax(Tensor<cpu, 1, DType> dst,
                    const Tensor<cpu, 1, DType> &energy) {
  DType mmax = energy[0];
  for (index_t x = 1; x < dst.size(0); ++x) {
    if (mmax < energy[x]) mmax = energy[x];
  }
  DType sum = DType(0.0f);
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] = std::exp(energy[x] - mmax);
    sum += dst[x];
  }
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] /= sum;
  }
}
// Softmax + cross-entropy gradient for a batch: copies src and subtracts 1
// at each row's labeled class (label[y] is cast to an integer class index).
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 2, DType> &src,
                        const Tensor<cpu, 1, DType> &label) {
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const index_t k = static_cast<int>(label[y]);
    for (index_t x = 0; x < dst.size(1); ++x) {
      if (x == k) {
        dst[y][k] = src[y][k] - 1.0f;
      } else {
        dst[y][x] = src[y][x];
      }
    }
  }
}
// Same gradient, but rows whose label equals ignore_label get a zero
// gradient instead.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 2, DType> &src,
                        const Tensor<cpu, 1, DType> &label,
                        const DType &ignore_label) {
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const index_t k = static_cast<int>(label[y]);
    for (index_t x = 0; x < dst.size(1); ++x) {
      if (static_cast<int>(ignore_label) == k) {
        dst[y][x] = 0.0f;
      } else {
        if (x == k) {
          dst[y][k] = src[y][k] - 1.0f;
        } else {
          dst[y][x] = src[y][x];
        }
      }
    }
  }
}
// 3-D variant: classes live on axis 1, with an extra spatial axis 2; one
// label per (row, position) pair.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
                        const Tensor<cpu, 3, DType> &src,
                        const Tensor<cpu, 2, DType> &label) {
#pragma omp parallel for
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const index_t k = static_cast<int>(label[y][n]);
      for (index_t x = 0; x < dst.size(1); ++x) {
        if (x == k) {
          dst[y][k][n] = src[y][k][n] - 1.0f;
        } else {
          dst[y][x][n] = src[y][x][n];
        }
      }
    }
  }
}
// 3-D variant with ignore_label: ignored positions get an all-zero gradient
// column.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
                        const Tensor<cpu, 3, DType> &src,
                        const Tensor<cpu, 2, DType> &label,
                        const DType &ignore_label) {
#pragma omp parallel for
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const index_t k = static_cast<int>(label[y][n]);
      if (k == static_cast<int>(ignore_label)) {
        for (index_t x = 0; x < dst.size(1); ++x) {
          dst[y][x][n] = DType(0.0f);
        }
      } else {
        for (index_t x = 0; x < dst.size(1); ++x) {
          if (x == k) {
            dst[y][k][n] = src[y][k][n] - 1.0f;
          } else {
            dst[y][x][n] = src[y][x][n];
          }
        }
      }
    }
  }
}
// Row-wise softmax over a 2-D batch, delegating to the 1-D kernel.
template<typename DType>
inline void Softmax(Tensor<cpu, 2, DType> dst,
                    const Tensor<cpu, 2, DType> &energy) {
  CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    Softmax(dst[y], energy[y]);
  }
}
// Gradient for the CLDL loss below: at each row's labeled column,
// dst = -sqrt((1-src_2)*(1-src_3)) / max(src_1, FLT_MIN); zero elsewhere.
// NOTE(review): "CLDL" is not documented in this file; the formulas match
// the CLDL() loss below, but confirm intended semantics with callers.
template<typename DType>
inline void CLDLGrad(Tensor<cpu, 2, DType> dst,
                     const Tensor<cpu, 2, DType> &src_1,
                     const Tensor<cpu, 2, DType> &src_2,
                     const Tensor<cpu, 2, DType> &src_3,
                     const Tensor<cpu, 1, DType> &label) {
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const index_t k = static_cast<int>(label[y]);
    for (index_t x = 0; x < dst.size(1); ++x) {
      if (x == k) {
        DType prod = (1-src_2[y][x])*(1-src_3[y][x]);
        dst[y][k] = -1 / std::max(src_1[y][x], (DType)FLT_MIN) *
            std::pow(prod, (DType)0.5);
      } else {
        dst[y][x] = 0;
      }
    }
  }
}
// Element-wise CLDL loss:
//   dst = sqrt((1-src_2)*(1-src_3)) * log(max(src_1, FLT_MIN)).
// FLT_MIN clamps the log argument away from zero.
template<typename DType>
inline void CLDL(Tensor<cpu, 1, DType> dst,
                 const Tensor<cpu, 1, DType> &src_1,
                 const Tensor<cpu, 1, DType> &src_2,
                 const Tensor<cpu, 1, DType> &src_3) {
  for (index_t x = 0; x < dst.size(0); ++x) {
    DType prod = (1-src_2[x])*(1-src_3[x]);
    DType p = std::pow(prod, (DType)0.5);
    dst[x] = p * std::log(std::max(src_1[x], (DType)(FLT_MIN)));
  }
}
// Row-wise CLDL over a 2-D batch, delegating to the 1-D kernel above.
template<typename DType>
inline void CLDL(Tensor<cpu, 2, DType> dst,
                 const Tensor<cpu, 2, DType> &src_1,
                 const Tensor<cpu, 2, DType> &src_2,
                 const Tensor<cpu, 2, DType> &src_3) {
  CHECK_EQ(dst.shape_, src_1.shape_) << "CLDL: shape mismatch";
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    CLDL(dst[y], src_1[y], src_2[y], src_3[y]);
  }
}
// 3-D softmax: normalizes over axis 1 (classes) independently for every
// (row y, position n) pair, with the usual max-subtraction for stability.
template<typename DType>
inline void Softmax(Tensor<cpu, 3, DType> dst,
                    const Tensor<cpu, 3, DType> &energy) {
  CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    for (index_t n = 0; n < dst.size(2); ++n) {
      DType mmax = energy[y][0][n];
      for (index_t x = 1; x < dst.size(1); ++x) {
        if (mmax < energy[y][x][n]) mmax = energy[y][x][n];
      }
      DType sum = DType(0.0f);
      for (index_t x = 0; x < dst.size(1); ++x) {
        dst[y][x][n] = std::exp(energy[y][x][n] - mmax);
        sum += dst[y][x][n];
      }
      for (index_t x = 0; x < dst.size(1); ++x) {
        dst[y][x][n] /= sum;
      }
    }
  }
}
// Scatter-add gradient of an embedding take: dst[index[y]] += src[y].
// Out-of-range indices are clamped to [0, K-1] rather than rejected.
template<typename IndexType, typename DType>
inline void AddTakeGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 1, IndexType>& index,
                        const Tensor<cpu, 2, DType> &src) {
  const int K = dst.shape_[0];
  for (index_t y = 0; y < index.size(0); ++y) {
    int j = index[y];
    // clamp the index instead of faulting on bad input
    if (j <= 0) j = 0;
    else if (j >= K) j = K - 1;
    dst[j] += src[y];
  }
}
// Variant used for large batches: `sorted` holds the destination rows in
// sorted order and `index` the matching source rows, so duplicate
// destinations are visited consecutively.  No clamping is performed here.
template<typename IndexType, typename DType>
inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst,
                                  const Tensor<cpu, 1, IndexType>& sorted,
                                  const Tensor<cpu, 1, IndexType>& index,
                                  const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < sorted.size(0); ++y) {
    dst[sorted[y]] += src[index[y]];
  }
}
// Row scatter-copy: dst[index[y]] = src[y] (element-wise, no accumulation).
template<typename IndexType, typename DType>
inline void IndexFill(Tensor<cpu, 2, DType> dst,
                      const Tensor<cpu, 1, IndexType>& index,
                      const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < index.size(0); ++y) {
    for (index_t j = 0; j < src.size(1); j++) {
      dst[index[y]][j] = src[y][j];
    }
  }
}
// In-place stable key/value co-sort via an index permutation.  Both tensors
// are copied into std::vectors, the permutation is stable-sorted by key,
// then both are written back in permuted order.
// NOTE(review): the fill loop uses `int i` against keys.size(0); would
// overflow for tensors longer than INT_MAX — confirm acceptable.
template<typename KDType, typename VDType>
inline void SortByKey(Tensor<cpu, 1, KDType> keys,
                      Tensor<cpu, 1, VDType> values,
                      bool is_ascend) {
  CHECK_EQ(keys.CheckContiguous(), true);
  CHECK_EQ(values.CheckContiguous(), true);
  CHECK_EQ(keys.size(0), values.size(0))
      << "The sizes of key/value are not equal! keys_size: " << keys.size(0)
      << "values_size: " << values.size(0);
  std::vector<size_t> idx(keys.size(0));
  std::vector<KDType> keys_vec(keys.size(0));
  std::vector<VDType> values_vec(values.size(0));
  for (int i = 0; i < keys.size(0); i++) {
    idx[i] = i;
    keys_vec[i] = keys[i];
    values_vec[i] = values[i];
  }
  if (is_ascend) {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                     {return keys_vec[i1] < keys_vec[i2]; });
  } else {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                     {return keys_vec[i1] > keys_vec[i2]; });
  }
  for (index_t i = 0; i < values.size(0); i++) {
    keys[i] = keys_vec[idx[i]];
    values[i] = values_vec[idx[i]];
  }
}
// Sort `values` within each segment: two stable sorts (first by value, then
// by segment id) leave values ascending inside every segment.
template<typename Device, typename VDType, typename SDType>
inline void VectorizedSort(Tensor<Device, 1, VDType> values,
                           Tensor<Device, 1, SDType> segments) {
  // We can sort each segments using two stable sorts
  SortByKey(values, segments, true);
  SortByKey(segments, values, true);
}
// blas related
// dst[0] = lhs . rhs via the device BLAS engine; dst must be length 1.
template<typename Device, typename DType>
inline void VectorDot(Tensor<Device, 1, DType> dst,
                      const Tensor<Device, 1, DType> &lhs,
                      const Tensor<Device, 1, DType> &rhs) {
  CHECK_EQ(lhs.size(0), rhs.size(0)) << "VectorDot: Shape mismatch";
  CHECK_EQ(dst.size(0), 1U) << "VectorDot: expect dst to be scalar";
  expr::BLASEngine<Device, DType>::SetStream(lhs.stream_);
  mshadow::expr::BLASEngine<Device, DType>::dot
      (lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_);
}
// Batched dst = alpha * op(lhs) * op(rhs) + beta * dst over axis 0.
// All three tensors must be contiguous; `workspace` must hold at least
// 3 * batch_size pointers (scratch for the batched BLAS backend).
// The operand order is swapped (rhs first) so a row-major product can be
// expressed through a column-major BLAS interface.
template<bool transpose_left, bool transpose_right,
         typename Device, typename DType>
inline void BatchGEMM(Tensor<Device, 3, DType> dst,
                      const Tensor<Device, 3, DType> &lhs,
                      const Tensor<Device, 3, DType> &rhs,
                      DType alpha, DType beta,
                      Tensor<Device, 1, DType*> workspace) {
  index_t batch_size = dst.shape_[0];
  expr::BLASEngine<Device, DType>::SetStream(dst.stream_);
  // effective (post-transpose) operand shapes, for validation only
  Shape<3> sleft = transpose_left ?
      Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1]) : lhs.shape_;
  Shape<3> sright = transpose_right ?
      Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1]) : rhs.shape_;
  CHECK_EQ(dst.CheckContiguous(), true);
  CHECK_EQ(lhs.CheckContiguous(), true);
  CHECK_EQ(rhs.CheckContiguous(), true);
  CHECK(sleft[0] == batch_size && sright[0] == batch_size)
      << "BatchGEMM: batchsize must be equal."
      << "dst: " << dst.shape_ << "\n"
      << "lhs: " << sleft << "\n"
      << "rhs: " << sright << "\n";
  CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] &&
        sleft[2] == sright[1])
      << "BatchGEMM: matrix shape mismatch"
      << "dst: " << dst.shape_ << "\n"
      << "lhs: " << sleft << "\n"
      << "rhs: " << sright << "\n";
  CHECK(workspace.size(0) >= 3 * batch_size)
      << "Workspace Size must be bigger than " << 3 * batch_size;
  CHECK_EQ(workspace.CheckContiguous(), true);
  // use column major argument to compatible with most BLAS
  expr::BLASEngine<Device, DType>::batched_gemm
      (dst.stream_,
       transpose_right, transpose_left,
       transpose_right ? rhs.size(1) : rhs.size(2),
       transpose_left ? lhs.size(2) : lhs.size(1),
       transpose_right ? rhs.size(2) : rhs.size(1),
       alpha,
       rhs.dptr_, rhs.stride_,
       lhs.dptr_, lhs.stride_,
       beta,
       dst.dptr_, dst.stride_, batch_size,
       workspace.dptr_);
}
}  // namespace mshadow
#endif  // MSHADOW_TENSOR_CPU_INL_H_
irbuilder_for_unsigned_dynamic.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@workshareloop_unsigned_dynamic( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 33, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: 
%[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: store i32 1, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: store i32 %[[DOTCOUNT]], i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 1073741859, i32 1, i32 %[[DOTCOUNT]], i32 1, i32 1) // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER_OUTER_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_HEADER:.*]]: // CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ %[[LB:.+]], %[[OMP_LOOP_PREHEADER_OUTER_COND]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_COND]]: // CHECK-NEXT: %[[UB:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[UB]] // CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_PREHEADER_OUTER_COND]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[OMP_LOOP_IV]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP3:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = zext i32 %[[TMP4]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP3]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP5:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP6:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = zext i32 %[[TMP7]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = 
getelementptr inbounds float, float* %[[TMP6]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP8:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP5]], %[[TMP8]] // CHECK-NEXT: %[[TMP9:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = zext i32 %[[TMP10]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP9]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP11:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP11]] // CHECK-NEXT: %[[TMP12:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP13:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = zext i32 %[[TMP13]] to i64 // CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP12]], i64 %[[IDXPROM7]] // CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_INC]]: // CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1 // CHECK-NEXT: br label %[[OMP_LOOP_HEADER]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_EXIT:.*]]: // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]]) // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER_OUTER_COND]]: // CHECK-NEXT: %[[TMP14:.+]] = call i32 @__kmpc_dispatch_next_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]]) // CHECK-NEXT: %[[TMP15:.+]] = icmp ne i32 %[[TMP14]], 0 // CHECK-NEXT: %[[TMP16:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: 
%[[LB]] = sub i32 %[[TMP16]], 1 // CHECK-NEXT: br i1 %[[TMP15]], label %[[OMP_LOOP_HEADER]], label %[[OMP_LOOP_EXIT]] // CHECK-NEXT: } extern "C" void workshareloop_unsigned_dynamic(float *a, float *b, float *c, float *d) { #pragma omp for schedule(dynamic) for (unsigned i = 33; i < 32000000; i += 7) { a[i] = b[i] * c[i] * d[i]; } } #endif // HEADER // CHECK-LABEL: define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: store i32 32000000, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 7, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = icmp ult i32 %[[TMP4]], %[[TMP5]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub i32 %[[TMP6]], %[[TMP7]] // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // 
CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 7, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 45} // CHECK: ![[META2:[0-9]+]] =
GxB_Global_Option_get.c
//------------------------------------------------------------------------------
// GxB_Global_Option_get: get a global default option for all future matrices
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Every case follows the same varargs protocol: va_start, fetch exactly one
// output pointer with va_arg, va_end, NULL-check the pointer (which may return
// early), then write the result through it.  va_end is always called before
// GB_RETURN_IF_NULL, so no path can return with the va_list still open.

#include "GB.h"

GrB_Info GxB_Global_Option_get      // gets the current global option
(
    GxB_Option_Field field,         // option to query
    ...                             // return value of the global option
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // standard GraphBLAS entry boilerplate (macro from GB.h)
    GB_WHERE1 ("GxB_Global_Option_get (field, &value)") ;

    //--------------------------------------------------------------------------
    // get the option
    //--------------------------------------------------------------------------

    va_list ap ;

    switch (field)
    {

        //----------------------------------------------------------------------
        // matrix format
        //----------------------------------------------------------------------

        case GxB_HYPER_SWITCH :
        {
            va_start (ap, field) ;
            double *hyper_switch = va_arg (ap, double *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (hyper_switch) ;
            (*hyper_switch) = (double) GB_Global_hyper_switch_get ( ) ;
        }
        break ;

        case GxB_BITMAP_SWITCH :
        {
            // output is an array of GxB_NBITMAP_SWITCH entries, one per
            // sparsity level
            va_start (ap, field) ;
            double *bitmap_switch = va_arg (ap, double *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (bitmap_switch) ;
            for (int k = 0 ; k < GxB_NBITMAP_SWITCH ; k++)
            {
                double b = (double) GB_Global_bitmap_switch_get (k) ;
                bitmap_switch [k] = b ;
            }
        }
        break ;

        case GxB_FORMAT :
        {
            va_start (ap, field) ;
            GxB_Format_Value *format = va_arg (ap, GxB_Format_Value *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (format) ;
            (*format) = (GB_Global_is_csc_get ( )) ? GxB_BY_COL : GxB_BY_ROW ;
        }
        break ;

        //----------------------------------------------------------------------
        // mode from GrB_init (blocking or non-blocking)
        //----------------------------------------------------------------------

        case GxB_MODE :
        {
            va_start (ap, field) ;
            GrB_Mode *mode = va_arg (ap, GrB_Mode *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (mode) ;
            (*mode) = GB_Global_mode_get ( ) ;
        }
        break ;

        //----------------------------------------------------------------------
        // default number of threads
        //----------------------------------------------------------------------

        case GxB_GLOBAL_NTHREADS :      // same as GxB_NTHREADS
        {
            va_start (ap, field) ;
            int *nthreads_max = va_arg (ap, int *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (nthreads_max) ;
            (*nthreads_max) = GB_Global_nthreads_max_get ( ) ;
        }
        break ;

        //----------------------------------------------------------------------
        // default chunk size
        //----------------------------------------------------------------------

        case GxB_GLOBAL_CHUNK :         // same as GxB_CHUNK
        {
            va_start (ap, field) ;
            double *chunk = va_arg (ap, double *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (chunk) ;
            (*chunk) = GB_Global_chunk_get ( ) ;
        }
        break ;

        //----------------------------------------------------------------------
        // memory pool control
        //----------------------------------------------------------------------

        case GxB_MEMORY_POOL :
        {
            // output is an array of 64 limits, one per power-of-2 size class
            va_start (ap, field) ;
            int64_t *free_pool_limit = va_arg (ap, int64_t *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (free_pool_limit) ;
            for (int k = 0 ; k < 64 ; k++)
            {
                free_pool_limit [k] = GB_Global_free_pool_limit_get (k) ;
            }
        }
        break ;

        //----------------------------------------------------------------------
        // SuiteSparse:GraphBLAS version, date, license, etc
        //----------------------------------------------------------------------

        // the char* cases return pointers to static strings; callers must not
        // free or modify them

        case GxB_LIBRARY_NAME :
        {
            va_start (ap, field) ;
            char **name = va_arg (ap, char **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (name) ;
            (*name) = GxB_IMPLEMENTATION_NAME ;
        }
        break ;

        case GxB_LIBRARY_VERSION :
        {
            // output is an int array of length 3: [major, minor, sub]
            va_start (ap, field) ;
            int *version = va_arg (ap, int *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (version) ;
            version [0] = GxB_IMPLEMENTATION_MAJOR ;
            version [1] = GxB_IMPLEMENTATION_MINOR ;
            version [2] = GxB_IMPLEMENTATION_SUB ;
        }
        break ;

        case GxB_LIBRARY_DATE :
        {
            va_start (ap, field) ;
            char **date = va_arg (ap, char **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (date) ;
            (*date) = GxB_IMPLEMENTATION_DATE ;
        }
        break ;

        case GxB_LIBRARY_ABOUT :
        {
            va_start (ap, field) ;
            char **about = va_arg (ap, char **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (about) ;
            (*about) = GxB_IMPLEMENTATION_ABOUT ;
        }
        break ;

        case GxB_LIBRARY_LICENSE :
        {
            va_start (ap, field) ;
            char **license = va_arg (ap, char **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (license) ;
            (*license) = GxB_IMPLEMENTATION_LICENSE ;
        }
        break ;

        case GxB_LIBRARY_COMPILE_DATE :
        {
            va_start (ap, field) ;
            char **compile_date = va_arg (ap, char **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (compile_date) ;
            (*compile_date) = __DATE__ ;
        }
        break ;

        case GxB_LIBRARY_COMPILE_TIME :
        {
            va_start (ap, field) ;
            char **compile_time = va_arg (ap, char **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (compile_time) ;
            (*compile_time) = __TIME__ ;
        }
        break ;

        case GxB_LIBRARY_URL :
        {
            va_start (ap, field) ;
            char **url = va_arg (ap, char **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (url) ;
            (*url) = "http://faculty.cse.tamu.edu/davis/GraphBLAS" ;
        }
        break ;

        //----------------------------------------------------------------------
        // GraphBLAS API version, date, etc
        //----------------------------------------------------------------------

        case GxB_API_VERSION :
        {
            // output is an int array of length 3: [major, minor, sub]
            va_start (ap, field) ;
            int *api_version = va_arg (ap, int *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (api_version) ;
            api_version [0] = GxB_SPEC_MAJOR ;
            api_version [1] = GxB_SPEC_MINOR ;
            api_version [2] = GxB_SPEC_SUB ;
        }
        break ;

        case GxB_API_DATE :
        {
            va_start (ap, field) ;
            char **api_date = va_arg (ap, char **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (api_date) ;
            (*api_date) = GxB_SPEC_DATE ;
        }
        break ;

        case GxB_API_ABOUT :
        {
            va_start (ap, field) ;
            char **api_about = va_arg (ap, char **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (api_about) ;
            (*api_about) = GxB_SPEC_ABOUT ;
        }
        break ;

        case GxB_API_URL :
        {
            va_start (ap, field) ;
            char **api_url = va_arg (ap, char **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (api_url) ;
            (*api_url) = "http://graphblas.org" ;
        }
        break ;

        //----------------------------------------------------------------------
        // compiler used to compile GraphBLAS
        //----------------------------------------------------------------------

        case GxB_COMPILER_VERSION :
        {
            // output is an int array of length 3: [major, minor, sub]
            va_start (ap, field) ;
            int *compiler_version = va_arg (ap, int *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (compiler_version) ;
            compiler_version [0] = GB_COMPILER_MAJOR ;
            compiler_version [1] = GB_COMPILER_MINOR ;
            compiler_version [2] = GB_COMPILER_SUB ;
        }
        break ;

        case GxB_COMPILER_NAME :
        {
            va_start (ap, field) ;
            char **compiler_name = va_arg (ap, char **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (compiler_name) ;
            (*compiler_name) = GB_COMPILER_NAME ;
        }
        break ;

        //----------------------------------------------------------------------
        // controlling diagnostic output
        //----------------------------------------------------------------------

        case GxB_BURBLE :
        {
            va_start (ap, field) ;
            bool *burble = va_arg (ap, bool *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (burble) ;
            (*burble) = GB_Global_burble_get ( ) ;
        }
        break ;

        case GxB_PRINTF :
        {
            // returns the current printf-style diagnostic function pointer
            va_start (ap, field) ;
            void **printf_func = va_arg (ap, void **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (printf_func) ;
            (*printf_func) = (void *) GB_Global_printf_get ( ) ;
        }
        break ;

        case GxB_FLUSH :
        {
            // returns the current flush function pointer
            va_start (ap, field) ;
            void **flush_func = va_arg (ap, void **) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (flush_func) ;
            (*flush_func) = (void *) GB_Global_flush_get ( ) ;
        }
        break ;

        case GxB_PRINT_1BASED :
        {
            va_start (ap, field) ;
            bool *onebased = va_arg (ap, bool *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (onebased) ;
            (*onebased) = GB_Global_print_one_based_get ( ) ;
        }
        break ;

        //----------------------------------------------------------------------
        // CUDA (DRAFT: in progress, do not use)
        //----------------------------------------------------------------------

        case GxB_GLOBAL_GPU_CONTROL :       // same as GxB_GPU_CONTROL
        {
            va_start (ap, field) ;
            GrB_Desc_Value *gpu_control = va_arg (ap, GrB_Desc_Value *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (gpu_control) ;
            (*gpu_control) = GB_Global_gpu_control_get ( ) ;
        }
        break ;

        case GxB_GLOBAL_GPU_CHUNK :         // same as GxB_GPU_CHUNK
        {
            va_start (ap, field) ;
            double *gpu_chunk = va_arg (ap, double *) ;
            va_end (ap) ;
            GB_RETURN_IF_NULL (gpu_chunk) ;
            (*gpu_chunk) = GB_Global_gpu_chunk_get ( ) ;
        }
        break ;

        default :

            // unrecognized field
            return (GrB_INVALID_VALUE) ;
    }

    // OpenMP memory fence: make the global state just read consistent across
    // threads before returning
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
mkl_quantized_conv_ops.h
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_ #define TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_ #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/tensor.h" #ifdef INTEL_MKL namespace tensorflow { template <class T> float MklFloatForOneQuantizedLevel(float range_min, float range_max) { int64 highest = static_cast<int64>(Eigen::NumTraits<T>::highest()); int64 lowest = static_cast<int64>(Eigen::NumTraits<T>::lowest()); // Adjusting for having a symmetric range. // for example: for 8-bit [-127, 127] as opposed to [-128, 127]. 
if (lowest < -highest) ++lowest; const float float_for_one_quantized_level = (range_max - range_min) / (highest - lowest); return float_for_one_quantized_level; } template <class T1, class T2, class T3> void MklQuantizationRangeForMultiplication(float min_a, float max_a, float min_b, float max_b, float* min_c, float* max_c) { const float a_float_for_one_quant_level = MklFloatForOneQuantizedLevel<T1>(min_a, max_a); const float b_float_for_one_quant_level = MklFloatForOneQuantizedLevel<T2>(min_b, max_b); const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest()); const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest()); const float c_float_for_one_quant_level = a_float_for_one_quant_level * b_float_for_one_quant_level; *min_c = c_float_for_one_quant_level * c_lowest; *max_c = c_float_for_one_quant_level * c_highest; } template <class T1, class T2, class T3> void MklQuantizationRangeForMultiplication(float min_a, float max_a, const Tensor& min_b_vector, const Tensor& max_b_vector, Tensor** min_c_vector, Tensor** max_c_vector) { DCHECK(min_b_vector.NumElements() == (*min_c_vector)->NumElements()); DCHECK(max_b_vector.NumElements() == (*max_c_vector)->NumElements()); size_t n_channel = min_b_vector.NumElements(); const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest()); const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest()); const float* min_b = min_b_vector.flat<float>().data(); const float* max_b = max_b_vector.flat<float>().data(); float* min_c = (*min_c_vector)->flat<float>().data(); float* max_c = (*max_c_vector)->flat<float>().data(); #ifndef ENABLE_MKLDNN_THREADPOOL #pragma omp parallel for #endif // !ENABLE_MKLDNN_THREADPOOL // TODO: Add eigen parallel_for for (int64_t n = 0; n < n_channel; ++n) { float a_float_for_one_quant_level = MklFloatForOneQuantizedLevel<T1>(min_a, max_a); float b_float_for_one_quant_level = MklFloatForOneQuantizedLevel<T2>(min_b[n], max_b[n]); float 
c_float_for_one_quant_level = a_float_for_one_quant_level * b_float_for_one_quant_level; min_c[n] = c_float_for_one_quant_level * c_lowest; max_c[n] = c_float_for_one_quant_level * c_highest; } } } // namespace tensorflow #endif // INTEL_MKL #endif // TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_
3d25pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE: this is the classic glibc-manual implementation; it MODIFIES *y
 * while normalizing the carry, so callers must not rely on y afterwards. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates two time planes of a Nz x Ny x Nx grid plus a
 * coefficient grid, runs TESTS timed repetitions of the tiled 25-point
 * stencil (CLooG-generated loop nest), and reports the per-test wall time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz (and Nt below) are left uninitialized when too few
   * command-line arguments are supplied — using them then is UB. */
  if (argc > 3)
  {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A[0]/A[1] are the two ping-pong time planes; roc2 holds per-point
   * coefficients. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  /* NOTE(review): this first allocation is overwritten (and leaked) by the
   * realloc-style reassignment a few lines below. */
  double ***roc2 = (double ***) malloc(sizeof(double**));

  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);

  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  /* NOTE(review): tile_size is never freed before return. */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 16;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  /* NOTE(review): loops start at index 1, so plane/row/column 0 is left
   * uninitialized even though the stencil below can read index 0 at the
   * halo boundary. */
  //  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* 25-point stencil coefficients: center, then rings at distance 1..4. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

    /* NOTE(review): the original file carried a stray copy of glibc's
     * <stdc-predef.h> license/header comment here, injected by the PLUTO
     * source-to-source pass; it contained no code and is condensed to this
     * note. */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
/* Time-tiled, space-tiled sweep generated by CLooG; t5 is the logical time
 * step, (t6,t7,t8) recover the (z,y,x) point via the -4*t5 skew. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=floord(Nt-1,2);t1++) {
    lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
    ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(8*t1+Ny+7,16)),floord(16*t2+Ny+3,16)),floord(16*t1-16*t2+Nz+Ny+5,16));t3++) {
        for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32)),ceild(16*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(8*t1+Nx+7,32)),floord(16*t2+Nx+3,32)),floord(16*t3+Nx+3,32)),floord(16*t1-16*t2+Nz+Nx+5,32));t4++) {
          for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),4*t3+2),8*t4+6);t5++) {
            for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
                lbv=max(32*t4,4*t5+4);
                ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* Leapfrog update: new = 2*cur - old + roc2 * (25-point Laplacian-like sum). */
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] =
                    (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)])
                      - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)])
                     + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]
                        * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)])
                          + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)]
                            + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1])))
                          + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)]
                            + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2])))
                          + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)]
                            + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3])))
                          + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)]
                            + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4])
                            + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  /* NOTE(review): the top-level pointer array A itself is never freed. */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);

  return 0;
}
GB_memcpy.c
//------------------------------------------------------------------------------ // GB_memcpy: parallel memcpy //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Note that this function uses its own hard-coded chunk size. #include "GB.h" #define GB_MEM_CHUNK (1024*1024) void GB_memcpy // parallel memcpy ( void *dest, // destination const void *src, // source size_t n, // # of bytes to copy int nthreads // # of threads to use ) { if (nthreads <= 1 || n <= GB_MEM_CHUNK) { //---------------------------------------------------------------------- // memcpy using a single thread //---------------------------------------------------------------------- memcpy (dest, src, n) ; } else { //---------------------------------------------------------------------- // memcpy using a multiple threads //---------------------------------------------------------------------- nthreads = GB_IMIN (nthreads, n / GB_MEM_CHUNK) ; size_t nchunks = 1 + (n / GB_MEM_CHUNK) ; GB_void *pdest = dest ; const GB_void *psrc = src ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (k = 0 ; k < nchunks ; k++) { size_t start = k * GB_MEM_CHUNK ; if (start < n) { size_t chunk = GB_IMIN (n - start, GB_MEM_CHUNK) ; memcpy (pdest + start, psrc + start, chunk) ; } } } }
viter.c
#include "libimagequant.h"
#include "pam.h"
#include "viter.h"
#include "nearest.h"
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* Single-threaded fallback: behave as if exactly one OpenMP thread exists. */
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif

/*
 * Voronoi iteration: new palette color is computed from weighted average of colors that map to that palette entry.
 */

/* Zero the per-thread accumulator array: each thread owns a stripe of
 * (VITER_CACHE_LINE_GAP + map->colors) entries (the gap keeps stripes on
 * separate cache lines). */
LIQ_PRIVATE void viter_init (const colormap * map, const unsigned int max_threads, viter_state average_color[])
{
  memset (average_color, 0, sizeof (average_color[0]) * (VITER_CACHE_LINE_GAP + map->colors) * max_threads);
}

/* Accumulate one weighted color sample into the accumulator slot for palette
 * entry `match` inside the calling thread's stripe. */
LIQ_PRIVATE void viter_update_color (const f_pixel acolor, const float value, const colormap * map, unsigned int match, const unsigned int thread, viter_state average_color[])
{
  /* offset into this thread's private stripe */
  match += thread * (VITER_CACHE_LINE_GAP + map->colors);
  average_color[match].a += acolor.a * value;
  average_color[match].r += acolor.r * value;
  average_color[match].g += acolor.g * value;
  average_color[match].b += acolor.b * value;
  average_color[match].total += value;
}

/* Reduce all threads' accumulator stripes and replace each non-fixed palette
 * color with the weighted average of the histogram colors mapped to it. */
LIQ_PRIVATE void viter_finalize (colormap * map, const unsigned int max_threads, const viter_state average_color[])
{
  for (unsigned int i = 0; i < map->colors; i++) {
    double a = 0, r = 0, g = 0, b = 0, total = 0;

    // Aggregate results from all threads
    for (unsigned int t = 0; t < max_threads; t++) {
      const unsigned int offset = (VITER_CACHE_LINE_GAP + map->colors) * t + i;

      a += average_color[offset].a;
      r += average_color[offset].r;
      g += average_color[offset].g;
      b += average_color[offset].b;
      total += average_color[offset].total;
    }

    if (total && !map->palette[i].fixed) {
      map->palette[i].acolor = (f_pixel) {
        .a = a / total,.r = r / total,.g = g / total,.b = b / total,
      };
    } else {
      /* unused (or fixed) entry: give it a small, index-dependent popularity */
      total = i / 1024.0;
    }

    map->palette[i].popularity = total;
  }
}

/* One Voronoi (k-means style) iteration over the histogram: map every
 * histogram color to its nearest palette entry, accumulate per-thread
 * weighted sums, then recompute the palette. Returns the weighted mean
 * mapping error. */
LIQ_PRIVATE double viter_do_iteration (histogram * hist, colormap * const map, const float min_opaque_val, viter_callback callback, const bool fast_palette)
{
  viter_state *average_color;
  const unsigned int max_threads = omp_get_max_threads ();
  double total_diff = 0;

  /* NOTE(review): g_alloca places (gap + colors) * max_threads states on the
   * stack — presumably bounded by the library's palette-size limit; confirm. */
  average_color = g_alloca (sizeof (viter_state) * (VITER_CACHE_LINE_GAP + map->colors) * max_threads);
  viter_init (map, max_threads, average_color);

  {
    struct nearest_map *const n = nearest_init (map, fast_palette);
    hist_item *const achv = hist->achv;
    const int hist_size = hist->size;

    int j;
    /* default(none): achv, n, map, min_opaque_val and hist_size are
     * const-qualified and therefore predetermined shared by OpenMP. */
#pragma omp parallel for if (hist_size > 3000) \
  schedule(static) default(none) shared(average_color,callback) reduction(+:total_diff)
    for (j = 0; j < hist_size; j++) {
      float diff;
      /* start the search from the entry this color matched last iteration */
      unsigned int match = nearest_search (n, achv[j].acolor, achv[j].tmp.likely_colormap_index, min_opaque_val, &diff);
      achv[j].tmp.likely_colormap_index = match;
      total_diff += diff * achv[j].perceptual_weight;

      viter_update_color (achv[j].acolor, achv[j].perceptual_weight, map, match, omp_get_thread_num (), average_color);

      if (callback)
        callback (&achv[j], diff);
    }

    nearest_free (n);
  }

  viter_finalize (map, max_threads, average_color);

  return total_diff / hist->total_perceptual_weight;
}
naive_math_impl.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
// NOTE(review): memset/malloc/free/printf are used below but <cstring>,
// <cstdlib> and <cstdio> are not included — this relies on transitive
// includes from <algorithm>/<cmath>.
#include <algorithm>
#include <cmath>

// Repack an M x K row-major matrix into "c4" block layout: rows are grouped
// 4 at a time and interleaved column-by-column; missing rows (M not a
// multiple of 4) are read from a zero buffer, and when pack_k is true the K
// dimension is zero-padded up to a multiple of 4.
template <typename type>
static void basic_trans_mat_to_c4(const type* input,
                                  type* output,
                                  const int ldin,
                                  const int M,
                                  const int K,
                                  bool pack_k) {
  const int m_round = (M + 3) / 4 * 4;
  int k_round = (K + 3) / 4 * 4;
  if (!pack_k) {
    k_round = K;
  }
  const int m_loop = m_round / 4;
  type* zero_buf = new type[K];
  memset(zero_buf, 0, K * sizeof(type));
  for (int i = 0; i < m_loop; ++i) {
    const type* in0 = input + i * 4 * ldin;
    const type* in1 = in0 + ldin;
    const type* in2 = in1 + ldin;
    const type* in3 = in2 + ldin;
    // Tail block: redirect out-of-range rows to the zero buffer.
    // The case fallthrough is intentional (3 missing rows implies 2 and 1).
    if (4 * (i + 1) - M > 0) {
      switch (4 * (i + 1) - M) {
        case 3:
          in1 = zero_buf;
        case 2:
          in2 = zero_buf;
        case 1:
          in3 = zero_buf;
        default:
          break;
      }
    }
    for (int j = 0; j < K; ++j) {
      *output++ = *in0++;
      *output++ = *in1++;
      *output++ = *in2++;
      *output++ = *in3++;
    }
    // K padding (only reached when pack_k made k_round > K).
    for (int j = K; j < k_round; ++j) {
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
    }
  }
  delete[] zero_buf;
}

// Same repack as basic_trans_mat_to_c4, but with 8-row groups ("c8" layout).
template <typename type>
static void basic_trans_mat_to_c8(const type* input,
                                  type* output,
                                  const int ldin,
                                  const int M,
                                  const int K,
                                  bool pack_k) {
  const int m_round = (M + 7) / 8 * 8;
  int k_round = (K + 7) / 8 * 8;
  if (!pack_k) {
    k_round = K;
  }
  const int m_loop = m_round / 8;
  // NOTE(review): VLA — non-standard C++ and a stack-overflow risk for large
  // K; the c4 variant heap-allocates instead.
  type zero_buf[K];
  memset(zero_buf, 0, K * sizeof(type));
  for (int i = 0; i < m_loop; ++i) {
    const type* in0 = input + i * 8 * ldin;
    const type* in1 = in0 + ldin;
    const type* in2 = in1 + ldin;
    const type* in3 = in2 + ldin;
    const type* in4 = in3 + ldin;
    const type* in5 = in4 + ldin;
    const type* in6 = in5 + ldin;
    const type* in7 = in6 + ldin;
    // Tail block: intentional cascading fallthrough, as in the c4 variant.
    if (8 * (i + 1) - M > 0) {
      switch (8 * (i + 1) - M) {
        case 7:
          in1 = zero_buf;
        case 6:
          in2 = zero_buf;
        case 5:
          in3 = zero_buf;
        case 4:
          in4 = zero_buf;
        case 3:
          in5 = zero_buf;
        case 2:
          in6 = zero_buf;
        case 1:
          in7 = zero_buf;
        default:
          break;
      }
    }
    for (int j = 0; j < K; ++j) {
      *output++ = *in0++;
      *output++ = *in1++;
      *output++ = *in2++;
      *output++ = *in3++;
      *output++ = *in4++;
      *output++ = *in5++;
      *output++ = *in6++;
      *output++ = *in7++;
    }
    for (int j = K; j < k_round; ++j) {
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
    }
  }
}

// Reference GEMM (c = alpha*a*b + beta*c + bias, optional ReLU) whose result
// is written out in c4 block layout via basic_trans_mat_to_c4.
template <typename type, typename type2>
static void basic_gemm_c4(bool trans_a,
                          bool trans_b,
                          int m,
                          int n,
                          int k,
                          type2 alpha,
                          const type* a,
                          int lda,
                          const type* b,
                          int ldb,
                          type2 beta,
                          type2* c,
                          int ldc,
                          const type2* bias,
                          bool flag_bias = false,
                          bool flag_relu = false) {
  // Compute into a row-major scratch buffer first, repack at the end.
  type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
  memset(tmp_c, 0, m * ldc * sizeof(type2));
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    for (int j = 0; j < n; ++j) {
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
      if (flag_relu) {
        tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
      } else {
        tmp_c[i * ldc + j] = tmp;
      }
    }
  }
  //! trans c to c4
  basic_trans_mat_to_c4(tmp_c, c, ldc, m, n, false);
  free(tmp_c);
}

// Reference GEMM identical to basic_gemm_c4, except the result is repacked
// into c8 block layout.
template <typename type, typename type2>
static void basic_gemm_c8(bool trans_a,
                          bool trans_b,
                          int m,
                          int n,
                          int k,
                          type2 alpha,
                          const type* a,
                          int lda,
                          const type* b,
                          int ldb,
                          type2 beta,
                          type2* c,
                          int ldc,
                          const type2* bias,
                          bool flag_bias = false,
                          bool flag_relu = false) {
  type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
  memset(tmp_c, 0, m * ldc * sizeof(type2));
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    for (int j = 0; j < n; ++j) {
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
      if (flag_relu) {
        tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
      } else {
        tmp_c[i * ldc + j] = tmp;
      }
    }
  }
  //! trans c to c8
  basic_trans_mat_to_c8(tmp_c, c, ldc, m, n, false);
  free(tmp_c);
}

// Reference GEMM with activation: c = act(alpha*a*b + beta*c + bias).
// flag_act: 0 = none, 1 = relu, 2 = relu6 (clamp at `six`), 4 = leaky relu,
// 10 = hard swish (uses scale/offset/threshold).
template <typename type, typename type2>
static void basic_gemm(bool trans_a,
                       bool trans_b,
                       int m,
                       int n,
                       int k,
                       type2 alpha,
                       const type* a,
                       int lda,
                       const type* b,
                       int ldb,
                       type2 beta,
                       type2* c,
                       int ldc,
                       const type2* bias,
                       bool flag_bias = false,
                       int flag_act = false,
                       float six = 6.f,
                       float leakey_relu_alpha = 1.f,
                       float scale = 6.f,
                       float offset = 3.f,
                       float threshold = 6.f) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    for (int j = 0; j < n; ++j) {
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * c[i * ldc + j] + bias_data;
      if (flag_act > 0) {
        if (flag_act == 1) {  // relu
          c[i * ldc + j] = tmp > static_cast<type2>(0) ? tmp : static_cast<type2>(0);
        } else if (flag_act == 2) {  // relu 6
          c[i * ldc + j] = tmp > static_cast<type2>(0) ? tmp : static_cast<type2>(0);
          c[i * ldc + j] = c[i * ldc + j] < static_cast<type2>(six)
                               ? c[i * ldc + j]
                               : static_cast<type2>(six);
        } else if (flag_act == 4) {  // leaky relu
          c[i * ldc + j] = tmp < static_cast<type2>(0)
                               ? static_cast<type2>(tmp * leakey_relu_alpha)
                               : tmp;
        } else if (flag_act == 10) {  // hard swish
          auto tmp1 = tmp + offset;
          if (tmp1 > 0) {
            if (tmp1 < threshold) {
              c[i * ldc + j] = static_cast<type2>(tmp1 * tmp * 1.0 / scale);
            } else {
              c[i * ldc + j] = static_cast<type2>(threshold * tmp * 1.0 / scale);
            }
          } else {
            if (threshold > 0) {
              c[i * ldc + j] = static_cast<type2>(0);
            } else {
              c[i * ldc + j] = static_cast<type2>(threshold * tmp * 1.0 / scale);
            }
          }
        }
      } else {
        c[i * ldc + j] = tmp;
      }
    }
  }
}

// Reference GEMV with activation: c = act(alpha*a*b + beta*c + bias).
// Same flag_act encoding as basic_gemm.
template <typename type, typename type2>
static void basic_gemv(int m,
                       int k,
                       const type* a,
                       const type* b,
                       const type2* bias,
                       type2* c,
                       type2 alpha,
                       type2 beta,
                       bool trans_a = false,
                       bool flag_bias = false,
                       int flag_act = false,
                       float six = 6.f,
                       float leakey_relu_alpha = 1.f,
                       float scale = 6.f,
                       float offset = 3.f,
                       float threshold = 6.f) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    auto sum = static_cast<type2>(0);
    for (int j = 0; j < k; ++j) {
      type av;
      if (trans_a) {
        av = a[j * m + i];
      } else {
        av = a[i * k + j];
      }
      sum += av * b[j];
    }
    type2 tmp = alpha * sum + beta * c[i] + bias_data;
    if (flag_act > 0) {
      if (flag_act == 1) {  // relu
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
      } else if (flag_act == 2) {  // relu 6
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
        c[i] = c[i] < six ? c[i] : six;  // ut compute
      } else if (flag_act == 4) {  // leakey relu
        c[i] = tmp < (type2)0 ? (type2)(tmp * leakey_relu_alpha) : tmp;
      } else if (flag_act == 10) {  // hard_swish
        c[i] = std::min(static_cast<type2>(threshold),
                        std::max(static_cast<type2>(0),
                                 static_cast<type2>(tmp + offset))) *
               static_cast<type2>(tmp * 1.0 / scale);
      }
    } else {
      c[i] = tmp;
    }
  }
}

/**
 * \brief basic direct convolution function
 */
//! for float, dtype1 and type2 is float
//! for int8, dytpe1 is char, dtype2 is int
// Grouped direct convolution reference. act_type: 1 = relu, 2 = relu6,
// 4 = leaky relu (uses `scale` as slope), 10 = hard swish.
template <typename Dtype1, typename Dtype2>
static void conv_basic(const Dtype1* din,
                       Dtype2* dout,
                       int num,
                       int chout,
                       int hout,
                       int wout,
                       int chin,
                       int hin,
                       int win,
                       const Dtype1* weights,
                       const Dtype2* bias,
                       int group,
                       int kernel_w,
                       int kernel_h,
                       int stride_w,
                       int stride_h,
                       int dila_w,
                       int dila_h,
                       int pad_w,
                       int pad_h,
                       bool flag_bias,
                       int act_type,
                       float six = 6.f,
                       float scale = 1.f,
                       const float hard_scale = 6.f,
                       const float offset = 3.f,
                       const float threshold = 6.f) {
  // beta == 0: any previous content of dout is discarded below.
  Dtype2 beta = 0;
  auto src_data = din;
  auto dst_data_ref = dout;
  auto weights_data = weights;
  auto with_bias = flag_bias;
  auto bias_data = bias;

  int in_num = num;
  int out_channels = chout;
  int out_h = hout;
  int out_w = wout;

  int in_channel = chin;
  int in_h = hin;
  int in_w = win;
  int out_c_group = out_channels / group;
  int in_c_group = in_channel / group;

  for (int n = 0; n < in_num; ++n) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
    for (int g = 0; g < group; ++g) {
      for (int oc = 0; oc < out_c_group; ++oc) {
        for (int oh = 0; oh < out_h; ++oh) {
          for (int ow = 0; ow < out_w; ++ow) {
            int out_idx = n * group * out_c_group * out_h * out_w +
                          g * out_c_group * out_h * out_w + oc * out_h * out_w +
                          oh * out_w + ow;
            Dtype2 bias_d = with_bias ? (bias_data[g * out_c_group + oc]) : 0;
            dst_data_ref[out_idx] = bias_d + dst_data_ref[out_idx] * beta;
            for (int ic = 0; ic < in_c_group; ++ic) {
              for (int kh = 0; kh < kernel_h; ++kh) {
                for (int kw = 0; kw < kernel_w; ++kw) {
                  int iw = ow * stride_w - pad_w + kw * (dila_w);
                  int ih = oh * stride_h - pad_h + kh * (dila_h);
                  // implicit zero padding: skip taps outside the input
                  if (iw < 0 || iw >= in_w) continue;
                  if (ih < 0 || ih >= in_h) continue;
                  int iidx = n * in_channel * in_h * in_w +
                             g * in_c_group * in_h * in_w + ic * in_h * in_w +
                             ih * in_w + iw;
                  int widx = g * out_c_group * in_c_group * kernel_h * kernel_w +
                             oc * in_c_group * kernel_h * kernel_w +
                             ic * kernel_h * kernel_w + kh * kernel_w + kw;
                  dst_data_ref[out_idx] += src_data[iidx] * weights_data[widx];
                }
              }
            }
            if (act_type > 0) {
              // 1-relu 2-relu6 4-leakyrelu
              if (act_type == 1) {
                dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)0;
              } else if (act_type == 2) {
                dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)0;
                dst_data_ref[out_idx] = dst_data_ref[out_idx] < (Dtype2)six
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)six;
              } else if (act_type == 4) {
                dst_data_ref[out_idx] =
                    dst_data_ref[out_idx] > (Dtype2)0
                        ? dst_data_ref[out_idx]
                        : (Dtype2)(dst_data_ref[out_idx] * scale);
              } else if (act_type == 10) {
                auto tmp = dst_data_ref[out_idx] + offset;
                auto tmp1 = dst_data_ref[out_idx] * 1.0 / hard_scale;
                if (tmp > 0) {
                  if (tmp < threshold) {
                    dst_data_ref[out_idx] = static_cast<Dtype2>(tmp * tmp1);
                  } else {
                    dst_data_ref[out_idx] = static_cast<Dtype2>(threshold * tmp1);
                  }
                } else {
                  if (threshold > 0) {
                    dst_data_ref[out_idx] = static_cast<Dtype2>(0);
                  } else {
                    dst_data_ref[out_idx] = static_cast<Dtype2>(threshold * tmp1);
                  }
                }
              } else {
                printf("this act type: %d does not support \n", act_type);
              }
            }
          }
        }
      }
    }
  }
}

// Adds a per-channel bias and optionally applies ReLU in place; `tensor` is
// laid out as channel-major blocks of `channel_size` elements.
template <typename Dtype>
static void fill_bias_relu(Dtype* tensor,
                           const Dtype* bias,
                           int channel,
                           int channel_size,
                           bool flag_bias,
                           bool flag_relu) {
  Dtype* data = tensor;
  for (int j = 0; j < channel; ++j) {
    Dtype bias_c = flag_bias ? bias[j] : 0;
    for (int i = 0; i < channel_size; i++) {
      data[i] += bias_c;
      if (flag_relu) {
        data[i] = data[i] > 0 ? data[i] : 0.f;
      }
    }
    data += channel_size;
  }
}

// In-place ReLU over a flat buffer.
template <typename Dtype>
static void do_relu(Dtype* tensor, int size) {
  for (int j = 0; j < size; ++j) {
    tensor[j] = tensor[j] > 0 ? tensor[j] : (Dtype)0;
  }
}

// True iff 0 <= a < b, in a single unsigned comparison (negative a wraps to
// a large unsigned value and fails the check).
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
  return static_cast<unsigned>(a) < static_cast<unsigned>(b);
}

// Scatter-adds a column buffer (im2col layout) back into the image, the
// inverse of im2col; out-of-image taps (from padding) are skipped.
template <typename Dtype>
static void col2im(const Dtype* data_col,
                   const int channels,
                   const int height,
                   const int width,
                   const int kernel_h,
                   const int kernel_w,
                   const int pad_h0,
                   const int pad_h1,
                   const int pad_w0,
                   const int pad_w1,
                   const int stride_h,
                   const int stride_w,
                   const int dilation_h,
                   const int dilation_w,
                   Dtype* data_im) {
  memset(data_im, 0, height * width * channels * sizeof(Dtype));
  const int output_h =
      (height + pad_h0 + pad_h1 - (dilation_h * (kernel_h - 1) + 1)) / stride_h +
      1;
  const int output_w =
      (width + pad_w0 + pad_w1 - (dilation_w * (kernel_w - 1) + 1)) / stride_w +
      1;
  const int channel_size = height * width;
  for (int channel = channels; channel--; data_im += channel_size) {
    for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
      for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
        int input_row = -pad_h0 + kernel_row * dilation_h;
        for (int output_rows = output_h; output_rows; output_rows--) {
          if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
            // whole output row maps outside the image: skip it in data_col
            data_col += output_w;
          } else {
            int input_col = -pad_w0 + kernel_col * dilation_w;
            for (int output_col = output_w; output_col; output_col--) {
              if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                data_im[input_row * width + input_col] += *data_col;
              }
              data_col++;
              input_col += stride_w;
            }
          }
          input_row += stride_h;
        }
      }
    }
  }
}

//! for float, dtype1 and type2 is float
//! for int8, dytpe1 is char, dtype2 is int
// Transposed-convolution reference: per group, GEMM(weights^T, input) into a
// column buffer, then col2im (skipped for the 1x1/stride-1/no-pad fast path),
// then optional bias/ReLU.
template <typename Dtype1, typename Dtype2>
void deconv_basic(const Dtype1* din,
                  Dtype2* dout,
                  int num,
                  int chout,
                  int hout,
                  int wout,
                  int chin,
                  int hin,
                  int win,
                  const Dtype1* weights,
                  const Dtype2* bias,
                  int group,
                  int kernel_w,
                  int kernel_h,
                  int stride_w,
                  int stride_h,
                  int dila_w,
                  int dila_h,
                  int pad_w0,
                  int pad_w1,
                  int pad_h0,
                  int pad_h1,
                  bool flag_bias,
                  bool flag_relu) {
  int m = chout * kernel_w * kernel_h / group;
  int n = hin * win;
  int k = chin / group;

  int group_size_in = win * hin * chin / group;
  int group_size_coldata = m * n;
  int group_size_weights = chin * chout * kernel_w * kernel_h / (group * group);
  bool flag_1x1s1p1 = (kernel_w == 1) && (kernel_h == 1) && (stride_h == 1) &&
                      (stride_w == 1) && (pad_w0 == 0) && (pad_h0 == 0) &&
                      (pad_w1 == 0) && (pad_h1 == 0) && (dila_w == 1) &&
                      (dila_h == 1);

  Dtype2* workspace_ptr =
      static_cast<Dtype2*>(malloc(sizeof(float) * m * n * group));

  for (int i = 0; i < num; ++i) {
    const Dtype1* din_batch = din + i * chin * hin * win;
    Dtype2* dout_batch = dout + i * chout * hout * wout;

    Dtype2* col_data = workspace_ptr;
    if (flag_1x1s1p1) {
      // fast path: GEMM can write directly into the output
      col_data = dout_batch;
    }
    memset(col_data, 0, sizeof(Dtype2) * group_size_coldata * group);
    for (int g = 0; g < group; ++g) {
      const Dtype1* din_group = din_batch + g * group_size_in;
      const Dtype1* weights_group = weights + g * group_size_weights;
      Dtype2* coldata_group = col_data + g * group_size_coldata;
      basic_gemm<Dtype1, Dtype2>(true,
                                 false,
                                 m,
                                 n,
                                 k,
                                 1,
                                 weights_group,
                                 m,
                                 din_group,
                                 n,
                                 0,
                                 coldata_group,
                                 n,
                                 nullptr,
                                 false,
                                 false);
    }

    if (!flag_1x1s1p1) {
      col2im(col_data,
             chout,
             hout,
             wout,
             kernel_h,
             kernel_w,
             pad_h0,
             pad_h1,
             pad_w0,
             pad_w1,
             stride_h,
             stride_w,
             dila_h,
             dila_w,
             dout_batch);
    }
    //! add bias
    if (flag_bias || flag_relu) {
      fill_bias_relu(
          dout_batch, bias, chout, wout * hout, flag_bias, flag_relu);
    }
  }
  free(workspace_ptr);
}

// Bilinear sampling at fractional coordinates (h, w) inside a single-channel
// plane of width `data_width`; coordinates past the last row/column are
// clamped to the border.
float deformable_bilinear(const float* bottom_data,
                          const int data_width,
                          const int height,
                          const int width,
                          float h,
                          float w) {
  int h_low = floor(h);
  int w_low = floor(w);
  int h_high = h_low + 1;
  int w_high = w_low + 1;
  if (h_low >= height - 1) {
    h_high = h_low = height - 1;
    h = static_cast<float>(h_low);
  } else {
    h_high = h_low + 1;
  }

  if (w_low >= width - 1) {
    w_high = w_low = width - 1;
    w = static_cast<float>(w_low);
  } else {
    w_high = w_low + 1;
  }

  float lh = h - h_low;
  float lw = w - w_low;
  float hh = 1 - lh;
  float hw = 1 - lw;
  float v1 = bottom_data[h_low * data_width + w_low];
  float v2 = bottom_data[h_low * data_width + w_high];
  float v3 = bottom_data[h_high * data_width + w_low];
  float v4 = bottom_data[h_high * data_width + w_high];
  float w1 = hh * hw;
  float w2 = hh * lw;
  float w3 = lh * hw;
  float w4 = lh * lw;
  float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}

//! for float, dtype1 and type2 is float
//!
for int8, dytpe1 is char, dtype2 is int
// Naive reference implementation of (modulated) deformable convolution.
// For every output element it walks all taps of the kernel, reads the
// learned (offset_h, offset_w) for that tap, bilinearly samples the input at
// the displaced position, optionally scales by the modulation mask, and
// accumulates val * weight into out_data.
//   offset_data layout: [num, group, 2*kernel_h*kernel_w, hout, wout]
//   mask_data layout:   [num, group,   kernel_h*kernel_w, hout, wout]
// NOTE(review): out_data is accumulated on top of its existing contents
// (bias_d + out_data[out_idx]); callers presumably zero it first — confirm.
template <typename Dtype1, typename Dtype2>
void deformable_conv_basic(const Dtype1* in_data, const float* offset_data,
                           const float* mask_data, Dtype2* out_data,
                           int num, int chout, int hout, int wout,
                           int chin, int hin, int win,
                           const Dtype1* weights, const Dtype2* bias,
                           int group, int kernel_w, int kernel_h,
                           int stride_w, int stride_h,
                           int dila_w, int dila_h,
                           int pad_w, int pad_h,
                           bool flag_bias, bool flag_relu, bool modulated) {
  int out_c_group = chout / group;
  int in_c_group = chin / group;
  int in_size = hin * win;
  int out_size = hout * wout;
  int c_in_size = chin * in_size;
  int c_out_size = chout * out_size;
  int kernel_size = kernel_w * kernel_h;
  for (int n = 0; n < num; n++) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
    for (int g = 0; g < group; ++g) {
      for (int oc = 0; oc < out_c_group; ++oc) {
        for (int oh = 0; oh < hout; oh++) {
          for (int ow = 0; ow < wout; ow++) {
            int out_idx = n * c_out_size + g * out_c_group * out_size +
                          oc * out_size + oh * wout + ow;
            Dtype2 bias_d = flag_bias ? bias[g * out_c_group + oc] : 0;
            out_data[out_idx] = bias_d + out_data[out_idx];
            for (int ic = 0; ic < in_c_group; ++ic) {
              for (int fh = 0; fh < kernel_h; fh++) {
                for (int fw = 0; fw < kernel_w; fw++) {
                  // Offsets for this group; h and w offsets for tap (fh, fw)
                  // are stored in interleaved channel pairs.
                  const float* offset_data_ptr =
                      offset_data + n * group * 2 * kernel_size * out_size +
                      g * 2 * kernel_size * out_size;
                  const int data_offset_h_ptr =
                      ((2 * (fh * kernel_w + fw)) * hout + oh) * wout + ow;
                  const int data_offset_w_ptr =
                      ((2 * (fh * kernel_w + fw) + 1) * hout + oh) * wout + ow;
                  const float offset_h = offset_data_ptr[data_offset_h_ptr];
                  const float offset_w = offset_data_ptr[data_offset_w_ptr];
                  // NOTE(review): the sampling position uses
                  // kernel_w * dila_w / kernel_h * dila_h, independent of the
                  // tap indices fw/fh; textbook deformable conv uses
                  // fw * dila_w / fh * dila_h here. Verify against the kernel
                  // this oracle is compared with before changing.
                  const float iw =
                      ow * stride_w - pad_w + kernel_w * dila_w + offset_w;
                  const float ih =
                      oh * stride_h - pad_h + kernel_h * dila_h + offset_h;
                  if (ih >= 0 && ih < hin && iw >= 0 && iw < win) {
                    // Sample relative to the patch origin; cur_height/width
                    // bound the window so bilinear clamping stays in-image.
                    const float map_h = kernel_h * dila_h + offset_h;
                    const float map_w = kernel_w * dila_w + offset_w;
                    const int cur_height = hin - (oh * stride_h - pad_h);
                    const int cur_width = win - (ow * stride_w - pad_w);
                    const float* in_data_offset =
                        in_data + n * c_in_size +
                        (g * in_c_group + ic) * in_size +
                        (oh * stride_h - pad_h) * win +
                        (ow * stride_w - pad_w);
                    float val = deformable_bilinear(in_data_offset, win,
                                                    cur_height, cur_width,
                                                    map_h, map_w);
                    if (modulated) {
                      // use mask
                      const float* mask_ptr =
                          mask_data + n * group * kernel_size * out_size +
                          g * kernel_size * out_size +
                          (fh * kernel_w + fw) * hout * wout + oh * wout + ow;
                      val *= mask_ptr[0];
                    }
                    int widx = g * out_c_group * in_c_group * kernel_size +
                               oc * in_c_group * kernel_size +
                               ic * kernel_size + fh * kernel_w + fw;
                    out_data[out_idx] += val * weights[widx];
                  }
                }
              }
            }
            if (flag_relu) {
              out_data[out_idx] =
                  out_data[out_idx] > 0 ? out_data[out_idx] : 0;
            }
          }
        }
      }
    }
  }
}
LSBasics.h
#include "graph.h"
#pragma once

// Scratch arrays filled by Basics::DFS; all maps are 1-based and sized n+1.
struct DFSData {
	int *id2dfs;  //maps vertex ids to post-order numbers
	int *dfs2id;  //maps post-order numbers to vertex ids
	int *id2parc; //maps vertex ids to parent arcs in the dfs tree

	DFSData(int n) {
		id2dfs = new int [n+1];
		dfs2id = new int [n+1];
		id2parc = new int [n+1];
	}

	~DFSData() {
		delete [] id2parc;
		delete [] dfs2id;
		delete [] id2dfs;
		//fprintf (stderr, "Deleted DFS data.\n"); //fflush(stderr);
	}
};

// Incumbent bookkeeping shared across branch-and-bound workers.
class GlobalInfo {
	EdgeCost bestfound;
	int solved;
public:
	int bbpruned; //true iff bb pruned at a node that was not yet solved
	EdgeCost fixed; //hack because bb cannot handle fixed costs
	//
	// Record bf as the new incumbent if it improves on the best found so far;
	// returns the current best. The cheap unsynchronized read is re-validated
	// inside the omp critical section before any update.
	// NOTE(review): 'answer' is a double while bestfound is EdgeCost; if
	// EdgeCost is not double this mixes types silently — confirm the typedef.
	EdgeCost UpdateBestFound(EdgeCost bf) {
		double answer = bestfound;
		if (bf != answer) {
#pragma omp critical
			{
				if (bf < bestfound) {
					bestfound = bf;
					fprintf (stderr, "[[[ %.2f ]]] ", bestfound);
				}
				answer = bestfound;
			}
		}
		return answer;
	}

	inline bool IsSolved() { return (solved!=0); }
	void MakeSolved() { solved = 1; }

	GlobalInfo() {
		bestfound = INFINITE_COST;
		fixed = 0;
		solved = 0;
		bbpruned = 0;
	}
};

// Records cuts as a flat list of arc labels; -1 terminates each cut.
class CutRecorder {
public:
	vector<int> cutlist;
	void Reset() { cutlist.clear(); }
	void Reset(int size) {
		cutlist.reserve(size);
		cutlist.clear();
	}
	inline void AddArc(int alabel) { cutlist.push_back(alabel); }
	inline void CloseCut() { cutlist.push_back(-1); }
};

// Maps vertex/edge ids of an original graph to ids in a reduced graph
// (-1 = unmapped). Arrays are 1-based.
// NOTE(review): calling Reset twice without Destroy leaks the first arrays,
// and there is no copy-control (rule of three) — confirm intended usage.
class GraphMapper {
private:
	void Init() {
		oldn = oldm = 0;
		v2new = e2new = NULL;
	}
public:
	int *v2new;
	int *e2new;
	int oldn, oldm;

	GraphMapper() { Init(); }

	void Reset(int _oldn, int _oldm) {
		oldn = _oldn;
		oldm = _oldm;
		v2new = new int [oldn+1];
		e2new = new int [oldm+1];
		for (int e=1; e<=oldm; e++) e2new[e] = -1;
		for (int v=1; v<=oldn; v++) v2new[v] = -1;
	}

	~GraphMapper() {
		if (v2new) delete [] v2new;
		if (e2new) delete [] e2new;
	}

	void Destroy() {
		if (v2new) delete [] v2new;
		if (e2new) delete [] e2new;
		Init();
	}
};

// Stateless helper routines shared by the Steiner-tree solvers.
class Basics {
public:
	// Print the solution value, timings, and error relative to bestknown.
	static void ReportResults (FILE *file, const string &prefix, double seconds, EdgeCost solvalue, EdgeCost bestknown) {
		fprintf (file, "%ssolution %.20f\n", prefix.c_str(), (double)solvalue);
		fprintf(file, "%stimeus %.3f\n", prefix.c_str(), 1000000.0 * seconds);
		fprintf(file, "%stimems %.6f\n", prefix.c_str(), 1000.0 * seconds);
		fprintf(file, "%stimes %.9f\n", prefix.c_str(), seconds);
		double ratio = (double)solvalue / (double)bestknown;
		double error = ratio - 1;
		fprintf(file, "%sratio %.20f\n", prefix.c_str(), ratio);
		fprintf(file, "%serror %.20f\n", prefix.c_str(), error);
		fprintf(file, "%spcterror %.20f\n", prefix.c_str(), 100.0 * error);
	}

	// Report a fatal error and abort the process.
	static void fatal (const string &msg) {
		fprintf (stderr, "ERROR: %s.\n", msg.c_str());
		fflush(stderr);
		exit(-1);
	}

	// this is old; the terminal is not random
	static int WrongPickRandomTerminal(Graph &g) {
		//fprintf (stderr, "r");
		int n = g.VertexCount();
		for (int v=1; v<=n; v++) if (g.IsTerminal(v)) return v;
		fatal ("could not find terminal");
		return 0;
	}

	// Pick a terminal uniformly at random using the global RNG.
	static int PickRandomTerminal(Graph &g) {
		//fprintf (stderr, "r");
		int n = g.VertexCount();
		int count = 0;
		int target = RFWRandom::getInteger(1,g.TerminalCount());
		for (int v=1; v<=n; v++) {
			if (g.IsTerminal(v)) {
				if (++count == target) return v;
			}
		}
		fatal ("could not find terminal");
		return 0;
	}

	// Pick a terminal uniformly at random using a caller-supplied RNG.
	static int PickRandomTerminal(Graph &g, RFWLocalRandom &random) {
		static bool first = true;
		if (first) {
			fprintf (stderr, "PICKRANDOMTERMINAL IS NOT PROPERLY SET.\n");
			first = false;
		}
		int n = g.VertexCount();
		int count = 0;
		int target = random.GetInteger(1,g.TerminalCount());
		for (int v=1; v<=n; v++) {
			if (g.IsTerminal(v)) {
				if (++count == target) return v;
			}
		}
		fatal ("could not find terminal");
		return 0;
	}

	/// <summary>
	/// Perform DFS on the solution, numbering vertices in reverse post-order.
	/// Returns the number of vertices visited.
	/// </summary>
	/// <param name="r">Root of DFS.</param>
	/// <param name="solution">Current solution.</param>
	/// <param name="dfs2id">Output: map from dfs number to id (-1 if not visited)</param>
	/// <param name="id2dfs">Output: map from id to dfs number (-1 if not visited)</param>
	/// <param name="id2parc">Output: map from it to parent arc (0 if not visited)</param>
	static int DFS (Graph &g, int r, SteinerSolution &solution, DFSData &dfsdata, RFWStack<int> &stack) {
		// this is a funny implementation of dfs: when we first scan a vertex, we simply add to the stack
		// every nonscanned neighbor---even those that are already in the stack. This requires a stack of size m.
		// WARNING! IF WE ARE ONLY SCANNING EDGES OF THE SOLUTION, THE SIZE IS N
		int *id2dfs = dfsdata.id2dfs;
		int *dfs2id = dfsdata.dfs2id;
		int *id2parc = dfsdata.id2parc;
		int n = g.VertexCount();
		int m = g.EdgeCount();
		stack.reset();

		//id2dfs: -1:unreached 0:scanned >0:processed
		for (int v=0; v<=n; v++) {
			id2dfs[v] = -1; //everybody unreached, initially
			dfs2id[v] = -1;
			id2parc[v] = 0;
		}
		stack.push(r);
		int nextdfs = 1;

		while (!stack.isEmpty()) {
			int v = stack.pop();
			int vdfs = id2dfs[v];
			if (vdfs > 0) {continue;} //vertex already processed: nothing else to do

			//vertex already scanned, but with no label; we assign it a label now
			if (vdfs == 0) {
				id2dfs[v] = nextdfs;
				dfs2id[nextdfs] = v;
				nextdfs++;
				continue;
			}

			//vertex not yet scanned: scan it, put it back on the stack (a label will be assigned later)
			stack.push(v);
			id2dfs[v] = 0;
			//foreach (WeightedGraph.Arc arc in g.ArcEnumerator(v)) {
			SPGArc *a, *end;
			//for (int pa=g.GetStart(v); pa<g.GetEnd(v); pa++) {
			for (g.GetBounds(v,a,end); a<end; a++) {
				int alabel = a->label; //g.GetArcLabel(pa);
				if (!solution.Contains(alabel)) continue;
				int w = a->head; //g.GetArcHead(pa);//arc.head;
				if (id2dfs[w] >= 0) continue; //w already scanned: no need to go there again
				id2parc[w] = alabel;
				stack.push(w);
			}
		}
		return nextdfs - 1;
	}

	// add all vertices in the current solution to solnodes
	// (vertices with incident edges)
	static void MarkSolutionNodes(Graph &g, SteinerSolution &solution, UniverseSet &solnodes) {
		int n = g.VertexCount();
		for (int v=1; v<=n; v++) {
			if (solution.GetDegree(v)>0) solnodes.Insert(v);
		}
	}

	//mark the components containing the elements of the stack
	static void MarkComponent(Graph &g, RFWStack<int> &stack, int *id2parc, UniverseSet &marked) {
		//Console.Error.Write("+");
		//Invariant: a vertex becomes marked when it is inserted into the stack
		//A marked vertex is or was on the stack.
		bool verbose = false;
		if (verbose) fprintf (stderr, "Marking component from %d vertices; marked has %d.", stack.getNElements(), marked.Count());

		//make invariants true for original vertices
		for (int i = stack.getNElements(); i >= 1; i--) {
			int v = stack.peek(i);
			if (marked.Contains(v)) fprintf (stderr, "BAD");
			marked.Insert(v);
		}
		int mcount = marked.Count();

		//add all relevant tree children to the stack
		while (!stack.isEmpty()) {
			int v = stack.pop();
			SPGArc *a, *end;
			for (g.GetBounds(v,a,end); a<end; a++) {
				//for (int pa=g.GetStart(v); pa<g.GetEnd(v); pa++) {
				int w = a->head; //g.GetArcHead(pa);
				// follow only tree arcs (w's parent arc in the dfs tree)
				if (id2parc[w]==a->label) { //g.GetArcLabel(pa)) {
					if (marked.Insert(w)) {
						//mcount ++;
						stack.push(w);
					}
				}
			}
			/* foreach (WeightedGraph.Arc arc in g.ArcEnumerator(v)) {
				int w = arc.head;
				if (id2parc[w]==arc.label) {
					if (marked.Insert(w)) stack.Push(w);
				}
			}*/
		}
		mcount = marked.Count() - mcount;
		if (verbose) fprintf(stderr, "%d elements marked", mcount);
		/* if (verbose) {Console.Error.WriteLine(" {0} elements marked.", marked.Count());
			foreach (int e in marked.ElementEnumerator()) {Console.Error.Write("+{0} ", e);}}*/
	}

	// Abort the program if the solution is not a valid Steiner tree.
	static void CheckSolution(Graph &g, SteinerSolution &solution) {
		if (!InnerCheck(g, solution, false)) {
			fatal ("Invalid solution");
		}
	}

	// Validate the solution: every edge touches only solution vertices, there
	// are no cycles (union-find), no terminal is missing, and the edge count
	// equals vertex count - 1 (i.e. the solution is a single tree).
	static bool InnerCheck(Graph &g, SteinerSolution &solution, bool verbose) {
		int n = g.VertexCount();
		UniverseSet svertices(n);
		UnionFind uf(n);
		Basics::MarkSolutionNodes(g, solution, svertices);
		int ncomp = svertices.Count();
		UniverseSet terminals(n);
		for (int t=1; t<=g.VertexCount(); t++) {
			if (g.IsTerminal(t)) terminals.Insert(t);
		}
		//Console.Error.WriteLine("Term
		//foreach (int e in solution.ElementEnumerator())
		int m = g.EdgeCount();
		int ecount = 0;
		for (int e=1; e<=m; e++) {
			if (!solution.Contains(e)) continue;
			ecount ++;
			int v, w;
			g.GetEndpoints(e, v, w);
			if (!svertices.Contains(v) || !svertices.Contains(w)) {
				fprintf (stderr, "Edge %d=(%d,%d), membership %d %d.\n", e, v, w, svertices.Contains(v), svertices.Contains(w));
				fatal ("Inconsistent vertex membership in solution");
			}
			terminals.Remove(v);
			terminals.Remove(w);
			if (uf.Find(v) == uf.Find(w)) {
				fprintf (stderr, "Vertices %d, %d already in the same component.", v, w);
				fatal ("Solution has a cycle.");
			} else {
				uf.Union(v, w);
				ncomp --;
			}
		}
		if (terminals.Count() > 0) {
			fprintf (stderr, "Terminals not in solution: %d.", terminals.Count());
			fatal ("Missing terminals in solution.");
		}
		fprintf (stderr, "[CHECKING:n=%d:m=%d:%d components]", svertices.Count(), ecount, ncomp);
		if (verbose) {
			for (int v=1; v<=n; v++) {
				if (svertices.Contains(v)) {
					fprintf (stderr, "%d:%d ", v, uf.Find(v));
				}
			}
			fprintf (stderr, "\n");
		}
		if (ecount != svertices.Count() - 1) return false;
		else return true;
	}

	/// <summary>
	/// Compute the Voronoi diagram of the current graph, given a set of bases
	/// and maybe the perturbed cost of the edges.
	/// </summary>
	/// <param name="voronoi">Output: description of the Voronoi diagram</param>
	/// <param name="baselist">List of bases.</param>
	/// <param name="heap">Preallocated heap to be used in the computation (will be reset)</param>
	/// <param name="pertcost">Edge costs (use original costs if null).</param>
	static void ComputeVoronoi(Graph &g, VoronoiData &voronoi, UniverseSet &baselist, BinaryHeap<EdgeCost> &heap, EdgeCost *pertcost) {
		const bool GLOBAL_USE_VORONOI_TIE_BREAKER = false; // NOT CLEAR WHERE THIS THING IS SUPPOSED TO BE DEFINED
		const bool verbose = false;
		voronoi.Reset();
		int nbases = 0;
		heap.Reset();

		// initialize with all bases
		int p, pend;
		for (baselist.GetBounds(p,pend); p<pend; p++) {
			int b = baselist.PickPos(p);
			nbases ++;
			voronoi.MakeBase(b);
			heap.Insert(b, 0);
		}
		if (verbose) fprintf (stderr, "%d vertices marked as bases.\n", nbases);
		int count = 0;

		//WARNING: RANDOMIZING THE CHOICE SEEMS TO BE A GOOD IDEA
		bool randomize = false;
		bool PREFER_TERMINALS = true;
		bool USE_TIEBREAKERS = GLOBAL_USE_VORONOI_TIE_BREAKER && (randomize || PREFER_TERMINALS);

		//perform multisource Dijkstra
		while (!heap.IsEmpty()) {
			unsigned int v;
			EdgeCost dist;
			heap.RemoveFirst(v, dist);
			count++;
			// NOTE(review): "%d" assumes EdgeCost is int; if EdgeCost is a
			// floating type this (debug-only) fprintf is undefined — confirm.
			if (verbose) fprintf (stderr, "%d ", dist);
			//foreach (WeightedGraph.Arc arc in g.ArcEnumerator(v)) {
			SPGArc *a, *end;
			for (g.GetBounds(v,a,end); a<end; a++) {
				int w = a->head; //g.GetArcHead(pa);
				EdgeCost newdist = dist;
				if (pertcost == NULL) newdist += a->cost; //g.GetArcCost(pa);
				else newdist += pertcost[a->label];
				bool improve = false;
				if (voronoi.GetBase(w) == 0) improve = true;
				else if (newdist <= voronoi.GetDistance(w)) improve = true; //using leq here to prefer shorter edges...
				else if (USE_TIEBREAKERS && newdist == voronoi.GetDistance(w)) {
					if (randomize) {
						fatal ("randomization not implemented!\n");
						//(stderr, "NOT IMPLEMENTED!\n
						//improve = (random.GetInteger(0, 1) == 0); //(arc.cost < g.GetCost(voronoi.GetParentArc(w)));
					} else if (PREFER_TERMINALS) {
						improve = g.IsTerminal(voronoi.GetBase(v));
					}
					//improve = (arc.cost < g.GetCost(voronoi.GetParentArc(w)));
				}
				if (improve) {
					//make w a tentative child of v
					heap.Insert(w, newdist);
					voronoi.Update(w, voronoi.GetBase(v), a->label, newdist);
				}
			}
		}
	}

	/// Iteratively removes all degree-one vertices in the solution.
	/// Takes O(1) if there are no such vertices. If there are, takes
	/// O(n) + degree of all vertices removed.
	/// <param name="solution">Original solution (will be modified).</param>
	static void Prune(Graph &g, SteinerSolution &solution) {
		if (solution.LeafCount()==0) return; //WARNING: THIS SHOULD BE THERE!
		int n = g.VertexCount();
		for (int v=1; v<=n; v++) {
			int t = v;
			// peel leaves along the path starting at v until we hit a
			// terminal or a vertex of degree != 1
			while (solution.GetDegree(t)==1 && !g.IsTerminal(t)) {
				//find the unique incident solution edge
				SPGArc *a, *end;
				for (g.GetBounds(t,a,end); a<end; a++) {
					int alabel = a->label;
					if (!solution.Contains(alabel)) continue;
					solution.Remove(alabel);
					t = g.GetOther(alabel,t); //process the other endpoint next
					break;
				}
			}
		}
	}
};
GB_binop__isle_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_uint32) // A.*B function (eWiseMult): GB (_AemultB_01__isle_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__isle_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__isle_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint32) // A*D function (colscale): GB (_AxD__isle_uint32) // D*A function (rowscale): GB (_DxB__isle_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__isle_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__isle_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint32) // C=scalar+B GB (_bind1st__isle_uint32) // C=scalar+B' GB (_bind1st_tran__isle_uint32) // C=A+scalar GB (_bind2nd__isle_uint32) // C=A'+scalar GB (_bind2nd_tran__isle_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types 
of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_UINT32 || GxB_NO_ISLE_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isle_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const 
int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isle_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unop__identity_fc32_int16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_fc32_int16 // op(A') function: GB_unop_tran__identity_fc32_int16 // C type: GxB_FC32_t // A type: int16_t // cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0) // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_fc32_int16 ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A 
is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int16_t aij = Ax [p] ; GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fc32_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
FillInLinearSystemImpl.h
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
// ---------------------------------------------------------------------------- #include "open3d/t/geometry/kernel/GeometryIndexer.h" #include "open3d/t/pipelines/kernel/FillInLinearSystem.h" #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) #include "open3d/t/pipelines/kernel/SVD3x3CUDA.cuh" #else #include "open3d/t/pipelines/kernel/SVD3x3CPU.h" #endif namespace open3d { namespace t { namespace pipelines { namespace kernel { #if defined(__CUDACC__) void FillInRigidAlignmentTermCUDA #else void FillInRigidAlignmentTermCPU #endif (core::Tensor &AtA, core::Tensor &Atb, core::Tensor &residual, const core::Tensor &Ti_ps, const core::Tensor &Tj_qs, const core::Tensor &Ri_normal_ps, int i, int j, float threshold) { core::Device device = AtA.GetDevice(); int64_t n = Ti_ps.GetLength(); if (Tj_qs.GetLength() != n || Ri_normal_ps.GetLength() != n) { utility::LogError( "Unable to setup linear system: input length mismatch."); } // First fill in a small 12 x 12 linear system core::Tensor AtA_local = core::Tensor::Zeros({12, 12}, core::Dtype::Float32, device); core::Tensor Atb_local = core::Tensor::Zeros({12}, core::Dtype::Float32, device); float *AtA_local_ptr = static_cast<float *>(AtA_local.GetDataPtr()); float *Atb_local_ptr = static_cast<float *>(Atb_local.GetDataPtr()); float *residual_ptr = static_cast<float *>(residual.GetDataPtr()); const float *Ti_ps_ptr = static_cast<const float *>(Ti_ps.GetDataPtr()); const float *Tj_qs_ptr = static_cast<const float *>(Tj_qs.GetDataPtr()); const float *Ri_normal_ps_ptr = static_cast<const float *>(Ri_normal_ps.GetDataPtr()); #if defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; #endif launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(int64_t workload_idx) { const float *p_prime = Ti_ps_ptr + 3 * workload_idx; const float *q_prime = Tj_qs_ptr + 3 * workload_idx; const float *normal_p_prime = Ri_normal_ps_ptr + 3 * workload_idx; float r = (p_prime[0] - q_prime[0]) * normal_p_prime[0] 
+ (p_prime[1] - q_prime[1]) * normal_p_prime[1] + (p_prime[2] - q_prime[2]) * normal_p_prime[2]; if (abs(r) > threshold) return; float J_ij[12]; J_ij[0] = -q_prime[2] * normal_p_prime[1] + q_prime[1] * normal_p_prime[2]; J_ij[1] = q_prime[2] * normal_p_prime[0] - q_prime[0] * normal_p_prime[2]; J_ij[2] = -q_prime[1] * normal_p_prime[0] + q_prime[0] * normal_p_prime[1]; J_ij[3] = normal_p_prime[0]; J_ij[4] = normal_p_prime[1]; J_ij[5] = normal_p_prime[2]; for (int k = 0; k < 6; ++k) { J_ij[k + 6] = -J_ij[k]; } // Not optimized; Switch to reduction if necessary. #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) for (int i_local = 0; i_local < 12; ++i_local) { for (int j_local = 0; j_local < 12; ++j_local) { atomicAdd(&AtA_local_ptr[i_local * 12 + j_local], J_ij[i_local] * J_ij[j_local]); } atomicAdd(&Atb_local_ptr[i_local], J_ij[i_local] * r); } atomicAdd(residual_ptr, r * r); #else #pragma omp critical { for (int i_local = 0; i_local < 12; ++i_local) { for (int j_local = 0; j_local < 12; ++j_local) { AtA_local_ptr[i_local * 12 + j_local] += J_ij[i_local] * J_ij[j_local]; } Atb_local_ptr[i_local] += J_ij[i_local] * r; } *residual_ptr += r * r; } #endif }); // Then fill-in the large linear system std::vector<int64_t> indices_vec(12); for (int k = 0; k < 6; ++k) { indices_vec[k] = i * 6 + k; indices_vec[k + 6] = j * 6 + k; } std::vector<int64_t> indices_i_vec; std::vector<int64_t> indices_j_vec; for (int local_i = 0; local_i < 12; ++local_i) { for (int local_j = 0; local_j < 12; ++local_j) { indices_i_vec.push_back(indices_vec[local_i]); indices_j_vec.push_back(indices_vec[local_j]); } } core::Tensor indices(indices_vec, {12}, core::Dtype::Int64, device); core::Tensor indices_i(indices_i_vec, {12 * 12}, core::Dtype::Int64, device); core::Tensor indices_j(indices_j_vec, {12 * 12}, core::Dtype::Int64, device); core::Tensor AtA_sub = AtA.IndexGet({indices_i, indices_j}); AtA.IndexSet({indices_i, indices_j}, AtA_sub + AtA_local.View({12 * 12})); core::Tensor Atb_sub = 
Atb.IndexGet({indices}); Atb.IndexSet({indices}, Atb_sub + Atb_local.View({12, 1})); } #if defined(__CUDACC__) void FillInSLACAlignmentTermCUDA #else void FillInSLACAlignmentTermCPU #endif (core::Tensor &AtA, core::Tensor &Atb, core::Tensor &residual, const core::Tensor &Ti_Cps, const core::Tensor &Tj_Cqs, const core::Tensor &Cnormal_ps, const core::Tensor &Ri_Cnormal_ps, const core::Tensor &RjT_Ri_Cnormal_ps, const core::Tensor &cgrid_idx_ps, const core::Tensor &cgrid_idx_qs, const core::Tensor &cgrid_ratio_qs, const core::Tensor &cgrid_ratio_ps, int i, int j, int n_frags, float threshold) { int64_t n = Ti_Cps.GetLength(); if (Tj_Cqs.GetLength() != n || Cnormal_ps.GetLength() != n || Ri_Cnormal_ps.GetLength() != n || RjT_Ri_Cnormal_ps.GetLength() != n || cgrid_idx_ps.GetLength() != n || cgrid_ratio_ps.GetLength() != n || cgrid_idx_qs.GetLength() != n || cgrid_ratio_qs.GetLength() != n) { utility::LogError( "Unable to setup linear system: input length mismatch."); } int n_vars = Atb.GetLength(); float *AtA_ptr = static_cast<float *>(AtA.GetDataPtr()); float *Atb_ptr = static_cast<float *>(Atb.GetDataPtr()); float *residual_ptr = static_cast<float *>(residual.GetDataPtr()); // Geometric properties const float *Ti_Cps_ptr = static_cast<const float *>(Ti_Cps.GetDataPtr()); const float *Tj_Cqs_ptr = static_cast<const float *>(Tj_Cqs.GetDataPtr()); const float *Cnormal_ps_ptr = static_cast<const float *>(Cnormal_ps.GetDataPtr()); const float *Ri_Cnormal_ps_ptr = static_cast<const float *>(Ri_Cnormal_ps.GetDataPtr()); const float *RjT_Ri_Cnormal_ps_ptr = static_cast<const float *>(RjT_Ri_Cnormal_ps.GetDataPtr()); // Association properties const int *cgrid_idx_ps_ptr = static_cast<const int *>(cgrid_idx_ps.GetDataPtr()); const int *cgrid_idx_qs_ptr = static_cast<const int *>(cgrid_idx_qs.GetDataPtr()); const float *cgrid_ratio_ps_ptr = static_cast<const float *>(cgrid_ratio_ps.GetDataPtr()); const float *cgrid_ratio_qs_ptr = static_cast<const float 
*>(cgrid_ratio_qs.GetDataPtr()); #if defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; #endif launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(int64_t workload_idx) { const float *Ti_Cp = Ti_Cps_ptr + 3 * workload_idx; const float *Tj_Cq = Tj_Cqs_ptr + 3 * workload_idx; const float *Cnormal_p = Cnormal_ps_ptr + 3 * workload_idx; const float *Ri_Cnormal_p = Ri_Cnormal_ps_ptr + 3 * workload_idx; const float *RjTRi_Cnormal_p = RjT_Ri_Cnormal_ps_ptr + 3 * workload_idx; const int *cgrid_idx_p = cgrid_idx_ps_ptr + 8 * workload_idx; const int *cgrid_idx_q = cgrid_idx_qs_ptr + 8 * workload_idx; const float *cgrid_ratio_p = cgrid_ratio_ps_ptr + 8 * workload_idx; const float *cgrid_ratio_q = cgrid_ratio_qs_ptr + 8 * workload_idx; float r = (Ti_Cp[0] - Tj_Cq[0]) * Ri_Cnormal_p[0] + (Ti_Cp[1] - Tj_Cq[1]) * Ri_Cnormal_p[1] + (Ti_Cp[2] - Tj_Cq[2]) * Ri_Cnormal_p[2]; if (abs(r) > threshold) return; // Now we fill in a 60 x 60 sub-matrix: 2 x (6 + 8 x 3) float J[60]; int idx[60]; // Jacobian w.r.t. Ti: 0-6 J[0] = -Tj_Cq[2] * Ri_Cnormal_p[1] + Tj_Cq[1] * Ri_Cnormal_p[2]; J[1] = Tj_Cq[2] * Ri_Cnormal_p[0] - Tj_Cq[0] * Ri_Cnormal_p[2]; J[2] = -Tj_Cq[1] * Ri_Cnormal_p[0] + Tj_Cq[0] * Ri_Cnormal_p[1]; J[3] = Ri_Cnormal_p[0]; J[4] = Ri_Cnormal_p[1]; J[5] = Ri_Cnormal_p[2]; // Jacobian w.r.t. Tj: 6-12 for (int k = 0; k < 6; ++k) { J[k + 6] = -J[k]; idx[k + 0] = 6 * i + k; idx[k + 6] = 6 * j + k; } // Jacobian w.r.t. C over p: 12-36 for (int k = 0; k < 8; ++k) { J[12 + k * 3 + 0] = cgrid_ratio_p[k] * Cnormal_p[0]; J[12 + k * 3 + 1] = cgrid_ratio_p[k] * Cnormal_p[1]; J[12 + k * 3 + 2] = cgrid_ratio_p[k] * Cnormal_p[2]; idx[12 + k * 3 + 0] = 6 * n_frags + cgrid_idx_p[k] * 3 + 0; idx[12 + k * 3 + 1] = 6 * n_frags + cgrid_idx_p[k] * 3 + 1; idx[12 + k * 3 + 2] = 6 * n_frags + cgrid_idx_p[k] * 3 + 2; } // Jacobian w.r.t. 
C over q: 36-60 for (int k = 0; k < 8; ++k) { J[36 + k * 3 + 0] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[0]; J[36 + k * 3 + 1] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[1]; J[36 + k * 3 + 2] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[2]; idx[36 + k * 3 + 0] = 6 * n_frags + cgrid_idx_q[k] * 3 + 0; idx[36 + k * 3 + 1] = 6 * n_frags + cgrid_idx_q[k] * 3 + 1; idx[36 + k * 3 + 2] = 6 * n_frags + cgrid_idx_q[k] * 3 + 2; } // Not optimized; Switch to reduction if necessary. #if defined(__CUDACC__) for (int ki = 0; ki < 60; ++ki) { for (int kj = 0; kj < 60; ++kj) { float AtA_ij = J[ki] * J[kj]; int ij = idx[ki] * n_vars + idx[kj]; atomicAdd(AtA_ptr + ij, AtA_ij); } float Atb_i = J[ki] * r; atomicAdd(Atb_ptr + idx[ki], Atb_i); } atomicAdd(residual_ptr, r * r); #else #pragma omp critical { for (int ki = 0; ki < 60; ++ki) { for (int kj = 0; kj < 60; ++kj) { AtA_ptr[idx[ki] * n_vars + idx[kj]] += J[ki] * J[kj]; } Atb_ptr[idx[ki]] += J[ki] * r; } *residual_ptr += r * r; } #endif }); } inline OPEN3D_HOST_DEVICE void matmul3x3_3x1(float m00, float m01, float m02, float m10, float m11, float m12, float m20, float m21, float m22, float v0, float v1, float v2, float &o0, float &o1, float &o2) { o0 = m00 * v0 + m01 * v1 + m02 * v2; o1 = m10 * v0 + m11 * v1 + m12 * v2; o2 = m20 * v0 + m21 * v1 + m22 * v2; } inline OPEN3D_HOST_DEVICE void matmul3x3_3x3(float a00, float a01, float a02, float a10, float a11, float a12, float a20, float a21, float a22, float b00, float b01, float b02, float b10, float b11, float b12, float b20, float b21, float b22, float &c00, float &c01, float &c02, float &c10, float &c11, float &c12, float &c20, float &c21, float &c22) { matmul3x3_3x1(a00, a01, a02, a10, a11, a12, a20, a21, a22, b00, b10, b20, c00, c10, c20); matmul3x3_3x1(a00, a01, a02, a10, a11, a12, a20, a21, a22, b01, b11, b21, c01, c11, c21); matmul3x3_3x1(a00, a01, a02, a10, a11, a12, a20, a21, a22, b02, b12, b22, c02, c12, c22); } inline OPEN3D_HOST_DEVICE float det3x3(float m00, float m01, float m02, float 
m10, float m11, float m12, float m20, float m21, float m22) { return m00 * (m11 * m22 - m12 * m21) - m10 * (m01 * m22 - m02 - m21) + m20 * (m01 * m12 - m02 * m11); } #if defined(__CUDACC__) void FillInSLACRegularizerTermCUDA #else void FillInSLACRegularizerTermCPU #endif (core::Tensor &AtA, core::Tensor &Atb, core::Tensor &residual, const core::Tensor &grid_idx, const core::Tensor &grid_nbs_idx, const core::Tensor &grid_nbs_mask, const core::Tensor &positions_init, const core::Tensor &positions_curr, float weight, int n_frags, int anchor_idx) { int64_t n = grid_idx.GetLength(); int64_t n_vars = Atb.GetLength(); float *AtA_ptr = static_cast<float *>(AtA.GetDataPtr()); float *Atb_ptr = static_cast<float *>(Atb.GetDataPtr()); float *residual_ptr = static_cast<float *>(residual.GetDataPtr()); const int *grid_idx_ptr = static_cast<const int *>(grid_idx.GetDataPtr()); const int *grid_nbs_idx_ptr = static_cast<const int *>(grid_nbs_idx.GetDataPtr()); const bool *grid_nbs_mask_ptr = static_cast<const bool *>(grid_nbs_mask.GetDataPtr()); const float *positions_init_ptr = static_cast<const float *>(positions_init.GetDataPtr()); const float *positions_curr_ptr = static_cast<const float *>(positions_curr.GetDataPtr()); #if defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; #endif launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(int64_t workload_idx) { // Enumerate 6 neighbors int idx_i = grid_idx_ptr[workload_idx]; const int *idx_nbs = grid_nbs_idx_ptr + 6 * workload_idx; const bool *mask_nbs = grid_nbs_mask_ptr + 6 * workload_idx; // Build a 3x3 linear system to compute the local R float cov[3][3] = {{0}}; float U[3][3], V[3][3], S[3]; int cnt = 0; for (int k = 0; k < 6; ++k) { bool mask_k = mask_nbs[k]; if (!mask_k) continue; int idx_k = idx_nbs[k]; // Now build linear systems float diff_ik_init[3] = {positions_init_ptr[idx_i * 3 + 0] - positions_init_ptr[idx_k * 3 + 0], positions_init_ptr[idx_i * 3 + 1] - 
positions_init_ptr[idx_k * 3 + 1], positions_init_ptr[idx_i * 3 + 2] - positions_init_ptr[idx_k * 3 + 2]}; float diff_ik_curr[3] = {positions_curr_ptr[idx_i * 3 + 0] - positions_curr_ptr[idx_k * 3 + 0], positions_curr_ptr[idx_i * 3 + 1] - positions_curr_ptr[idx_k * 3 + 1], positions_curr_ptr[idx_i * 3 + 2] - positions_curr_ptr[idx_k * 3 + 2]}; // Build linear system by computing XY^T when formulating Y = RX // Y: curr // X: init for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { cov[i][j] += diff_ik_init[i] * diff_ik_curr[j]; } } ++cnt; } if (cnt < 3) { return; } // clang-format off svd(cov[0][0], cov[0][1], cov[0][2], cov[1][0], cov[1][1], cov[1][2], cov[2][0], cov[2][1], cov[2][2], U[0][0], U[0][1], U[0][2], U[1][0], U[1][1], U[1][2], U[2][0], U[2][1], U[2][2], S[0], S[1], S[2], V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2]); // TODO: det3x3 and matmul3x3 float R[3][3]; // clang-format off matmul3x3_3x3(V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2], U[0][0], U[1][0], U[2][0], U[0][1], U[1][1], U[2][1], U[0][2], U[1][2], U[2][2], R[0][0], R[0][1], R[0][2], R[1][0], R[1][1], R[1][2], R[2][0], R[2][1], R[2][2]); float d = det3x3(R[0][0], R[0][1], R[0][2], R[1][0], R[1][1], R[1][2], R[2][0], R[2][1], R[2][2]); // clang-format on if (d < 0) { // clang-format off matmul3x3_3x3(V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2], U[0][0], U[1][0], U[2][0], U[0][1], U[1][1], U[2][1], -U[0][2], -U[1][2], -U[2][2], R[0][0], R[0][1], R[0][2], R[1][0], R[1][1], R[1][2], R[2][0], R[2][1], R[2][2]); // clang-format on } // Now we have R, we build Hessian and residuals // But first, we need to anchor a point if (idx_i == anchor_idx) { R[0][0] = R[1][1] = R[2][2] = 1; R[0][1] = R[0][2] = R[1][0] = R[1][2] = R[2][0] = R[2][1] = 0; } for (int k = 0; k < 6; ++k) { bool mask_k = mask_nbs[k]; if (mask_k) { int idx_k = idx_nbs[k]; float diff_ik_init[3] = { positions_init_ptr[idx_i 
* 3 + 0] - positions_init_ptr[idx_k * 3 + 0], positions_init_ptr[idx_i * 3 + 1] - positions_init_ptr[idx_k * 3 + 1], positions_init_ptr[idx_i * 3 + 2] - positions_init_ptr[idx_k * 3 + 2]}; float diff_ik_curr[3] = { positions_curr_ptr[idx_i * 3 + 0] - positions_curr_ptr[idx_k * 3 + 0], positions_curr_ptr[idx_i * 3 + 1] - positions_curr_ptr[idx_k * 3 + 1], positions_curr_ptr[idx_i * 3 + 2] - positions_curr_ptr[idx_k * 3 + 2]}; float R_diff_ik_curr[3]; // clang-format off matmul3x3_3x1(R[0][0], R[0][1], R[0][2], R[1][0], R[1][1], R[1][2], R[2][0], R[2][1], R[2][2], diff_ik_init[0], diff_ik_init[1], diff_ik_init[2], R_diff_ik_curr[0], R_diff_ik_curr[1], R_diff_ik_curr[2]); // clang-format on float local_r[3]; local_r[0] = diff_ik_curr[0] - R_diff_ik_curr[0]; local_r[1] = diff_ik_curr[1] - R_diff_ik_curr[1]; local_r[2] = diff_ik_curr[2] - R_diff_ik_curr[2]; int offset_idx_i = 3 * idx_i + 6 * n_frags; int offset_idx_k = 3 * idx_k + 6 * n_frags; #if defined(__CUDACC__) // Update residual atomicAdd(residual_ptr, weight * (local_r[0] * local_r[0] + local_r[1] * local_r[1] + local_r[2] * local_r[2])); for (int axis = 0; axis < 3; ++axis) { // Update AtA: 2x2 atomicAdd(&AtA_ptr[(offset_idx_i + axis) * n_vars + offset_idx_i + axis], weight); atomicAdd(&AtA_ptr[(offset_idx_k + axis) * n_vars + offset_idx_k + axis], weight); atomicAdd(&AtA_ptr[(offset_idx_i + axis) * n_vars + offset_idx_k + axis], -weight); atomicAdd(&AtA_ptr[(offset_idx_k + axis) * n_vars + offset_idx_i + axis], -weight); // Update Atb: 2x1 atomicAdd(&Atb_ptr[offset_idx_i + axis], +weight * local_r[axis]); atomicAdd(&Atb_ptr[offset_idx_k + axis], -weight * local_r[axis]); } #else #pragma omp critical { // Update residual *residual_ptr += weight * (local_r[0] * local_r[0] + local_r[1] * local_r[1] + local_r[2] * local_r[2]); for (int axis = 0; axis < 3; ++axis) { // Update AtA: 2x2 AtA_ptr[(offset_idx_i + axis) * n_vars + offset_idx_i + axis] += weight; AtA_ptr[(offset_idx_k + axis) * n_vars + offset_idx_k + 
axis] += weight; AtA_ptr[(offset_idx_i + axis) * n_vars + offset_idx_k + axis] -= weight; AtA_ptr[(offset_idx_k + axis) * n_vars + offset_idx_i + axis] -= weight; // Update Atb: 2x1 Atb_ptr[offset_idx_i + axis] += weight * local_r[axis]; Atb_ptr[offset_idx_k + axis] -= weight * local_r[axis]; } } #endif } } }); } } // namespace kernel } // namespace pipelines } // namespace t } // namespace open3d
GB_concat_bitmap_template.c
//------------------------------------------------------------------------------
// GB_concat_bitmap_template: concatenate a tile into a bitmap matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Template body: copies tile A into the bitmap matrix C at offset
// (cistart, cvstart), setting Cb [pC] = 1 for each copied entry.
// NOTE(review): anz, avlen, cvlen, cistart, cvstart, chunk, nthreads_max,
// GB_CTYPE, GB_COPY, GB_SLICE_MATRIX and the *_Aslice arrays are presumed to
// be supplied by the including file — confirm against the #include site.

{

    //--------------------------------------------------------------------------
    // get C and the tile A
    //--------------------------------------------------------------------------

    const GB_CTYPE *restrict Ax = (GB_CTYPE *) A->x ;
    GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
    int8_t *restrict Cb = C->b ;

    //--------------------------------------------------------------------------
    // copy the tile A into C
    //--------------------------------------------------------------------------

    switch (GB_sparsity (A))
    {

        case GxB_FULL :     // A is full
        {
            // every position pA of A exists; map (i,j) of A to (iC,jC) of C
            int A_nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
            int64_t pA ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(static)
            for (pA = 0 ; pA < anz ; pA++)
            {
                int64_t i = pA % avlen ;
                int64_t j = pA / avlen ;
                int64_t iC = cistart + i ;
                int64_t jC = cvstart + j ;
                int64_t pC = iC + jC * cvlen ;
                // Cx [pC] = Ax [pA] ;
                GB_COPY (pC, pA) ;
                Cb [pC] = 1 ;
            }
        }
        break ;

        case GxB_BITMAP :   // A is bitmap
        {
            // same as the full case, but skip positions where Ab [pA] == 0
            int A_nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
            const int8_t *restrict Ab = A->b ;
            int64_t pA ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(static)
            for (pA = 0 ; pA < anz ; pA++)
            {
                if (Ab [pA])
                {
                    int64_t i = pA % avlen ;
                    int64_t j = pA / avlen ;
                    int64_t iC = cistart + i ;
                    int64_t jC = cvstart + j ;
                    int64_t pC = iC + jC * cvlen ;
                    // Cx [pC] = Ax [pA] ;
                    GB_COPY (pC, pA) ;
                    Cb [pC] = 1 ;
                }
            }
        }
        break ;

        default :           // A is sparse or hypersparse
        {
            // slice A into tasks; each task handles vectors kfirst..klast
            int A_nthreads, A_ntasks ;
            GB_SLICE_MATRIX (A, 1, chunk) ;
            const int64_t *restrict Ap = A->p ;
            const int64_t *restrict Ah = A->h ;
            const int64_t *restrict Ai = A->i ;
            int tid ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(static)
            for (tid = 0 ; tid < A_ntasks ; tid++)
            {
                int64_t kfirst = kfirst_Aslice [tid] ;
                int64_t klast  = klast_Aslice  [tid] ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // j is the vector index (from A->h if hypersparse)
                    int64_t j = GBH (Ah, k) ;
                    int64_t jC = cvstart + j ;
                    int64_t pC_start = cistart + jC * cvlen ;
                    int64_t pA_start, pA_end ;
                    GB_get_pA (&pA_start, &pA_end, tid, k,
                        kfirst, klast, pstart_Aslice, Ap, avlen) ;
                    GB_PRAGMA_SIMD
                    for (int64_t pA = pA_start ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        int64_t pC = pC_start + i ;
                        // Cx [pC] = Ax [pA] ;
                        GB_COPY (pC, pA) ;
                        Cb [pC] = 1 ;
                    }
                }
            }
        }
        break ;
    }

    done = true ;
}

#undef GB_CTYPE
lensing.c
/** @file lensing.c Documented lensing module * * Simon Prunet and Julien Lesgourgues, 6.12.2010 * * This module computes the lensed temperature and polarization * anisotropy power spectra \f$ C_l^{X}, P(k), ... \f$'s given the * unlensed temperature, polarization and lensing potential spectra. * * Follows Challinor and Lewis full-sky method, astro-ph/0502425 * * The following functions can be called from other modules: * * -# lensing_init() at the beginning (but after spectra_init()) * -# lensing_cl_at_l() at any time for computing Cl_lensed at any l * -# lensing_free() at the end */ #include "lensing.h" #include <time.h> /** * Anisotropy power spectra C_l's for all types, modes and initial conditions. * SO FAR: ONLY SCALAR * * This routine evaluates all the lensed C_l's at a given value of l by * picking it in the pre-computed table. When relevant, it also * sums over all initial conditions for each mode, and over all modes. * * This function can be called from whatever module at whatever time, * provided that lensing_init() has been called before, and * lensing_free() has not been called yet. * * @param ple Input : pointer to lensing structure * @param l Input : multipole number * @param cl_lensed Output: lensed C_l's for all types (TT, TE, EE, etc..) 
 * @return the error status
 */

int lensing_cl_at_l(
                    struct lensing * ple,
                    int l,
                    double * cl_lensed    /* array with argument cl_lensed[index_ct] (must be already allocated) */
                    ) {

  int last_index;
  int index_lt;

  /* refuse multipoles beyond the precomputed table */
  class_test(l > ple->l_lensed_max,
             ple->error_message,
             "you asked for lensed Cls at l=%d, they were computed only up to l=%d, you should increase l_max_scalars or decrease the precision parameter delta_l_max",l,ple->l_lensed_max);

  /* spline-interpolate all lt_size spectra at this l in one call */
  class_call(array_interpolate_spline(ple->l,
                                      ple->l_size,
                                      ple->cl_lens,
                                      ple->ddcl_lens,
                                      ple->lt_size,
                                      l,
                                      &last_index,
                                      cl_lensed,
                                      ple->lt_size,
                                      ple->error_message),
             ple->error_message,
             ple->error_message);

  /* set to zero for the types whose own l_max_lt is below this l */
  for (index_lt=0; index_lt<ple->lt_size; index_lt++)
    if ((int)l > ple->l_max_lt[index_lt])
      cl_lensed[index_lt]=0.;

  return _SUCCESS_;

}

/**
 * This routine initializes the lensing structure (in particular,
 * computes table of lensed anisotropy spectra \f$ C_l^{X} \f$)
 *
 * @param ppr Input : pointer to precision structure
 * @param ppt Input : pointer to perturbation structure (just in case, not used in current version...)
* @param psp Input : pointer to spectra structure * @param pnl Input : pointer to nonlinear structure * @param ple Output: pointer to initialized lensing structure * @return the error status */ int lensing_init( struct precision * ppr, struct perturbs * ppt, struct spectra * psp, struct nonlinear * pnl, struct lensing * ple ) { /** local variables */ double * mu; /* mu[index_mu]: discretized values of mu between -1 and 1, roots of Legendre polynomial */ double * w8; /* Corresponding Gauss-Legendre quadrature weights */ double theta,delta_theta; double ** d00; /* dmn[index_mu][index_l] */ double ** d11; double ** d2m2; double ** d22; double ** d20; double ** d1m1; double ** d31; double ** d40; double ** d3m1; double ** d3m3; double ** d4m2; double ** d4m4; double * buf_dxx; /* buffer */ double * Cgl; /* Cgl[index_mu] */ double * Cgl2; /* Cgl2[index_mu] */ double * sigma2; /* sigma[index_mu] */ double * ksi; /* ksi[index_mu] */ double * ksiX; /* ksiX[index_mu] */ double * ksip; /* ksip[index_mu] */ double * ksim; /* ksim[index_mu] */ double fac,fac1; double X_000; double X_p000; double X_220; double X_022; double X_p022; double X_121; double X_132; double X_242; int num_mu,index_mu,icount; int l; double ll; double * cl_unlensed; /* cl_unlensed[index_ct] */ double * cl_tt; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */ double * cl_te; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */ double * cl_ee; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */ double * cl_bb; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */ double * cl_pp; /* potential cl, to be filled to avoid repeated calls to spectra_cl_at_l */ double res,resX,lens; double resp, resm, lensp, lensm; double * sqrt1; double * sqrt2; double * sqrt3; double * sqrt4; double * sqrt5; double ** cl_md_ic; /* array with argument cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] */ double ** cl_md; /* array with 
argument cl_md[index_md][index_ct] */ int index_md; /* Timing */ //double debut, fin; //double cpu_time; /** Summary: */ /** - check that we really want to compute at least one spectrum */ if (ple->has_lensed_cls == _FALSE_) { if (ple->lensing_verbose > 0) printf("No lensing requested. Lensing module skipped.\n"); return _SUCCESS_; } else { if (ple->lensing_verbose > 0) { printf("Computing lensed spectra "); if (ppr->accurate_lensing==_TRUE_) printf("(accurate mode)\n"); else printf("(fast mode)\n"); } } /** - initialize indices and allocate some of the arrays in the lensing structure */ class_call(lensing_indices(ppr,psp,ple), ple->error_message, ple->error_message); /** put here all precision variables; will be stored later in precision structure */ /** Last element in mu will be for mu=1, needed for sigma2 The rest will be chosen as roots of a Gauss-Legendre quadrature **/ if (ppr->accurate_lensing == _TRUE_) { num_mu=(ple->l_unlensed_max+ppr->num_mu_minus_lmax); /* Must be even ?? CHECK */ num_mu += num_mu%2; /* Force it to be even */ } else { /* Integrate correlation function difference on [0,pi/16] */ num_mu = (ple->l_unlensed_max * 2 )/16; } /** - allocate array of mu values, as well as quadrature weights */ class_alloc(mu, num_mu*sizeof(double), ple->error_message); /* Reserve last element of mu for mu=1, needed for sigma2 */ mu[num_mu-1] = 1.0; class_alloc(w8, (num_mu-1)*sizeof(double), ple->error_message); if (ppr->accurate_lensing == _TRUE_) { //debut = omp_get_wtime(); class_call(quadrature_gauss_legendre(mu, w8, num_mu-1, ppr->tol_gauss_legendre, ple->error_message), ple->error_message, ple->error_message); //fin = omp_get_wtime(); //cpu_time = (fin-debut); //printf("time in quadrature_gauss_legendre=%4.3f s\n",cpu_time); } else { /* Crude integration on [0,pi/16]: Riemann sum on theta */ delta_theta = _PI_/16. 
/ (double)(num_mu-1); for (index_mu=0;index_mu<num_mu-1;index_mu++) { theta = (index_mu+1)*delta_theta; mu[index_mu] = cos(theta); w8[index_mu] = sin(theta)*delta_theta; /* We integrate on mu */ } } /** - compute d^l_mm'(mu) */ icount = 0; class_alloc(d00, num_mu*sizeof(double*), ple->error_message); class_alloc(d11, num_mu*sizeof(double*), ple->error_message); class_alloc(d1m1, num_mu*sizeof(double*), ple->error_message); class_alloc(d2m2, num_mu*sizeof(double*), ple->error_message); icount += 4*num_mu*(ple->l_unlensed_max+1); if(ple->has_te==_TRUE_) { class_alloc(d20, num_mu*sizeof(double*), ple->error_message); class_alloc(d3m1, num_mu*sizeof(double*), ple->error_message); class_alloc(d4m2, num_mu*sizeof(double*), ple->error_message); icount += 3*num_mu*(ple->l_unlensed_max+1); } if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { class_alloc(d22, num_mu*sizeof(double*), ple->error_message); class_alloc(d31, num_mu*sizeof(double*), ple->error_message); class_alloc(d3m3, num_mu*sizeof(double*), ple->error_message); class_alloc(d40, num_mu*sizeof(double*), ple->error_message); class_alloc(d4m4, num_mu*sizeof(double*), ple->error_message); icount += 5*num_mu*(ple->l_unlensed_max+1); } icount += 5*(ple->l_unlensed_max+1); /* for arrays sqrt1[l] to sqrt5[l] */ /** Allocate main contiguous buffer **/ class_alloc(buf_dxx, icount * sizeof(double), ple->error_message); icount = 0; for (index_mu=0; index_mu<num_mu; index_mu++) { d00[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]); d11[index_mu] = &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]); d1m1[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]); d2m2[index_mu]= &(buf_dxx[icount+(index_mu+3*num_mu) * (ple->l_unlensed_max+1)]); } icount += 4*num_mu*(ple->l_unlensed_max+1); if (ple->has_te==_TRUE_) { for (index_mu=0; index_mu<num_mu; index_mu++) { d20[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]); d3m1[index_mu]= 
&(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]); d4m2[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]); } icount += 3*num_mu*(ple->l_unlensed_max+1); } if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { for (index_mu=0; index_mu<num_mu; index_mu++) { d22[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]); d31[index_mu] = &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]); d3m3[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]); d40[index_mu] = &(buf_dxx[icount+(index_mu+3*num_mu) * (ple->l_unlensed_max+1)]); d4m4[index_mu]= &(buf_dxx[icount+(index_mu+4*num_mu) * (ple->l_unlensed_max+1)]); } icount += 5*num_mu*(ple->l_unlensed_max+1); } sqrt1 = &(buf_dxx[icount]); icount += ple->l_unlensed_max+1; sqrt2 = &(buf_dxx[icount]); icount += ple->l_unlensed_max+1; sqrt3 = &(buf_dxx[icount]); icount += ple->l_unlensed_max+1; sqrt4 = &(buf_dxx[icount]); icount += ple->l_unlensed_max+1; sqrt5 = &(buf_dxx[icount]); icount += ple->l_unlensed_max+1; //debut = omp_get_wtime(); class_call(lensing_d00(mu,num_mu,ple->l_unlensed_max,d00), ple->error_message, ple->error_message); class_call(lensing_d11(mu,num_mu,ple->l_unlensed_max,d11), ple->error_message, ple->error_message); class_call(lensing_d1m1(mu,num_mu,ple->l_unlensed_max,d1m1), ple->error_message, ple->error_message); class_call(lensing_d2m2(mu,num_mu,ple->l_unlensed_max,d2m2), ple->error_message, ple->error_message); //fin = omp_get_wtime(); //cpu_time = (fin-debut); //printf("time in lensing_dxx=%4.3f s\n",cpu_time); if (ple->has_te==_TRUE_) { class_call(lensing_d20(mu,num_mu,ple->l_unlensed_max,d20), ple->error_message, ple->error_message); class_call(lensing_d3m1(mu,num_mu,ple->l_unlensed_max,d3m1), ple->error_message, ple->error_message); class_call(lensing_d4m2(mu,num_mu,ple->l_unlensed_max,d4m2), ple->error_message, ple->error_message); } if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { 
class_call(lensing_d22(mu,num_mu,ple->l_unlensed_max,d22), ple->error_message, ple->error_message); class_call(lensing_d31(mu,num_mu,ple->l_unlensed_max,d31), ple->error_message, ple->error_message); class_call(lensing_d3m3(mu,num_mu,ple->l_unlensed_max,d3m3), ple->error_message, ple->error_message); class_call(lensing_d40(mu,num_mu,ple->l_unlensed_max,d40), ple->error_message, ple->error_message); class_call(lensing_d4m4(mu,num_mu,ple->l_unlensed_max,d4m4), ple->error_message, ple->error_message); } /** - compute Cgl(mu), Cgl2(mu) and sigma2(mu) */ class_alloc(Cgl, num_mu*sizeof(double), ple->error_message); class_alloc(Cgl2, num_mu*sizeof(double), ple->error_message); class_alloc(sigma2, (num_mu-1)*sizeof(double), /* Zero separation is omitted */ ple->error_message); class_alloc(cl_unlensed, psp->ct_size*sizeof(double), ple->error_message); /** Locally store unlensed temperature cl_tt and potential cl_pp spectra **/ class_alloc(cl_tt, (ple->l_unlensed_max+1)*sizeof(double), ple->error_message); if (ple->has_te==_TRUE_) { class_alloc(cl_te, (ple->l_unlensed_max+1)*sizeof(double), ple->error_message); } if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { class_alloc(cl_ee, (ple->l_unlensed_max+1)*sizeof(double), ple->error_message); class_alloc(cl_bb, (ple->l_unlensed_max+1)*sizeof(double), ple->error_message); } class_alloc(cl_pp, (ple->l_unlensed_max+1)*sizeof(double), ple->error_message); class_alloc(cl_md_ic, psp->md_size*sizeof(double *), ple->error_message); class_alloc(cl_md, psp->md_size*sizeof(double *), ple->error_message); for (index_md = 0; index_md < psp->md_size; index_md++) { if (psp->md_size > 1) class_alloc(cl_md[index_md], psp->ct_size*sizeof(double), ple->error_message); if (psp->ic_size[index_md] > 1) class_alloc(cl_md_ic[index_md], psp->ic_ic_size[index_md]*psp->ct_size*sizeof(double), ple->error_message); } for (l=2; l<=ple->l_unlensed_max; l++) { class_call(spectra_cl_at_l(psp,l,cl_unlensed,cl_md,cl_md_ic), psp->error_message, 
ple->error_message); cl_tt[l] = cl_unlensed[ple->index_lt_tt]; cl_pp[l] = cl_unlensed[ple->index_lt_pp]; if (ple->has_te==_TRUE_) { cl_te[l] = cl_unlensed[ple->index_lt_te]; } if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { cl_ee[l] = cl_unlensed[ple->index_lt_ee]; cl_bb[l] = cl_unlensed[ple->index_lt_bb]; } } for (index_md = 0; index_md < psp->md_size; index_md++) { if (psp->md_size > 1) free(cl_md[index_md]); if (psp->ic_size[index_md] > 1) free(cl_md_ic[index_md]); } free(cl_md_ic); free(cl_md); /** Compute sigma2(mu) and Cgl2(mu) **/ //debut = omp_get_wtime(); #pragma omp parallel for \ private (index_mu,l) \ schedule (static) for (index_mu=0; index_mu<num_mu; index_mu++) { Cgl[index_mu]=0; Cgl2[index_mu]=0; for (l=2; l<=ple->l_unlensed_max; l++) { Cgl[index_mu] += (2.*l+1.)*l*(l+1.)* cl_pp[l]*d11[index_mu][l]; Cgl2[index_mu] += (2.*l+1.)*l*(l+1.)* cl_pp[l]*d1m1[index_mu][l]; } Cgl[index_mu] /= 4.*_PI_; Cgl2[index_mu] /= 4.*_PI_; } for (index_mu=0; index_mu<num_mu-1; index_mu++) { /* Cgl(1.0) - Cgl(mu) */ sigma2[index_mu] = Cgl[num_mu-1] - Cgl[index_mu]; } //fin = omp_get_wtime(); //cpu_time = (fin-debut); //printf("time in Cgl,Cgl2,sigma2=%4.3f s\n",cpu_time); /** - compute ksi, ksi+, ksi-, ksiX */ /** ksi is for TT **/ if (ple->has_tt==_TRUE_) { class_calloc(ksi, (num_mu-1), sizeof(double), ple->error_message); } /** ksiX is for TE **/ if (ple->has_te==_TRUE_) { class_calloc(ksiX, (num_mu-1), sizeof(double), ple->error_message); } /** ksip, ksim for EE, BB **/ if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { class_calloc(ksip, (num_mu-1), sizeof(double), ple->error_message); class_calloc(ksim, (num_mu-1), sizeof(double), ple->error_message); } for (l=2;l<=ple->l_unlensed_max;l++) { ll = (double)l; sqrt1[l]=sqrt((ll+2)*(ll+1)*ll*(ll-1)); sqrt2[l]=sqrt((ll+2)*(ll-1)); sqrt3[l]=sqrt((ll+3)*(ll-2)); sqrt4[l]=sqrt((ll+4)*(ll+3)*(ll-2.)*(ll-3)); sqrt5[l]=sqrt(ll*(ll+1)); } //debut = omp_get_wtime(); #pragma omp parallel for \ private 
(index_mu,l,ll,res,resX,resp,resm,lens,lensp,lensm, \ fac,fac1,X_000,X_p000,X_220,X_022,X_p022,X_121,X_132,X_242) \ schedule (static) for (index_mu=0;index_mu<num_mu-1;index_mu++) { for (l=2;l<=ple->l_unlensed_max;l++) { ll = (double)l; fac = ll*(ll+1)/4.; fac1 = (2*ll+1)/(4.*_PI_); /* In the following we will keep terms of the form (sigma2)^k*(Cgl2)^m with k+m <= 2 */ X_000 = exp(-fac*sigma2[index_mu]); X_p000 = -fac*X_000; /* X_220 = 0.25*sqrt1[l] * exp(-(fac-0.5)*sigma2[index_mu]); */ X_220 = 0.25*sqrt1[l] * X_000; /* Order 0 */ /* next 5 lines useless, but avoid compilator warning 'may be used uninitialized' */ X_242=0.; X_132=0.; X_121=0.; X_p022=0.; X_022=0.; if (ple->has_te==_TRUE_ || ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { /* X_022 = exp(-(fac-1.)*sigma2[index_mu]); */ X_022 = X_000 * (1+sigma2[index_mu]*(1+0.5*sigma2[index_mu])); /* Order 2 */ X_p022 = (fac-1.)*X_022; /* X_242 = 0.25*sqrt4[l] * exp(-(fac-5./2.)*sigma2[index_mu]); */ X_242 = 0.25*sqrt4[l] * X_000; /* Order 0 */ if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { /* X_121 = - 0.5*sqrt2[l] * exp(-(fac-2./3.)*sigma2[index_mu]); X_132 = - 0.5*sqrt3[l] * exp(-(fac-5./3.)*sigma2[index_mu]); */ X_121 = -0.5*sqrt2[l] * X_000 * (1+2./3.*sigma2[index_mu]); /* Order 1 */ X_132 = -0.5*sqrt3[l] * X_000 * (1+5./3.*sigma2[index_mu]); /* Order 1 */ } } if (ple->has_tt==_TRUE_) { res = fac1*cl_tt[l]; lens = (X_000*X_000*d00[index_mu][l] + X_p000*X_p000*d1m1[index_mu][l] *Cgl2[index_mu]*8./(ll*(ll+1)) + (X_p000*X_p000*d00[index_mu][l] + X_220*X_220*d2m2[index_mu][l]) *Cgl2[index_mu]*Cgl2[index_mu]); if (ppr->accurate_lensing == _FALSE_) { /* Remove unlensed correlation function */ lens -= d00[index_mu][l]; } res *= lens; ksi[index_mu] += res; } if (ple->has_te==_TRUE_) { resX = fac1*cl_te[l]; lens = ( X_022*X_000*d20[index_mu][l] + Cgl2[index_mu]*2.*X_p000/sqrt5[l] * (X_121*d11[index_mu][l] + X_132*d3m1[index_mu][l]) + 0.5 * Cgl2[index_mu] * Cgl2[index_mu] * ( ( 2.*X_p022*X_p000+X_220*X_220 ) * 
d20[index_mu][l] + X_220*X_242*d4m2[index_mu][l] ) ); if (ppr->accurate_lensing == _FALSE_) { lens -= d20[index_mu][l]; } resX *= lens; ksiX[index_mu] += resX; } if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { resp = fac1*(cl_ee[l]+cl_bb[l]); resm = fac1*(cl_ee[l]-cl_bb[l]); lensp = ( X_022*X_022*d22[index_mu][l] + 2.*Cgl2[index_mu]*X_132*X_121*d31[index_mu][l] + Cgl2[index_mu]*Cgl2[index_mu] * ( X_p022*X_p022*d22[index_mu][l] + X_242*X_220*d40[index_mu][l] ) ); lensm = ( X_022*X_022*d2m2[index_mu][l] + Cgl2[index_mu] * ( X_121*X_121*d1m1[index_mu][l] + X_132*X_132*d3m3[index_mu][l] ) + 0.5 * Cgl2[index_mu] * Cgl2[index_mu] * ( 2.*X_p022*X_p022*d2m2[index_mu][l] + X_220*X_220*d00[index_mu][l] + X_242*X_242*d4m4[index_mu][l] ) ); if (ppr->accurate_lensing == _FALSE_) { lensp -= d22[index_mu][l]; lensm -= d2m2[index_mu][l]; } resp *= lensp; resm *= lensm; ksip[index_mu] += resp; ksim[index_mu] += resm; } } } //fin = omp_get_wtime(); //cpu_time = (fin-debut); //printf("time in ksi=%4.3f s\n",cpu_time); /** - compute lensed Cls by integration */ //debut = omp_get_wtime(); if (ple->has_tt==_TRUE_) { class_call(lensing_lensed_cl_tt(ksi,d00,w8,num_mu-1,ple), ple->error_message, ple->error_message); if (ppr->accurate_lensing == _FALSE_) { class_call(lensing_addback_cl_tt(ple,cl_tt), ple->error_message, ple->error_message); } } if (ple->has_te==_TRUE_) { class_call(lensing_lensed_cl_te(ksiX,d20,w8,num_mu-1,ple), ple->error_message, ple->error_message); if (ppr->accurate_lensing == _FALSE_) { class_call(lensing_addback_cl_te(ple,cl_te), ple->error_message, ple->error_message); } } if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { class_call(lensing_lensed_cl_ee_bb(ksip,ksim,d22,d2m2,w8,num_mu-1,ple), ple->error_message, ple->error_message); if (ppr->accurate_lensing == _FALSE_) { class_call(lensing_addback_cl_ee_bb(ple,cl_ee,cl_bb), ple->error_message, ple->error_message); } } //fin=omp_get_wtime(); //cpu_time = (fin-debut); //printf("time in final lensing 
computation=%4.3f s\n",cpu_time); /** - spline computed Cls in view of interpolation */ class_call(array_spline_table_lines(ple->l, ple->l_size, ple->cl_lens, ple->lt_size, ple->ddcl_lens, _SPLINE_EST_DERIV_, ple->error_message), ple->error_message, ple->error_message); /** Free lots of stuff **/ free(buf_dxx); free(d00); free(d11); free(d1m1); free(d2m2); if (ple->has_te==_TRUE_) { free(d20); free(d3m1); free(d4m2); } if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { free(d22); free(d31); free(d3m3); free(d40); free(d4m4); } if (ple->has_tt==_TRUE_) free(ksi); if (ple->has_te==_TRUE_) free(ksiX); if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { free(ksip); free(ksim); } free(Cgl); free(Cgl2); free(sigma2); free(mu); free(w8); free(cl_unlensed); free(cl_tt); if (ple->has_te==_TRUE_) free(cl_te); if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) { free(cl_ee); free(cl_bb); } free(cl_pp); /** Exits **/ return _SUCCESS_; } /** * This routine frees all the memory space allocated by lensing_init(). * * To be called at the end of each run, only when no further calls to * lensing_cl_at_l() are needed. 
*
 * @param ple Input: pointer to lensing structure (which fields must be freed)
 * @return the error status
 */

int lensing_free(
                 struct lensing * ple
                 ) {

  if (ple->has_lensed_cls == _TRUE_) {
    /* these four tables are allocated in lensing_indices() */
    free(ple->l);
    free(ple->cl_lens);
    free(ple->ddcl_lens);
    free(ple->l_max_lt);
  }

  return _SUCCESS_;
}

/**
 * This routine defines indices and allocates tables in the lensing structure
 *
 * @param ppr Input : pointer to precision structure
 * @param psp Input : pointer to spectra structure
 * @param ple Input/output: pointer to lensing structure
 * @return the error status
 */

int lensing_indices(
                    struct precision * ppr,
                    struct spectra * psp,
                    struct lensing * ple
                    ){

  int index_l;

  double ** cl_md_ic; /* array with argument cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] */
  double ** cl_md;    /* array with argument cl_md[index_md][index_ct] */

  int index_md;
  int index_lt;

  /* indices of all Cl types (lensed and unlensed): copy each flag and
     index from the spectra module so the lensing module is self-contained */

  if (psp->has_tt == _TRUE_) {
    ple->has_tt = _TRUE_;
    ple->index_lt_tt=psp->index_ct_tt;
  }
  else {
    ple->has_tt = _FALSE_;
  }

  if (psp->has_ee == _TRUE_) {
    ple->has_ee = _TRUE_;
    ple->index_lt_ee=psp->index_ct_ee;
  }
  else {
    ple->has_ee = _FALSE_;
  }

  if (psp->has_te == _TRUE_) {
    ple->has_te = _TRUE_;
    ple->index_lt_te=psp->index_ct_te;
  }
  else {
    ple->has_te = _FALSE_;
  }

  if (psp->has_bb == _TRUE_) {
    ple->has_bb = _TRUE_;
    ple->index_lt_bb=psp->index_ct_bb;
  }
  else {
    ple->has_bb = _FALSE_;
  }

  if (psp->has_pp == _TRUE_) {
    ple->has_pp = _TRUE_;
    ple->index_lt_pp=psp->index_ct_pp;
  }
  else {
    ple->has_pp = _FALSE_;
  }

  if (psp->has_tp == _TRUE_) {
    ple->has_tp = _TRUE_;
    ple->index_lt_tp=psp->index_ct_tp;
  }
  else {
    ple->has_tp = _FALSE_;
  }

  if (psp->has_dd == _TRUE_) {
    ple->has_dd = _TRUE_;
    ple->index_lt_dd=psp->index_ct_dd;
  }
  else {
    ple->has_dd = _FALSE_;
  }

  if (psp->has_td == _TRUE_) {
    ple->has_td = _TRUE_;
    ple->index_lt_td=psp->index_ct_td;
  }
  else {
    ple->has_td = _FALSE_;
  }

  if (psp->has_ll == _TRUE_) {
    ple->has_ll = _TRUE_;
    ple->index_lt_ll=psp->index_ct_ll;
  }
  else {
    ple->has_ll = _FALSE_;
  }

  if (psp->has_tl == _TRUE_) {
    ple->has_tl = _TRUE_;
    ple->index_lt_tl=psp->index_ct_tl;
  }
  else {
    ple->has_tl = _FALSE_;
  }

  ple->lt_size = psp->ct_size;

  /* number of multipoles */

  ple->l_unlensed_max = psp->l_max_tot;
  ple->l_lensed_max = ple->l_unlensed_max - ppr->delta_l_max;

  /* empty-body scan: advance index_l past all sampled multipoles <= l_lensed_max */
  for (index_l=0; (index_l < psp->l_size_max) && (psp->l[index_l] <= ple->l_lensed_max); index_l++);

  if (index_l < psp->l_size_max) index_l++; /* one more point in order to be able to interpolate till ple->l_lensed_max */

  /* NOTE(review): if every psp->l[] value were <= l_lensed_max, the scan
     exits with index_l == psp->l_size_max and l_size becomes l_size_max+1,
     so the copy loop below would read psp->l one past its end. In practice
     psp->l extends beyond l_lensed_max (= l_max_tot - delta_l_max) — confirm. */
  ple->l_size = index_l+1;

  class_alloc(ple->l,ple->l_size*sizeof(double),ple->error_message);

  for (index_l=0; index_l < ple->l_size; index_l++) {
    ple->l[index_l] = psp->l[index_l];
  }

  /* allocate table where results will be stored */

  class_alloc(ple->cl_lens,
              ple->l_size*ple->lt_size*sizeof(double),
              ple->error_message);

  class_alloc(ple->ddcl_lens,
              ple->l_size*ple->lt_size*sizeof(double),
              ple->error_message);

  /* fill with unlensed cls */

  class_alloc(cl_md_ic,
              psp->md_size*sizeof(double *),
              ple->error_message);

  class_alloc(cl_md,
              psp->md_size*sizeof(double *),
              ple->error_message);

  for (index_md = 0; index_md < psp->md_size; index_md++) {

    if (psp->md_size > 1)
      class_alloc(cl_md[index_md],
                  psp->ct_size*sizeof(double),
                  ple->error_message);

    if (psp->ic_size[index_md] > 1)
      class_alloc(cl_md_ic[index_md],
                  psp->ic_ic_size[index_md]*psp->ct_size*sizeof(double),
                  ple->error_message);
  }

  /* initialize cl_lens with the unlensed spectra at the sampled multipoles */
  for (index_l=0; index_l<ple->l_size; index_l++) {
    class_call(spectra_cl_at_l(psp,ple->l[index_l],&(ple->cl_lens[index_l*ple->lt_size]),cl_md,cl_md_ic),
               psp->error_message,
               ple->error_message);
  }

  for (index_md = 0; index_md < psp->md_size; index_md++) {
    if (psp->md_size > 1)
      free(cl_md[index_md]);
    if (psp->ic_size[index_md] > 1)
      free(cl_md_ic[index_md]);
  }

  free(cl_md_ic);
  free(cl_md);

  /* we want to output Cl_lensed up to the same l_max as Cl_unlensed
     (even if a number delta_l_max of extra values of l have been used
     internally for more accurate results).

     Notable exception to the above rule: ClBB_lensed(scalars) must be
     output at least up to the same l_max as ClEE_unlensed(scalars)
     (since ClBB_unlensed is null for scalars) */

  class_alloc(ple->l_max_lt,ple->lt_size*sizeof(double),ple->error_message);
  for (index_lt = 0; index_lt < ple->lt_size; index_lt++) {
    ple->l_max_lt[index_lt]=0.;
    for (index_md = 0; index_md < psp->md_size; index_md++) {
      ple->l_max_lt[index_lt]=MAX(ple->l_max_lt[index_lt],psp->l_max_ct[index_md][index_lt]);

      if ((ple->has_bb == _TRUE_) && (ple->has_ee == _TRUE_) && (index_lt == ple->index_lt_bb)) {
        ple->l_max_lt[index_lt]=MAX(ple->l_max_lt[index_lt],psp->l_max_ct[index_md][ple->index_lt_ee]);
      }

    }
  }

  return _SUCCESS_;
}

/**
 * This routine computes the lensed power spectra by Gaussian quadrature
 *
 * @param ksi  Input : Lensed correlation function (ksi[index_mu])
 * @param d00  Input : Legendre polynomials (d^l_{00}[l][index_mu])
 * @param w8   Input : Legendre quadrature weights (w8[index_mu])
 * @param nmu  Input : Number of quadrature points (0<=index_mu<=nmu)
 * @param ple  Input/output: Pointer to the lensing structure
 * @return the error status
 */

int lensing_lensed_cl_tt(
                         double *ksi,
                         double **d00,
                         double *w8,
                         int nmu,
                         struct lensing * ple
                         ) {

  double cle;
  int imu;
  int index_l;

  /** Integration by Gauss-Legendre quadrature **/

#pragma omp parallel for                        \
  private (imu,index_l,cle)                     \
  schedule (static)

  for(index_l=0; index_l<ple->l_size; index_l++){
    cle=0;
    for (imu=0;imu<nmu;imu++) {
      cle += ksi[imu]*d00[imu][(int)ple->l[index_l]]*w8[imu]; /* loop could be optimized */
    }
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_tt]=cle*2.0*_PI_;
  }

  return _SUCCESS_;
}

/**
 * This routine adds back the unlensed cl_tt power spectrum
 * Used in case of fast (and BB inaccurate) integration of
 * correlation functions.
* * @param ple Input/output: Pointer to the lensing structure * @param cl_tt Input : Array of unlensed power spectrum * @return the error status */ int lensing_addback_cl_tt( struct lensing * ple, double *cl_tt) { int index_l, l; for (index_l=0; index_l<ple->l_size; index_l++) { l = (int)ple->l[index_l]; ple->cl_lens[index_l*ple->lt_size+ple->index_lt_tt] += cl_tt[l]; } return _SUCCESS_; } /** * This routine computes the lensed power spectra by Gaussian quadrature * * @param ksiX Input : Lensed correlation function (ksiX[index_mu]) * @param d20 Input : Wigner d-function (d^l_{20}[l][index_mu]) * @param w8 Input : Legendre quadrature weights (w8[index_mu]) * @param nmu Input : Number of quadrature points (0<=index_mu<=nmu) * @param ple Input/output: Pointer to the lensing structure * @return the error status */ int lensing_lensed_cl_te( double *ksiX, double **d20, double *w8, int nmu, struct lensing * ple ) { double clte; int imu; int index_l; /** Integration by Gauss-Legendre quadrature **/ #pragma omp parallel for \ private (imu,index_l,clte) \ schedule (static) for(index_l=0; index_l < ple->l_size; index_l++){ clte=0; for (imu=0;imu<nmu;imu++) { clte += ksiX[imu]*d20[imu][(int)ple->l[index_l]]*w8[imu]; /* loop could be optimized */ } ple->cl_lens[index_l*ple->lt_size+ple->index_lt_te]=clte*2.0*_PI_; } return _SUCCESS_; } /** * This routine adds back the unlensed cl_te power spectrum * Used in case of fast (and BB inaccurate) integration of * correlation functions. 
* * @param ple Input/output: Pointer to the lensing structure * @param cl_te Input : Array of unlensed power spectrum * @return the error status */ int lensing_addback_cl_te( struct lensing * ple, double *cl_te) { int index_l, l; for (index_l=0; index_l<ple->l_size; index_l++) { l = (int)ple->l[index_l]; ple->cl_lens[index_l*ple->lt_size+ple->index_lt_te] += cl_te[l]; } return _SUCCESS_; } /** * This routine computes the lensed power spectra by Gaussian quadrature * * @param ksip Input : Lensed correlation function (ksi+[index_mu]) * @param ksim Input : Lensed correlation function (ksi-[index_mu]) * @param d22 Input : Wigner d-function (d^l_{22}[l][index_mu]) * @param d2m2 Input : Wigner d-function (d^l_{2-2}[l][index_mu]) * @param w8 Input : Legendre quadrature weights (w8[index_mu]) * @param nmu Input : Number of quadrature points (0<=index_mu<=nmu) * @param ple Input/output: Pointer to the lensing structure * @return the error status */ int lensing_lensed_cl_ee_bb( double *ksip, double *ksim, double **d22, double **d2m2, double *w8, int nmu, struct lensing * ple ) { double clp, clm; int imu; int index_l; /** Integration by Gauss-Legendre quadrature **/ #pragma omp parallel for \ private (imu,index_l,clp,clm) \ schedule (static) for(index_l=0; index_l < ple->l_size; index_l++){ clp=0; clm=0; for (imu=0;imu<nmu;imu++) { clp += ksip[imu]*d22[imu][(int)ple->l[index_l]]*w8[imu]; /* loop could be optimized */ clm += ksim[imu]*d2m2[imu][(int)ple->l[index_l]]*w8[imu]; /* loop could be optimized */ } ple->cl_lens[index_l*ple->lt_size+ple->index_lt_ee]=(clp+clm)*_PI_; ple->cl_lens[index_l*ple->lt_size+ple->index_lt_bb]=(clp-clm)*_PI_; } return _SUCCESS_; } /** * This routine adds back the unlensed cl_ee, cl_bb power spectra * Used in case of fast (and BB inaccurate) integration of * correlation functions. 
* * @param ple Input/output: Pointer to the lensing structure * @param cl_ee Input : Array of unlensed power spectrum * @param cl_bb Input : Array of unlensed power spectrum * @return the error status */ int lensing_addback_cl_ee_bb( struct lensing * ple, double * cl_ee, double * cl_bb) { int index_l, l; for (index_l=0; index_l<ple->l_size; index_l++) { l = (int)ple->l[index_l]; ple->cl_lens[index_l*ple->lt_size+ple->index_lt_ee] += cl_ee[l]; ple->cl_lens[index_l*ple->lt_size+ple->index_lt_bb] += cl_bb[l]; } return _SUCCESS_; } /** * This routine computes the d00 term * * @param mu Input : Vector of cos(beta) values * @param num_mu Input : Number of cos(beta) values * @param lmax Input : maximum multipole * @param d00 Input/output: Result is stored here * * Wigner d-functions, computed by recurrence * actual recurrence on sqrt((2l+1)/2) d^l_{mm'} for stability * Formulae from Kostelec & Rockmore 2003 **/ int lensing_d00( double * mu, int num_mu, int lmax, double ** d00 ) { double ll, dlm1, dl, dlp1; int index_mu, l; double *fac1, *fac2, *fac3; ErrorMsg erreur; class_alloc(fac1,lmax*sizeof(double),erreur); class_alloc(fac2,lmax*sizeof(double),erreur); class_alloc(fac3,lmax*sizeof(double),erreur); for (l=1; l<lmax; l++) { ll = (double) l; fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(2*ll+1)/(ll+1); fac2[l] = sqrt((2*ll+3)/(2*ll-1))*ll/(ll+1); fac3[l] = sqrt(2./(2*ll+3)); } #pragma omp parallel for \ private (index_mu,dlm1,dl,dlp1,l,ll) \ schedule (static) for (index_mu=0;index_mu<num_mu;index_mu++) { dlm1=1.0/sqrt(2.); /* l=0 */ d00[index_mu][0]=dlm1*sqrt(2.); dl=mu[index_mu] * sqrt(3./2.); /*l=1*/ d00[index_mu][1]=dl*sqrt(2./3.); for(l=1;l<lmax;l++){ ll=(double) l; /* sqrt((2l+1)/2)*d00 recurrence, supposed to be more stable */ dlp1 = fac1[l]*mu[index_mu]*dl - fac2[l]*dlm1; d00[index_mu][l+1] = dlp1 * fac3[l]; dlm1 = dl; dl = dlp1; } } free(fac1); free(fac2); free(fac3); return _SUCCESS_; } /** * This routine computes the d11 term * * @param mu Input : Vector of cos(beta) 
values
 * @param num_mu Input : Number of cos(beta) values
 * @param lmax   Input : maximum multipole
 * @param d11    Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on sqrt((2l+1)/2) d^l_{mm'} for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d11(
                double * mu,
                int num_mu,
                int lmax,
                double ** d11
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur; /* error buffer consumed by class_alloc on failure */

  /* fac1..fac3: l-dependent recurrence coefficients shared by all mu values;
     fac4 undoes the sqrt((2l+1)/2) rescaling when storing the result */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/(ll*(ll+2));
    fac2[l] = 1.0/(ll*(ll+1.));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-1)*(ll+1)/(ll*(ll+2))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d11[index_mu][0]=0;
    /* explicit l=1 and l=2 values seed the upward recurrence */
    dlm1=(1.0+mu[index_mu])/2. * sqrt(3./2.); /*l=1*/
    d11[index_mu][1]=dlm1 * sqrt(2./3.);
    dl=(1.0+mu[index_mu])/2.*(2.0*mu[index_mu]-1.0) * sqrt(5./2.); /*l=2*/
    d11[index_mu][2] = dl * sqrt(2./5.);
    for(l=2;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d11 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
      d11[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d1m1 term
 *
 * @param mu     Input : Vector of cos(beta) values
 * @param num_mu Input : Number of cos(beta) values
 * @param lmax   Input : maximum multipole
 * @param d1m1   Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on sqrt((2l+1)/2) d^l_{mm'} for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d1m1(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d1m1
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur; /* error buffer consumed by class_alloc on failure */

  /* same coefficients as d11; only the seeds and the sign of fac2 in the
     recurrence differ (m'=-1 flips the sign of the m*m'/(l(l+1)) term) */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/(ll*(ll+2));
    fac2[l] = 1.0/(ll*(ll+1.));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-1)*(ll+1)/(ll*(ll+2))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d1m1[index_mu][0]=0;
    dlm1=(1.0-mu[index_mu])/2. * sqrt(3./2.); /*l=1*/
    d1m1[index_mu][1]=dlm1 * sqrt(2./3.);
    dl=(1.0-mu[index_mu])/2.*(2.0*mu[index_mu]+1.0) * sqrt(5./2.); /*l=2*/
    d1m1[index_mu][2] = dl * sqrt(2./5.);
    for(l=2;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d1m1 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d1m1[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d2m2 term
 *
 * @param mu     Input : Vector of cos(beta) values
 * @param num_mu Input : Number of cos(beta) values
 * @param lmax   Input : maximum multipole
 * @param d2m2   Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on sqrt((2l+1)/2) d^l_{mm'} for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d2m2(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d2m2
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur; /* error buffer consumed by class_alloc on failure */

  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/((ll-1)*(ll+3));
    fac2[l] = 4.0/(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-2)*(ll+2)/((ll-1)*(ll+3))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    /* d2m2 vanishes identically for l<2; recurrence starts at l=2 */
    d2m2[index_mu][0]=0;
    dlm1=0.; /*l=1*/
    d2m2[index_mu][1]=0;
    dl=(1.0-mu[index_mu])*(1.0-mu[index_mu])/4. * sqrt(5./2.); /*l=2*/
    d2m2[index_mu][2] = dl * sqrt(2./5.);
    for(l=2;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d2m2 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d2m2[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d22 term
 *
 * @param mu     Input : Vector of cos(beta) values
 * @param num_mu Input : Number of cos(beta) values
 * @param lmax   Input : maximum multipole
 * @param d22    Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on sqrt((2l+1)/2) d^l_{mm'} for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d22(
                double * mu,
                int num_mu,
                int lmax,
                double ** d22
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur; /* error buffer consumed by class_alloc on failure */

  /* same coefficients as d2m2; the seed and the sign in front of fac2 differ */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/((ll-1)*(ll+3));
    fac2[l] = 4.0/(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-2)*(ll+2)/((ll-1)*(ll+3))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d22[index_mu][0]=0;
    dlm1=0.; /*l=1*/
    d22[index_mu][1]=0;
    dl=(1.0+mu[index_mu])*(1.0+mu[index_mu])/4. * sqrt(5./2.); /*l=2*/
    d22[index_mu][2] = dl * sqrt(2./5.);
    for(l=2;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d22 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
      d22[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d20 term
 *
 * @param mu     Input : Vector of cos(beta) values
 * @param num_mu Input : Number of cos(beta) values
 * @param lmax   Input : maximum multipole
 * @param d20    Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on sqrt((2l+1)/2) d^l_{mm'} for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d20(
                double * mu,
                int num_mu,
                int lmax,
                double ** d20
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac3, *fac4; /* no fac2: m'=0 kills the m*m'/(l(l+1)) term */
  ErrorMsg erreur; /* error buffer consumed by class_alloc on failure */

  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-1)*(ll+3)));
    fac3[l] = sqrt((2*ll+3)*(ll-2)*(ll+2)/((2*ll-1)*(ll-1)*(ll+3)));
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d20[index_mu][0]=0;
    dlm1=0.; /*l=1*/
    d20[index_mu][1]=0;
    dl=sqrt(15.)/4.*(1-mu[index_mu]*mu[index_mu]); /*l=2*/
    d20[index_mu][2] = dl * sqrt(2./5.);
    for(l=2;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d20 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*mu[index_mu]*dl - fac3[l]*dlm1;
      d20[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d31 term
 *
 * @param mu     Input : Vector of cos(beta) values
 * @param num_mu Input : Number of cos(beta) values
 * @param lmax   Input : maximum multipole
 * @param d31    Input/output: Result is stored here
 *
 *
Wigner d-functions, computed by recurrence
 * actual recurrence on sqrt((2l+1)/2) d^l_{mm'} for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d31(
                double * mu,
                int num_mu,
                int lmax,
                double ** d31
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur; /* error buffer consumed by class_alloc on failure */

  /* l-dependent recurrence coefficients, precomputed once for all mu values;
     fac4 undoes the sqrt((2l+1)/2) rescaling when storing the result */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=3;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-2)*(ll+4)*ll*(ll+2))) * (ll+1);
    fac2[l] = 3.0/(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1)*(ll-3)*(ll+3)*(ll-1)*(ll+1)/((ll-2)*(ll+4)*ll*(ll+2)))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    /* d31 vanishes identically for l<3; recurrence starts at l=3 */
    d31[index_mu][0]=0;
    d31[index_mu][1]=0;
    dlm1=0.; /*l=2*/
    d31[index_mu][2]=0;
    dl=sqrt(105./2.)*(1+mu[index_mu])*(1+mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
    d31[index_mu][3] = dl * sqrt(2./7.);
    for(l=3;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d31 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
      d31[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d3m1 term
 *
 * @param mu     Input : Vector of cos(beta) values
 * @param num_mu Input : Number of cos(beta) values
 * @param lmax   Input : maximum multipole
 * @param d3m1   Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on sqrt((2l+1)/2) d^l_{mm'} for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d3m1(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d3m1
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur; /* error buffer consumed by class_alloc on failure */

  /* same coefficients as d31; only the seed and the sign of fac2 differ */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=3;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-2)*(ll+4)*ll*(ll+2))) * (ll+1);
    fac2[l] = 3.0/(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1)*(ll-3)*(ll+3)*(ll-1)*(ll+1)/((ll-2)*(ll+4)*ll*(ll+2)))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d3m1[index_mu][0]=0;
    d3m1[index_mu][1]=0;
    dlm1=0.; /*l=2*/
    d3m1[index_mu][2]=0;
    dl=sqrt(105./2.)*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
    d3m1[index_mu][3] = dl * sqrt(2./7.);
    for(l=3;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d3m1 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d3m1[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d3m3 term
 *
 * @param mu     Input : Vector of cos(beta) values
 * @param num_mu Input : Number of cos(beta) values
 * @param lmax   Input : maximum multipole
 * @param d3m3   Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on sqrt((2l+1)/2) d^l_{mm'} for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d3m3(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d3m3
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur; /* error buffer consumed by class_alloc on failure */

  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=3;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1))*(ll+1)/((ll-2)*(ll+4));
    fac2[l] = 9.0/(ll*(ll+1));
    /* (l+1) below is the int loop counter; numerically identical to (ll+1)
       used by the sibling routines (int promoted to double in the product) */
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-3)*(ll+3)*(l+1)/((ll-2)*(ll+4)*ll);
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d3m3[index_mu][0]=0;
    d3m3[index_mu][1]=0;
    dlm1=0.; /*l=2*/
    d3m3[index_mu][2]=0;
    dl=sqrt(7./2.)*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
    d3m3[index_mu][3] = dl * sqrt(2./7.);
    for(l=3;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d3m3 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d3m3[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d40 term
 *
 * @param mu     Input : Vector of cos(beta) values
 * @param num_mu Input : Number of cos(beta) values
 * @param lmax   Input : maximum multipole
 * @param d40    Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on sqrt((2l+1)/2) d^l_{mm'} for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d40(
                double * mu,
                int num_mu,
                int lmax,
                double ** d40
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac3, *fac4; /* no fac2: m'=0 kills the m*m'/(l(l+1)) term */
  ErrorMsg erreur; /* error buffer consumed by class_alloc on failure */

  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=4;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-3)*(ll+5)));
    fac3[l] = sqrt((2*ll+3)*(ll-4)*(ll+4)/((2*ll-1)*(ll-3)*(ll+5)));
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    /* d40 vanishes identically for l<4; recurrence starts at l=4 */
    d40[index_mu][0]=0;
    d40[index_mu][1]=0;
    d40[index_mu][2]=0;
    dlm1=0.; /*l=3*/
    d40[index_mu][3]=0;
    dl=sqrt(315.)*(1+mu[index_mu])*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
    d40[index_mu][4] = dl * sqrt(2./9.);
    for(l=4;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d40 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*mu[index_mu]*dl - fac3[l]*dlm1;
      d40[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d4m2 term
 *
 * @param mu     Input : Vector of cos(beta) values
 * @param num_mu Input : Number of cos(beta) values
 * @param lmax   Input : maximum multipole
 * @param d4m2   Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on sqrt((2l+1)/2) d^l_{mm'} for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d4m2(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d4m2
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur; /* error buffer consumed by class_alloc on failure */

  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=4;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-3)*(ll+5)*(ll-1)*(ll+3))) * (ll+1.);
    fac2[l] = 8./(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)*(ll-4)*(ll+4)*(ll-2)*(ll+2)/((2*ll-1)*(ll-3)*(ll+5)*(ll-1)*(ll+3)))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d4m2[index_mu][0]=0;
    d4m2[index_mu][1]=0;
    d4m2[index_mu][2]=0;
    dlm1=0.; /*l=3*/
    d4m2[index_mu][3]=0;
    dl=sqrt(126.)*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
    d4m2[index_mu][4] = dl * sqrt(2./9.);
    for(l=4;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d4m2 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d4m2[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d4m4 term
 *
 * @param mu     Input : Vector of cos(beta) values
 * @param num_mu Input : Number of cos(beta) values
 * @param lmax   Input : maximum multipole
 * @param d4m4   Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on sqrt((2l+1)/2) d^l_{mm'} for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d4m4(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d4m4
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur; /* error buffer consumed by class_alloc on failure */

  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=4;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1))*(ll+1)/((ll-3)*(ll+5));
    fac2[l] = 16./(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-4)*(ll+4)*(ll+1)/((ll-3)*(ll+5)*ll);
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {
    d4m4[index_mu][0]=0;
    d4m4[index_mu][1]=0;
    d4m4[index_mu][2]=0;
    dlm1=0.; /*l=3*/
    d4m4[index_mu][3]=0;
    dl=sqrt(9./2.)*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
    d4m4[index_mu][4] = dl * sqrt(2./9.);
    for(l=4;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d4m4 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d4m4[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) { for (t4=max(max(max(0,ceild(t1-255,256)),ceild(16*t2-Nz-2035,2048)),ceild(8*t3-Ny-2035,2048));t4<=min(min(min(min(floord(4*Nt+Nx-9,2048),floord(8*t1+Nx+7,2048)),floord(16*t2+Nx+3,2048)),floord(8*t3+Nx-5,2048)),floord(16*t1-16*t2+Nz+Nx+5,2048));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),512*t4+510);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(2048*t4,4*t5+4); ubv=min(2048*t4+2047,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) 
+ (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); 
free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
ktensor.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <ParTI.h>
#include <stdlib.h>
#include <string.h>
#include "../error/error.h"

/**
 * Assign a new Kruskal tensor.
 *
 * @param[out] ktsr   Kruskal tensor
 * @param[in]  nmodes the number of dimensions/modes/tensor order
 * @param[in]  ndims  the mode sizes (copied into ktsr->ndims)
 * @param[in]  rank   tensor rank or the number of columns of factor matrices
 *
 * @return 0 unconditionally
 */
int sptNewKruskalTensor(sptKruskalTensor *ktsr, sptIndex nmodes, const sptIndex ndims[], sptIndex rank)
{
    ktsr->nmodes = nmodes;
    ktsr->rank = rank;
    /* NOTE(review): malloc results are unchecked; an OOM here crashes on
       first use of ndims/lambda. */
    ktsr->ndims = (sptIndex*)malloc(nmodes*sizeof(sptIndex));
    for(sptIndex i=0; i<nmodes; ++i)
        ktsr->ndims[i] = ndims[i];
    /* lambda (the rank weights) is allocated but left uninitialized */
    ktsr->lambda = (sptValue*)malloc(rank*sizeof(sptValue));
    ktsr->fit = 0.0;
    /* NOTE(review): ktsr->factors is NOT allocated here -- presumably the
       caller fills it in before sptFreeKruskalTensor runs; confirm. */
    return 0;
}

/**
 * Shuffle factor matrices row indices.
*
 * @param[out] map_inds is the renumbering mapping
 * @param[in] ktsr Kruskal tensor to be shuffled
 *
 */
void sptKruskalTensorInverseShuffleIndices(sptKruskalTensor * ktsr, sptIndex ** map_inds) {
    /* Renumber factor matrices rows: row i of the new matrix takes the
       values of row map_inds[m][i] of the old one. */
    sptIndex new_i;
    for(sptIndex m=0; m < ktsr->nmodes; ++m) {
        sptMatrix * mtx = ktsr->factors[m];
        sptIndex * mode_map_inds = map_inds[m];
        /* NOTE(review): cap*stride entries are allocated but only
           nrows*ncols are written; any rows between nrows and cap end up
           indeterminate -- confirm nothing reads them afterwards. */
        sptValue * tmp_values = malloc(mtx->cap * mtx->stride * sizeof (sptValue));

        for(sptIndex i=0; i<mtx->nrows; ++i) {
            new_i = mode_map_inds[i];
            for(sptIndex j=0; j<mtx->ncols; ++j) {
                tmp_values[i * mtx->stride + j] = mtx->values[new_i * mtx->stride + j];
            }
        }
        /* swap in the reordered buffer; old values are released */
        free(mtx->values);
        mtx->values = tmp_values;
    }
}


/**
 * Free a new Kruskal tensor.
 *
 * @param[in] ktsr Kruskal tensor
 *
 */
void sptFreeKruskalTensor(sptKruskalTensor *ktsr)
{
	ktsr->rank = 0;
	ktsr->fit = 0.0;
	free(ktsr->ndims);
	free(ktsr->lambda);
    /* frees each factor matrix, then the factors array itself */
    for(sptIndex i=0; i<ktsr->nmodes; ++i)
        sptFreeMatrix(ktsr->factors[i]);
    free(ktsr->factors);
    ktsr->nmodes = 0;
}


/**
 * Compute the fit of a Kruskal tensor to a sparse tensor.
 *
 * @param[in] spten a COO sparse tensor.
 * @param[in] lambda the weight array
 * @param[in] mats factor matrices
 * @param[in] ata the results of ATA, A is a factor matrix
 * @return fit a double-precision floating-point value
 *
 */
double sptKruskalTensorFit(
    sptSparseTensor const * const spten,
    sptValue const * const __restrict lambda,
    sptMatrix ** mats,
    sptMatrix ** ata)
{
    sptIndex const nmodes = spten->nmodes;

    /* ||X||^2, ||K||^2 and <X, K>; then fit = 1 - ||X - K|| / ||X|| */
    double spten_normsq = SparseTensorFrobeniusNormSquared(spten);
    double const norm_mats = sptKruskalTensorFrobeniusNormSquared(nmodes, lambda, ata);
    double const inner = sptSparseKruskalTensorInnerProduct(nmodes, lambda, mats);
    /* guard against tiny negative residuals from floating-point round-off */
    double residual = spten_normsq + norm_mats - 2 * inner;
    if (residual > 0.0) {
        residual = sqrt(residual);
    }
    double fit = 1 - (residual / sqrt(spten_normsq));

    return fit;
}


// Column-major.
/* Compute a Kruskal tensor's norm; computed on the "ata"s.
Check Tammy's sparse */
double sptKruskalTensorFrobeniusNormSquared(
    sptIndex const nmodes,
    sptValue const * const __restrict lambda,
    sptMatrix ** ata) // ata: column-major
{
    sptIndex const rank = ata[0]->ncols;
    sptIndex const stride = ata[0]->stride;
    /* ata[nmodes] is used as scratch for the Hadamard product of all ata
       matrices */
    sptValue * const __restrict tmp_atavals = ata[nmodes]->values; // Column-major
    double norm_mats = 0;

#ifdef PARTI_USE_OPENMP
    #pragma omp parallel for
#endif
    for(sptIndex x=0; x < rank*stride; ++x) {
        tmp_atavals[x] = 1.;
    }

    /* Compute Hadamard product for all "ata"s; only the lower triangle
       (j >= i) is maintained since the matrices are symmetric */
    for(sptIndex m=0; m < nmodes; ++m) {
        sptValue const * const __restrict atavals = ata[m]->values;
#ifdef PARTI_USE_OPENMP
        #pragma omp parallel for
#endif
        for(sptIndex i=0; i < rank; ++i) {
            for(sptIndex j=i; j < rank; ++j) {
                tmp_atavals[j * stride + i] *= atavals[j * stride + i];
            }
        }
    }

    /* compute lambda^T * aTa[MAX_NMODES] * lambda, only compute a half of
       them because of its symmetric; the off-diagonal terms count twice */
#ifdef PARTI_USE_OPENMP
    #pragma omp parallel for reduction(+:norm_mats)
#endif
    for(sptIndex i=0; i < rank; ++i) {
        norm_mats += tmp_atavals[i+(i*stride)] * lambda[i] * lambda[i];
        for(sptIndex j=i+1; j < rank; ++j) {
            norm_mats += tmp_atavals[i+(j*stride)] * lambda[i] * lambda[j] * 2;
        }
    }

    /* mathematically non-negative; fabs guards round-off */
    return fabs(norm_mats);
}

// Row-major, compute via MTTKRP result (mats[nmodes]) and mats[nmodes-1].
double sptSparseKruskalTensorInnerProduct(
    sptIndex const nmodes,
    sptValue const * const __restrict lambda,
    sptMatrix ** mats)
{
    sptIndex const rank = mats[0]->ncols;
    sptIndex const stride = mats[0]->stride;
    sptIndex const last_mode = nmodes - 1;
    sptIndex const I = mats[last_mode]->nrows;

    /* inner product is accumulated as sum_r lambda[r] * sum_i
       last_vals[i][r] * tmp_vals[i][r], where tmp_vals is the MTTKRP
       result stashed in mats[nmodes] */
    sptValue const * const last_vals = mats[last_mode]->values;
    sptValue const * const tmp_vals = mats[nmodes]->values;
    /* per-thread partial sums, allocated by the master thread below
       (OpenMP builds only) */
    sptValue * buffer_accum;

    double inner = 0;
    /* NOTE(review): malloc result unchecked */
    double * const __restrict accum = (double *) malloc(rank*sizeof(*accum));
#ifdef PARTI_USE_OPENMP
    #pragma omp parallel for
#endif
    for(sptIndex r=0; r < rank; ++r) {
        accum[r] = 0.0;
    }

#ifdef PARTI_USE_OPENMP
    /* First parallel region: only the master allocates/zeroes the
       per-thread buffer; the implicit barrier at region end makes it
       visible to all threads before the region below.
       NOTE(review): assumes the next parallel region runs with the same
       (or fewer) threads -- confirm dynamic thread adjustment is off. */
    #pragma omp parallel
    {
        int const nthreads = omp_get_num_threads();
        #pragma omp master
        {
            buffer_accum = (sptValue *)malloc(nthreads * rank * sizeof(sptValue));
            for(sptIndex j=0; j < nthreads * rank; ++j)
                buffer_accum[j] = 0.0;
        }
    }
#endif

#ifdef PARTI_USE_OPENMP
    #pragma omp parallel
    {
        int const tid = omp_get_thread_num();
        int const nthreads = omp_get_num_threads();
        /* each thread accumulates into its own rank-sized slice */
        sptValue * loc_accum = buffer_accum + tid * rank;

        #pragma omp for
        for(sptIndex i=0; i < I; ++i) {
            for(sptIndex r=0; r < rank; ++r) {
                loc_accum[r] += last_vals[r+(i*stride)] * tmp_vals[r+(i*stride)];
            }
        }

        /* reduce the per-thread slices into accum (implicit barrier
           between the two omp-for loops orders the phases) */
        #pragma omp for
        for(sptIndex j=0; j < rank; ++j) {
            for(int i=0; i < nthreads; ++i) {
                accum[j] += buffer_accum[i*rank + j];
            }
        }
    }
#else
    for(sptIndex i=0; i < I; ++i) {
        for(sptIndex r=0; r < rank; ++r) {
            accum[r] += last_vals[r+(i*stride)] * tmp_vals[r+(i*stride)];
        }
    }
#endif

    /* weight each column's sum by lambda and reduce to a scalar */
#ifdef PARTI_USE_OPENMP
    #pragma omp parallel for reduction(+:inner)
#endif
    for(sptIndex r=0; r < rank; ++r) {
        inner += accum[r] * lambda[r];
    }

#ifdef PARTI_USE_OPENMP
    free(buffer_accum);
#endif
    free(accum);

    return inner;
}
SmallestElement.c
// Members in alphabetical order:
// Gustavo T. Mastrobuono       NUSP 10734411,
// Henrique de S. Q. dos Santos NUSP 10819029,
// Jhordan P. V. Pesantes       NUSP 11733353,
// Witor M. A. de Oliveira      NUSP 10692190 and
// Yorvin A. R. Carrion         NUSP 11733332
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <omp.h>

#define T 4 // number of threads

/*
 * Reads n, a position pos, and n integers from stdin, then prints the
 * smallest element strictly greater than vet[pos], or -1 if none exists.
 */
int main(){
    int i = 0, n = 0, pos = 0;

    // This scanf assumes the values are laid out line by line as in the
    // sample input.txt: vector size, then the comparison position.
    // FIX: check scanf and validate pos instead of reading garbage /
    // indexing out of bounds on malformed input.
    if (scanf("%d\n%d", &n, &pos) != 2 || n <= 0 || pos < 0 || pos >= n)
        return 1;

    // FIX: heap allocation instead of a VLA -- a large n would overflow
    // the stack.
    int *vet = malloc(sizeof *vet * (size_t)n);
    if (vet == NULL)
        return 1;
    for (int r = 0; r < n; r++) {
        if (scanf("%d", &vet[r]) != 1) { // read and store the values
            free(vet);
            return 1;
        }
    }

    // FIX: INT_MAX sentinel instead of 99999 so inputs >= 99999 are
    // handled correctly.  (An answer of exactly INT_MAX is still
    // indistinguishable from "not found" -- acceptable for this exercise.)
    int menorLocal = INT_MAX, menorGlobal = INT_MAX;

    // FIX: schedule(dynamic, n/T) requested a chunk size of 0 whenever
    // n < T, which is invalid in OpenMP; clamp the chunk to at least 1.
    int chunk = (n / T > 0) ? (n / T) : 1;

    // Parallel loop: each thread keeps its own menorLocal (firstprivate)
    // and merges it into the shared menorGlobal inside a critical section.
    #pragma omp parallel for firstprivate(menorLocal)\
        shared(menorGlobal) schedule(dynamic, chunk)
    for (i = 0; i < n; i++) {
        if ((vet[i] > vet[pos]) && (vet[i] < menorLocal))
            menorLocal = vet[i];

        // Merge this thread's current best into the global minimum.
        #pragma omp critical
        if (menorLocal < menorGlobal)
            menorGlobal = menorLocal;
    }

    if (menorGlobal == INT_MAX)
        printf("-1\n"); // FIX: newline for consistency with the other branch
    else
        printf("%d\n", menorGlobal);

    free(vet);
    return 0;
}
ACAMatrix.h
/*!
 * @file    ACAKernel.h (NOTE(review): @file name does not match this
 *          header, which is ACAMatrix.h -- confirm and fix upstream)
 * @author  Dalibor Lukas
 * @author  Michal Merta
 * @author  Jan Zapletal
 * @date    June 16, 2014
 * @brief   Header file for class ACAMatrix
 *
 */

#ifndef ACAMATRIX_H
#define ACAMATRIX_H

#include "FullMatrix.h"
#include "FastBESpace.h"

namespace bem4i {

/*!
 * Hierarchical matrix assembled by Adaptive Cross Approximation:
 * non-admissible cluster pairs are stored as dense blocks, admissible
 * pairs as low-rank U*V factor pairs.
 */
template<class LO, class SC>
class ACAMatrix : public Matrix<LO, SC> {
  typedef typename GetType<LO, SC>::SCVT SCVT;

public:

  //! default constructor
  ACAMatrix( );

  /*!
   * Constructor allocating a full matrix
   *
   * @param[in]   nRows number of rows
   * @param[in]   nCols number of columns
   */
  ACAMatrix(
      LO nRows,
      LO nCols
      );

  //! destructor
  virtual ~ACAMatrix( );

  //! adds full nonadmissible block to the list of nonadmissible blocks
  /*
  inline void addNonadmissibleBlock(
      FullMatrix<LO, SC>* block
      ) {
    nonAdmissibleBlocks.push_back( block );
#pragma omp atomic update
    nonadmBlocksSize += block->getNCols( ) * block->getNRows( );
  }
   */

  //! adds full nonadmissible block associated with idx-th nonadmissible leaf to the list of nonadmissible blocks
  //! (the size counter is updated atomically so blocks may be added from
  //! multiple OpenMP threads)
  inline void addNonadmissibleBlock(
      FullMatrix<LO, SC>* block,
      LO idx
      ) {
    nonAdmissibleBlocks.at( idx ) = block;
#pragma omp atomic update
    nonadmBlocksSize += block->getNCols( ) * block->getNRows( );
  }

  //! returns pointer to a nonadmissible matrix with index idx
  inline FullMatrix<LO, SC>* getNonAdmissibleBlock(
      LO idx
      ) {
    return nonAdmissibleBlocks[idx];
  }

  //! adds list of nonadmissible cluster pairs
  //! (deep-copies each leaf's DOF lists and tracks the largest list in
  //! maxBlockSize)
  inline void setNonadmissibleDOFs(
      std::vector< BEBlockCluster< LO, SC > * > & leaves
      ) {
    this->nonadmissibleInnerDOFs.reserve( leaves.size( ) );
    this->nonadmissibleOuterDOFs.reserve( leaves.size( ) );
    /* NOTE(review): comparisons below mix size( ) (unsigned) with
       maxBlockSize (LO) -- confirm LO is wide enough / signedness is
       intended. */
    for ( LO i = 0; i < leaves.size( ); ++i ) {
      this->nonadmissibleInnerDOFs.push_back(
          new std::vector< LO >( *( leaves[ i ]->innerDOFs ) ) );
      if ( leaves.at( i )->innerDOFs->size( ) > this->maxBlockSize ) {
        this->maxBlockSize = leaves.at( i )->innerDOFs->size( );
      }
      this->nonadmissibleOuterDOFs.push_back(
          new std::vector< LO >( *( leaves[ i ]->outerDOFs ) ) );
      if ( leaves.at( i )->outerDOFs->size( ) > this->maxBlockSize ) {
        this->maxBlockSize = leaves.at( i )->outerDOFs->size( );
      }
    }
  }

  //! adds list of admissible cluster pairs
  //! (same bookkeeping as setNonadmissibleDOFs, for admissible leaves)
  inline void setAdmissibleDOFs(
      std::vector< BEBlockCluster< LO, SC > * > & leaves
      ) {
    this->admissibleInnerDOFs.reserve( leaves.size( ) );
    this->admissibleOuterDOFs.reserve( leaves.size( ) );
    for ( LO i = 0; i < leaves.size( ); ++i ) {
      this->admissibleInnerDOFs.push_back(
          new std::vector< LO >( *( leaves[ i ]->innerDOFs ) ) );
      if ( leaves.at( i )->innerDOFs->size( ) > this->maxBlockSize ) {
        this->maxBlockSize = leaves.at( i )->innerDOFs->size( );
      }
      this->admissibleOuterDOFs.push_back(
          new std::vector< LO >( *( leaves[ i ]->outerDOFs ) ) );
      if ( leaves.at( i )->outerDOFs->size( ) > this->maxBlockSize ) {
        this->maxBlockSize = leaves.at( i )->outerDOFs->size( );
      }
    }
  }

  //! adds a pair of U, V matrices from ACA
  //! (either factor may be null; only non-null factors count toward
  //! admBlocksSize, updated atomically)
  inline void addAdmissibleBlock(
      std::pair<FullMatrix<LO, SC>*, FullMatrix<LO, SC>* > blockUV,
      LO idx
      ) {
    admissibleBlocks[ idx ] = blockUV;
    if ( blockUV.first ) {
#pragma omp atomic update
      admBlocksSize += blockUV.first->getNRows( ) * blockUV.first->getNCols( );
    }
    if ( blockUV.second ) {
#pragma omp atomic update
      admBlocksSize += blockUV.second->getNRows( ) * blockUV.second->getNCols( );
    }
  }

  //! returns pointer to a pair of matrices U,V with index idx
  inline std::pair<FullMatrix<LO, SC>*, FullMatrix<LO, SC>* > getAdmissibleBlock(
      LO idx
      ) {
    return admissibleBlocks[idx];
  }

  //! applies matrix to a vector: y = beta*y + alpha*A*x (or A^T when transA)
  virtual void apply(
      Vector<LO, SC> const &x,
      Vector<LO, SC> &y,
      bool transA = false,
      SC alpha = 1.0,
      SC beta = 0.0
      );

  //! ratio of stored entries (dense + low-rank) to the full matrix size;
  //! returns 1.0 for an empty matrix
  inline SCVT getCompressionRatio( ) {
    LO nCols = this->getNCols( );
    LO nRows = this->getNRows( );
    if ( nCols * nRows == 0 ) return 1.0;
    /* when p1dis<->p1 transformation matrices are present, the logical
       size is taken from them */
    if ( this->p12p1disMat ) nCols = this->p12p1disMat->getNRows( );
    if ( this->p1dis2p1Mat ) nRows = this->p1dis2p1Mat->getNCols( );
    return (SCVT) ( ( nonadmBlocksSize + admBlocksSize ) /
        ( (SCVT) nCols * nRows ) );
  }

  inline void resizeNonAdmBlocks(
      LO size
      ) {
    this->nonAdmissibleBlocks.resize( size );
  }

  inline void resizeAdmBlocks(
      LO size
      ) {
    this->admissibleBlocks.resize( size );
  }

  //! NOTE(review): the stream parameter is ignored -- output always goes
  //! to std::cout; confirm whether printing to `stream` was intended
  void print( std::ostream &stream = std::cout ) const {
    std::cout << "ACA Matrix\n";
    std::cout << "Number of rows: " << this->nRows << std::endl;
    std::cout << "Number of cols: " << this->nCols << std::endl;
  };

  void setP12p1dis( bool p12p1dis ) {
    this->p12p1dis = p12p1dis;
  }

  void setP1dis2p1( bool p1dis2p1 ) {
    this->p1dis2p1 = p1dis2p1;
  }

  bool getP12p1dis( ) {
    return this->p12p1dis;
  }

  bool getP1dis2p1( ) {
    return this->p1dis2p1;
  }

  //! builds the p1 -> p1dis transformation matrix from COO triplets
  void setP12p1disMatFromTriplets(
      LO nRows,
      LO nCols,
      std::vector<LO> & vecI,
      std::vector<LO> & vecJ,
      std::vector<SC> & vecV
      );

  //! builds the p1dis -> p1 transformation matrix from COO triplets
  void setP1dis2p1MatFromTriplets(
      LO nRows,
      LO nCols,
      std::vector<LO> & vecI,
      std::vector<LO> & vecJ,
      std::vector<SC> & vecV
      );

protected:

  //! full size of approximated blocks
  LO nonadmBlocksSize;

  //! size of approximated blocks
  LO admBlocksSize;

  //! vector of nonadmissible leaves
  //std::vector<BEBlockCluster<LO, SC>*> nonadmissibleLeaves;

  //! vector of admissible leaves
  //std::vector<BEBlockCluster<LO, SC>*> admissibleLeaves;

  //! vector of nonadmissible matrix blocks
  std::vector<FullMatrix<LO, SC>* > nonAdmissibleBlocks;

  //! per-leaf DOF index lists (owned; allocated with new in the setters)
  std::vector<std::vector<LO>*> admissibleInnerDOFs;

  std::vector<std::vector<LO>*> admissibleOuterDOFs;

  std::vector<std::vector<LO>*> nonadmissibleInnerDOFs;

  std::vector<std::vector<LO>*> nonadmissibleOuterDOFs;

  //! largest DOF-list length seen, used for scratch sizing
  LO maxBlockSize;

  //! vector of admissible matrix blocks
  std::vector<std::pair< FullMatrix<LO, SC>*, FullMatrix<LO, SC>*> > admissibleBlocks;

  //! whether to use transformation matrix from p1dis to p1 elements
  bool p12p1dis;

  bool p1dis2p1;

  SparseMatrix<LO, SC> * p12p1disMat;

  SparseMatrix<LO, SC> * p1dis2p1Mat;

  //  bool deleteInnerDOFsLists;

  //  bool deleteOuterDOFsLists;

private:

  //! copy constructor
  ACAMatrix( const ACAMatrix& orig );

};

}

// include .cpp file to overcome linking problems due to templates
#include "ACAMatrix.cpp"

#endif /* ACAMATRIX_H */
drift.c
/*
 * clockperf
 *
 * Copyright (c) 2016-2021, Steven Noonan <steven@uplinklabs.net>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
#include "prefix.h"

#include "affinity.h"
#include "clock.h"
#include "drift.h"

#ifdef HAVE_DRIFT_TESTS
#include <assert.h>
#include <stdbool.h>
#include <omp.h>

/* clock pair under test: clk is measured against the reference ref */
struct global_cfg {
    struct clockspec clk;
    struct clockspec ref;
};

typedef enum {
    UNSTARTED = 0, // not yet spawned
    WAITING   = 1, // waiting for requests from master
    REPORTING = 2, // thread asked to report in
    EXITING   = 3, // thread asked to exit
    DEAD      = 4, // thread exited
} thread_state;

/* per-worker slot; master drives the state field, worker publishes clock
 * samples into last_clk/last_ref.
 * NOTE(review): state is read/written across threads without atomics or
 * barriers -- the protocol relies on polling with driftsleep(); confirm
 * this is acceptable on the targeted platforms. */
struct thread_ctx {
    thread_state state;
    uint64_t last_clk;
    uint64_t last_ref;
    char padding[104]; // padding to at least one L2 cache line wide
};

/* sleep ~usec microseconds (on Windows: milliseconds, minimum 1 ms) */
static inline int driftsleep(int usec)
{
#ifdef TARGET_OS_WINDOWS
    usec /= 1000;
    if (usec < 1)
        usec = 1;
    Sleep(usec);
    return 0;
#else
    return usleep(usec);
#endif
}

static uint32_t thread_count;

/* record the OpenMP team size once; drift_run sizes its arrays from it.
 * NOTE(review): assumes drift_run's parallel regions get the same team
 * size -- confirm dynamic thread adjustment is disabled. */
void drift_init(void)
{
#pragma omp parallel
    {
#pragma omp master
        {
            thread_count = omp_get_num_threads();
        }
    }
}

/* Run the drift test for runtime_ms: one polling worker per CPU samples
 * (clkid, refid); the master thread periodically asks all workers to
 * report and prints each CPU's clkid drift (ms) relative to refid. */
void drift_run(uint32_t runtime_ms, struct clockspec clkid, struct clockspec refid)
{
    uint32_t idx; /* shared, but only ever used by the master thread */
    struct thread_ctx *threads = NULL;
    struct global_cfg cfg;

    memset(&cfg, 0, sizeof(struct global_cfg));

    cfg.clk = clkid;
    cfg.ref = refid;

    /* NOTE(review): calloc result unchecked; also zero-fill makes every
     * slot start UNSTARTED */
    threads = (struct thread_ctx *)calloc(thread_count,
            sizeof(struct thread_ctx));

    /* Spawn drift thread per CPU */
#pragma omp parallel
    {
#pragma omp master
        {
            /* `this` is plain C here; rename if this file is ever compiled
             * as C++ */
            struct thread_ctx *thread, *this = NULL;
            uint64_t start_ref, start_clk;
            int64_t delta_clk, expect_ms_ref;
            uint32_t unstarted;

            /* Wait until every worker has checked in (left UNSTARTED).
             * Exactly one slot stays UNSTARTED: the one belonging to the
             * thread running this master block -- that slot becomes ours. */
            do {
                unstarted = 0;
                for (idx = 0; idx < thread_count; idx++) {
                    thread = &threads[idx];
                    if (thread->state == UNSTARTED) {
                        unstarted++;
                        this = thread;
                    }
                }
            } while (unstarted != 1);

            thread_bind(omp_get_thread_num());

            //uint64_t curr_clk;
            //int64_t delta_ref, expect_ms_clk;

            clock_read(cfg.clk, &start_clk);
            clock_read(cfg.ref, &start_ref);

            do {
                /* ask every started worker for a fresh sample */
                for (idx = 0; idx < thread_count; idx++) {
                    thread = &threads[idx];
                    if (thread->state > UNSTARTED)
                        thread->state = REPORTING;
                }

                /* sample our own clocks directly */
                clock_read(cfg.clk, &this->last_clk);
                clock_read(cfg.ref, &this->last_ref);

                /* wait for all workers to publish and return to WAITING */
                for (idx = 0; idx < thread_count; idx++) {
                    thread = &threads[idx];
                    while (thread->state == REPORTING)
                        driftsleep(10);
                }

                /* elapsed reference time in ms (values presumably in ns --
                 * confirm clock_read's unit) */
                expect_ms_ref = (this->last_ref / 1000000ULL) - (start_ref / 1000000ULL);
                //expect_ms_clk = (this->last_clk / 1000000ULL) - (start_clk / 1000000ULL);

                printf("%9" PRId64 ": ", expect_ms_ref);
                for (idx = 0; idx < thread_count; idx++) {
                    //int64_t ref_ms;
                    int64_t clk_ms;

                    thread = &threads[idx];
                    //ref_ms = (thread->last_ref / 1000000ULL) - (start_ref / 1000000ULL);
                    clk_ms = (thread->last_clk / 1000000ULL) - (start_clk / 1000000ULL);

                    //delta_ref = (ref_ms - expect_ms_ref);
                    delta_clk = (clk_ms - expect_ms_ref);

                    printf("%6" PRId64 ", ", delta_clk);

                    /* wrap the output every 8 columns */
                    if ((idx + 1) % 8 == 0 && idx < thread_count - 1)
                        printf("\n%11s", "");
                }
                printf("\n");

                driftsleep(1000000);
            } while(expect_ms_ref < runtime_ms);

            /* tell every worker (including our own slot) to shut down */
            for (idx = 0; idx < thread_count; idx++) {
                thread = &threads[idx];
                thread->state = EXITING;
            }
        }

        /* Worker loop: each thread claims the slot matching its own
         * thread id (the loop index i is only a trip counter). */
#pragma omp for
        for(uint32_t i = 0; i < thread_count; i++) {
            uint32_t thread_id = omp_get_thread_num();
            struct thread_ctx *ctx = &threads[thread_id];
            struct clockspec clk_id = cfg.clk;
            struct clockspec ref_id = cfg.ref;

            thread_bind(thread_id);

            //printf("starting thread %d : %d\n", thread_id, i);

            /* slot already claimed in a previous iteration of this chunk */
            if (ctx->state != UNSTARTED)
                continue;

            do {
                uint64_t clk;
                uint64_t ref;
                while (ctx->state == WAITING) {
                    //printf("thread %d:%d waiting\n", thread_id, i);
                    driftsleep(100);
                }
                if (ctx->state == EXITING)
                    break;
                /* first pass (UNSTARTED) or REPORTING: publish a sample,
                 * then park in WAITING */
                clock_read(clk_id, &clk);
                clock_read(ref_id, &ref);
                ctx->last_clk = clk;
                ctx->last_ref = ref;
                ctx->state = WAITING;
            } while(1);

            ctx->state = DEAD;
        }
    }

    free(threads);
}

#endif

/* vim: set ts=4 sts=4 sw=4 et: */
Compiler.c
// this is autogenerated file, do not edit it. #include "ficus/ficus.h" struct _fx_Nt6option1N10Ast__typ_t_data_t; static void _fx_free_Nt6option1N10Ast__typ_t(struct _fx_Nt6option1N10Ast__typ_t_data_t** dst); struct _fx_Nt6option1N10Ast__exp_t_data_t; static void _fx_free_Nt6option1N10Ast__exp_t(struct _fx_Nt6option1N10Ast__exp_t_data_t** dst); struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t; static void _fx_free_Nt9Dynvec__t1N14Ast__id_info_t(struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t** dst); struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t; static void _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t** dst); struct _fx_N10Ast__typ_t_data_t; static void _fx_free_N10Ast__typ_t(struct _fx_N10Ast__typ_t_data_t** dst); struct _fx_N13Ast__binary_t_data_t; static void _fx_free_N13Ast__binary_t(struct _fx_N13Ast__binary_t_data_t** dst); struct _fx_N10Ast__exp_t_data_t; static void _fx_free_N10Ast__exp_t(struct _fx_N10Ast__exp_t_data_t** dst); struct _fx_N10Ast__pat_t_data_t; static void _fx_free_N10Ast__pat_t(struct _fx_N10Ast__pat_t_data_t** dst); struct _fx_N16Ast__env_entry_t_data_t; static void _fx_free_N16Ast__env_entry_t(struct _fx_N16Ast__env_entry_t_data_t** dst); struct _fx_N16Ast__defmodule_t_data_t; static void _fx_free_N16Ast__defmodule_t(struct _fx_N16Ast__defmodule_t_data_t** dst); struct _fx_N14K_form__ktyp_t_data_t; static void _fx_free_N14K_form__ktyp_t(struct _fx_N14K_form__ktyp_t_data_t** dst); struct _fx_N14K_form__kexp_t_data_t; static void _fx_free_N14K_form__kexp_t(struct _fx_N14K_form__kexp_t_data_t** dst); struct _fx_N14C_form__ctyp_t_data_t; static void _fx_free_N14C_form__ctyp_t(struct _fx_N14C_form__ctyp_t_data_t** dst); struct _fx_N14C_form__cexp_t_data_t; static void _fx_free_N14C_form__cexp_t(struct _fx_N14C_form__cexp_t_data_t** dst); struct _fx_N15C_form__cstmt_t_data_t; static void _fx_free_N15C_form__cstmt_t(struct 
_fx_N15C_form__cstmt_t_data_t** dst);
/* First batch of generated type definitions. Reference-counted heap cells all
 * start with `int_ rc`; list nodes are (rc, tl, hd).
 * NOTE(review): the high-level meanings below are inferred from the mangled
 * names — generated code, verify against the Ficus sources, do not hand-edit. */
/* Heap cell for `Ast.typ_t option` (u.Some holds the payload). */
typedef struct _fx_Nt6option1N10Ast__typ_t_data_t {
    int_ rc;
    union {
        struct _fx_N10Ast__typ_t_data_t* Some;
    } u;
} _fx_Nt6option1N10Ast__typ_t_data_t, *_fx_Nt6option1N10Ast__typ_t;
/* Singly-linked list node of strings. */
typedef struct _fx_LS_data_t {
    int_ rc;
    struct _fx_LS_data_t* tl;
    fx_str_t hd;
} _fx_LS_data_t, *_fx_LS;
/* Closure value: function pointer plus captured free variables. */
typedef struct _fx_FPS1B {
    int (*fp)(bool, fx_str_t*, void*);
    fx_fcv_t* fcv;
} _fx_FPS1B;
/* Tagged union of option values: bool, int or string. */
typedef struct _fx_N17Options__optval_t {
    int tag;
    union {
        bool OptBool;
        int_ OptInt;
        fx_str_t OptString;
    } u;
} _fx_N17Options__optval_t;
/* (name, value) pair for one option. */
typedef struct _fx_T2SN17Options__optval_t {
    fx_str_t t0;
    struct _fx_N17Options__optval_t t1;
} _fx_T2SN17Options__optval_t;
/* List node of (name, value) option pairs. */
typedef struct _fx_LT2SN17Options__optval_t_data_t {
    int_ rc;
    struct _fx_LT2SN17Options__optval_t_data_t* tl;
    struct _fx_T2SN17Options__optval_t hd;
} _fx_LT2SN17Options__optval_t_data_t, *_fx_LT2SN17Options__optval_t;
/* Compiler options record (Options.options_t): CLI flags and paths. */
typedef struct _fx_R18Options__options_t {
    struct _fx_LS_data_t* app_args;
    fx_str_t app_filename;
    bool arch64;
    bool force_rebuild;
    fx_str_t build_dir;
    fx_str_t build_rootdir;
    fx_str_t cflags;
    fx_str_t clibs;
    bool compile_by_cpp;
    fx_str_t filename;
    bool gen_c;
    struct _fx_LS_data_t* include_path;
    bool debug;
    struct _fx_LT2SN17Options__optval_t_data_t* defines;
    int_ optim_iters;
    int_ inline_thresh;
    bool enable_openmp;
    bool relax;
    bool use_preamble;
    bool make_app;
    int_ optimize_level;
    fx_str_t output_name;
    bool print_ast0;
    bool print_ast;
    bool print_k0;
    bool print_k;
    bool print_tokens;
    bool run_app;
    bool verbose;
    bool W_unused;
} _fx_R18Options__options_t;
/* Pair of ints. */
typedef struct _fx_Ta2i {
    int_ t0;
    int_ t1;
} _fx_Ta2i;
/* ((int, int), string) tuple. */
typedef struct _fx_T2Ta2iS {
    struct _fx_Ta2i t0;
    fx_str_t t1;
} _fx_T2Ta2iS;
/* Ast.id_t: identifier triple — presumably (module, info idx, name idx);
 * TODO(review): confirm field semantics against Ast.fx. */
typedef struct _fx_R9Ast__id_t {
    int_ m;
    int_ i;
    int_ j;
} _fx_R9Ast__id_t;
/* Ast.loc_t: source location (module index, start/end line and column). */
typedef struct _fx_R10Ast__loc_t {
    int_ m_idx;
    int_ line0;
    int_ col0;
    int_ line1;
    int_ col1;
} _fx_R10Ast__loc_t;
/* (id, int) pair. */
typedef struct _fx_T2R9Ast__id_ti {
    struct _fx_R9Ast__id_t t0;
    int_ t1;
} _fx_T2R9Ast__id_ti;
typedef struct _fx_T2Bi
{ bool t0; int_ t1; } _fx_T2Bi; typedef struct _fx_N12Ast__scope_t { int tag; union { int_ ScBlock; struct _fx_T2Bi ScLoop; int_ ScFold; int_ ScArrMap; int_ ScMap; int_ ScTry; struct _fx_R9Ast__id_t ScFun; struct _fx_R9Ast__id_t ScClass; struct _fx_R9Ast__id_t ScInterface; int_ ScModule; } u; } _fx_N12Ast__scope_t; typedef struct _fx_LN12Ast__scope_t_data_t { int_ rc; struct _fx_LN12Ast__scope_t_data_t* tl; struct _fx_N12Ast__scope_t hd; } _fx_LN12Ast__scope_t_data_t, *_fx_LN12Ast__scope_t; typedef struct _fx_R16Ast__val_flags_t { bool val_flag_arg; bool val_flag_mutable; bool val_flag_temp; bool val_flag_tempref; bool val_flag_private; bool val_flag_subarray; bool val_flag_instance; struct _fx_T2R9Ast__id_ti val_flag_method; int_ val_flag_ctor; struct _fx_LN12Ast__scope_t_data_t* val_flag_global; } _fx_R16Ast__val_flags_t; typedef struct _fx_T2R9Ast__id_tN14C_form__ctyp_t { struct _fx_R9Ast__id_t t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* tl; struct _fx_T2R9Ast__id_tN14C_form__ctyp_t hd; } _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t, *_fx_LT2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_R23C_form__cdefinterface_t { struct _fx_R9Ast__id_t ci_name; fx_str_t ci_cname; struct _fx_R9Ast__id_t ci_id; struct _fx_R9Ast__id_t ci_vtbl; struct _fx_R9Ast__id_t ci_base; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* ci_all_methods; struct _fx_LN12Ast__scope_t_data_t* ci_scope; struct _fx_R10Ast__loc_t ci_loc; } _fx_R23C_form__cdefinterface_t; typedef struct _fx_rR23C_form__cdefinterface_t_data_t { int_ rc; struct _fx_R23C_form__cdefinterface_t data; } _fx_rR23C_form__cdefinterface_t_data_t, *_fx_rR23C_form__cdefinterface_t; typedef struct _fx_N17Ast__fun_constr_t { int tag; union { int_ CtorVariant; struct _fx_R9Ast__id_t CtorFP; struct _fx_R9Ast__id_t CtorExn; } u; } _fx_N17Ast__fun_constr_t; typedef struct 
_fx_R16Ast__fun_flags_t { int_ fun_flag_pure; bool fun_flag_ccode; bool fun_flag_have_keywords; bool fun_flag_inline; bool fun_flag_nothrow; bool fun_flag_really_nothrow; bool fun_flag_private; struct _fx_N17Ast__fun_constr_t fun_flag_ctor; struct _fx_R9Ast__id_t fun_flag_method_of; bool fun_flag_uses_fv; bool fun_flag_recursive; bool fun_flag_instance; } _fx_R16Ast__fun_flags_t; typedef struct _fx_LN15C_form__cstmt_t_data_t { int_ rc; struct _fx_LN15C_form__cstmt_t_data_t* tl; struct _fx_N15C_form__cstmt_t_data_t* hd; } _fx_LN15C_form__cstmt_t_data_t, *_fx_LN15C_form__cstmt_t; typedef struct _fx_N19C_form__carg_attr_t { int tag; } _fx_N19C_form__carg_attr_t; typedef struct _fx_LN19C_form__carg_attr_t_data_t { int_ rc; struct _fx_LN19C_form__carg_attr_t_data_t* tl; struct _fx_N19C_form__carg_attr_t hd; } _fx_LN19C_form__carg_attr_t_data_t, *_fx_LN19C_form__carg_attr_t; typedef struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t { struct _fx_R9Ast__id_t t0; struct _fx_N14C_form__ctyp_t_data_t* t1; struct _fx_LN19C_form__carg_attr_t_data_t* t2; } _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t; typedef struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t { int_ rc; struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* tl; struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t hd; } _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t, *_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t; typedef struct _fx_R17C_form__cdeffun_t { struct _fx_R9Ast__id_t cf_name; fx_str_t cf_cname; struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* cf_args; struct _fx_N14C_form__ctyp_t_data_t* cf_rt; struct _fx_LN15C_form__cstmt_t_data_t* cf_body; struct _fx_R16Ast__fun_flags_t cf_flags; struct _fx_LN12Ast__scope_t_data_t* cf_scope; struct _fx_R10Ast__loc_t cf_loc; } _fx_R17C_form__cdeffun_t; typedef struct _fx_rR17C_form__cdeffun_t_data_t { int_ rc; struct 
_fx_R17C_form__cdeffun_t data; } _fx_rR17C_form__cdeffun_t_data_t, *_fx_rR17C_form__cdeffun_t; typedef struct _fx_Ta2R9Ast__id_t { struct _fx_R9Ast__id_t t0; struct _fx_R9Ast__id_t t1; } _fx_Ta2R9Ast__id_t; typedef struct _fx_LR9Ast__id_t_data_t { int_ rc; struct _fx_LR9Ast__id_t_data_t* tl; struct _fx_R9Ast__id_t hd; } _fx_LR9Ast__id_t_data_t, *_fx_LR9Ast__id_t; typedef struct _fx_R17C_form__ctprops_t { bool ctp_scalar; bool ctp_complex; bool ctp_ptr; bool ctp_pass_by_ref; struct _fx_LR9Ast__id_t_data_t* ctp_make; struct _fx_Ta2R9Ast__id_t ctp_free; struct _fx_Ta2R9Ast__id_t ctp_copy; } _fx_R17C_form__ctprops_t; typedef struct _fx_R17C_form__cdeftyp_t { struct _fx_R9Ast__id_t ct_name; struct _fx_N14C_form__ctyp_t_data_t* ct_typ; fx_str_t ct_cname; struct _fx_R17C_form__ctprops_t ct_props; int_ ct_data_start; struct _fx_R9Ast__id_t ct_enum; struct _fx_LR9Ast__id_t_data_t* ct_ifaces; struct _fx_R9Ast__id_t ct_ifaces_id; struct _fx_LN12Ast__scope_t_data_t* ct_scope; struct _fx_R10Ast__loc_t ct_loc; } _fx_R17C_form__cdeftyp_t; typedef struct _fx_rR17C_form__cdeftyp_t_data_t { int_ rc; struct _fx_R17C_form__cdeftyp_t data; } _fx_rR17C_form__cdeftyp_t_data_t, *_fx_rR17C_form__cdeftyp_t; typedef struct _fx_Nt6option1N14C_form__cexp_t { int tag; union { struct _fx_N14C_form__cexp_t_data_t* Some; } u; } _fx_Nt6option1N14C_form__cexp_t; typedef struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t { struct _fx_R9Ast__id_t t0; struct _fx_Nt6option1N14C_form__cexp_t t1; } _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t; typedef struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* tl; struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t hd; } _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t, *_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t; typedef struct _fx_R18C_form__cdefenum_t { struct _fx_R9Ast__id_t cenum_name; struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* cenum_members; fx_str_t 
cenum_cname; struct _fx_LN12Ast__scope_t_data_t* cenum_scope; struct _fx_R10Ast__loc_t cenum_loc; } _fx_R18C_form__cdefenum_t; typedef struct _fx_rR18C_form__cdefenum_t_data_t { int_ rc; struct _fx_R18C_form__cdefenum_t data; } _fx_rR18C_form__cdefenum_t_data_t, *_fx_rR18C_form__cdefenum_t; typedef struct _fx_R19C_form__cdefmacro_t { struct _fx_R9Ast__id_t cm_name; fx_str_t cm_cname; struct _fx_LR9Ast__id_t_data_t* cm_args; struct _fx_LN15C_form__cstmt_t_data_t* cm_body; struct _fx_LN12Ast__scope_t_data_t* cm_scope; struct _fx_R10Ast__loc_t cm_loc; } _fx_R19C_form__cdefmacro_t; typedef struct _fx_rR19C_form__cdefmacro_t_data_t { int_ rc; struct _fx_R19C_form__cdefmacro_t data; } _fx_rR19C_form__cdefmacro_t_data_t, *_fx_rR19C_form__cdefmacro_t; typedef struct _fx_T2R9Ast__id_tN14K_form__ktyp_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* tl; struct _fx_T2R9Ast__id_tN14K_form__ktyp_t hd; } _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t, *_fx_LT2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_R23K_form__kdefinterface_t { struct _fx_R9Ast__id_t ki_name; struct _fx_R9Ast__id_t ki_base; fx_str_t ki_cname; struct _fx_R9Ast__id_t ki_id; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* ki_all_methods; struct _fx_LN12Ast__scope_t_data_t* ki_scope; struct _fx_R10Ast__loc_t ki_loc; } _fx_R23K_form__kdefinterface_t; typedef struct _fx_rR23K_form__kdefinterface_t_data_t { int_ rc; struct _fx_R23K_form__kdefinterface_t data; } _fx_rR23K_form__kdefinterface_t_data_t, *_fx_rR23K_form__kdefinterface_t; typedef struct _fx_R25K_form__kdefclosureinfo_t { struct _fx_R9Ast__id_t kci_arg; struct _fx_R9Ast__id_t kci_fcv_t; struct _fx_R9Ast__id_t kci_fp_typ; struct _fx_R9Ast__id_t kci_make_fp; struct _fx_R9Ast__id_t kci_wrap_f; } _fx_R25K_form__kdefclosureinfo_t; typedef struct _fx_R17K_form__kdeffun_t { struct 
_fx_R9Ast__id_t kf_name; fx_str_t kf_cname; struct _fx_LR9Ast__id_t_data_t* kf_params; struct _fx_N14K_form__ktyp_t_data_t* kf_rt; struct _fx_N14K_form__kexp_t_data_t* kf_body; struct _fx_R16Ast__fun_flags_t kf_flags; struct _fx_R25K_form__kdefclosureinfo_t kf_closure; struct _fx_LN12Ast__scope_t_data_t* kf_scope; struct _fx_R10Ast__loc_t kf_loc; } _fx_R17K_form__kdeffun_t; typedef struct _fx_rR17K_form__kdeffun_t_data_t { int_ rc; struct _fx_R17K_form__kdeffun_t data; } _fx_rR17K_form__kdeffun_t_data_t, *_fx_rR17K_form__kdeffun_t; typedef struct _fx_R17K_form__kdefexn_t { struct _fx_R9Ast__id_t ke_name; fx_str_t ke_cname; fx_str_t ke_base_cname; struct _fx_N14K_form__ktyp_t_data_t* ke_typ; bool ke_std; struct _fx_R9Ast__id_t ke_tag; struct _fx_R9Ast__id_t ke_make; struct _fx_LN12Ast__scope_t_data_t* ke_scope; struct _fx_R10Ast__loc_t ke_loc; } _fx_R17K_form__kdefexn_t; typedef struct _fx_rR17K_form__kdefexn_t_data_t { int_ rc; struct _fx_R17K_form__kdefexn_t data; } _fx_rR17K_form__kdefexn_t_data_t, *_fx_rR17K_form__kdefexn_t; typedef struct _fx_R17K_form__ktprops_t { bool ktp_complex; bool ktp_scalar; bool ktp_ptr; bool ktp_pass_by_ref; bool ktp_custom_free; bool ktp_custom_copy; } _fx_R17K_form__ktprops_t; typedef struct _fx_Nt6option1R17K_form__ktprops_t { int tag; union { struct _fx_R17K_form__ktprops_t Some; } u; } _fx_Nt6option1R17K_form__ktprops_t; typedef struct _fx_R16Ast__var_flags_t { int_ var_flag_class_from; bool var_flag_record; bool var_flag_recursive; bool var_flag_have_tag; bool var_flag_have_mutable; bool var_flag_opt; bool var_flag_instance; } _fx_R16Ast__var_flags_t; typedef struct _fx_LN14K_form__ktyp_t_data_t { int_ rc; struct _fx_LN14K_form__ktyp_t_data_t* tl; struct _fx_N14K_form__ktyp_t_data_t* hd; } _fx_LN14K_form__ktyp_t_data_t, *_fx_LN14K_form__ktyp_t; typedef struct _fx_T2R9Ast__id_tLR9Ast__id_t { struct _fx_R9Ast__id_t t0; struct _fx_LR9Ast__id_t_data_t* t1; } _fx_T2R9Ast__id_tLR9Ast__id_t; typedef struct 
_fx_LT2R9Ast__id_tLR9Ast__id_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* tl; struct _fx_T2R9Ast__id_tLR9Ast__id_t hd; } _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t, *_fx_LT2R9Ast__id_tLR9Ast__id_t; typedef struct _fx_R21K_form__kdefvariant_t { struct _fx_R9Ast__id_t kvar_name; fx_str_t kvar_cname; struct _fx_R9Ast__id_t kvar_proto; struct _fx_Nt6option1R17K_form__ktprops_t kvar_props; struct _fx_LN14K_form__ktyp_t_data_t* kvar_targs; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* kvar_cases; struct _fx_LR9Ast__id_t_data_t* kvar_ctors; struct _fx_R16Ast__var_flags_t kvar_flags; struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* kvar_ifaces; struct _fx_LN12Ast__scope_t_data_t* kvar_scope; struct _fx_R10Ast__loc_t kvar_loc; } _fx_R21K_form__kdefvariant_t; typedef struct _fx_rR21K_form__kdefvariant_t_data_t { int_ rc; struct _fx_R21K_form__kdefvariant_t data; } _fx_rR21K_form__kdefvariant_t_data_t, *_fx_rR21K_form__kdefvariant_t; typedef struct _fx_R17K_form__kdeftyp_t { struct _fx_R9Ast__id_t kt_name; fx_str_t kt_cname; struct _fx_R9Ast__id_t kt_proto; struct _fx_Nt6option1R17K_form__ktprops_t kt_props; struct _fx_LN14K_form__ktyp_t_data_t* kt_targs; struct _fx_N14K_form__ktyp_t_data_t* kt_typ; struct _fx_LN12Ast__scope_t_data_t* kt_scope; struct _fx_R10Ast__loc_t kt_loc; } _fx_R17K_form__kdeftyp_t; typedef struct _fx_rR17K_form__kdeftyp_t_data_t { int_ rc; struct _fx_R17K_form__kdeftyp_t data; } _fx_rR17K_form__kdeftyp_t_data_t, *_fx_rR17K_form__kdeftyp_t; typedef struct _fx_R25K_form__kdefclosurevars_t { struct _fx_R9Ast__id_t kcv_name; fx_str_t kcv_cname; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* kcv_freevars; struct _fx_LR9Ast__id_t_data_t* kcv_orig_freevars; struct _fx_LN12Ast__scope_t_data_t* kcv_scope; struct _fx_R10Ast__loc_t kcv_loc; } _fx_R25K_form__kdefclosurevars_t; typedef struct _fx_rR25K_form__kdefclosurevars_t_data_t { int_ rc; struct _fx_R25K_form__kdefclosurevars_t data; } _fx_rR25K_form__kdefclosurevars_t_data_t, 
*_fx_rR25K_form__kdefclosurevars_t; typedef struct _fx_Nt6option1R9Ast__id_t { int tag; union { struct _fx_R9Ast__id_t Some; } u; } _fx_Nt6option1R9Ast__id_t; typedef struct _fx_Nt6option1N10Ast__exp_t_data_t { int_ rc; union { struct _fx_N10Ast__exp_t_data_t* Some; } u; } _fx_Nt6option1N10Ast__exp_t_data_t, *_fx_Nt6option1N10Ast__exp_t; typedef struct _fx_R13Ast__defval_t { struct _fx_R9Ast__id_t dv_name; struct _fx_N10Ast__typ_t_data_t* dv_typ; struct _fx_R16Ast__val_flags_t dv_flags; struct _fx_LN12Ast__scope_t_data_t* dv_scope; struct _fx_R10Ast__loc_t dv_loc; } _fx_R13Ast__defval_t; typedef struct _fx_FPi2R9Ast__id_tR9Ast__id_t { int (*fp)(struct _fx_R9Ast__id_t*, struct _fx_R9Ast__id_t*, int_*, void*); fx_fcv_t* fcv; } _fx_FPi2R9Ast__id_tR9Ast__id_t; typedef struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t { struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* root; struct _fx_FPi2R9Ast__id_tR9Ast__id_t cmp; } _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t; typedef struct _fx_LN10Ast__pat_t_data_t { int_ rc; struct _fx_LN10Ast__pat_t_data_t* tl; struct _fx_N10Ast__pat_t_data_t* hd; } _fx_LN10Ast__pat_t_data_t, *_fx_LN10Ast__pat_t; typedef struct _fx_rLR9Ast__id_t_data_t { int_ rc; struct _fx_LR9Ast__id_t_data_t* data; } _fx_rLR9Ast__id_t_data_t, *_fx_rLR9Ast__id_t; typedef struct _fx_R13Ast__deffun_t { struct _fx_R9Ast__id_t df_name; struct _fx_LR9Ast__id_t_data_t* df_templ_args; struct _fx_LN10Ast__pat_t_data_t* df_args; struct _fx_N10Ast__typ_t_data_t* df_typ; struct _fx_N10Ast__exp_t_data_t* df_body; struct _fx_R16Ast__fun_flags_t df_flags; struct _fx_LN12Ast__scope_t_data_t* df_scope; struct _fx_R10Ast__loc_t df_loc; struct _fx_rLR9Ast__id_t_data_t* df_templ_inst; struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t df_env; } _fx_R13Ast__deffun_t; typedef struct _fx_rR13Ast__deffun_t_data_t { int_ rc; struct _fx_R13Ast__deffun_t data; } _fx_rR13Ast__deffun_t_data_t, *_fx_rR13Ast__deffun_t; typedef struct _fx_R13Ast__defexn_t { struct 
_fx_R9Ast__id_t dexn_name; struct _fx_N10Ast__typ_t_data_t* dexn_typ; struct _fx_LN12Ast__scope_t_data_t* dexn_scope; struct _fx_R10Ast__loc_t dexn_loc; } _fx_R13Ast__defexn_t; typedef struct _fx_rR13Ast__defexn_t_data_t { int_ rc; struct _fx_R13Ast__defexn_t data; } _fx_rR13Ast__defexn_t_data_t, *_fx_rR13Ast__defexn_t; typedef struct _fx_R13Ast__deftyp_t { struct _fx_R9Ast__id_t dt_name; struct _fx_LR9Ast__id_t_data_t* dt_templ_args; struct _fx_N10Ast__typ_t_data_t* dt_typ; bool dt_finalized; struct _fx_LN12Ast__scope_t_data_t* dt_scope; struct _fx_R10Ast__loc_t dt_loc; } _fx_R13Ast__deftyp_t; typedef struct _fx_rR13Ast__deftyp_t_data_t { int_ rc; struct _fx_R13Ast__deftyp_t data; } _fx_rR13Ast__deftyp_t_data_t, *_fx_rR13Ast__deftyp_t; typedef struct _fx_T2R9Ast__id_tN10Ast__typ_t { struct _fx_R9Ast__id_t t0; struct _fx_N10Ast__typ_t_data_t* t1; } _fx_T2R9Ast__id_tN10Ast__typ_t; typedef struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* tl; struct _fx_T2R9Ast__id_tN10Ast__typ_t hd; } _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t, *_fx_LT2R9Ast__id_tN10Ast__typ_t; typedef struct _fx_LTa2R9Ast__id_t_data_t { int_ rc; struct _fx_LTa2R9Ast__id_t_data_t* tl; struct _fx_Ta2R9Ast__id_t hd; } _fx_LTa2R9Ast__id_t_data_t, *_fx_LTa2R9Ast__id_t; typedef struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t { struct _fx_R9Ast__id_t t0; struct _fx_LTa2R9Ast__id_t_data_t* t1; } _fx_T2R9Ast__id_tLTa2R9Ast__id_t; typedef struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* tl; struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t hd; } _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t, *_fx_LT2R9Ast__id_tLTa2R9Ast__id_t; typedef struct _fx_R17Ast__defvariant_t { struct _fx_R9Ast__id_t dvar_name; struct _fx_LR9Ast__id_t_data_t* dvar_templ_args; struct _fx_N10Ast__typ_t_data_t* dvar_alias; struct _fx_R16Ast__var_flags_t dvar_flags; struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* dvar_cases; struct 
_fx_LR9Ast__id_t_data_t* dvar_ctors; struct _fx_rLR9Ast__id_t_data_t* dvar_templ_inst; struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* dvar_ifaces; struct _fx_LN12Ast__scope_t_data_t* dvar_scope; struct _fx_R10Ast__loc_t dvar_loc; } _fx_R17Ast__defvariant_t; typedef struct _fx_rR17Ast__defvariant_t_data_t { int_ rc; struct _fx_R17Ast__defvariant_t data; } _fx_rR17Ast__defvariant_t_data_t, *_fx_rR17Ast__defvariant_t; typedef struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t { struct _fx_R9Ast__id_t t0; struct _fx_N10Ast__typ_t_data_t* t1; struct _fx_R16Ast__fun_flags_t t2; } _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t; typedef struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t { int_ rc; struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* tl; struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t hd; } _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t, *_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t; typedef struct _fx_R19Ast__definterface_t { struct _fx_R9Ast__id_t di_name; struct _fx_R9Ast__id_t di_base; struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* di_new_methods; struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* di_all_methods; struct _fx_LN12Ast__scope_t_data_t* di_scope; struct _fx_R10Ast__loc_t di_loc; } _fx_R19Ast__definterface_t; typedef struct _fx_rR19Ast__definterface_t_data_t { int_ rc; struct _fx_R19Ast__definterface_t data; } _fx_rR19Ast__definterface_t_data_t, *_fx_rR19Ast__definterface_t; typedef struct _fx_N14Ast__id_info_t { int tag; union { struct _fx_R13Ast__defval_t IdDVal; struct _fx_rR13Ast__deffun_t_data_t* IdFun; struct _fx_rR13Ast__defexn_t_data_t* IdExn; struct _fx_rR13Ast__deftyp_t_data_t* IdTyp; struct _fx_rR17Ast__defvariant_t_data_t* IdVariant; struct _fx_rR19Ast__definterface_t_data_t* IdInterface; int_ IdModule; } u; } _fx_N14Ast__id_info_t; typedef struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t { int_ t0; fx_arr_t t1; struct 
_fx_N14Ast__id_info_t t2; } _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t; typedef struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t { int_ rc; union { struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t t; } u; } _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t, *_fx_Nt9Dynvec__t1N14Ast__id_info_t; typedef struct _fx_N12Map__color_t { int tag; } _fx_N12Map__color_t; typedef struct _fx_LN16Ast__env_entry_t_data_t { int_ rc; struct _fx_LN16Ast__env_entry_t_data_t* tl; struct _fx_N16Ast__env_entry_t_data_t* hd; } _fx_LN16Ast__env_entry_t_data_t, *_fx_LN16Ast__env_entry_t; typedef struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t { struct _fx_N12Map__color_t t0; struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t1; struct _fx_R9Ast__id_t t2; struct _fx_LN16Ast__env_entry_t_data_t* t3; struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t4; } _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t; typedef struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t { int_ rc; union { struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t Node; } u; } _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t, *_fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t; typedef struct _fx_T2R10Ast__loc_tS { struct _fx_R10Ast__loc_t t0; fx_str_t t1; } _fx_T2R10Ast__loc_tS; typedef struct _fx_T2il { int_ t0; int64_t t1; } _fx_T2il; typedef struct _fx_T2iq { int_ t0; uint64_t t1; } _fx_T2iq; typedef struct _fx_T2id { int_ t0; double t1; } _fx_T2id; typedef struct _fx_N10Ast__lit_t { int tag; union { int64_t LitInt; struct _fx_T2il LitSInt; struct _fx_T2iq LitUInt; struct _fx_T2id LitFloat; fx_str_t LitString; char_ LitChar; bool LitBool; } u; } _fx_N10Ast__lit_t; 
typedef struct _fx_rNt6option1N10Ast__typ_t_data_t { int_ rc; struct _fx_Nt6option1N10Ast__typ_t_data_t* data; } _fx_rNt6option1N10Ast__typ_t_data_t, *_fx_rNt6option1N10Ast__typ_t; typedef struct _fx_LN10Ast__typ_t_data_t { int_ rc; struct _fx_LN10Ast__typ_t_data_t* tl; struct _fx_N10Ast__typ_t_data_t* hd; } _fx_LN10Ast__typ_t_data_t, *_fx_LN10Ast__typ_t; typedef struct _fx_T2LN10Ast__typ_tN10Ast__typ_t { struct _fx_LN10Ast__typ_t_data_t* t0; struct _fx_N10Ast__typ_t_data_t* t1; } _fx_T2LN10Ast__typ_tN10Ast__typ_t; typedef struct _fx_T2iN10Ast__typ_t { int_ t0; struct _fx_N10Ast__typ_t_data_t* t1; } _fx_T2iN10Ast__typ_t; typedef struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t { struct _fx_R16Ast__val_flags_t t0; struct _fx_R9Ast__id_t t1; struct _fx_N10Ast__typ_t_data_t* t2; struct _fx_N10Ast__exp_t_data_t* t3; } _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t; typedef struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t { int_ rc; struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* tl; struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t hd; } _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t, *_fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t; typedef struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB { struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* t0; bool t1; } _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB; typedef struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t { int_ rc; struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB data; } _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t, *_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB; typedef struct _fx_T2LN10Ast__typ_tR9Ast__id_t { struct _fx_LN10Ast__typ_t_data_t* t0; struct _fx_R9Ast__id_t t1; } 
_fx_T2LN10Ast__typ_tR9Ast__id_t; typedef struct _fx_N10Ast__typ_t_data_t { int_ rc; int tag; union { struct _fx_rNt6option1N10Ast__typ_t_data_t* TypVar; struct _fx_Nt6option1N10Ast__typ_t_data_t* TypVarTuple; struct _fx_N10Ast__typ_t_data_t* TypVarArray; int_ TypSInt; int_ TypUInt; int_ TypFloat; struct _fx_T2LN10Ast__typ_tN10Ast__typ_t TypFun; struct _fx_N10Ast__typ_t_data_t* TypList; struct _fx_N10Ast__typ_t_data_t* TypVector; struct _fx_LN10Ast__typ_t_data_t* TypTuple; struct _fx_N10Ast__typ_t_data_t* TypRef; struct _fx_T2iN10Ast__typ_t TypArray; struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t* TypRecord; struct _fx_T2LN10Ast__typ_tR9Ast__id_t TypApp; } u; } _fx_N10Ast__typ_t_data_t, *_fx_N10Ast__typ_t; typedef struct _fx_N12Ast__cmpop_t { int tag; } _fx_N12Ast__cmpop_t; typedef struct _fx_N13Ast__binary_t_data_t { int_ rc; int tag; union { struct _fx_N12Ast__cmpop_t OpCmp; struct _fx_N12Ast__cmpop_t OpDotCmp; struct _fx_N13Ast__binary_t_data_t* OpAugBinary; } u; } _fx_N13Ast__binary_t_data_t, *_fx_N13Ast__binary_t; typedef struct _fx_N12Ast__unary_t { int tag; } _fx_N12Ast__unary_t; typedef struct _fx_N13Ast__intrin_t { int tag; union { struct _fx_R9Ast__id_t IntrinMath; } u; } _fx_N13Ast__intrin_t; typedef struct _fx_N15Ast__for_make_t { int tag; } _fx_N15Ast__for_make_t; typedef struct _fx_R16Ast__for_flags_t { bool for_flag_parallel; struct _fx_N15Ast__for_make_t for_flag_make; bool for_flag_unzip; bool for_flag_fold; bool for_flag_nested; } _fx_R16Ast__for_flags_t; typedef struct _fx_N13Ast__border_t { int tag; } _fx_N13Ast__border_t; typedef struct _fx_N18Ast__interpolate_t { int tag; } _fx_N18Ast__interpolate_t; typedef struct _fx_T2BR10Ast__loc_t { bool t0; struct _fx_R10Ast__loc_t t1; } _fx_T2BR10Ast__loc_t; typedef struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t { struct _fx_Nt6option1N10Ast__exp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t; typedef struct 
_fx_T2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__typ_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_Nt6option1N10Ast__exp_t_data_t* t0; struct _fx_Nt6option1N10Ast__exp_t_data_t* t1; struct _fx_Nt6option1N10Ast__exp_t_data_t* t2; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3; } _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__lit_t t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N13Ast__binary_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_N10Ast__exp_t_data_t* t2; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3; } _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N12Ast__unary_t t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_LN10Ast__exp_t_data_t { int_ rc; struct _fx_LN10Ast__exp_t_data_t* tl; struct _fx_N10Ast__exp_t_data_t* hd; } _fx_LN10Ast__exp_t_data_t, *_fx_LN10Ast__exp_t; typedef struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N13Ast__intrin_t t0; struct _fx_LN10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN10Ast__exp_t 
{ struct _fx_R9Ast__id_t t0; struct _fx_N10Ast__exp_t_data_t* t1; } _fx_T2R9Ast__id_tN10Ast__exp_t; typedef struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_LN10Ast__exp_t_data_t* t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_LLN10Ast__exp_t_data_t { int_ rc; struct _fx_LLN10Ast__exp_t_data_t* tl; struct _fx_LN10Ast__exp_t_data_t* hd; } _fx_LLN10Ast__exp_t_data_t, *_fx_LLN10Ast__exp_t; typedef struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_LLN10Ast__exp_t_data_t* t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* tl; struct _fx_T2R9Ast__id_tN10Ast__exp_t hd; } _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t, *_fx_LT2R9Ast__id_tN10Ast__exp_t; typedef struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_LN10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_N13Ast__border_t t1; struct _fx_N18Ast__interpolate_t t2; struct _fx_LN10Ast__exp_t_data_t* t3; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t4; } _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct 
_fx_N10Ast__exp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__exp_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N10Ast__exp_tR10Ast__loc_t; typedef struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_N10Ast__exp_t_data_t* t2; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3; } _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__pat_tN10Ast__exp_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; } _fx_T2N10Ast__pat_tN10Ast__exp_t; typedef struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t { int_ rc; struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* tl; struct _fx_T2N10Ast__pat_tN10Ast__exp_t hd; } _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t, *_fx_LT2N10Ast__pat_tN10Ast__exp_t; typedef struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t { struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0; struct _fx_N10Ast__pat_t_data_t* t1; struct _fx_N10Ast__exp_t_data_t* t2; struct _fx_R16Ast__for_flags_t t3; struct _fx_R10Ast__loc_t t4; } _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t; typedef struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t { struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0; struct _fx_N10Ast__pat_t_data_t* t1; } _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t; typedef struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t { int_ rc; struct 
_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* tl; struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t hd; } _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t, *_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t; typedef struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_R16Ast__for_flags_t t2; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3; } _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_N10Ast__typ_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t { fx_str_t t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2ST2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t { fx_str_t t0; fx_str_t t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3SST2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_R16Ast__val_flags_t t2; struct _fx_R10Ast__loc_t t3; } _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t; typedef struct _fx_T2iR9Ast__id_t { int_ t0; struct _fx_R9Ast__id_t t1; } _fx_T2iR9Ast__id_t; typedef struct _fx_LT2iR9Ast__id_t_data_t { int_ rc; struct 
_fx_LT2iR9Ast__id_t_data_t* tl; struct _fx_T2iR9Ast__id_t hd; } _fx_LT2iR9Ast__id_t_data_t, *_fx_LT2iR9Ast__id_t; typedef struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t { struct _fx_LT2iR9Ast__id_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2LT2iR9Ast__id_tR10Ast__loc_t; typedef struct _fx_T3iLR9Ast__id_tR10Ast__loc_t { int_ t0; struct _fx_LR9Ast__id_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3iLR9Ast__id_tR10Ast__loc_t; typedef struct _fx_T2LSR10Ast__loc_t { struct _fx_LS_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2LSR10Ast__loc_t; typedef struct _fx_N10Ast__exp_t_data_t { int_ rc; int tag; union { struct _fx_R10Ast__loc_t ExpNop; struct _fx_T2BR10Ast__loc_t ExpBreak; struct _fx_R10Ast__loc_t ExpContinue; struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t ExpReturn; struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpRange; struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t ExpLit; struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t ExpIdent; struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpBinary; struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpUnary; struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpIntrin; struct _fx_T2R9Ast__id_tN10Ast__exp_t ExpSync; struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpSeq; struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkTuple; struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkArray; struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkVector; struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkRecord; struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpUpdateRecord; struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpCall; struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpAt; 
struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t ExpAssign; struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMem; struct _fx_T2N10Ast__exp_tR10Ast__loc_t ExpThrow; struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpIf; struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t ExpWhile; struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t ExpDoWhile; struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t ExpFor; struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t ExpMap; struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpTryCatch; struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMatch; struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t ExpCast; struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t ExpTyped; struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t ExpCCode; struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t ExpData; struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t DefVal; struct _fx_rR13Ast__deffun_t_data_t* DefFun; struct _fx_rR13Ast__defexn_t_data_t* DefExn; struct _fx_rR13Ast__deftyp_t_data_t* DefTyp; struct _fx_rR17Ast__defvariant_t_data_t* DefVariant; struct _fx_rR19Ast__definterface_t_data_t* DefInterface; struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t DirImport; struct _fx_T3iLR9Ast__id_tR10Ast__loc_t DirImportFrom; struct _fx_T2LSR10Ast__loc_t DirPragma; } u; } _fx_N10Ast__exp_t_data_t, *_fx_N10Ast__exp_t; typedef struct _fx_T2N10Ast__lit_tR10Ast__loc_t { struct _fx_N10Ast__lit_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N10Ast__lit_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2R9Ast__id_tR10Ast__loc_t; typedef struct _fx_T2LN10Ast__pat_tR10Ast__loc_t { struct _fx_LN10Ast__pat_t_data_t* t0; struct 
_fx_R10Ast__loc_t t1; } _fx_T2LN10Ast__pat_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_LN10Ast__pat_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN10Ast__pat_t { struct _fx_R9Ast__id_t t0; struct _fx_N10Ast__pat_t_data_t* t1; } _fx_T2R9Ast__id_tN10Ast__pat_t; typedef struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* tl; struct _fx_T2R9Ast__id_tN10Ast__pat_t hd; } _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t, *_fx_LT2R9Ast__id_tN10Ast__pat_t; typedef struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t { struct _fx_Nt6option1R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__pat_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_R9Ast__id_t t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__typ_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__pat_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N10Ast__pat_tR10Ast__loc_t; typedef struct _fx_N10Ast__pat_t_data_t { int_ rc; int tag; union { struct 
_fx_R10Ast__loc_t PatAny; struct _fx_T2N10Ast__lit_tR10Ast__loc_t PatLit; struct _fx_T2R9Ast__id_tR10Ast__loc_t PatIdent; struct _fx_T2LN10Ast__pat_tR10Ast__loc_t PatTuple; struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t PatVariant; struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t PatRecord; struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t PatCons; struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t PatAs; struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t PatTyped; struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t PatWhen; struct _fx_T2LN10Ast__pat_tR10Ast__loc_t PatAlt; struct _fx_T2N10Ast__pat_tR10Ast__loc_t PatRef; } u; } _fx_N10Ast__pat_t_data_t, *_fx_N10Ast__pat_t; typedef struct _fx_N16Ast__env_entry_t_data_t { int_ rc; int tag; union { struct _fx_R9Ast__id_t EnvId; struct _fx_N10Ast__typ_t_data_t* EnvTyp; } u; } _fx_N16Ast__env_entry_t_data_t, *_fx_N16Ast__env_entry_t; typedef struct _fx_T2SR10Ast__loc_t { fx_str_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2SR10Ast__loc_t; typedef struct _fx_LT2SR10Ast__loc_t_data_t { int_ rc; struct _fx_LT2SR10Ast__loc_t_data_t* tl; struct _fx_T2SR10Ast__loc_t hd; } _fx_LT2SR10Ast__loc_t_data_t, *_fx_LT2SR10Ast__loc_t; typedef struct _fx_Li_data_t { int_ rc; struct _fx_Li_data_t* tl; int_ hd; } _fx_Li_data_t, *_fx_Li; typedef struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t { struct _fx_R9Ast__id_t t0; fx_str_t t1; int_ t2; bool t3; struct _fx_LN10Ast__exp_t_data_t* t4; struct _fx_Li_data_t* t5; struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t t6; bool t7; int_ t8; struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t* t9; } _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t; typedef struct _fx_N16Ast__defmodule_t_data_t { int_ rc; union { struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t 
defmodule_t; } u; } _fx_N16Ast__defmodule_t_data_t, *_fx_N16Ast__defmodule_t; typedef struct _fx_LE_data_t { int_ rc; struct _fx_LE_data_t* tl; fx_exn_t hd; } _fx_LE_data_t, *_fx_LE; typedef struct _fx_T2BS { bool t0; fx_str_t t1; } _fx_T2BS; typedef struct _fx_N14Lexer__token_t { int tag; union { struct _fx_N10Ast__lit_t LITERAL; struct _fx_T2BS IDENT; fx_str_t TYVAR; fx_str_t DATA; bool FOR; bool IMPORT; bool REF; bool RETURN; bool WHILE; bool LPAREN; bool LSQUARE; bool BACKSLASH; bool MINUS; bool PLUS; bool STAR; bool DOT_PLUS; bool DOT_MINUS; struct _fx_N13Ast__binary_t_data_t* AUG_BINOP; struct _fx_N12Ast__cmpop_t CMP; struct _fx_N12Ast__cmpop_t DOT_CMP; fx_str_t RESERVED; } u; } _fx_N14Lexer__token_t; typedef struct _fx_LN14Lexer__token_t_data_t { int_ rc; struct _fx_LN14Lexer__token_t_data_t* tl; struct _fx_N14Lexer__token_t hd; } _fx_LN14Lexer__token_t_data_t, *_fx_LN14Lexer__token_t; typedef struct _fx_N14K_form__klit_t { int tag; union { int64_t KLitInt; struct _fx_T2il KLitSInt; struct _fx_T2iq KLitUInt; struct _fx_T2id KLitFloat; fx_str_t KLitString; char_ KLitChar; bool KLitBool; struct _fx_N14K_form__ktyp_t_data_t* KLitNil; } u; } _fx_N14K_form__klit_t; typedef struct _fx_N14K_form__atom_t { int tag; union { struct _fx_R9Ast__id_t AtomId; struct _fx_N14K_form__klit_t AtomLit; } u; } _fx_N14K_form__atom_t; typedef struct _fx_Nt6option1N14K_form__atom_t { int tag; union { struct _fx_N14K_form__atom_t Some; } u; } _fx_Nt6option1N14K_form__atom_t; typedef struct _fx_LN14K_form__kexp_t_data_t { int_ rc; struct _fx_LN14K_form__kexp_t_data_t* tl; struct _fx_N14K_form__kexp_t_data_t* hd; } _fx_LN14K_form__kexp_t_data_t, *_fx_LN14K_form__kexp_t; typedef struct _fx_T2BN14K_form__atom_t { bool t0; struct _fx_N14K_form__atom_t t1; } _fx_T2BN14K_form__atom_t; typedef struct _fx_LT2BN14K_form__atom_t_data_t { int_ rc; struct _fx_LT2BN14K_form__atom_t_data_t* tl; struct _fx_T2BN14K_form__atom_t hd; } _fx_LT2BN14K_form__atom_t_data_t, *_fx_LT2BN14K_form__atom_t; 
typedef struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t { struct _fx_LN14K_form__ktyp_t_data_t* t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t; typedef struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t { struct _fx_R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* t1; } _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_T2iN14K_form__ktyp_t { int_ t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2iN14K_form__ktyp_t; typedef struct _fx_N14K_form__ktyp_t_data_t { int_ rc; int tag; union { int_ KTypSInt; int_ KTypUInt; int_ KTypFloat; struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t KTypFun; struct _fx_LN14K_form__ktyp_t_data_t* KTypTuple; struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t KTypRecord; struct _fx_R9Ast__id_t KTypName; struct _fx_T2iN14K_form__ktyp_t KTypArray; struct _fx_N14K_form__ktyp_t_data_t* KTypVector; struct _fx_N14K_form__ktyp_t_data_t* KTypList; struct _fx_N14K_form__ktyp_t_data_t* KTypRef; } u; } _fx_N14K_form__ktyp_t_data_t, *_fx_N14K_form__ktyp_t; typedef struct _fx_Ta3N14K_form__atom_t { struct _fx_N14K_form__atom_t t0; struct _fx_N14K_form__atom_t t1; struct _fx_N14K_form__atom_t t2; } _fx_Ta3N14K_form__atom_t; typedef struct _fx_N13K_form__dom_t { int tag; union { struct _fx_N14K_form__atom_t DomainElem; struct _fx_N14K_form__atom_t DomainFast; struct _fx_Ta3N14K_form__atom_t DomainRange; } u; } _fx_N13K_form__dom_t; typedef struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t { struct _fx_Nt6option1N14K_form__atom_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t; typedef struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__ktyp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__atom_t t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } 
_fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N13Ast__binary_t_data_t* t0; struct _fx_N14K_form__atom_t t1; struct _fx_N14K_form__atom_t t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N12Ast__unary_t t0; struct _fx_N14K_form__atom_t t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_LN14K_form__atom_t_data_t { int_ rc; struct _fx_LN14K_form__atom_t_data_t* tl; struct _fx_N14K_form__atom_t hd; } _fx_LN14K_form__atom_t_data_t, *_fx_LN14K_form__atom_t; typedef struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N13Ast__intrin_t t0; struct _fx_LN14K_form__atom_t_data_t* t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN14K_form__kexp_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__kexp_t_data_t* t1; } _fx_T2R9Ast__id_tN14K_form__kexp_t; typedef struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LN14K_form__kexp_t_data_t* t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__kexp_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_N14K_form__kexp_t_data_t* t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { 
struct _fx_R9Ast__id_t t0; struct _fx_LN14K_form__atom_t_data_t* t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; int_ t1; struct _fx_LN14K_form__atom_t_data_t* t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LN14K_form__atom_t_data_t* t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_R9Ast__id_t t1; struct _fx_LN14K_form__atom_t_data_t* t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_LLT2BN14K_form__atom_t_data_t { int_ rc; struct _fx_LLT2BN14K_form__atom_t_data_t* tl; struct _fx_LT2BN14K_form__atom_t_data_t* hd; } _fx_LLT2BN14K_form__atom_t_data_t, *_fx_LLT2BN14K_form__atom_t; typedef struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { bool t0; struct _fx_LLT2BN14K_form__atom_t_data_t* t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LT2BN14K_form__atom_t_data_t* t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_LN13K_form__dom_t_data_t { int_ rc; struct _fx_LN13K_form__dom_t_data_t* tl; struct _fx_N13K_form__dom_t hd; } _fx_LN13K_form__dom_t_data_t, *_fx_LN13K_form__dom_t; typedef struct 
_fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__atom_t t0; struct _fx_N13Ast__border_t t1; struct _fx_N18Ast__interpolate_t t2; struct _fx_LN13K_form__dom_t_data_t* t3; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t4; } _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; int_ t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__atom_t t1; struct _fx_R10Ast__loc_t t2; } _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t; typedef struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t { struct _fx_LN14K_form__kexp_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; } _fx_T2LN14K_form__kexp_tN14K_form__kexp_t; typedef struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t { int_ rc; struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* tl; struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t hd; } _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t, *_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t; typedef struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__kexp_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tBR10Ast__loc_t { struct _fx_R9Ast__id_t t0; bool t1; struct _fx_R10Ast__loc_t t2; } 
_fx_T3R9Ast__id_tBR10Ast__loc_t; typedef struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__atom_t t0; struct _fx_N14K_form__ktyp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN13K_form__dom_t { struct _fx_R9Ast__id_t t0; struct _fx_N13K_form__dom_t t1; } _fx_T2R9Ast__id_tN13K_form__dom_t; typedef struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* tl; struct _fx_T2R9Ast__id_tN13K_form__dom_t hd; } _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t, *_fx_LT2R9Ast__id_tN13K_form__dom_t; typedef struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t { struct _fx_N14K_form__kexp_t_data_t* t0; struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t1; struct _fx_LR9Ast__id_t_data_t* t2; } _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t; typedef struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t { int_ rc; struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* tl; struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t hd; } _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t, *_fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t; typedef struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_R16Ast__for_flags_t t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t { struct 
_fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t0; struct _fx_LR9Ast__id_t_data_t* t1; struct _fx_N14K_form__kexp_t_data_t* t2; struct _fx_R16Ast__for_flags_t t3; struct _fx_R10Ast__loc_t t4; } _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t; typedef struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t { struct _fx_N14K_form__kexp_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t; typedef struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t { fx_str_t t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t; typedef struct _fx_N14K_form__kexp_t_data_t { int_ rc; int tag; union { struct _fx_R10Ast__loc_t KExpNop; struct _fx_R10Ast__loc_t KExpBreak; struct _fx_R10Ast__loc_t KExpContinue; struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t KExpReturn; struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpAtom; struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpBinary; struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpUnary; struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpIntrin; struct _fx_T2R9Ast__id_tN14K_form__kexp_t KExpSync; struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpSeq; struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpIf; struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpCall; struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpICall; struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkTuple; struct 
_fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkRecord; struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkClosure; struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkArray; struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkVector; struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpAt; struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t KExpMem; struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t KExpAssign; struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMatch; struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpTryCatch; struct _fx_T3R9Ast__id_tBR10Ast__loc_t KExpThrow; struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t KExpCast; struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMap; struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t KExpFor; struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t KExpWhile; struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t KExpDoWhile; struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t KExpCCode; struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t KDefVal; struct _fx_rR17K_form__kdeffun_t_data_t* KDefFun; struct _fx_rR17K_form__kdefexn_t_data_t* KDefExn; struct _fx_rR21K_form__kdefvariant_t_data_t* KDefVariant; struct _fx_rR23K_form__kdefinterface_t_data_t* KDefInterface; struct _fx_rR17K_form__kdeftyp_t_data_t* KDefTyp; struct _fx_rR25K_form__kdefclosurevars_t_data_t* KDefClosureVars; } u; } _fx_N14K_form__kexp_t_data_t, *_fx_N14K_form__kexp_t; typedef struct _fx_R14Ast__pragmas_t { bool pragma_cpp; struct _fx_LT2SR10Ast__loc_t_data_t* pragma_clibs; } _fx_R14Ast__pragmas_t; typedef struct 
_fx_R17K_form__kmodule_t { struct _fx_R9Ast__id_t km_name; int_ km_idx; int_ km_toposort_idx; fx_str_t km_cname; struct _fx_LN14K_form__kexp_t_data_t* km_top; struct _fx_Li_data_t* km_deps; bool km_skip; bool km_main; struct _fx_R14Ast__pragmas_t km_pragmas; } _fx_R17K_form__kmodule_t; typedef struct _fx_LR17K_form__kmodule_t_data_t { int_ rc; struct _fx_LR17K_form__kmodule_t_data_t* tl; struct _fx_R17K_form__kmodule_t hd; } _fx_LR17K_form__kmodule_t_data_t, *_fx_LR17K_form__kmodule_t; typedef struct _fx_Nt6option1N14C_form__ctyp_t { int tag; union { struct _fx_N14C_form__ctyp_t_data_t* Some; } u; } _fx_Nt6option1N14C_form__ctyp_t; typedef struct _fx_N17C_form__cbinary_t { int tag; union { struct _fx_N12Ast__cmpop_t COpCmp; } u; } _fx_N17C_form__cbinary_t; typedef struct _fx_N16C_form__cunary_t { int tag; } _fx_N16C_form__cunary_t; typedef struct _fx_N19C_form__ctyp_attr_t { int tag; } _fx_N19C_form__ctyp_attr_t; typedef struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t { struct _fx_Nt6option1R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* t1; } _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_LN14C_form__ctyp_t_data_t { int_ rc; struct _fx_LN14C_form__ctyp_t_data_t* tl; struct _fx_N14C_form__ctyp_t_data_t* hd; } _fx_LN14C_form__ctyp_t_data_t, *_fx_LN14C_form__ctyp_t; typedef struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t { struct _fx_LN14C_form__ctyp_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t; typedef struct _fx_LN19C_form__ctyp_attr_t_data_t { int_ rc; struct _fx_LN19C_form__ctyp_attr_t_data_t* tl; struct _fx_N19C_form__ctyp_attr_t hd; } _fx_LN19C_form__ctyp_attr_t_data_t, *_fx_LN19C_form__ctyp_attr_t; typedef struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t { struct _fx_LN19C_form__ctyp_attr_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t; typedef struct _fx_T2iN14C_form__ctyp_t { 
int_ t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2iN14C_form__ctyp_t; typedef struct _fx_N14C_form__ctyp_t_data_t { int_ rc; int tag; union { int_ CTypSInt; int_ CTypUInt; int_ CTypFloat; struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t CTypStruct; struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t CTypUnion; struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t CTypFunRawPtr; struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t CTypRawPtr; struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t CTypRawArray; struct _fx_T2iN14C_form__ctyp_t CTypArray; struct _fx_N14C_form__ctyp_t_data_t* CTypVector; struct _fx_R9Ast__id_t CTypName; } u; } _fx_N14C_form__ctyp_t_data_t, *_fx_N14C_form__ctyp_t; typedef struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__ctyp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14K_form__klit_t t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N17C_form__cbinary_t t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_N14C_form__cexp_t_data_t* t2; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t3; } _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N16C_form__cunary_t t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2; } _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct 
_fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_R9Ast__id_t t1; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_N14C_form__cexp_t_data_t* t2; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t3; } _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_LN14C_form__cexp_t_data_t { int_ rc; struct _fx_LN14C_form__cexp_t_data_t* tl; struct _fx_N14C_form__cexp_t_data_t* hd; } _fx_LN14C_form__cexp_t_data_t, *_fx_LN14C_form__cexp_t; typedef struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LN14C_form__cexp_t_data_t* t1; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_LN14C_form__cexp_t_data_t* t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_N14C_form__cexp_t_data_t { int_ rc; int tag; union { struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpIdent; struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t CExpLit; struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpBinary; struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t 
CExpUnary; struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpMem; struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpArrow; struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t CExpCast; struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpTernary; struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpCall; struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpInit; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t CExpTyp; struct _fx_T2SR10Ast__loc_t CExpCCode; } u; } _fx_N14C_form__cexp_t_data_t, *_fx_N14C_form__cexp_t; typedef struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t { struct _fx_Nt6option1N14C_form__cexp_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t; typedef struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t { struct _fx_LN15C_form__cstmt_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2LN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN15C_form__cstmt_t { struct _fx_R9Ast__id_t t0; struct _fx_N15C_form__cstmt_t_data_t* t1; } _fx_T2R9Ast__id_tN15C_form__cstmt_t; typedef struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N15C_form__cstmt_t_data_t* t1; struct _fx_N15C_form__cstmt_t_data_t* t2; struct _fx_R10Ast__loc_t t3; } _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t { struct _fx_Nt6option1N14C_form__ctyp_t t0; struct _fx_LN14C_form__cexp_t_data_t* t1; struct _fx_Nt6option1N14C_form__cexp_t t2; struct _fx_LN14C_form__cexp_t_data_t* t3; struct _fx_N15C_form__cstmt_t_data_t* t4; struct _fx_R10Ast__loc_t t5; } 
_fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t { struct _fx_N15C_form__cstmt_t_data_t* t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t; typedef struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t { struct _fx_LN14C_form__cexp_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; } _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t { int_ rc; struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl; struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t hd; } _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t, *_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t { struct _fx_N14C_form__ctyp_t_data_t* t0; struct _fx_R9Ast__id_t t1; struct _fx_Nt6option1N14C_form__cexp_t t2; struct _fx_R10Ast__loc_t t3; } _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t; typedef struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; } _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t { int_ rc; struct 
_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl; struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t hd; } _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t, *_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t { struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_N15C_form__cstmt_t_data_t { int_ rc; int tag; union { struct _fx_R10Ast__loc_t CStmtNop; struct _fx_T2SR10Ast__loc_t CComment; struct _fx_N14C_form__cexp_t_data_t* CExp; struct _fx_R10Ast__loc_t CStmtBreak; struct _fx_R10Ast__loc_t CStmtContinue; struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t CStmtReturn; struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t CStmtBlock; struct _fx_T2R9Ast__id_tN15C_form__cstmt_t CStmtSync; struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t CStmtIf; struct _fx_T2R9Ast__id_tR10Ast__loc_t CStmtGoto; struct _fx_T2R9Ast__id_tR10Ast__loc_t CStmtLabel; struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t CStmtFor; struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t CStmtWhile; struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t CStmtDoWhile; struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t CStmtSwitch; struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t CDefVal; struct _fx_rR17C_form__cdeffun_t_data_t* CDefFun; struct _fx_rR17C_form__cdeftyp_t_data_t* CDefTyp; struct _fx_T2R9Ast__id_tR10Ast__loc_t CDefForwardSym; struct _fx_T2R9Ast__id_tR10Ast__loc_t CDefForwardTyp; struct _fx_rR18C_form__cdefenum_t_data_t* CDefEnum; struct _fx_rR23C_form__cdefinterface_t_data_t* CDefInterface; struct 
_fx_rR19C_form__cdefmacro_t_data_t* CMacroDef; struct _fx_T2R9Ast__id_tR10Ast__loc_t CMacroUndef; struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t CMacroIf; struct _fx_T2SR10Ast__loc_t CMacroInclude; struct _fx_T2SR10Ast__loc_t CMacroPragma; } u; } _fx_N15C_form__cstmt_t_data_t, *_fx_N15C_form__cstmt_t; typedef struct _fx_R17C_form__cmodule_t { struct _fx_R9Ast__id_t cmod_name; fx_str_t cmod_cname; struct _fx_LN15C_form__cstmt_t_data_t* cmod_ccode; bool cmod_main; bool cmod_recompile; bool cmod_skip; struct _fx_R14Ast__pragmas_t cmod_pragmas; } _fx_R17C_form__cmodule_t; typedef struct _fx_LR17C_form__cmodule_t_data_t { int_ rc; struct _fx_LR17C_form__cmodule_t_data_t* tl; struct _fx_R17C_form__cmodule_t hd; } _fx_LR17C_form__cmodule_t_data_t, *_fx_LR17C_form__cmodule_t; typedef struct _fx_N20Compiler__msgcolor_t { int tag; } _fx_N20Compiler__msgcolor_t; typedef struct _fx_T2LN14Lexer__token_tB { struct _fx_LN14Lexer__token_t_data_t* t0; bool t1; } _fx_T2LN14Lexer__token_tB; typedef struct _fx_T2SB { fx_str_t t0; bool t1; } _fx_T2SB; typedef struct _fx_LT2SB_data_t { int_ rc; struct _fx_LT2SB_data_t* tl; struct _fx_T2SB hd; } _fx_LT2SB_data_t, *_fx_LT2SB; typedef struct _fx_T2SLS { fx_str_t t0; struct _fx_LS_data_t* t1; } _fx_T2SLS; typedef struct _fx_Ta2LS { struct _fx_LS_data_t* t0; struct _fx_LS_data_t* t1; } _fx_Ta2LS; typedef struct _fx_T2iLi { int_ t0; struct _fx_Li_data_t* t1; } _fx_T2iLi; typedef struct _fx_LT2iLi_data_t { int_ rc; struct _fx_LT2iLi_data_t* tl; struct _fx_T2iLi hd; } _fx_LT2iLi_data_t, *_fx_LT2iLi; typedef struct _fx_rLi_data_t { int_ rc; struct _fx_Li_data_t* data; } _fx_rLi_data_t, *_fx_rLi; typedef struct _fx_T3BBS { bool t0; bool t1; fx_str_t t2; } _fx_T3BBS; typedef struct _fx_T2LR17K_form__kmodule_tB { struct _fx_LR17K_form__kmodule_t_data_t* t0; bool t1; } _fx_T2LR17K_form__kmodule_tB; typedef struct _fx_Ta9S { fx_str_t t0; fx_str_t t1; fx_str_t t2; fx_str_t t3; fx_str_t t4; fx_str_t t5; fx_str_t 
t6; fx_str_t t7; fx_str_t t8; } _fx_Ta9S;

/* Fixed-arity string tuple/array value types (2, 3 and 4 fx_str_t fields). */
typedef struct _fx_Ta2S { fx_str_t t0; fx_str_t t1; } _fx_Ta2S; typedef struct _fx_Ta3S { fx_str_t t0; fx_str_t t1; fx_str_t t2; } _fx_Ta3S; typedef struct _fx_Ta4S { fx_str_t t0; fx_str_t t1; fx_str_t t2; fx_str_t t3; } _fx_Ta4S;

/* Mixed bool / string-list / string tuple types. */
typedef struct _fx_T5BBLSBS { bool t0; bool t1; struct _fx_LS_data_t* t2; bool t3; fx_str_t t4; } _fx_T5BBLSBS; typedef struct _fx_T5BBLSBLS { bool t0; bool t1; struct _fx_LS_data_t* t2; bool t3; struct _fx_LS_data_t* t4; } _fx_T5BBLSBLS; typedef struct _fx_T2LR17C_form__cmodule_tB { struct _fx_LR17C_form__cmodule_t_data_t* t0; bool t1; } _fx_T2LR17C_form__cmodule_tB;

/* Reference-counted payload boxes for exception values (Exit, Fail,
   LexerError, CompileError, ParseError); 'rc' is the reference count,
   'data' the value the exception carries. */
typedef struct { int_ rc; int_ data; } _fx_E4Exit_data_t; typedef struct { int_ rc; fx_str_t data; } _fx_E4Fail_data_t; typedef struct { int_ rc; struct _fx_T2Ta2iS data; } _fx_E22LexerUtils__LexerError_data_t; typedef struct { int_ rc; struct _fx_T2R10Ast__loc_tS data; } _fx_E17Ast__CompileError_data_t; typedef struct { int_ rc; struct _fx_T2R10Ast__loc_tS data; } _fx_E18Parser__ParseError_data_t;

/* Drop one reference to an optional Ast typ; when FX_DECREF reports the
   last reference (returns 1), free the Some payload and the box itself,
   then null the caller's pointer. */
static void _fx_free_Nt6option1N10Ast__typ_t(struct _fx_Nt6option1N10Ast__typ_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { _fx_free_N10Ast__typ_t(&(*dst)->u.Some); fx_free(*dst); } *dst = 0; }

/* String list (LS): destructor and cons, expanded from the generic list
   macros with fx_free_str / fx_copy_str as the element handlers. */
static void _fx_free_LS(struct _fx_LS_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LS, fx_free_str); } static int _fx_cons_LS(fx_str_t* hd, struct _fx_LS_data_t* tl, bool addref_tl, struct _fx_LS_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LS, fx_copy_str); }

/* Options variant value: only tag 3 (OptString) owns heap data (a string).
   The destructor releases it and resets the tag; the copy deep-copies the
   string for that tag and does a plain union copy otherwise. */
static void _fx_free_N17Options__optval_t(struct _fx_N17Options__optval_t* dst) { switch (dst->tag) { case 3: fx_free_str(&dst->u.OptString); break; default: ; } dst->tag = 0; } static void _fx_copy_N17Options__optval_t(struct _fx_N17Options__optval_t* src, struct _fx_N17Options__optval_t* dst) { dst->tag = src->tag; switch (src->tag) { case 3: fx_copy_str(&src->u.OptString, &dst->u.OptString); break; default: dst->u = src->u; } } static void _fx_free_T2SN17Options__optval_t(struct 
_fx_T2SN17Options__optval_t* dst) { fx_free_str(&dst->t0); _fx_free_N17Options__optval_t(&dst->t1); }

/* Copy/construct the (string, optval) pair: the string in t0 is
   deep-copied with fx_copy_str, t1 is delegated to the optval copy helper. */
static void _fx_copy_T2SN17Options__optval_t(struct _fx_T2SN17Options__optval_t* src, struct _fx_T2SN17Options__optval_t* dst) { fx_copy_str(&src->t0, &dst->t0); _fx_copy_N17Options__optval_t(&src->t1, &dst->t1); } static void _fx_make_T2SN17Options__optval_t( fx_str_t* t0, struct _fx_N17Options__optval_t* t1, struct _fx_T2SN17Options__optval_t* fx_result) { fx_copy_str(t0, &fx_result->t0); _fx_copy_N17Options__optval_t(t1, &fx_result->t1); }

/* List of (string, optval) pairs: destructor and cons via the generic
   list macros, with the pair free/copy helpers as element handlers. */
static void _fx_free_LT2SN17Options__optval_t(struct _fx_LT2SN17Options__optval_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2SN17Options__optval_t, _fx_free_T2SN17Options__optval_t); } static int _fx_cons_LT2SN17Options__optval_t( struct _fx_T2SN17Options__optval_t* hd, struct _fx_LT2SN17Options__optval_t_data_t* tl, bool addref_tl, struct _fx_LT2SN17Options__optval_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2SN17Options__optval_t, _fx_copy_T2SN17Options__optval_t); }

/* Destructor for the compiler Options record: releases every owned member
   (string lists, strings, the defines list); plain bool/int fields need
   no cleanup. */
static void _fx_free_R18Options__options_t(struct _fx_R18Options__options_t* dst) { _fx_free_LS(&dst->app_args); fx_free_str(&dst->app_filename); fx_free_str(&dst->build_dir); fx_free_str(&dst->build_rootdir); fx_free_str(&dst->cflags); fx_free_str(&dst->clibs); fx_free_str(&dst->filename); _fx_free_LS(&dst->include_path); _fx_free_LT2SN17Options__optval_t(&dst->defines); fx_free_str(&dst->output_name); }

/* Member-wise copy of the Options record: list members are copied with
   FX_COPY_PTR, strings deep-copied with fx_copy_str, scalars assigned.
   (Function body continues on the next source line.) */
static void _fx_copy_R18Options__options_t(struct _fx_R18Options__options_t* src, struct _fx_R18Options__options_t* dst) { FX_COPY_PTR(src->app_args, &dst->app_args); fx_copy_str(&src->app_filename, &dst->app_filename); dst->arch64 = src->arch64; dst->force_rebuild = src->force_rebuild; fx_copy_str(&src->build_dir, &dst->build_dir); fx_copy_str(&src->build_rootdir, &dst->build_rootdir); fx_copy_str(&src->cflags, &dst->cflags); fx_copy_str(&src->clibs, &dst->clibs); dst->compile_by_cpp = src->compile_by_cpp; fx_copy_str(&src->filename, &dst->filename); 
dst->gen_c = src->gen_c; FX_COPY_PTR(src->include_path, &dst->include_path); dst->debug = src->debug; FX_COPY_PTR(src->defines, &dst->defines); dst->optim_iters = src->optim_iters; dst->inline_thresh = src->inline_thresh; dst->enable_openmp = src->enable_openmp; dst->relax = src->relax; dst->use_preamble = src->use_preamble; dst->make_app = src->make_app; dst->optimize_level = src->optimize_level; fx_copy_str(&src->output_name, &dst->output_name); dst->print_ast0 = src->print_ast0; dst->print_ast = src->print_ast; dst->print_k0 = src->print_k0; dst->print_k = src->print_k; dst->print_tokens = src->print_tokens; dst->run_app = src->run_app; dst->verbose = src->verbose; dst->W_unused = src->W_unused; } static void _fx_make_R18Options__options_t( struct _fx_LS_data_t* r_app_args, fx_str_t* r_app_filename, bool r_arch64, bool r_force_rebuild, fx_str_t* r_build_dir, fx_str_t* r_build_rootdir, fx_str_t* r_cflags, fx_str_t* r_clibs, bool r_compile_by_cpp, fx_str_t* r_filename, bool r_gen_c, struct _fx_LS_data_t* r_include_path, bool r_debug, struct _fx_LT2SN17Options__optval_t_data_t* r_defines, int_ r_optim_iters, int_ r_inline_thresh, bool r_enable_openmp, bool r_relax, bool r_use_preamble, bool r_make_app, int_ r_optimize_level, fx_str_t* r_output_name, bool r_print_ast0, bool r_print_ast, bool r_print_k0, bool r_print_k, bool r_print_tokens, bool r_run_app, bool r_verbose, bool r_W_unused, struct _fx_R18Options__options_t* fx_result) { FX_COPY_PTR(r_app_args, &fx_result->app_args); fx_copy_str(r_app_filename, &fx_result->app_filename); fx_result->arch64 = r_arch64; fx_result->force_rebuild = r_force_rebuild; fx_copy_str(r_build_dir, &fx_result->build_dir); fx_copy_str(r_build_rootdir, &fx_result->build_rootdir); fx_copy_str(r_cflags, &fx_result->cflags); fx_copy_str(r_clibs, &fx_result->clibs); fx_result->compile_by_cpp = r_compile_by_cpp; fx_copy_str(r_filename, &fx_result->filename); fx_result->gen_c = r_gen_c; FX_COPY_PTR(r_include_path, &fx_result->include_path); 
/* (cont.) field-by-field initialization of the Options record from the
   r_* constructor arguments. */
fx_result->debug = r_debug; FX_COPY_PTR(r_defines, &fx_result->defines); fx_result->optim_iters = r_optim_iters; fx_result->inline_thresh = r_inline_thresh; fx_result->enable_openmp = r_enable_openmp; fx_result->relax = r_relax; fx_result->use_preamble = r_use_preamble; fx_result->make_app = r_make_app; fx_result->optimize_level = r_optimize_level; fx_copy_str(r_output_name, &fx_result->output_name); fx_result->print_ast0 = r_print_ast0; fx_result->print_ast = r_print_ast; fx_result->print_k0 = r_print_k0; fx_result->print_k = r_print_k; fx_result->print_tokens = r_print_tokens; fx_result->run_app = r_run_app; fx_result->verbose = r_verbose; fx_result->W_unused = r_W_unused; }

/* (int pair, string) tuple: only t1 (the string) owns memory; t0 is a
   plain value copied by assignment. */
static void _fx_free_T2Ta2iS(struct _fx_T2Ta2iS* dst) { fx_free_str(&dst->t1); } static void _fx_copy_T2Ta2iS(struct _fx_T2Ta2iS* src, struct _fx_T2Ta2iS* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); } static void _fx_make_T2Ta2iS(struct _fx_Ta2i* t0, fx_str_t* t1, struct _fx_T2Ta2iS* fx_result) { fx_result->t0 = *t0; fx_copy_str(t1, &fx_result->t1); }

/* Cons for the Ast scope list; elements are plain values
   (FX_COPY_SIMPLE_BY_PTR). */
static int _fx_cons_LN12Ast__scope_t( struct _fx_N12Ast__scope_t* hd, struct _fx_LN12Ast__scope_t_data_t* tl, bool addref_tl, struct _fx_LN12Ast__scope_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN12Ast__scope_t, FX_COPY_SIMPLE_BY_PTR); }

/* Ast value-flags record: val_flag_global (a scope list) is the only
   member that needs freeing; the copy assigns every scalar flag and
   copies the list pointer via FX_COPY_PTR (copy body continues on the
   next source line). */
static void _fx_free_R16Ast__val_flags_t(struct _fx_R16Ast__val_flags_t* dst) { fx_free_list_simple(&dst->val_flag_global); } static void _fx_copy_R16Ast__val_flags_t(struct _fx_R16Ast__val_flags_t* src, struct _fx_R16Ast__val_flags_t* dst) { dst->val_flag_arg = src->val_flag_arg; dst->val_flag_mutable = src->val_flag_mutable; dst->val_flag_temp = src->val_flag_temp; dst->val_flag_tempref = src->val_flag_tempref; dst->val_flag_private = src->val_flag_private; dst->val_flag_subarray = src->val_flag_subarray; dst->val_flag_instance = src->val_flag_instance; dst->val_flag_method = src->val_flag_method; dst->val_flag_ctor = src->val_flag_ctor; FX_COPY_PTR(src->val_flag_global, 
&dst->val_flag_global); } static void _fx_make_R16Ast__val_flags_t( bool r_val_flag_arg, bool r_val_flag_mutable, bool r_val_flag_temp, bool r_val_flag_tempref, bool r_val_flag_private, bool r_val_flag_subarray, bool r_val_flag_instance, struct _fx_T2R9Ast__id_ti* r_val_flag_method, int_ r_val_flag_ctor, struct _fx_LN12Ast__scope_t_data_t* r_val_flag_global, struct _fx_R16Ast__val_flags_t* fx_result) { fx_result->val_flag_arg = r_val_flag_arg; fx_result->val_flag_mutable = r_val_flag_mutable; fx_result->val_flag_temp = r_val_flag_temp; fx_result->val_flag_tempref = r_val_flag_tempref; fx_result->val_flag_private = r_val_flag_private; fx_result->val_flag_subarray = r_val_flag_subarray; fx_result->val_flag_instance = r_val_flag_instance; fx_result->val_flag_method = *r_val_flag_method; fx_result->val_flag_ctor = r_val_flag_ctor; FX_COPY_PTR(r_val_flag_global, &fx_result->val_flag_global); } static void _fx_free_T2R9Ast__id_tN14C_form__ctyp_t(struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* src, struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN14C_form__ctyp_t( struct _fx_R9Ast__id_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN14C_form__ctyp_t, _fx_free_T2R9Ast__id_tN14C_form__ctyp_t); } static int _fx_cons_LT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* hd, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN14C_form__ctyp_t, 
_fx_copy_T2R9Ast__id_tN14C_form__ctyp_t); } static void _fx_free_R23C_form__cdefinterface_t(struct _fx_R23C_form__cdefinterface_t* dst) { fx_free_str(&dst->ci_cname); _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(&dst->ci_all_methods); fx_free_list_simple(&dst->ci_scope); } static void _fx_copy_R23C_form__cdefinterface_t( struct _fx_R23C_form__cdefinterface_t* src, struct _fx_R23C_form__cdefinterface_t* dst) { dst->ci_name = src->ci_name; fx_copy_str(&src->ci_cname, &dst->ci_cname); dst->ci_id = src->ci_id; dst->ci_vtbl = src->ci_vtbl; dst->ci_base = src->ci_base; FX_COPY_PTR(src->ci_all_methods, &dst->ci_all_methods); FX_COPY_PTR(src->ci_scope, &dst->ci_scope); dst->ci_loc = src->ci_loc; } static void _fx_make_R23C_form__cdefinterface_t( struct _fx_R9Ast__id_t* r_ci_name, fx_str_t* r_ci_cname, struct _fx_R9Ast__id_t* r_ci_id, struct _fx_R9Ast__id_t* r_ci_vtbl, struct _fx_R9Ast__id_t* r_ci_base, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* r_ci_all_methods, struct _fx_LN12Ast__scope_t_data_t* r_ci_scope, struct _fx_R10Ast__loc_t* r_ci_loc, struct _fx_R23C_form__cdefinterface_t* fx_result) { fx_result->ci_name = *r_ci_name; fx_copy_str(r_ci_cname, &fx_result->ci_cname); fx_result->ci_id = *r_ci_id; fx_result->ci_vtbl = *r_ci_vtbl; fx_result->ci_base = *r_ci_base; FX_COPY_PTR(r_ci_all_methods, &fx_result->ci_all_methods); FX_COPY_PTR(r_ci_scope, &fx_result->ci_scope); fx_result->ci_loc = *r_ci_loc; } static void _fx_free_rR23C_form__cdefinterface_t(struct _fx_rR23C_form__cdefinterface_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR23C_form__cdefinterface_t, _fx_free_R23C_form__cdefinterface_t); } static int _fx_make_rR23C_form__cdefinterface_t( struct _fx_R23C_form__cdefinterface_t* arg, struct _fx_rR23C_form__cdefinterface_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR23C_form__cdefinterface_t, _fx_copy_R23C_form__cdefinterface_t); } static void _fx_free_LN15C_form__cstmt_t(struct _fx_LN15C_form__cstmt_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN15C_form__cstmt_t, 
_fx_free_N15C_form__cstmt_t); } static int _fx_cons_LN15C_form__cstmt_t( struct _fx_N15C_form__cstmt_t_data_t* hd, struct _fx_LN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN15C_form__cstmt_t, FX_COPY_PTR); } static int _fx_cons_LN19C_form__carg_attr_t( struct _fx_N19C_form__carg_attr_t* hd, struct _fx_LN19C_form__carg_attr_t_data_t* tl, bool addref_tl, struct _fx_LN19C_form__carg_attr_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN19C_form__carg_attr_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t1); fx_free_list_simple(&dst->t2); } static void _fx_copy_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* src, struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); } static void _fx_make_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_R9Ast__id_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_LN19C_form__carg_attr_t_data_t* t2, struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); } static void _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t, _fx_free_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t); } static int _fx_cons_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* hd, struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* tl, bool addref_tl, struct 
_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t, _fx_copy_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t); } static void _fx_free_R17C_form__cdeffun_t(struct _fx_R17C_form__cdeffun_t* dst) { fx_free_str(&dst->cf_cname); _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(&dst->cf_args); _fx_free_N14C_form__ctyp_t(&dst->cf_rt); _fx_free_LN15C_form__cstmt_t(&dst->cf_body); fx_free_list_simple(&dst->cf_scope); } static void _fx_copy_R17C_form__cdeffun_t(struct _fx_R17C_form__cdeffun_t* src, struct _fx_R17C_form__cdeffun_t* dst) { dst->cf_name = src->cf_name; fx_copy_str(&src->cf_cname, &dst->cf_cname); FX_COPY_PTR(src->cf_args, &dst->cf_args); FX_COPY_PTR(src->cf_rt, &dst->cf_rt); FX_COPY_PTR(src->cf_body, &dst->cf_body); dst->cf_flags = src->cf_flags; FX_COPY_PTR(src->cf_scope, &dst->cf_scope); dst->cf_loc = src->cf_loc; } static void _fx_make_R17C_form__cdeffun_t( struct _fx_R9Ast__id_t* r_cf_name, fx_str_t* r_cf_cname, struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* r_cf_args, struct _fx_N14C_form__ctyp_t_data_t* r_cf_rt, struct _fx_LN15C_form__cstmt_t_data_t* r_cf_body, struct _fx_R16Ast__fun_flags_t* r_cf_flags, struct _fx_LN12Ast__scope_t_data_t* r_cf_scope, struct _fx_R10Ast__loc_t* r_cf_loc, struct _fx_R17C_form__cdeffun_t* fx_result) { fx_result->cf_name = *r_cf_name; fx_copy_str(r_cf_cname, &fx_result->cf_cname); FX_COPY_PTR(r_cf_args, &fx_result->cf_args); FX_COPY_PTR(r_cf_rt, &fx_result->cf_rt); FX_COPY_PTR(r_cf_body, &fx_result->cf_body); fx_result->cf_flags = *r_cf_flags; FX_COPY_PTR(r_cf_scope, &fx_result->cf_scope); fx_result->cf_loc = *r_cf_loc; } static void _fx_free_rR17C_form__cdeffun_t(struct _fx_rR17C_form__cdeffun_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17C_form__cdeffun_t, _fx_free_R17C_form__cdeffun_t); } static int _fx_make_rR17C_form__cdeffun_t( struct _fx_R17C_form__cdeffun_t* arg, 
struct _fx_rR17C_form__cdeffun_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17C_form__cdeffun_t, _fx_copy_R17C_form__cdeffun_t); }

/* Cons for a list of Ast ids; elements are plain values. */
static int _fx_cons_LR9Ast__id_t( struct _fx_R9Ast__id_t* hd, struct _fx_LR9Ast__id_t_data_t* tl, bool addref_tl, struct _fx_LR9Ast__id_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LR9Ast__id_t, FX_COPY_SIMPLE_BY_PTR); }

/* C-type properties record (ctprops): ctp_make (an id list) is the only
   member requiring cleanup; ctp_free / ctp_copy are id pairs held by value. */
static void _fx_free_R17C_form__ctprops_t(struct _fx_R17C_form__ctprops_t* dst) { fx_free_list_simple(&dst->ctp_make); } static void _fx_copy_R17C_form__ctprops_t(struct _fx_R17C_form__ctprops_t* src, struct _fx_R17C_form__ctprops_t* dst) { dst->ctp_scalar = src->ctp_scalar; dst->ctp_complex = src->ctp_complex; dst->ctp_ptr = src->ctp_ptr; dst->ctp_pass_by_ref = src->ctp_pass_by_ref; FX_COPY_PTR(src->ctp_make, &dst->ctp_make); dst->ctp_free = src->ctp_free; dst->ctp_copy = src->ctp_copy; } static void _fx_make_R17C_form__ctprops_t( bool r_ctp_scalar, bool r_ctp_complex, bool r_ctp_ptr, bool r_ctp_pass_by_ref, struct _fx_LR9Ast__id_t_data_t* r_ctp_make, struct _fx_Ta2R9Ast__id_t* r_ctp_free, struct _fx_Ta2R9Ast__id_t* r_ctp_copy, struct _fx_R17C_form__ctprops_t* fx_result) { fx_result->ctp_scalar = r_ctp_scalar; fx_result->ctp_complex = r_ctp_complex; fx_result->ctp_ptr = r_ctp_ptr; fx_result->ctp_pass_by_ref = r_ctp_pass_by_ref; FX_COPY_PTR(r_ctp_make, &fx_result->ctp_make); fx_result->ctp_free = *r_ctp_free; fx_result->ctp_copy = *r_ctp_copy; }

/* C type-definition record (cdeftyp): destructor releases the owned type,
   cname string, props and the two simple lists (ifaces, scope); the copy
   (continuing on the next source line) copies pointer members with
   FX_COPY_PTR and deep-copies strings/props. */
static void _fx_free_R17C_form__cdeftyp_t(struct _fx_R17C_form__cdeftyp_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->ct_typ); fx_free_str(&dst->ct_cname); _fx_free_R17C_form__ctprops_t(&dst->ct_props); fx_free_list_simple(&dst->ct_ifaces); fx_free_list_simple(&dst->ct_scope); } static void _fx_copy_R17C_form__cdeftyp_t(struct _fx_R17C_form__cdeftyp_t* src, struct _fx_R17C_form__cdeftyp_t* dst) { dst->ct_name = src->ct_name; FX_COPY_PTR(src->ct_typ, &dst->ct_typ); fx_copy_str(&src->ct_cname, &dst->ct_cname); _fx_copy_R17C_form__ctprops_t(&src->ct_props, &dst->ct_props); 
dst->ct_data_start = src->ct_data_start; dst->ct_enum = src->ct_enum; FX_COPY_PTR(src->ct_ifaces, &dst->ct_ifaces); dst->ct_ifaces_id = src->ct_ifaces_id; FX_COPY_PTR(src->ct_scope, &dst->ct_scope); dst->ct_loc = src->ct_loc; } static void _fx_make_R17C_form__cdeftyp_t( struct _fx_R9Ast__id_t* r_ct_name, struct _fx_N14C_form__ctyp_t_data_t* r_ct_typ, fx_str_t* r_ct_cname, struct _fx_R17C_form__ctprops_t* r_ct_props, int_ r_ct_data_start, struct _fx_R9Ast__id_t* r_ct_enum, struct _fx_LR9Ast__id_t_data_t* r_ct_ifaces, struct _fx_R9Ast__id_t* r_ct_ifaces_id, struct _fx_LN12Ast__scope_t_data_t* r_ct_scope, struct _fx_R10Ast__loc_t* r_ct_loc, struct _fx_R17C_form__cdeftyp_t* fx_result) { fx_result->ct_name = *r_ct_name; FX_COPY_PTR(r_ct_typ, &fx_result->ct_typ); fx_copy_str(r_ct_cname, &fx_result->ct_cname); _fx_copy_R17C_form__ctprops_t(r_ct_props, &fx_result->ct_props); fx_result->ct_data_start = r_ct_data_start; fx_result->ct_enum = *r_ct_enum; FX_COPY_PTR(r_ct_ifaces, &fx_result->ct_ifaces); fx_result->ct_ifaces_id = *r_ct_ifaces_id; FX_COPY_PTR(r_ct_scope, &fx_result->ct_scope); fx_result->ct_loc = *r_ct_loc; } static void _fx_free_rR17C_form__cdeftyp_t(struct _fx_rR17C_form__cdeftyp_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17C_form__cdeftyp_t, _fx_free_R17C_form__cdeftyp_t); } static int _fx_make_rR17C_form__cdeftyp_t( struct _fx_R17C_form__cdeftyp_t* arg, struct _fx_rR17C_form__cdeftyp_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17C_form__cdeftyp_t, _fx_copy_R17C_form__cdeftyp_t); } static void _fx_free_Nt6option1N14C_form__cexp_t(struct _fx_Nt6option1N14C_form__cexp_t* dst) { switch (dst->tag) { case 2: _fx_free_N14C_form__cexp_t(&dst->u.Some); break; default: ; } dst->tag = 0; } static void _fx_copy_Nt6option1N14C_form__cexp_t( struct _fx_Nt6option1N14C_form__cexp_t* src, struct _fx_Nt6option1N14C_form__cexp_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: FX_COPY_PTR(src->u.Some, &dst->u.Some); break; default: dst->u = src->u; } } static void 
_fx_free_T2R9Ast__id_tNt6option1N14C_form__cexp_t(struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* dst) { _fx_free_Nt6option1N14C_form__cexp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t( struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* src, struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* dst) { dst->t0 = src->t0; _fx_copy_Nt6option1N14C_form__cexp_t(&src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tNt6option1N14C_form__cexp_t( struct _fx_R9Ast__id_t* t0, struct _fx_Nt6option1N14C_form__cexp_t* t1, struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* fx_result) { fx_result->t0 = *t0; _fx_copy_Nt6option1N14C_form__cexp_t(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t( struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t, _fx_free_T2R9Ast__id_tNt6option1N14C_form__cexp_t); } static int _fx_cons_LT2R9Ast__id_tNt6option1N14C_form__cexp_t( struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* hd, struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t, _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t); } static void _fx_free_R18C_form__cdefenum_t(struct _fx_R18C_form__cdefenum_t* dst) { _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(&dst->cenum_members); fx_free_str(&dst->cenum_cname); fx_free_list_simple(&dst->cenum_scope); } static void _fx_copy_R18C_form__cdefenum_t(struct _fx_R18C_form__cdefenum_t* src, struct _fx_R18C_form__cdefenum_t* dst) { dst->cenum_name = src->cenum_name; FX_COPY_PTR(src->cenum_members, &dst->cenum_members); fx_copy_str(&src->cenum_cname, &dst->cenum_cname); FX_COPY_PTR(src->cenum_scope, &dst->cenum_scope); dst->cenum_loc = src->cenum_loc; } static void _fx_make_R18C_form__cdefenum_t( struct _fx_R9Ast__id_t* 
r_cenum_name, struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* r_cenum_members, fx_str_t* r_cenum_cname, struct _fx_LN12Ast__scope_t_data_t* r_cenum_scope, struct _fx_R10Ast__loc_t* r_cenum_loc, struct _fx_R18C_form__cdefenum_t* fx_result) { fx_result->cenum_name = *r_cenum_name; FX_COPY_PTR(r_cenum_members, &fx_result->cenum_members); fx_copy_str(r_cenum_cname, &fx_result->cenum_cname); FX_COPY_PTR(r_cenum_scope, &fx_result->cenum_scope); fx_result->cenum_loc = *r_cenum_loc; } static void _fx_free_rR18C_form__cdefenum_t(struct _fx_rR18C_form__cdefenum_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR18C_form__cdefenum_t, _fx_free_R18C_form__cdefenum_t); } static int _fx_make_rR18C_form__cdefenum_t( struct _fx_R18C_form__cdefenum_t* arg, struct _fx_rR18C_form__cdefenum_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR18C_form__cdefenum_t, _fx_copy_R18C_form__cdefenum_t); } static void _fx_free_R19C_form__cdefmacro_t(struct _fx_R19C_form__cdefmacro_t* dst) { fx_free_str(&dst->cm_cname); fx_free_list_simple(&dst->cm_args); _fx_free_LN15C_form__cstmt_t(&dst->cm_body); fx_free_list_simple(&dst->cm_scope); } static void _fx_copy_R19C_form__cdefmacro_t(struct _fx_R19C_form__cdefmacro_t* src, struct _fx_R19C_form__cdefmacro_t* dst) { dst->cm_name = src->cm_name; fx_copy_str(&src->cm_cname, &dst->cm_cname); FX_COPY_PTR(src->cm_args, &dst->cm_args); FX_COPY_PTR(src->cm_body, &dst->cm_body); FX_COPY_PTR(src->cm_scope, &dst->cm_scope); dst->cm_loc = src->cm_loc; } static void _fx_make_R19C_form__cdefmacro_t( struct _fx_R9Ast__id_t* r_cm_name, fx_str_t* r_cm_cname, struct _fx_LR9Ast__id_t_data_t* r_cm_args, struct _fx_LN15C_form__cstmt_t_data_t* r_cm_body, struct _fx_LN12Ast__scope_t_data_t* r_cm_scope, struct _fx_R10Ast__loc_t* r_cm_loc, struct _fx_R19C_form__cdefmacro_t* fx_result) { fx_result->cm_name = *r_cm_name; fx_copy_str(r_cm_cname, &fx_result->cm_cname); FX_COPY_PTR(r_cm_args, &fx_result->cm_args); FX_COPY_PTR(r_cm_body, &fx_result->cm_body); 
FX_COPY_PTR(r_cm_scope, &fx_result->cm_scope); fx_result->cm_loc = *r_cm_loc; }

/* Ref-box free/make for the cdefmacro record, expanded from the generic
   ref macros. */
static void _fx_free_rR19C_form__cdefmacro_t(struct _fx_rR19C_form__cdefmacro_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR19C_form__cdefmacro_t, _fx_free_R19C_form__cdefmacro_t); } static int _fx_make_rR19C_form__cdefmacro_t( struct _fx_R19C_form__cdefmacro_t* arg, struct _fx_rR19C_form__cdefmacro_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR19C_form__cdefmacro_t, _fx_copy_R19C_form__cdefmacro_t); }

/* (id, ktyp) tuple: t0 is a plain id value, t1 an owned K-form type
   pointer; plus the matching list free/cons built from the list macros. */
static void _fx_free_T2R9Ast__id_tN14K_form__ktyp_t(struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* dst) { _fx_free_N14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN14K_form__ktyp_t( struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* src, struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN14K_form__ktyp_t( struct _fx_R9Ast__id_t* t0, struct _fx_N14K_form__ktyp_t_data_t* t1, struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN14K_form__ktyp_t, _fx_free_T2R9Ast__id_tN14K_form__ktyp_t); } static int _fx_cons_LT2R9Ast__id_tN14K_form__ktyp_t( struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* hd, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN14K_form__ktyp_t, _fx_copy_T2R9Ast__id_tN14K_form__ktyp_t); }

/* K-form interface definition record: frees the cname string, the method
   list and the scope list; the member-wise copy continues on the next
   source line. */
static void _fx_free_R23K_form__kdefinterface_t(struct _fx_R23K_form__kdefinterface_t* dst) { fx_free_str(&dst->ki_cname); _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->ki_all_methods); fx_free_list_simple(&dst->ki_scope); } static void _fx_copy_R23K_form__kdefinterface_t( struct _fx_R23K_form__kdefinterface_t* src, struct _fx_R23K_form__kdefinterface_t* dst) { dst->ki_name = 
src->ki_name; dst->ki_base = src->ki_base; fx_copy_str(&src->ki_cname, &dst->ki_cname); dst->ki_id = src->ki_id; FX_COPY_PTR(src->ki_all_methods, &dst->ki_all_methods); FX_COPY_PTR(src->ki_scope, &dst->ki_scope); dst->ki_loc = src->ki_loc; } static void _fx_make_R23K_form__kdefinterface_t( struct _fx_R9Ast__id_t* r_ki_name, struct _fx_R9Ast__id_t* r_ki_base, fx_str_t* r_ki_cname, struct _fx_R9Ast__id_t* r_ki_id, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* r_ki_all_methods, struct _fx_LN12Ast__scope_t_data_t* r_ki_scope, struct _fx_R10Ast__loc_t* r_ki_loc, struct _fx_R23K_form__kdefinterface_t* fx_result) { fx_result->ki_name = *r_ki_name; fx_result->ki_base = *r_ki_base; fx_copy_str(r_ki_cname, &fx_result->ki_cname); fx_result->ki_id = *r_ki_id; FX_COPY_PTR(r_ki_all_methods, &fx_result->ki_all_methods); FX_COPY_PTR(r_ki_scope, &fx_result->ki_scope); fx_result->ki_loc = *r_ki_loc; } static void _fx_free_rR23K_form__kdefinterface_t(struct _fx_rR23K_form__kdefinterface_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR23K_form__kdefinterface_t, _fx_free_R23K_form__kdefinterface_t); } static int _fx_make_rR23K_form__kdefinterface_t( struct _fx_R23K_form__kdefinterface_t* arg, struct _fx_rR23K_form__kdefinterface_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR23K_form__kdefinterface_t, _fx_copy_R23K_form__kdefinterface_t); } static void _fx_free_R17K_form__kdeffun_t(struct _fx_R17K_form__kdeffun_t* dst) { fx_free_str(&dst->kf_cname); fx_free_list_simple(&dst->kf_params); _fx_free_N14K_form__ktyp_t(&dst->kf_rt); _fx_free_N14K_form__kexp_t(&dst->kf_body); fx_free_list_simple(&dst->kf_scope); } static void _fx_copy_R17K_form__kdeffun_t(struct _fx_R17K_form__kdeffun_t* src, struct _fx_R17K_form__kdeffun_t* dst) { dst->kf_name = src->kf_name; fx_copy_str(&src->kf_cname, &dst->kf_cname); FX_COPY_PTR(src->kf_params, &dst->kf_params); FX_COPY_PTR(src->kf_rt, &dst->kf_rt); FX_COPY_PTR(src->kf_body, &dst->kf_body); dst->kf_flags = src->kf_flags; dst->kf_closure = src->kf_closure; 
FX_COPY_PTR(src->kf_scope, &dst->kf_scope); dst->kf_loc = src->kf_loc; } static void _fx_make_R17K_form__kdeffun_t( struct _fx_R9Ast__id_t* r_kf_name, fx_str_t* r_kf_cname, struct _fx_LR9Ast__id_t_data_t* r_kf_params, struct _fx_N14K_form__ktyp_t_data_t* r_kf_rt, struct _fx_N14K_form__kexp_t_data_t* r_kf_body, struct _fx_R16Ast__fun_flags_t* r_kf_flags, struct _fx_R25K_form__kdefclosureinfo_t* r_kf_closure, struct _fx_LN12Ast__scope_t_data_t* r_kf_scope, struct _fx_R10Ast__loc_t* r_kf_loc, struct _fx_R17K_form__kdeffun_t* fx_result) { fx_result->kf_name = *r_kf_name; fx_copy_str(r_kf_cname, &fx_result->kf_cname); FX_COPY_PTR(r_kf_params, &fx_result->kf_params); FX_COPY_PTR(r_kf_rt, &fx_result->kf_rt); FX_COPY_PTR(r_kf_body, &fx_result->kf_body); fx_result->kf_flags = *r_kf_flags; fx_result->kf_closure = *r_kf_closure; FX_COPY_PTR(r_kf_scope, &fx_result->kf_scope); fx_result->kf_loc = *r_kf_loc; } static void _fx_free_rR17K_form__kdeffun_t(struct _fx_rR17K_form__kdeffun_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17K_form__kdeffun_t, _fx_free_R17K_form__kdeffun_t); } static int _fx_make_rR17K_form__kdeffun_t( struct _fx_R17K_form__kdeffun_t* arg, struct _fx_rR17K_form__kdeffun_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17K_form__kdeffun_t, _fx_copy_R17K_form__kdeffun_t); } static void _fx_free_R17K_form__kdefexn_t(struct _fx_R17K_form__kdefexn_t* dst) { fx_free_str(&dst->ke_cname); fx_free_str(&dst->ke_base_cname); _fx_free_N14K_form__ktyp_t(&dst->ke_typ); fx_free_list_simple(&dst->ke_scope); } static void _fx_copy_R17K_form__kdefexn_t(struct _fx_R17K_form__kdefexn_t* src, struct _fx_R17K_form__kdefexn_t* dst) { dst->ke_name = src->ke_name; fx_copy_str(&src->ke_cname, &dst->ke_cname); fx_copy_str(&src->ke_base_cname, &dst->ke_base_cname); FX_COPY_PTR(src->ke_typ, &dst->ke_typ); dst->ke_std = src->ke_std; dst->ke_tag = src->ke_tag; dst->ke_make = src->ke_make; FX_COPY_PTR(src->ke_scope, &dst->ke_scope); dst->ke_loc = src->ke_loc; } static void 
/* construct a kdefexn record from its fields (return type on preceding line) */
_fx_make_R17K_form__kdefexn_t( struct _fx_R9Ast__id_t* r_ke_name, fx_str_t* r_ke_cname, fx_str_t* r_ke_base_cname, struct _fx_N14K_form__ktyp_t_data_t* r_ke_typ, bool r_ke_std, struct _fx_R9Ast__id_t* r_ke_tag, struct _fx_R9Ast__id_t* r_ke_make, struct _fx_LN12Ast__scope_t_data_t* r_ke_scope, struct _fx_R10Ast__loc_t* r_ke_loc, struct _fx_R17K_form__kdefexn_t* fx_result) { fx_result->ke_name = *r_ke_name; fx_copy_str(r_ke_cname, &fx_result->ke_cname); fx_copy_str(r_ke_base_cname, &fx_result->ke_base_cname); FX_COPY_PTR(r_ke_typ, &fx_result->ke_typ); fx_result->ke_std = r_ke_std; fx_result->ke_tag = *r_ke_tag; fx_result->ke_make = *r_ke_make; FX_COPY_PTR(r_ke_scope, &fx_result->ke_scope); fx_result->ke_loc = *r_ke_loc; }
/* ref-cell free/make for kdefexn */
static void _fx_free_rR17K_form__kdefexn_t(struct _fx_rR17K_form__kdefexn_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17K_form__kdefexn_t, _fx_free_R17K_form__kdefexn_t); }
static int _fx_make_rR17K_form__kdefexn_t( struct _fx_R17K_form__kdefexn_t* arg, struct _fx_rR17K_form__kdefexn_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17K_form__kdefexn_t, _fx_copy_R17K_form__kdefexn_t); }
/* list of ktyp values: free/cons (elements shared via FX_COPY_PTR) */
static void _fx_free_LN14K_form__ktyp_t(struct _fx_LN14K_form__ktyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14K_form__ktyp_t, _fx_free_N14K_form__ktyp_t); }
static int _fx_cons_LN14K_form__ktyp_t( struct _fx_N14K_form__ktyp_t_data_t* hd, struct _fx_LN14K_form__ktyp_t_data_t* tl, bool addref_tl, struct _fx_LN14K_form__ktyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14K_form__ktyp_t, FX_COPY_PTR); }
/* (id, id-list) tuple: free/copy/make */
static void _fx_free_T2R9Ast__id_tLR9Ast__id_t(struct _fx_T2R9Ast__id_tLR9Ast__id_t* dst) { fx_free_list_simple(&dst->t1); }
static void _fx_copy_T2R9Ast__id_tLR9Ast__id_t( struct _fx_T2R9Ast__id_tLR9Ast__id_t* src, struct _fx_T2R9Ast__id_tLR9Ast__id_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); }
static void _fx_make_T2R9Ast__id_tLR9Ast__id_t( struct _fx_R9Ast__id_t* t0, struct _fx_LR9Ast__id_t_data_t* t1, struct _fx_T2R9Ast__id_tLR9Ast__id_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); }
/* list of (id, id-list) tuples: free/cons */
static void _fx_free_LT2R9Ast__id_tLR9Ast__id_t(struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tLR9Ast__id_t, _fx_free_T2R9Ast__id_tLR9Ast__id_t); }
static int _fx_cons_LT2R9Ast__id_tLR9Ast__id_t( struct _fx_T2R9Ast__id_tLR9Ast__id_t* hd, struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tLR9Ast__id_t, _fx_copy_T2R9Ast__id_tLR9Ast__id_t); }
/* K-form 'kdefvariant' record: free/copy/make */
static void _fx_free_R21K_form__kdefvariant_t(struct _fx_R21K_form__kdefvariant_t* dst) { fx_free_str(&dst->kvar_cname); _fx_free_LN14K_form__ktyp_t(&dst->kvar_targs); _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->kvar_cases); fx_free_list_simple(&dst->kvar_ctors); _fx_free_LT2R9Ast__id_tLR9Ast__id_t(&dst->kvar_ifaces); fx_free_list_simple(&dst->kvar_scope); }
static void _fx_copy_R21K_form__kdefvariant_t( struct _fx_R21K_form__kdefvariant_t* src, struct _fx_R21K_form__kdefvariant_t* dst) { dst->kvar_name = src->kvar_name; fx_copy_str(&src->kvar_cname, &dst->kvar_cname); dst->kvar_proto = src->kvar_proto; dst->kvar_props = src->kvar_props; FX_COPY_PTR(src->kvar_targs, &dst->kvar_targs); FX_COPY_PTR(src->kvar_cases, &dst->kvar_cases); FX_COPY_PTR(src->kvar_ctors, &dst->kvar_ctors); dst->kvar_flags = src->kvar_flags; FX_COPY_PTR(src->kvar_ifaces, &dst->kvar_ifaces); FX_COPY_PTR(src->kvar_scope, &dst->kvar_scope); dst->kvar_loc = src->kvar_loc; }
static void _fx_make_R21K_form__kdefvariant_t( struct _fx_R9Ast__id_t* r_kvar_name, fx_str_t* r_kvar_cname, struct _fx_R9Ast__id_t* r_kvar_proto, struct _fx_Nt6option1R17K_form__ktprops_t* r_kvar_props, struct _fx_LN14K_form__ktyp_t_data_t* r_kvar_targs, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* r_kvar_cases, struct _fx_LR9Ast__id_t_data_t* r_kvar_ctors, struct _fx_R16Ast__var_flags_t* r_kvar_flags, struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* r_kvar_ifaces, struct _fx_LN12Ast__scope_t_data_t* r_kvar_scope, struct _fx_R10Ast__loc_t* r_kvar_loc, struct _fx_R21K_form__kdefvariant_t* fx_result) { fx_result->kvar_name = *r_kvar_name; fx_copy_str(r_kvar_cname, &fx_result->kvar_cname); fx_result->kvar_proto = *r_kvar_proto; fx_result->kvar_props = *r_kvar_props; FX_COPY_PTR(r_kvar_targs, &fx_result->kvar_targs); FX_COPY_PTR(r_kvar_cases, &fx_result->kvar_cases); FX_COPY_PTR(r_kvar_ctors, &fx_result->kvar_ctors); fx_result->kvar_flags = *r_kvar_flags; FX_COPY_PTR(r_kvar_ifaces, &fx_result->kvar_ifaces); FX_COPY_PTR(r_kvar_scope, &fx_result->kvar_scope); fx_result->kvar_loc = *r_kvar_loc; }
/* ref-cell free/make for kdefvariant */
static void _fx_free_rR21K_form__kdefvariant_t(struct _fx_rR21K_form__kdefvariant_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR21K_form__kdefvariant_t, _fx_free_R21K_form__kdefvariant_t); }
static int _fx_make_rR21K_form__kdefvariant_t( struct _fx_R21K_form__kdefvariant_t* arg, struct _fx_rR21K_form__kdefvariant_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR21K_form__kdefvariant_t, _fx_copy_R21K_form__kdefvariant_t); }
/* K-form 'kdeftyp' record: free/copy (make-constructor continues below) */
static void _fx_free_R17K_form__kdeftyp_t(struct _fx_R17K_form__kdeftyp_t* dst) { fx_free_str(&dst->kt_cname); _fx_free_LN14K_form__ktyp_t(&dst->kt_targs); _fx_free_N14K_form__ktyp_t(&dst->kt_typ); fx_free_list_simple(&dst->kt_scope); }
static void _fx_copy_R17K_form__kdeftyp_t(struct _fx_R17K_form__kdeftyp_t* src, struct _fx_R17K_form__kdeftyp_t* dst) { dst->kt_name = src->kt_name; fx_copy_str(&src->kt_cname, &dst->kt_cname); dst->kt_proto = src->kt_proto; dst->kt_props = src->kt_props; FX_COPY_PTR(src->kt_targs, &dst->kt_targs); FX_COPY_PTR(src->kt_typ, &dst->kt_typ); FX_COPY_PTR(src->kt_scope, &dst->kt_scope); dst->kt_loc = src->kt_loc; }
static void _fx_make_R17K_form__kdeftyp_t( struct _fx_R9Ast__id_t* r_kt_name, fx_str_t* r_kt_cname, struct _fx_R9Ast__id_t* r_kt_proto, struct _fx_Nt6option1R17K_form__ktprops_t* r_kt_props, struct _fx_LN14K_form__ktyp_t_data_t* r_kt_targs, struct _fx_N14K_form__ktyp_t_data_t* r_kt_typ, struct
/* tail of the kdeftyp make-constructor started on the preceding line */
_fx_LN12Ast__scope_t_data_t* r_kt_scope, struct _fx_R10Ast__loc_t* r_kt_loc, struct _fx_R17K_form__kdeftyp_t* fx_result) { fx_result->kt_name = *r_kt_name; fx_copy_str(r_kt_cname, &fx_result->kt_cname); fx_result->kt_proto = *r_kt_proto; fx_result->kt_props = *r_kt_props; FX_COPY_PTR(r_kt_targs, &fx_result->kt_targs); FX_COPY_PTR(r_kt_typ, &fx_result->kt_typ); FX_COPY_PTR(r_kt_scope, &fx_result->kt_scope); fx_result->kt_loc = *r_kt_loc; }
/* ref-cell free/make for kdeftyp */
static void _fx_free_rR17K_form__kdeftyp_t(struct _fx_rR17K_form__kdeftyp_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17K_form__kdeftyp_t, _fx_free_R17K_form__kdeftyp_t); }
static int _fx_make_rR17K_form__kdeftyp_t( struct _fx_R17K_form__kdeftyp_t* arg, struct _fx_rR17K_form__kdeftyp_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17K_form__kdeftyp_t, _fx_copy_R17K_form__kdeftyp_t); }
/* K-form 'kdefclosurevars' record: free/copy/make */
static void _fx_free_R25K_form__kdefclosurevars_t(struct _fx_R25K_form__kdefclosurevars_t* dst) { fx_free_str(&dst->kcv_cname); _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->kcv_freevars); fx_free_list_simple(&dst->kcv_orig_freevars); fx_free_list_simple(&dst->kcv_scope); }
static void _fx_copy_R25K_form__kdefclosurevars_t( struct _fx_R25K_form__kdefclosurevars_t* src, struct _fx_R25K_form__kdefclosurevars_t* dst) { dst->kcv_name = src->kcv_name; fx_copy_str(&src->kcv_cname, &dst->kcv_cname); FX_COPY_PTR(src->kcv_freevars, &dst->kcv_freevars); FX_COPY_PTR(src->kcv_orig_freevars, &dst->kcv_orig_freevars); FX_COPY_PTR(src->kcv_scope, &dst->kcv_scope); dst->kcv_loc = src->kcv_loc; }
static void _fx_make_R25K_form__kdefclosurevars_t( struct _fx_R9Ast__id_t* r_kcv_name, fx_str_t* r_kcv_cname, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* r_kcv_freevars, struct _fx_LR9Ast__id_t_data_t* r_kcv_orig_freevars, struct _fx_LN12Ast__scope_t_data_t* r_kcv_scope, struct _fx_R10Ast__loc_t* r_kcv_loc, struct _fx_R25K_form__kdefclosurevars_t* fx_result) { fx_result->kcv_name = *r_kcv_name; fx_copy_str(r_kcv_cname, &fx_result->kcv_cname); FX_COPY_PTR(r_kcv_freevars, &fx_result->kcv_freevars); FX_COPY_PTR(r_kcv_orig_freevars, &fx_result->kcv_orig_freevars); FX_COPY_PTR(r_kcv_scope, &fx_result->kcv_scope); fx_result->kcv_loc = *r_kcv_loc; }
/* ref-cell free/make for kdefclosurevars */
static void _fx_free_rR25K_form__kdefclosurevars_t(struct _fx_rR25K_form__kdefclosurevars_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR25K_form__kdefclosurevars_t, _fx_free_R25K_form__kdefclosurevars_t); }
static int _fx_make_rR25K_form__kdefclosurevars_t( struct _fx_R25K_form__kdefclosurevars_t* arg, struct _fx_rR25K_form__kdefclosurevars_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR25K_form__kdefclosurevars_t, _fx_copy_R25K_form__kdefclosurevars_t); }
/* option<exp>: drop the refcounted cell once the last reference dies */
static void _fx_free_Nt6option1N10Ast__exp_t(struct _fx_Nt6option1N10Ast__exp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { _fx_free_N10Ast__exp_t(&(*dst)->u.Some); fx_free(*dst); } *dst = 0; }
/* AST 'defval' record: free/copy/make */
static void _fx_free_R13Ast__defval_t(struct _fx_R13Ast__defval_t* dst) { _fx_free_N10Ast__typ_t(&dst->dv_typ); _fx_free_R16Ast__val_flags_t(&dst->dv_flags); fx_free_list_simple(&dst->dv_scope); }
static void _fx_copy_R13Ast__defval_t(struct _fx_R13Ast__defval_t* src, struct _fx_R13Ast__defval_t* dst) { dst->dv_name = src->dv_name; FX_COPY_PTR(src->dv_typ, &dst->dv_typ); _fx_copy_R16Ast__val_flags_t(&src->dv_flags, &dst->dv_flags); FX_COPY_PTR(src->dv_scope, &dst->dv_scope); dst->dv_loc = src->dv_loc; }
static void _fx_make_R13Ast__defval_t( struct _fx_R9Ast__id_t* r_dv_name, struct _fx_N10Ast__typ_t_data_t* r_dv_typ, struct _fx_R16Ast__val_flags_t* r_dv_flags, struct _fx_LN12Ast__scope_t_data_t* r_dv_scope, struct _fx_R10Ast__loc_t* r_dv_loc, struct _fx_R13Ast__defval_t* fx_result) { fx_result->dv_name = *r_dv_name; FX_COPY_PTR(r_dv_typ, &fx_result->dv_typ); _fx_copy_R16Ast__val_flags_t(r_dv_flags, &fx_result->dv_flags); FX_COPY_PTR(r_dv_scope, &fx_result->dv_scope); fx_result->dv_loc = *r_dv_loc; }
/* Map<id, env_entry list>: free/copy/make (root tree + comparator fn ptr) */
static void _fx_free_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* dst) { _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(&dst->root); fx_free_fp(&dst->cmp); }
static void _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* src, struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* dst) { FX_COPY_PTR(src->root, &dst->root); FX_COPY_FP(&src->cmp, &dst->cmp); }
static void _fx_make_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* r_root, struct _fx_FPi2R9Ast__id_tR9Ast__id_t* r_cmp, struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* fx_result) { FX_COPY_PTR(r_root, &fx_result->root); FX_COPY_FP(r_cmp, &fx_result->cmp); }
/* list of patterns: free/cons */
static void _fx_free_LN10Ast__pat_t(struct _fx_LN10Ast__pat_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN10Ast__pat_t, _fx_free_N10Ast__pat_t); }
static int _fx_cons_LN10Ast__pat_t( struct _fx_N10Ast__pat_t_data_t* hd, struct _fx_LN10Ast__pat_t_data_t* tl, bool addref_tl, struct _fx_LN10Ast__pat_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN10Ast__pat_t, FX_COPY_PTR); }
/* ref to an id-list: free/make */
static void _fx_free_rLR9Ast__id_t(struct _fx_rLR9Ast__id_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rLR9Ast__id_t, fx_free_list_simple); }
static int _fx_make_rLR9Ast__id_t(struct _fx_LR9Ast__id_t_data_t* arg, struct _fx_rLR9Ast__id_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rLR9Ast__id_t, FX_COPY_PTR); }
/* AST 'deffun' record: free/copy (copy continues on the next line) */
static void _fx_free_R13Ast__deffun_t(struct _fx_R13Ast__deffun_t* dst) { fx_free_list_simple(&dst->df_templ_args); _fx_free_LN10Ast__pat_t(&dst->df_args); _fx_free_N10Ast__typ_t(&dst->df_typ); _fx_free_N10Ast__exp_t(&dst->df_body); fx_free_list_simple(&dst->df_scope); _fx_free_rLR9Ast__id_t(&dst->df_templ_inst); _fx_free_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&dst->df_env); }
static void _fx_copy_R13Ast__deffun_t(struct _fx_R13Ast__deffun_t* src, struct _fx_R13Ast__deffun_t* dst) { dst->df_name = src->df_name; FX_COPY_PTR(src->df_templ_args, &dst->df_templ_args); FX_COPY_PTR(src->df_args, &dst->df_args); FX_COPY_PTR(src->df_typ,
/* tail of the deffun copy helper started on the preceding line */
&dst->df_typ); FX_COPY_PTR(src->df_body, &dst->df_body); dst->df_flags = src->df_flags; FX_COPY_PTR(src->df_scope, &dst->df_scope); dst->df_loc = src->df_loc; FX_COPY_PTR(src->df_templ_inst, &dst->df_templ_inst); _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&src->df_env, &dst->df_env); }
/* construct a deffun record from its fields */
static void _fx_make_R13Ast__deffun_t( struct _fx_R9Ast__id_t* r_df_name, struct _fx_LR9Ast__id_t_data_t* r_df_templ_args, struct _fx_LN10Ast__pat_t_data_t* r_df_args, struct _fx_N10Ast__typ_t_data_t* r_df_typ, struct _fx_N10Ast__exp_t_data_t* r_df_body, struct _fx_R16Ast__fun_flags_t* r_df_flags, struct _fx_LN12Ast__scope_t_data_t* r_df_scope, struct _fx_R10Ast__loc_t* r_df_loc, struct _fx_rLR9Ast__id_t_data_t* r_df_templ_inst, struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* r_df_env, struct _fx_R13Ast__deffun_t* fx_result) { fx_result->df_name = *r_df_name; FX_COPY_PTR(r_df_templ_args, &fx_result->df_templ_args); FX_COPY_PTR(r_df_args, &fx_result->df_args); FX_COPY_PTR(r_df_typ, &fx_result->df_typ); FX_COPY_PTR(r_df_body, &fx_result->df_body); fx_result->df_flags = *r_df_flags; FX_COPY_PTR(r_df_scope, &fx_result->df_scope); fx_result->df_loc = *r_df_loc; FX_COPY_PTR(r_df_templ_inst, &fx_result->df_templ_inst); _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(r_df_env, &fx_result->df_env); }
/* ref-cell free/make for deffun */
static void _fx_free_rR13Ast__deffun_t(struct _fx_rR13Ast__deffun_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR13Ast__deffun_t, _fx_free_R13Ast__deffun_t); }
static int _fx_make_rR13Ast__deffun_t(struct _fx_R13Ast__deffun_t* arg, struct _fx_rR13Ast__deffun_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR13Ast__deffun_t, _fx_copy_R13Ast__deffun_t); }
/* AST 'defexn' record: free/copy/make */
static void _fx_free_R13Ast__defexn_t(struct _fx_R13Ast__defexn_t* dst) { _fx_free_N10Ast__typ_t(&dst->dexn_typ); fx_free_list_simple(&dst->dexn_scope); }
static void _fx_copy_R13Ast__defexn_t(struct _fx_R13Ast__defexn_t* src, struct _fx_R13Ast__defexn_t* dst) { dst->dexn_name = src->dexn_name; FX_COPY_PTR(src->dexn_typ, &dst->dexn_typ); FX_COPY_PTR(src->dexn_scope, &dst->dexn_scope); dst->dexn_loc = src->dexn_loc; }
static void _fx_make_R13Ast__defexn_t( struct _fx_R9Ast__id_t* r_dexn_name, struct _fx_N10Ast__typ_t_data_t* r_dexn_typ, struct _fx_LN12Ast__scope_t_data_t* r_dexn_scope, struct _fx_R10Ast__loc_t* r_dexn_loc, struct _fx_R13Ast__defexn_t* fx_result) { fx_result->dexn_name = *r_dexn_name; FX_COPY_PTR(r_dexn_typ, &fx_result->dexn_typ); FX_COPY_PTR(r_dexn_scope, &fx_result->dexn_scope); fx_result->dexn_loc = *r_dexn_loc; }
/* ref-cell free/make for defexn */
static void _fx_free_rR13Ast__defexn_t(struct _fx_rR13Ast__defexn_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR13Ast__defexn_t, _fx_free_R13Ast__defexn_t); }
static int _fx_make_rR13Ast__defexn_t(struct _fx_R13Ast__defexn_t* arg, struct _fx_rR13Ast__defexn_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR13Ast__defexn_t, _fx_copy_R13Ast__defexn_t); }
/* AST 'deftyp' record: free/copy/make */
static void _fx_free_R13Ast__deftyp_t(struct _fx_R13Ast__deftyp_t* dst) { fx_free_list_simple(&dst->dt_templ_args); _fx_free_N10Ast__typ_t(&dst->dt_typ); fx_free_list_simple(&dst->dt_scope); }
static void _fx_copy_R13Ast__deftyp_t(struct _fx_R13Ast__deftyp_t* src, struct _fx_R13Ast__deftyp_t* dst) { dst->dt_name = src->dt_name; FX_COPY_PTR(src->dt_templ_args, &dst->dt_templ_args); FX_COPY_PTR(src->dt_typ, &dst->dt_typ); dst->dt_finalized = src->dt_finalized; FX_COPY_PTR(src->dt_scope, &dst->dt_scope); dst->dt_loc = src->dt_loc; }
static void _fx_make_R13Ast__deftyp_t( struct _fx_R9Ast__id_t* r_dt_name, struct _fx_LR9Ast__id_t_data_t* r_dt_templ_args, struct _fx_N10Ast__typ_t_data_t* r_dt_typ, bool r_dt_finalized, struct _fx_LN12Ast__scope_t_data_t* r_dt_scope, struct _fx_R10Ast__loc_t* r_dt_loc, struct _fx_R13Ast__deftyp_t* fx_result) { fx_result->dt_name = *r_dt_name; FX_COPY_PTR(r_dt_templ_args, &fx_result->dt_templ_args); FX_COPY_PTR(r_dt_typ, &fx_result->dt_typ); fx_result->dt_finalized = r_dt_finalized; FX_COPY_PTR(r_dt_scope, &fx_result->dt_scope); fx_result->dt_loc = *r_dt_loc; }
/* ref-cell free/make for deftyp */
static void
_fx_free_rR13Ast__deftyp_t(struct _fx_rR13Ast__deftyp_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR13Ast__deftyp_t, _fx_free_R13Ast__deftyp_t); }
static int _fx_make_rR13Ast__deftyp_t(struct _fx_R13Ast__deftyp_t* arg, struct _fx_rR13Ast__deftyp_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR13Ast__deftyp_t, _fx_copy_R13Ast__deftyp_t); }
/* (id, typ) tuple: free/copy/make */
static void _fx_free_T2R9Ast__id_tN10Ast__typ_t(struct _fx_T2R9Ast__id_tN10Ast__typ_t* dst) { _fx_free_N10Ast__typ_t(&dst->t1); }
static void _fx_copy_T2R9Ast__id_tN10Ast__typ_t( struct _fx_T2R9Ast__id_tN10Ast__typ_t* src, struct _fx_T2R9Ast__id_tN10Ast__typ_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); }
static void _fx_make_T2R9Ast__id_tN10Ast__typ_t( struct _fx_R9Ast__id_t* t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_T2R9Ast__id_tN10Ast__typ_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); }
/* list of (id, typ) tuples: free/cons */
static void _fx_free_LT2R9Ast__id_tN10Ast__typ_t(struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__typ_t, _fx_free_T2R9Ast__id_tN10Ast__typ_t); }
static int _fx_cons_LT2R9Ast__id_tN10Ast__typ_t( struct _fx_T2R9Ast__id_tN10Ast__typ_t* hd, struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__typ_t, _fx_copy_T2R9Ast__id_tN10Ast__typ_t); }
/* list of Ta2 id-tuples: cons only; elements copied with FX_COPY_SIMPLE_BY_PTR */
static int _fx_cons_LTa2R9Ast__id_t( struct _fx_Ta2R9Ast__id_t* hd, struct _fx_LTa2R9Ast__id_t_data_t* tl, bool addref_tl, struct _fx_LTa2R9Ast__id_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LTa2R9Ast__id_t, FX_COPY_SIMPLE_BY_PTR); }
/* (id, Ta2-list) tuple: free/copy (make-constructor continues on next line) */
static void _fx_free_T2R9Ast__id_tLTa2R9Ast__id_t(struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* dst) { fx_free_list_simple(&dst->t1); }
static void _fx_copy_T2R9Ast__id_tLTa2R9Ast__id_t( struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* src, struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); }
static void
/* construct an (id, Ta2-list) tuple (return type on preceding line) */
_fx_make_T2R9Ast__id_tLTa2R9Ast__id_t( struct _fx_R9Ast__id_t* t0, struct _fx_LTa2R9Ast__id_t_data_t* t1, struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); }
/* list of (id, Ta2-list) tuples: free/cons */
static void _fx_free_LT2R9Ast__id_tLTa2R9Ast__id_t(struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tLTa2R9Ast__id_t, _fx_free_T2R9Ast__id_tLTa2R9Ast__id_t); }
static int _fx_cons_LT2R9Ast__id_tLTa2R9Ast__id_t( struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* hd, struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tLTa2R9Ast__id_t, _fx_copy_T2R9Ast__id_tLTa2R9Ast__id_t); }
/* AST 'defvariant' record: free/copy/make */
static void _fx_free_R17Ast__defvariant_t(struct _fx_R17Ast__defvariant_t* dst) { fx_free_list_simple(&dst->dvar_templ_args); _fx_free_N10Ast__typ_t(&dst->dvar_alias); _fx_free_LT2R9Ast__id_tN10Ast__typ_t(&dst->dvar_cases); fx_free_list_simple(&dst->dvar_ctors); _fx_free_rLR9Ast__id_t(&dst->dvar_templ_inst); _fx_free_LT2R9Ast__id_tLTa2R9Ast__id_t(&dst->dvar_ifaces); fx_free_list_simple(&dst->dvar_scope); }
static void _fx_copy_R17Ast__defvariant_t(struct _fx_R17Ast__defvariant_t* src, struct _fx_R17Ast__defvariant_t* dst) { dst->dvar_name = src->dvar_name; FX_COPY_PTR(src->dvar_templ_args, &dst->dvar_templ_args); FX_COPY_PTR(src->dvar_alias, &dst->dvar_alias); dst->dvar_flags = src->dvar_flags; FX_COPY_PTR(src->dvar_cases, &dst->dvar_cases); FX_COPY_PTR(src->dvar_ctors, &dst->dvar_ctors); FX_COPY_PTR(src->dvar_templ_inst, &dst->dvar_templ_inst); FX_COPY_PTR(src->dvar_ifaces, &dst->dvar_ifaces); FX_COPY_PTR(src->dvar_scope, &dst->dvar_scope); dst->dvar_loc = src->dvar_loc; }
static void _fx_make_R17Ast__defvariant_t( struct _fx_R9Ast__id_t* r_dvar_name, struct _fx_LR9Ast__id_t_data_t* r_dvar_templ_args, struct _fx_N10Ast__typ_t_data_t* r_dvar_alias, struct _fx_R16Ast__var_flags_t* r_dvar_flags, struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* r_dvar_cases, struct _fx_LR9Ast__id_t_data_t* r_dvar_ctors, struct _fx_rLR9Ast__id_t_data_t* r_dvar_templ_inst, struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* r_dvar_ifaces, struct _fx_LN12Ast__scope_t_data_t* r_dvar_scope, struct _fx_R10Ast__loc_t* r_dvar_loc, struct _fx_R17Ast__defvariant_t* fx_result) { fx_result->dvar_name = *r_dvar_name; FX_COPY_PTR(r_dvar_templ_args, &fx_result->dvar_templ_args); FX_COPY_PTR(r_dvar_alias, &fx_result->dvar_alias); fx_result->dvar_flags = *r_dvar_flags; FX_COPY_PTR(r_dvar_cases, &fx_result->dvar_cases); FX_COPY_PTR(r_dvar_ctors, &fx_result->dvar_ctors); FX_COPY_PTR(r_dvar_templ_inst, &fx_result->dvar_templ_inst); FX_COPY_PTR(r_dvar_ifaces, &fx_result->dvar_ifaces); FX_COPY_PTR(r_dvar_scope, &fx_result->dvar_scope); fx_result->dvar_loc = *r_dvar_loc; }
/* ref-cell free/make for defvariant */
static void _fx_free_rR17Ast__defvariant_t(struct _fx_rR17Ast__defvariant_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17Ast__defvariant_t, _fx_free_R17Ast__defvariant_t); }
static int _fx_make_rR17Ast__defvariant_t( struct _fx_R17Ast__defvariant_t* arg, struct _fx_rR17Ast__defvariant_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17Ast__defvariant_t, _fx_copy_R17Ast__defvariant_t); }
/* (id, typ, fun_flags) triple: free/copy/make — only t1 (the typ) is heap-owned */
static void _fx_free_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* dst) { _fx_free_N10Ast__typ_t(&dst->t1); }
static void _fx_copy_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* src, struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; }
static void _fx_make_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_R9Ast__id_t* t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_R16Ast__fun_flags_t* t2, struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; }
/* list of (id, typ, fun_flags) triples: free/cons */
static void
_fx_free_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t, _fx_free_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t); }
static int _fx_cons_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* hd, struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* tl, bool addref_tl, struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t, _fx_copy_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t); }
/* AST 'definterface' record: free/copy/make (make continues on next line) */
static void _fx_free_R19Ast__definterface_t(struct _fx_R19Ast__definterface_t* dst) { _fx_free_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(&dst->di_new_methods); _fx_free_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(&dst->di_all_methods); fx_free_list_simple(&dst->di_scope); }
static void _fx_copy_R19Ast__definterface_t(struct _fx_R19Ast__definterface_t* src, struct _fx_R19Ast__definterface_t* dst) { dst->di_name = src->di_name; dst->di_base = src->di_base; FX_COPY_PTR(src->di_new_methods, &dst->di_new_methods); FX_COPY_PTR(src->di_all_methods, &dst->di_all_methods); FX_COPY_PTR(src->di_scope, &dst->di_scope); dst->di_loc = src->di_loc; }
static void _fx_make_R19Ast__definterface_t( struct _fx_R9Ast__id_t* r_di_name, struct _fx_R9Ast__id_t* r_di_base, struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* r_di_new_methods, struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* r_di_all_methods, struct _fx_LN12Ast__scope_t_data_t* r_di_scope, struct _fx_R10Ast__loc_t* r_di_loc, struct _fx_R19Ast__definterface_t* fx_result) { fx_result->di_name = *r_di_name; fx_result->di_base = *r_di_base; FX_COPY_PTR(r_di_new_methods, &fx_result->di_new_methods); FX_COPY_PTR(r_di_all_methods, &fx_result->di_all_methods); FX_COPY_PTR(r_di_scope, &fx_result->di_scope);
/* tail of the definterface make-constructor started on the preceding line */
fx_result->di_loc = *r_di_loc; }
/* ref-cell free/make for definterface */
static void _fx_free_rR19Ast__definterface_t(struct _fx_rR19Ast__definterface_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR19Ast__definterface_t, _fx_free_R19Ast__definterface_t); }
static int _fx_make_rR19Ast__definterface_t( struct _fx_R19Ast__definterface_t* arg, struct _fx_rR19Ast__definterface_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR19Ast__definterface_t, _fx_copy_R19Ast__definterface_t); }
/* tagged-union 'id_info': free/copy dispatch on dst->tag; tags without heap
 * payload fall through to the default case (no-op free / bitwise copy) */
static void _fx_free_N14Ast__id_info_t(struct _fx_N14Ast__id_info_t* dst) { switch (dst->tag) { case 2: _fx_free_R13Ast__defval_t(&dst->u.IdDVal); break; case 3: _fx_free_rR13Ast__deffun_t(&dst->u.IdFun); break; case 4: _fx_free_rR13Ast__defexn_t(&dst->u.IdExn); break; case 5: _fx_free_rR13Ast__deftyp_t(&dst->u.IdTyp); break; case 6: _fx_free_rR17Ast__defvariant_t(&dst->u.IdVariant); break; case 7: _fx_free_rR19Ast__definterface_t(&dst->u.IdInterface); break; default: ; } dst->tag = 0; }
static void _fx_copy_N14Ast__id_info_t(struct _fx_N14Ast__id_info_t* src, struct _fx_N14Ast__id_info_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: _fx_copy_R13Ast__defval_t(&src->u.IdDVal, &dst->u.IdDVal); break; case 3: FX_COPY_PTR(src->u.IdFun, &dst->u.IdFun); break; case 4: FX_COPY_PTR(src->u.IdExn, &dst->u.IdExn); break; case 5: FX_COPY_PTR(src->u.IdTyp, &dst->u.IdTyp); break; case 6: FX_COPY_PTR(src->u.IdVariant, &dst->u.IdVariant); break; case 7: FX_COPY_PTR(src->u.IdInterface, &dst->u.IdInterface); break; default: dst->u = src->u; } }
/* (int, array, id_info) triple: free/copy (make continues on next line) */
static void _fx_free_T3iA1N14Ast__id_info_tN14Ast__id_info_t(struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* dst) { fx_free_arr(&dst->t1); _fx_free_N14Ast__id_info_t(&dst->t2); }
static void _fx_copy_T3iA1N14Ast__id_info_tN14Ast__id_info_t( struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* src, struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* dst) { dst->t0 = src->t0; fx_copy_arr(&src->t1, &dst->t1); _fx_copy_N14Ast__id_info_t(&src->t2, &dst->t2); }
static void
_fx_make_T3iA1N14Ast__id_info_tN14Ast__id_info_t( int_ t0, fx_arr_t* t1, struct _fx_N14Ast__id_info_t* t2, struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* fx_result) { fx_result->t0 = t0; fx_copy_arr(t1, &fx_result->t1); _fx_copy_N14Ast__id_info_t(t2, &fx_result->t2); }
/* Dynvec<id_info>: drop the refcounted cell once the last reference dies */
static void _fx_free_Nt9Dynvec__t1N14Ast__id_info_t(struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { _fx_free_T3iA1N14Ast__id_info_tN14Ast__id_info_t(&(*dst)->u.t); fx_free(*dst); } *dst = 0; }
/* list of env_entry values: free/cons */
static void _fx_free_LN16Ast__env_entry_t(struct _fx_LN16Ast__env_entry_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN16Ast__env_entry_t, _fx_free_N16Ast__env_entry_t); }
static int _fx_cons_LN16Ast__env_entry_t( struct _fx_N16Ast__env_entry_t_data_t* hd, struct _fx_LN16Ast__env_entry_t_data_t* tl, bool addref_tl, struct _fx_LN16Ast__env_entry_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN16Ast__env_entry_t, FX_COPY_PTR); }
/* map-tree node payload (color, left, key, values, right): free/copy/make;
 * copy is continued on the next line */
static void _fx_free_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t* dst) { _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(&dst->t1); _fx_free_LN16Ast__env_entry_t(&dst->t3); _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(&dst->t4); }
static void _fx_copy_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t* src, struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 =
src->t2; FX_COPY_PTR(src->t3, &dst->t3); FX_COPY_PTR(src->t4, &dst->t4); }
static void _fx_make_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_N12Map__color_t* t0, struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t1, struct _fx_R9Ast__id_t* t2, struct _fx_LN16Ast__env_entry_t_data_t* t3, struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t4, struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; FX_COPY_PTR(t3, &fx_result->t3); FX_COPY_PTR(t4, &fx_result->t4); }
/* map-tree cell: release the Node payload once the last reference dies */
static void _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { _fx_free_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( &(*dst)->u.Node); fx_free(*dst); } *dst = 0; }
/* (loc, string) tuple: free/copy/make */
static void _fx_free_T2R10Ast__loc_tS(struct _fx_T2R10Ast__loc_tS* dst) { fx_free_str(&dst->t1); }
static void _fx_copy_T2R10Ast__loc_tS(struct _fx_T2R10Ast__loc_tS* src, struct _fx_T2R10Ast__loc_tS* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); }
static void _fx_make_T2R10Ast__loc_tS(struct _fx_R10Ast__loc_t* t0, fx_str_t* t1, struct _fx_T2R10Ast__loc_tS* fx_result) { fx_result->t0 = *t0; fx_copy_str(t1, &fx_result->t1); }
/* literal union: only the LitString case (tag 5) owns heap data;
 * copy continues on the next line */
static void _fx_free_N10Ast__lit_t(struct _fx_N10Ast__lit_t* dst) { switch (dst->tag) { case 5: fx_free_str(&dst->u.LitString); break; default: ; } dst->tag = 0; }
static void _fx_copy_N10Ast__lit_t(struct _fx_N10Ast__lit_t* src, struct _fx_N10Ast__lit_t* dst) { dst->tag = src->tag; switch (src->tag) { case 5: fx_copy_str(&src->u.LitString, &dst->u.LitString); break;
/* tail of the literal copy helper: non-string tags are copied bitwise */
default: dst->u = src->u; } }
/* ref to option<typ>: free/make */
static void _fx_free_rNt6option1N10Ast__typ_t(struct _fx_rNt6option1N10Ast__typ_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rNt6option1N10Ast__typ_t, _fx_free_Nt6option1N10Ast__typ_t); }
static int _fx_make_rNt6option1N10Ast__typ_t( struct _fx_Nt6option1N10Ast__typ_t_data_t* arg, struct _fx_rNt6option1N10Ast__typ_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rNt6option1N10Ast__typ_t, FX_COPY_PTR); }
/* list of typ values: free/cons */
static void _fx_free_LN10Ast__typ_t(struct _fx_LN10Ast__typ_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN10Ast__typ_t, _fx_free_N10Ast__typ_t); }
static int _fx_cons_LN10Ast__typ_t( struct _fx_N10Ast__typ_t_data_t* hd, struct _fx_LN10Ast__typ_t_data_t* tl, bool addref_tl, struct _fx_LN10Ast__typ_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN10Ast__typ_t, FX_COPY_PTR); }
/* (typ-list, typ) tuple: free/copy/make — both fields heap-owned */
static void _fx_free_T2LN10Ast__typ_tN10Ast__typ_t(struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* dst) { _fx_free_LN10Ast__typ_t(&dst->t0); _fx_free_N10Ast__typ_t(&dst->t1); }
static void _fx_copy_T2LN10Ast__typ_tN10Ast__typ_t( struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* src, struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); }
static void _fx_make_T2LN10Ast__typ_tN10Ast__typ_t( struct _fx_LN10Ast__typ_t_data_t* t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); }
/* (int, typ) tuple: free/copy/make */
static void _fx_free_T2iN10Ast__typ_t(struct _fx_T2iN10Ast__typ_t* dst) { _fx_free_N10Ast__typ_t(&dst->t1); }
static void _fx_copy_T2iN10Ast__typ_t(struct _fx_T2iN10Ast__typ_t* src, struct _fx_T2iN10Ast__typ_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); }
static void _fx_make_T2iN10Ast__typ_t(int_ t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_T2iN10Ast__typ_t* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); }
/* (val_flags, id, typ, exp) quadruple: free/copy/make */
static void _fx_free_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct
_fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* dst) { _fx_free_R16Ast__val_flags_t(&dst->t0); _fx_free_N10Ast__typ_t(&dst->t2); _fx_free_N10Ast__exp_t(&dst->t3); }
static void _fx_copy_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* src, struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* dst) { _fx_copy_R16Ast__val_flags_t(&src->t0, &dst->t0); dst->t1 = src->t1; FX_COPY_PTR(src->t2, &dst->t2); FX_COPY_PTR(src->t3, &dst->t3); }
static void _fx_make_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_R16Ast__val_flags_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_N10Ast__typ_t_data_t* t2, struct _fx_N10Ast__exp_t_data_t* t3, struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* fx_result) { _fx_copy_R16Ast__val_flags_t(t0, &fx_result->t0); fx_result->t1 = *t1; FX_COPY_PTR(t2, &fx_result->t2); FX_COPY_PTR(t3, &fx_result->t3); }
/* list of those quadruples: free/cons */
static void _fx_free_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t, _fx_free_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t); }
static int _fx_cons_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* hd, struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* tl, bool addref_tl, struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t, _fx_copy_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t); }
/* (quadruple-list, bool) pair: free/copy/make, plus its ref-cell helpers */
static void _fx_free_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* dst) {
_fx_free_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t(&dst->t0); }
static void _fx_copy_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* src, struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; }
static void _fx_make_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* t0, bool t1, struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = t1; }
static void _fx_free_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t** dst) { FX_FREE_REF_IMPL(_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB, _fx_free_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB); }
static int _fx_make_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* arg, struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB, _fx_copy_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB); }
/* (typ-list, id) tuple: free/copy; make-constructor continues past this chunk */
static void _fx_free_T2LN10Ast__typ_tR9Ast__id_t(struct _fx_T2LN10Ast__typ_tR9Ast__id_t* dst) { _fx_free_LN10Ast__typ_t(&dst->t0); }
static void _fx_copy_T2LN10Ast__typ_tR9Ast__id_t( struct _fx_T2LN10Ast__typ_tR9Ast__id_t* src, struct _fx_T2LN10Ast__typ_tR9Ast__id_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; }
static void _fx_make_T2LN10Ast__typ_tR9Ast__id_t( struct _fx_LN10Ast__typ_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_T2LN10Ast__typ_tR9Ast__id_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0);
fx_result->t1 = *t1; } static void _fx_free_N10Ast__typ_t(struct _fx_N10Ast__typ_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 1: _fx_free_rNt6option1N10Ast__typ_t(&(*dst)->u.TypVar); break; case 2: _fx_free_Nt6option1N10Ast__typ_t(&(*dst)->u.TypVarTuple); break; case 3: _fx_free_N10Ast__typ_t(&(*dst)->u.TypVarArray); break; case 13: _fx_free_T2LN10Ast__typ_tN10Ast__typ_t(&(*dst)->u.TypFun); break; case 14: _fx_free_N10Ast__typ_t(&(*dst)->u.TypList); break; case 15: _fx_free_N10Ast__typ_t(&(*dst)->u.TypVector); break; case 16: _fx_free_LN10Ast__typ_t(&(*dst)->u.TypTuple); break; case 17: _fx_free_N10Ast__typ_t(&(*dst)->u.TypRef); break; case 18: _fx_free_T2iN10Ast__typ_t(&(*dst)->u.TypArray); break; case 19: _fx_free_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB(&(*dst)->u.TypRecord); break; case 23: _fx_free_T2LN10Ast__typ_tR9Ast__id_t(&(*dst)->u.TypApp); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_N13Ast__binary_t(struct _fx_N13Ast__binary_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 27: _fx_free_N13Ast__binary_t(&(*dst)->u.OpAugBinary); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_T2Nt6option1N10Ast__exp_tR10Ast__loc_t(struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N10Ast__exp_t(&dst->t0); } static void _fx_copy_T2Nt6option1N10Ast__exp_tR10Ast__loc_t( struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* src, struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2Nt6option1N10Ast__exp_tR10Ast__loc_t( struct _fx_Nt6option1N10Ast__exp_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2N10Ast__typ_tR10Ast__loc_t* dst) { 
_fx_free_N10Ast__typ_t(&dst->t0); } static void _fx_copy_T2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__typ_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N10Ast__exp_t(&dst->t0); _fx_free_Nt6option1N10Ast__exp_t(&dst->t1); _fx_free_Nt6option1N10Ast__exp_t(&dst->t2); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_Nt6option1N10Ast__exp_t_data_t* t0, struct _fx_Nt6option1N10Ast__exp_t_data_t* t1, struct _fx_Nt6option1N10Ast__exp_t_data_t* t2, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3, struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3); } 
/*
 * Machine-generated value-management helpers (Ficus compiler output) for
 * AST tuple/list types.  Naming scheme (as evidenced by the bodies below):
 *   _fx_free_X  -- destructor: releases only the fields of X that own
 *                  heap/ref-counted data; plain scalar fields are untouched.
 *   _fx_copy_X  -- copies src into dst field-by-field; pointer fields go
 *                  through FX_COPY_PTR (defined in the ficus runtime;
 *                  presumably shares the pointer and bumps a refcount --
 *                  confirm against runtime headers), plain fields are
 *                  assigned directly.
 *   _fx_make_X  -- in-place constructor: fills *fx_result from individual
 *                  components using the same copy discipline.
 *   _fx_cons_LX -- list cons via FX_MAKE_LIST_IMPL (runtime macro; body
 *                  not visible here).
 * TnA...B encodes an n-field tuple, L... a list, N... a variant, R... a
 * record, per the mangled type names.  Do not hand-edit: regenerate instead.
 */

/* (lit, (typ, loc)) pair: dtor / copy / constructor. */
static void _fx_free_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__lit_t(&dst->t0);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1);
}

static void _fx_copy_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_copy_N10Ast__lit_t(&src->t0, &dst->t0);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1);
}

static void _fx_make_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N10Ast__lit_t* t0,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1,
    struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    _fx_copy_N10Ast__lit_t(t0, &fx_result->t0);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1);
}

/* (id, (typ, loc)) pair: t0 is a plain id record (no dtor needed). */
static void _fx_free_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1,
    struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1);
}

/* (binary-op, exp, exp, (typ, loc)) tuple (ExpBinary payload). */
static void _fx_free_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N13Ast__binary_t(&dst->t0);
    _fx_free_N10Ast__exp_t(&dst->t1);
    _fx_free_N10Ast__exp_t(&dst->t2);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3);
}

static void _fx_copy_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3);
}

static void _fx_make_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N13Ast__binary_t_data_t* t0,
    struct _fx_N10Ast__exp_t_data_t* t1,
    struct _fx_N10Ast__exp_t_data_t* t2,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3,
    struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3);
}

/* (unary-op, exp, (typ, loc)) tuple; t0 is a plain tag value. */
static void _fx_free_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N12Ast__unary_t* t0,
    struct _fx_N10Ast__exp_t_data_t* t1,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
    struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* exp list: dtor and cons (bodies are runtime macros, not visible here). */
static void _fx_free_LN10Ast__exp_t(struct _fx_LN10Ast__exp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN10Ast__exp_t, _fx_free_N10Ast__exp_t);
}

static int _fx_cons_LN10Ast__exp_t(
    struct _fx_N10Ast__exp_t_data_t* hd,
    struct _fx_LN10Ast__exp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN10Ast__exp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN10Ast__exp_t, FX_COPY_PTR);
}

/* (intrinsic, exp list, (typ, loc)) tuple; t0 is a plain tag value. */
static void _fx_free_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_LN10Ast__exp_t(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N13Ast__intrin_t* t0,
    struct _fx_LN10Ast__exp_t_data_t* t1,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
    struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* (id, exp) pair. */
static void _fx_free_T2R9Ast__id_tN10Ast__exp_t(struct _fx_T2R9Ast__id_tN10Ast__exp_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tN10Ast__exp_t(
    struct _fx_T2R9Ast__id_tN10Ast__exp_t* src,
    struct _fx_T2R9Ast__id_tN10Ast__exp_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tN10Ast__exp_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N10Ast__exp_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN10Ast__exp_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* (exp list, (typ, loc)) pair. */
static void _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_LN10Ast__exp_t(&dst->t0);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1);
}

static void _fx_copy_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1);
}

static void _fx_make_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_LN10Ast__exp_t_data_t* t0,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1,
    struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1);
}

/* list of exp lists: dtor and cons. */
static void _fx_free_LLN10Ast__exp_t(struct _fx_LLN10Ast__exp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LLN10Ast__exp_t, _fx_free_LN10Ast__exp_t);
}

static int _fx_cons_LLN10Ast__exp_t(
    struct _fx_LN10Ast__exp_t_data_t* hd,
    struct _fx_LLN10Ast__exp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LLN10Ast__exp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LLN10Ast__exp_t, FX_COPY_PTR);
}

/* (exp list list, (typ, loc)) pair. */
static void _fx_free_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_LLN10Ast__exp_t(&dst->t0);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1);
}

static void _fx_copy_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1);
}

static void _fx_make_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_LLN10Ast__exp_t_data_t* t0,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1,
    struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1);
}

/* list of (id, exp) pairs: dtor and cons (per-element copy, not ptr share). */
static void _fx_free_LT2R9Ast__id_tN10Ast__exp_t(struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__exp_t, _fx_free_T2R9Ast__id_tN10Ast__exp_t);
}

static int _fx_cons_LT2R9Ast__id_tN10Ast__exp_t(
    struct _fx_T2R9Ast__id_tN10Ast__exp_t* hd,
    struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__exp_t, _fx_copy_T2R9Ast__id_tN10Ast__exp_t);
}

/* (exp, (id, exp) list, (typ, loc)) tuple. */
static void _fx_free_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t0);
    _fx_free_LT2R9Ast__id_tN10Ast__exp_t(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N10Ast__exp_t_data_t* t0,
    struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* t1,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
    struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* (exp, exp list, (typ, loc)) tuple. */
static void _fx_free_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t0);
    _fx_free_LN10Ast__exp_t(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N10Ast__exp_t_data_t* t0,
    struct _fx_LN10Ast__exp_t_data_t* t1,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
    struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* (exp, border, interpolate, exp list, (typ, loc)) tuple;
   t1/t2 are plain tag values. */
static void _fx_free_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t0);
    _fx_free_LN10Ast__exp_t(&dst->t3);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t4);
}

static void _fx_copy_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
    dst->t2 = src->t2;
    FX_COPY_PTR(src->t3, &dst->t3);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t4, &dst->t4);
}

static void _fx_make_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N10Ast__exp_t_data_t* t0,
    struct _fx_N13Ast__border_t* t1,
    struct _fx_N18Ast__interpolate_t* t2,
    struct _fx_LN10Ast__exp_t_data_t* t3,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t4,
    struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
    fx_result->t2 = *t2;
    FX_COPY_PTR(t3, &fx_result->t3);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t4, &fx_result->t4);
}

/* (exp, exp, loc) tuple; loc is a plain record. */
static void _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t0);
    _fx_free_N10Ast__exp_t(&dst->t1);
}

static void _fx_copy_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}

static void _fx_make_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(
    struct _fx_N10Ast__exp_t_data_t* t0,
    struct _fx_N10Ast__exp_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}

/* (exp, exp, (typ, loc)) tuple. */
static void _fx_free_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t0);
    _fx_free_N10Ast__exp_t(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N10Ast__exp_t_data_t* t0,
    struct _fx_N10Ast__exp_t_data_t* t1,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
    struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* (exp, loc) pair. */
static void _fx_free_T2N10Ast__exp_tR10Ast__loc_t(struct _fx_T2N10Ast__exp_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t0);
}

static void _fx_copy_T2N10Ast__exp_tR10Ast__loc_t(
    struct _fx_T2N10Ast__exp_tR10Ast__loc_t* src,
    struct _fx_T2N10Ast__exp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
}

static void _fx_make_T2N10Ast__exp_tR10Ast__loc_t(
    struct _fx_N10Ast__exp_t_data_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2N10Ast__exp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}

/* (exp, exp, exp, (typ, loc)) tuple. */
static void _fx_free_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t0);
    _fx_free_N10Ast__exp_t(&dst->t1);
    _fx_free_N10Ast__exp_t(&dst->t2);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3);
}

static void _fx_copy_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3);
}

static void _fx_make_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N10Ast__exp_t_data_t* t0,
    struct _fx_N10Ast__exp_t_data_t* t1,
    struct _fx_N10Ast__exp_t_data_t* t2,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3,
    struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3);
}

/* (pat, exp) pair. */
static void _fx_free_T2N10Ast__pat_tN10Ast__exp_t(struct _fx_T2N10Ast__pat_tN10Ast__exp_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t0);
    _fx_free_N10Ast__exp_t(&dst->t1);
}

static void _fx_copy_T2N10Ast__pat_tN10Ast__exp_t(
    struct _fx_T2N10Ast__pat_tN10Ast__exp_t* src,
    struct _fx_T2N10Ast__pat_tN10Ast__exp_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2N10Ast__pat_tN10Ast__exp_t(
    struct _fx_N10Ast__pat_t_data_t* t0,
    struct _fx_N10Ast__exp_t_data_t* t1,
    struct _fx_T2N10Ast__pat_tN10Ast__exp_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* list of (pat, exp) pairs: dtor and cons. */
static void _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2N10Ast__pat_tN10Ast__exp_t, _fx_free_T2N10Ast__pat_tN10Ast__exp_t);
}

static int _fx_cons_LT2N10Ast__pat_tN10Ast__exp_t(
    struct _fx_T2N10Ast__pat_tN10Ast__exp_t* hd,
    struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2N10Ast__pat_tN10Ast__exp_t, _fx_copy_T2N10Ast__pat_tN10Ast__exp_t);
}

/* ((pat, exp) list, pat, exp, for-flags, loc) tuple; t3/t4 are plain records. */
static void _fx_free_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(
    struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* dst)
{
    _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(&dst->t0);
    _fx_free_N10Ast__pat_t(&dst->t1);
    _fx_free_N10Ast__exp_t(&dst->t2);
}

static void _fx_copy_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(
    struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* src,
    struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
    dst->t3 = src->t3;
    dst->t4 = src->t4;
}

static void _fx_make_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(
    struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0,
    struct _fx_N10Ast__pat_t_data_t* t1,
    struct _fx_N10Ast__exp_t_data_t* t2,
    struct _fx_R16Ast__for_flags_t* t3,
    struct _fx_R10Ast__loc_t* t4,
    struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
    fx_result->t3 = *t3;
    fx_result->t4 = *t4;
}

/* ((pat, exp) list, pat) pair. */
static void _fx_free_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* dst)
{
    _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(&dst->t0);
    _fx_free_N10Ast__pat_t(&dst->t1);
}

static void _fx_copy_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(
    struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* src,
    struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(
    struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0,
    struct _fx_N10Ast__pat_t_data_t* t1,
    struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* list of ((pat, exp) list, pat) pairs: dtor and cons. */
static void _fx_free_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(
    struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t, _fx_free_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t);
}

static int _fx_cons_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(
    struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* hd,
    struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t, _fx_copy_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t);
}

/* (((pat, exp) list, pat) list, exp, for-flags, (typ, loc)) tuple;
   t2 is a plain record. */
static void _fx_free_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(&dst->t0);
    _fx_free_N10Ast__exp_t(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3);
}

static void _fx_copy_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3);
}

static void _fx_make_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* t0,
    struct _fx_N10Ast__exp_t_data_t* t1,
    struct _fx_R16Ast__for_flags_t* t2,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3,
    struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3);
}

/* (exp, (pat, exp) list, (typ, loc)) tuple: dtor / copy
   (the matching make_* continues past this region). */
static void _fx_free_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t0);
    _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}
static void _fx_make_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_N10Ast__typ_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T2ST2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* dst) { fx_free_str(&dst->t0); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2ST2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* dst) { fx_copy_str(&src->t0, &dst->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2ST2N10Ast__typ_tR10Ast__loc_t( fx_str_t* t0, struct 
_fx_T2N10Ast__typ_tR10Ast__loc_t* t1, struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* fx_result) { fx_copy_str(t0, &fx_result->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T3SST2N10Ast__typ_tR10Ast__loc_t(struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3SST2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3SST2N10Ast__typ_tR10Ast__loc_t( fx_str_t* t0, fx_str_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t( struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); _fx_free_R16Ast__val_flags_t(&dst->t2); } static void _fx_copy_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t( struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* src, struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_R16Ast__val_flags_t(&src->t2, &dst->t2); dst->t3 = src->t3; } static void _fx_make_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_R16Ast__val_flags_t* t2, struct _fx_R10Ast__loc_t* t3, struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, 
&fx_result->t1); _fx_copy_R16Ast__val_flags_t(t2, &fx_result->t2); fx_result->t3 = *t3; } static int _fx_cons_LT2iR9Ast__id_t( struct _fx_T2iR9Ast__id_t* hd, struct _fx_LT2iR9Ast__id_t_data_t* tl, bool addref_tl, struct _fx_LT2iR9Ast__id_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2iR9Ast__id_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_T2LT2iR9Ast__id_tR10Ast__loc_t(struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* dst) { fx_free_list_simple(&dst->t0); } static void _fx_copy_T2LT2iR9Ast__id_tR10Ast__loc_t( struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* src, struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LT2iR9Ast__id_tR10Ast__loc_t( struct _fx_LT2iR9Ast__id_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T3iLR9Ast__id_tR10Ast__loc_t(struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* dst) { fx_free_list_simple(&dst->t1); } static void _fx_copy_T3iLR9Ast__id_tR10Ast__loc_t( struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* src, struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3iLR9Ast__id_tR10Ast__loc_t( int_ t0, struct _fx_LR9Ast__id_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2LSR10Ast__loc_t(struct _fx_T2LSR10Ast__loc_t* dst) { _fx_free_LS(&dst->t0); } static void _fx_copy_T2LSR10Ast__loc_t(struct _fx_T2LSR10Ast__loc_t* src, struct _fx_T2LSR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LSR10Ast__loc_t( struct _fx_LS_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2LSR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void 
/* Destructor for the Ast.exp_t variant (the parser/typechecker expression
 * node). The node is heap-allocated and reference-counted: FX_DECREF returning
 * 1 presumably means this was the last reference -- TODO confirm FX_DECREF
 * returns the pre-decrement count. On last release, dispatch on the tag to
 * free the tag-specific payload (union member), then free the node itself.
 * Tags with no heap-managed payload fall through to the empty default.
 * In all cases *dst is nulled so the caller's pointer cannot dangle.
 * NOTE(review): generated code -- tag numbers and union member names must stay
 * in sync with the generator's declaration of _fx_N10Ast__exp_t_data_t. */
_fx_free_N10Ast__exp_t(struct _fx_N10Ast__exp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 4: _fx_free_T2Nt6option1N10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpReturn); break; case 5: _fx_free_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( &(*dst)->u.ExpRange); break; case 6: _fx_free_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpLit); break; case 7: _fx_free_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpIdent); break; case 8: _fx_free_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpBinary); break; case 9: _fx_free_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpUnary); break; case 10: _fx_free_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpIntrin); break; case 11: _fx_free_T2R9Ast__id_tN10Ast__exp_t(&(*dst)->u.ExpSync); break; case 12: _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpSeq); break; case 13: _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkTuple); break; case 14: _fx_free_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkArray); break; case 15: _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkVector); break; case 16: _fx_free_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkRecord); break; case 17: _fx_free_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpUpdateRecord); break; case 18: _fx_free_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpCall); break; case 19: _fx_free_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( &(*dst)->u.ExpAt); break; case 20: _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpAssign); break; case 21: _fx_free_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMem);
break; case 22: _fx_free_T2N10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpThrow); break; case 23: _fx_free_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpIf); break; case 24: _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpWhile); break; case 25: _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpDoWhile); break; case 26: _fx_free_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(&(*dst)->u.ExpFor); break; case 27: _fx_free_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t( &(*dst)->u.ExpMap); break; case 28: _fx_free_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpTryCatch); break; case 29: _fx_free_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMatch); break; case 30: _fx_free_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpCast); break; case 31: _fx_free_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpTyped); break; case 32: _fx_free_T2ST2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpCCode); break; case 33: _fx_free_T3SST2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpData); break; case 34: _fx_free_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t(&(*dst)->u.DefVal); break; case 35: _fx_free_rR13Ast__deffun_t(&(*dst)->u.DefFun); break; case 36: _fx_free_rR13Ast__defexn_t(&(*dst)->u.DefExn); break; case 37: _fx_free_rR13Ast__deftyp_t(&(*dst)->u.DefTyp); break; case 38: _fx_free_rR17Ast__defvariant_t(&(*dst)->u.DefVariant); break; case 39: _fx_free_rR19Ast__definterface_t(&(*dst)->u.DefInterface); break; case 40: _fx_free_T2LT2iR9Ast__id_tR10Ast__loc_t(&(*dst)->u.DirImport); break; case 41: _fx_free_T3iLR9Ast__id_tR10Ast__loc_t(&(*dst)->u.DirImportFrom); break; case 42: _fx_free_T2LSR10Ast__loc_t(&(*dst)->u.DirPragma); break; default: ; } fx_free(*dst); } *dst = 0; } static void
/* Lifetime helpers (free/copy/make triads) for the pattern-related tuple
 * payloads, followed by the reference-counted destructors for the Ast.pat_t
 * and Ast.env_entry_t variants, and the (string, loc) pair/list helpers.
 * NOTE(review): generated code; see the mangling note earlier in this file's
 * generator output -- field names t0..tN correspond to tuple positions. */
_fx_free_T2N10Ast__lit_tR10Ast__loc_t(struct _fx_T2N10Ast__lit_tR10Ast__loc_t* dst) { _fx_free_N10Ast__lit_t(&dst->t0); } static void _fx_copy_T2N10Ast__lit_tR10Ast__loc_t( struct _fx_T2N10Ast__lit_tR10Ast__loc_t* src, struct _fx_T2N10Ast__lit_tR10Ast__loc_t* dst) { _fx_copy_N10Ast__lit_t(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N10Ast__lit_tR10Ast__loc_t( struct _fx_N10Ast__lit_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N10Ast__lit_tR10Ast__loc_t* fx_result) { _fx_copy_N10Ast__lit_t(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2LN10Ast__pat_tR10Ast__loc_t(struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* dst) { _fx_free_LN10Ast__pat_t(&dst->t0); } static void _fx_copy_T2LN10Ast__pat_tR10Ast__loc_t( struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* src, struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LN10Ast__pat_tR10Ast__loc_t( struct _fx_LN10Ast__pat_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t(struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* dst) { _fx_free_LN10Ast__pat_t(&dst->t1); } static void _fx_copy_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t( struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* src, struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t( struct _fx_R9Ast__id_t* t0, struct _fx_LN10Ast__pat_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2R9Ast__id_tN10Ast__pat_t(struct _fx_T2R9Ast__id_tN10Ast__pat_t* dst) { _fx_free_N10Ast__pat_t(&dst->t1); } static void
_fx_copy_T2R9Ast__id_tN10Ast__pat_t( struct _fx_T2R9Ast__id_tN10Ast__pat_t* src, struct _fx_T2R9Ast__id_tN10Ast__pat_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN10Ast__pat_t( struct _fx_R9Ast__id_t* t0, struct _fx_N10Ast__pat_t_data_t* t1, struct _fx_T2R9Ast__id_tN10Ast__pat_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tN10Ast__pat_t(struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__pat_t, _fx_free_T2R9Ast__id_tN10Ast__pat_t); } static int _fx_cons_LT2R9Ast__id_tN10Ast__pat_t( struct _fx_T2R9Ast__id_tN10Ast__pat_t* hd, struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__pat_t, _fx_copy_T2R9Ast__id_tN10Ast__pat_t); } static void _fx_free_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t( struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* dst) { _fx_free_LT2R9Ast__id_tN10Ast__pat_t(&dst->t1); } static void _fx_copy_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t( struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* src, struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t( struct _fx_Nt6option1R9Ast__id_t* t0, struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0);
_fx_free_N10Ast__pat_t(&dst->t1); } static void _fx_copy_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t( struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* src, struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_N10Ast__pat_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); } static void _fx_copy_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t( struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* src, struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; dst->t2 = src->t2; } static void _fx_make_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; fx_result->t2 = *t2; } static void _fx_free_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); _fx_free_N10Ast__typ_t(&dst->t1); } static void _fx_copy_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct
_fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); } static void _fx_copy_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t( struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* src, struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2N10Ast__pat_tR10Ast__loc_t(struct _fx_T2N10Ast__pat_tR10Ast__loc_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); } static void _fx_copy_T2N10Ast__pat_tR10Ast__loc_t( struct _fx_T2N10Ast__pat_tR10Ast__loc_t* src, struct _fx_T2N10Ast__pat_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N10Ast__pat_tR10Ast__loc_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N10Ast__pat_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_N10Ast__pat_t(struct _fx_N10Ast__pat_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 2: _fx_free_T2N10Ast__lit_tR10Ast__loc_t(&(*dst)->u.PatLit); break; case 4: _fx_free_T2LN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatTuple); break; case 5: _fx_free_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatVariant); break; case 6: _fx_free_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatRecord); break; case 7:
_fx_free_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatCons); break; case 8: _fx_free_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t(&(*dst)->u.PatAs); break; case 9: _fx_free_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t(&(*dst)->u.PatTyped); break; case 10: _fx_free_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.PatWhen); break; case 11: _fx_free_T2LN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatAlt); break; case 12: _fx_free_T2N10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatRef); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_N16Ast__env_entry_t(struct _fx_N16Ast__env_entry_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 2: _fx_free_N10Ast__typ_t(&(*dst)->u.EnvTyp); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_T2SR10Ast__loc_t(struct _fx_T2SR10Ast__loc_t* dst) { fx_free_str(&dst->t0); } static void _fx_copy_T2SR10Ast__loc_t(struct _fx_T2SR10Ast__loc_t* src, struct _fx_T2SR10Ast__loc_t* dst) { fx_copy_str(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2SR10Ast__loc_t(fx_str_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2SR10Ast__loc_t* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_LT2SR10Ast__loc_t(struct _fx_LT2SR10Ast__loc_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2SR10Ast__loc_t, _fx_free_T2SR10Ast__loc_t); } static int _fx_cons_LT2SR10Ast__loc_t( struct _fx_T2SR10Ast__loc_t* hd, struct _fx_LT2SR10Ast__loc_t_data_t* tl, bool addref_tl, struct _fx_LT2SR10Ast__loc_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2SR10Ast__loc_t, _fx_copy_T2SR10Ast__loc_t); } static int _fx_cons_Li(int_ hd, struct _fx_Li_data_t* tl, bool addref_tl, struct _fx_Li_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_Li, FX_COPY_SIMPLE); } static void _fx_free_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t( struct
/* Lifetime helpers for the 10-field module-descriptor tuple (id, name string,
 * two scalars, exp list, int list, id->env-entry map, two scalars, id-info
 * dynvec), the Ast.defmodule_t destructor wrapping it, an exception list,
 * a (bool, string) pair, and the Lexer.token_t variant free/copy.
 * NOTE(review): generated code; token_t is stored by value (no refcount) --
 * its free just releases heap payloads and resets tag to 0. */
_fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* dst) { fx_free_str(&dst->t1); _fx_free_LN10Ast__exp_t(&dst->t4); fx_free_list_simple(&dst->t5); _fx_free_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&dst->t6); _fx_free_Nt9Dynvec__t1N14Ast__id_info_t(&dst->t9); } static void _fx_copy_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t( struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* src, struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); dst->t2 = src->t2; dst->t3 = src->t3; FX_COPY_PTR(src->t4, &dst->t4); FX_COPY_PTR(src->t5, &dst->t5); _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&src->t6, &dst->t6); dst->t7 = src->t7; dst->t8 = src->t8; FX_COPY_PTR(src->t9, &dst->t9); } static void _fx_make_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t( struct _fx_R9Ast__id_t* t0, fx_str_t* t1, int_ t2, bool t3, struct _fx_LN10Ast__exp_t_data_t* t4, struct _fx_Li_data_t* t5, struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* t6, bool t7, int_ t8, struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t* t9, struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* fx_result) { fx_result->t0 = *t0; fx_copy_str(t1, &fx_result->t1); fx_result->t2 = t2; fx_result->t3 = t3; FX_COPY_PTR(t4, &fx_result->t4); FX_COPY_PTR(t5, &fx_result->t5); _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(t6, &fx_result->t6); fx_result->t7 = t7; fx_result->t8 = t8; FX_COPY_PTR(t9, &fx_result->t9); } static void _fx_free_N16Ast__defmodule_t(struct _fx_N16Ast__defmodule_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) {
_fx_free_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t( &(*dst)->u.defmodule_t); fx_free(*dst); } *dst = 0; } static void _fx_free_LE(struct _fx_LE_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LE, fx_free_exn); } static int _fx_cons_LE(fx_exn_t* hd, struct _fx_LE_data_t* tl, bool addref_tl, struct _fx_LE_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LE, fx_copy_exn); } static void _fx_free_T2BS(struct _fx_T2BS* dst) { fx_free_str(&dst->t1); } static void _fx_copy_T2BS(struct _fx_T2BS* src, struct _fx_T2BS* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); } static void _fx_make_T2BS(bool t0, fx_str_t* t1, struct _fx_T2BS* fx_result) { fx_result->t0 = t0; fx_copy_str(t1, &fx_result->t1); } static void _fx_free_N14Lexer__token_t(struct _fx_N14Lexer__token_t* dst) { switch (dst->tag) { case 1: _fx_free_N10Ast__lit_t(&dst->u.LITERAL); break; case 2: _fx_free_T2BS(&dst->u.IDENT); break; case 3: fx_free_str(&dst->u.TYVAR); break; case 13: fx_free_str(&dst->u.DATA); break; case 94: _fx_free_N13Ast__binary_t(&dst->u.AUG_BINOP); break; case 100: fx_free_str(&dst->u.RESERVED); break; default: ; } dst->tag = 0; } static void _fx_copy_N14Lexer__token_t(struct _fx_N14Lexer__token_t* src, struct _fx_N14Lexer__token_t* dst) { dst->tag = src->tag; switch (src->tag) { case 1: _fx_copy_N10Ast__lit_t(&src->u.LITERAL, &dst->u.LITERAL); break; case 2: _fx_copy_T2BS(&src->u.IDENT, &dst->u.IDENT); break; case 3: fx_copy_str(&src->u.TYVAR, &dst->u.TYVAR); break; case 13: fx_copy_str(&src->u.DATA, &dst->u.DATA); break; case 94: FX_COPY_PTR(src->u.AUG_BINOP, &dst->u.AUG_BINOP); break; case 100: fx_copy_str(&src->u.RESERVED, &dst->u.RESERVED); break; default: dst->u = src->u; } } static void _fx_free_LN14Lexer__token_t(struct _fx_LN14Lexer__token_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14Lexer__token_t, _fx_free_N14Lexer__token_t); } static int _fx_cons_LN14Lexer__token_t( struct _fx_N14Lexer__token_t* hd, struct
_fx_LN14Lexer__token_t_data_t* tl, bool addref_tl, struct _fx_LN14Lexer__token_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14Lexer__token_t, _fx_copy_N14Lexer__token_t); } static void _fx_free_N14K_form__klit_t(struct _fx_N14K_form__klit_t* dst) { switch (dst->tag) { case 5: fx_free_str(&dst->u.KLitString); break; case 8: _fx_free_N14K_form__ktyp_t(&dst->u.KLitNil); break; default: ; } dst->tag = 0; } static void _fx_copy_N14K_form__klit_t(struct _fx_N14K_form__klit_t* src, struct _fx_N14K_form__klit_t* dst) { dst->tag = src->tag; switch (src->tag) { case 5: fx_copy_str(&src->u.KLitString, &dst->u.KLitString); break; case 8: FX_COPY_PTR(src->u.KLitNil, &dst->u.KLitNil); break; default: dst->u = src->u; } } static void _fx_free_N14K_form__atom_t(struct _fx_N14K_form__atom_t* dst) { switch (dst->tag) { case 2: _fx_free_N14K_form__klit_t(&dst->u.AtomLit); break; default: ; } dst->tag = 0; } static void _fx_copy_N14K_form__atom_t(struct _fx_N14K_form__atom_t* src, struct _fx_N14K_form__atom_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: _fx_copy_N14K_form__klit_t(&src->u.AtomLit, &dst->u.AtomLit); break; default: dst->u = src->u; } } static void _fx_free_Nt6option1N14K_form__atom_t(struct _fx_Nt6option1N14K_form__atom_t* dst) { switch (dst->tag) { case 2: _fx_free_N14K_form__atom_t(&dst->u.Some); break; default: ; } dst->tag = 0; } static void _fx_copy_Nt6option1N14K_form__atom_t( struct _fx_Nt6option1N14K_form__atom_t* src, struct _fx_Nt6option1N14K_form__atom_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: _fx_copy_N14K_form__atom_t(&src->u.Some, &dst->u.Some); break; default: dst->u = src->u; } } static void _fx_free_LN14K_form__kexp_t(struct _fx_LN14K_form__kexp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14K_form__kexp_t, _fx_free_N14K_form__kexp_t); } static int _fx_cons_LN14K_form__kexp_t( struct _fx_N14K_form__kexp_t_data_t* hd, struct _fx_LN14K_form__kexp_t_data_t* tl, bool addref_tl, struct _fx_LN14K_form__kexp_t_data_t** fx_result) { 
FX_MAKE_LIST_IMPL(_fx_LN14K_form__kexp_t, FX_COPY_PTR); } static void _fx_free_T2BN14K_form__atom_t(struct _fx_T2BN14K_form__atom_t* dst) { _fx_free_N14K_form__atom_t(&dst->t1); } static void _fx_copy_T2BN14K_form__atom_t(struct _fx_T2BN14K_form__atom_t* src, struct _fx_T2BN14K_form__atom_t* dst) { dst->t0 = src->t0; _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1); } static void _fx_make_T2BN14K_form__atom_t(bool t0, struct _fx_N14K_form__atom_t* t1, struct _fx_T2BN14K_form__atom_t* fx_result) { fx_result->t0 = t0; _fx_copy_N14K_form__atom_t(t1, &fx_result->t1); } static void _fx_free_LT2BN14K_form__atom_t(struct _fx_LT2BN14K_form__atom_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2BN14K_form__atom_t, _fx_free_T2BN14K_form__atom_t); } static int _fx_cons_LT2BN14K_form__atom_t( struct _fx_T2BN14K_form__atom_t* hd, struct _fx_LT2BN14K_form__atom_t_data_t* tl, bool addref_tl, struct _fx_LT2BN14K_form__atom_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2BN14K_form__atom_t, _fx_copy_T2BN14K_form__atom_t); } static void _fx_free_T2LN14K_form__ktyp_tN14K_form__ktyp_t(struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* dst) { _fx_free_LN14K_form__ktyp_t(&dst->t0); _fx_free_N14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2LN14K_form__ktyp_tN14K_form__ktyp_t( struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* src, struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14K_form__ktyp_tN14K_form__ktyp_t( struct _fx_LN14K_form__ktyp_t_data_t* t0, struct _fx_N14K_form__ktyp_t_data_t* t1, struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* dst) { _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t( struct 
_fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* src, struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t( struct _fx_R9Ast__id_t* t0, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* t1, struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2iN14K_form__ktyp_t(struct _fx_T2iN14K_form__ktyp_t* dst) { _fx_free_N14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2iN14K_form__ktyp_t(struct _fx_T2iN14K_form__ktyp_t* src, struct _fx_T2iN14K_form__ktyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2iN14K_form__ktyp_t( int_ t0, struct _fx_N14K_form__ktyp_t_data_t* t1, struct _fx_T2iN14K_form__ktyp_t* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_N14K_form__ktyp_t(struct _fx_N14K_form__ktyp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 11: _fx_free_T2LN14K_form__ktyp_tN14K_form__ktyp_t(&(*dst)->u.KTypFun); break; case 12: _fx_free_LN14K_form__ktyp_t(&(*dst)->u.KTypTuple); break; case 13: _fx_free_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(&(*dst)->u.KTypRecord); break; case 15: _fx_free_T2iN14K_form__ktyp_t(&(*dst)->u.KTypArray); break; case 16: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypVector); break; case 17: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypList); break; case 18: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypRef); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_Ta3N14K_form__atom_t(struct _fx_Ta3N14K_form__atom_t* dst) { _fx_free_N14K_form__atom_t(&dst->t0); _fx_free_N14K_form__atom_t(&dst->t1); _fx_free_N14K_form__atom_t(&dst->t2); } static void _fx_copy_Ta3N14K_form__atom_t(struct _fx_Ta3N14K_form__atom_t* src, struct _fx_Ta3N14K_form__atom_t* dst) { _fx_copy_N14K_form__atom_t(&src->t0, 
&dst->t0); _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1); _fx_copy_N14K_form__atom_t(&src->t2, &dst->t2); } static void _fx_make_Ta3N14K_form__atom_t( struct _fx_N14K_form__atom_t* t0, struct _fx_N14K_form__atom_t* t1, struct _fx_N14K_form__atom_t* t2, struct _fx_Ta3N14K_form__atom_t* fx_result) { _fx_copy_N14K_form__atom_t(t0, &fx_result->t0); _fx_copy_N14K_form__atom_t(t1, &fx_result->t1); _fx_copy_N14K_form__atom_t(t2, &fx_result->t2); } static void _fx_free_N13K_form__dom_t(struct _fx_N13K_form__dom_t* dst) { switch (dst->tag) { case 1: _fx_free_N14K_form__atom_t(&dst->u.DomainElem); break; case 2: _fx_free_N14K_form__atom_t(&dst->u.DomainFast); break; case 3: _fx_free_Ta3N14K_form__atom_t(&dst->u.DomainRange); break; default: ; } dst->tag = 0; } static void _fx_copy_N13K_form__dom_t(struct _fx_N13K_form__dom_t* src, struct _fx_N13K_form__dom_t* dst) { dst->tag = src->tag; switch (src->tag) { case 1: _fx_copy_N14K_form__atom_t(&src->u.DomainElem, &dst->u.DomainElem); break; case 2: _fx_copy_N14K_form__atom_t(&src->u.DomainFast, &dst->u.DomainFast); break; case 3: _fx_copy_Ta3N14K_form__atom_t(&src->u.DomainRange, &dst->u.DomainRange); break; default: dst->u = src->u; } } static void _fx_free_T2Nt6option1N14K_form__atom_tR10Ast__loc_t(struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N14K_form__atom_t(&dst->t0); } static void _fx_copy_T2Nt6option1N14K_form__atom_tR10Ast__loc_t( struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* src, struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* dst) { _fx_copy_Nt6option1N14K_form__atom_t(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2Nt6option1N14K_form__atom_tR10Ast__loc_t( struct _fx_Nt6option1N14K_form__atom_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* fx_result) { _fx_copy_Nt6option1N14K_form__atom_t(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(struct 
_fx_T2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__ktyp_t(&dst->t0); } static void _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_N14K_form__ktyp_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__atom_t(&dst->t0); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_N14K_form__atom_t* t0, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1, struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { _fx_copy_N14K_form__atom_t(t0, &fx_result->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_N13Ast__binary_t(&dst->t0); _fx_free_N14K_form__atom_t(&dst->t1); _fx_free_N14K_form__atom_t(&dst->t2); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct 
// (continuation: copy for the 4-tuple (Ast binary, atom, atom, (ktyp, loc)))
_fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
    _fx_copy_N14K_form__atom_t(&src->t2, &dst->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
static void _fx_make_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N13Ast__binary_t_data_t* t0,
    struct _fx_N14K_form__atom_t* t1,
    struct _fx_N14K_form__atom_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
    _fx_copy_N14K_form__atom_t(t2, &fx_result->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}

// free/copy/make for the 3-tuple (Ast unary, atom, (ktyp, loc)); t0 is plain data.
static void _fx_free_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__atom_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}
static void _fx_copy_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
static void _fx_make_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N12Ast__unary_t* t0,
    struct _fx_N14K_form__atom_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
    // (continuation: tail of the make for (Ast unary, atom, (ktyp, loc)))
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}

// list of atoms: destructor and cons, via the generated-list helper macros.
static void _fx_free_LN14K_form__atom_t(struct _fx_LN14K_form__atom_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN14K_form__atom_t, _fx_free_N14K_form__atom_t);
}
static int _fx_cons_LN14K_form__atom_t(
    struct _fx_N14K_form__atom_t* hd,
    struct _fx_LN14K_form__atom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN14K_form__atom_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN14K_form__atom_t, _fx_copy_N14K_form__atom_t);
}

// free/copy/make for (intrinsic, atom list, (ktyp, loc)); t0 is plain data.
static void _fx_free_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14K_form__atom_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}
static void _fx_copy_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
static void _fx_make_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N13Ast__intrin_t* t0,
    struct _fx_LN14K_form__atom_t_data_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}

// free/copy for (id, kexp); t1 is a ref-counted kexp pointer.
static void _fx_free_T2R9Ast__id_tN14K_form__kexp_t(struct _fx_T2R9Ast__id_tN14K_form__kexp_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t1);
}
static void _fx_copy_T2R9Ast__id_tN14K_form__kexp_t(
    struct _fx_T2R9Ast__id_tN14K_form__kexp_t* src,
    struct _fx_T2R9Ast__id_tN14K_form__kexp_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

// (continues on the next line)
static void _fx_make_T2R9Ast__id_tN14K_form__kexp_t(
    struct _fx_R9Ast__id_t* t0,
    struct
// (continuation of _fx_make_T2R9Ast__id_tN14K_form__kexp_t)
_fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN14K_form__kexp_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

// free/copy/make for (kexp list, (ktyp, loc)).
static void _fx_free_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14K_form__kexp_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LN14K_form__kexp_t_data_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}

// free/copy for (kexp, kexp, kexp, (ktyp, loc)) (used by the KExpIf case below).
static void _fx_free_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
    _fx_free_N14K_form__kexp_t(&dst->t2);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}
static void _fx_copy_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}

// (continues on the next line)
static void
// (continuation: make for (kexp, kexp, kexp, (ktyp, loc)))
_fx_make_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__kexp_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_N14K_form__kexp_t_data_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}

// free/copy/make for (id, atom list, (ktyp, loc)).
static void _fx_free_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14K_form__atom_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}
static void _fx_copy_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
static void _fx_make_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_LN14K_form__atom_t_data_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}

// free for (id, int, atom list, (ktyp, loc)).
static void _fx_free_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14K_form__atom_t(&dst->t2);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}

// (continues on the next line)
static void _fx_copy_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct
// (continuation: copy for (id, int, atom list, (ktyp, loc)))
_fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    dst->t1 = src->t1;
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
static void _fx_make_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    int_ t1,
    struct _fx_LN14K_form__atom_t_data_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    fx_result->t1 = t1;
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}

// free/copy/make for (atom list, (ktyp, loc)).
static void _fx_free_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14K_form__atom_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LN14K_form__atom_t_data_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}

// free for (id, id, atom list, (ktyp, loc)).
static void _fx_free_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14K_form__atom_t(&dst->t2);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}

// (continues on the next line)
static void
// (continuation: copy for (id, id, atom list, (ktyp, loc)))
_fx_copy_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    dst->t1 = src->t1;
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
static void _fx_make_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_R9Ast__id_t* t1,
    struct _fx_LN14K_form__atom_t_data_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    fx_result->t1 = *t1;
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}

// list of (bool, atom)-pair lists: destructor and cons.
static void _fx_free_LLT2BN14K_form__atom_t(struct _fx_LLT2BN14K_form__atom_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LLT2BN14K_form__atom_t, _fx_free_LT2BN14K_form__atom_t);
}
static int _fx_cons_LLT2BN14K_form__atom_t(
    struct _fx_LT2BN14K_form__atom_t_data_t* hd,
    struct _fx_LLT2BN14K_form__atom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LLT2BN14K_form__atom_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LLT2BN14K_form__atom_t, FX_COPY_PTR);
}

// free/copy for (bool, nested (bool, atom) lists, (ktyp, loc)).
static void _fx_free_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LLT2BN14K_form__atom_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}
static void _fx_copy_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}

// (continues on the next line)
static void
// (continuation: make for (bool, nested list, (ktyp, loc)))
_fx_make_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    bool t0,
    struct _fx_LLT2BN14K_form__atom_t_data_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}

// free/copy/make for ((bool, atom) list, (ktyp, loc)).
static void _fx_free_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LT2BN14K_form__atom_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LT2BN14K_form__atom_t_data_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}

// list of dom_t values: destructor and cons.
static void _fx_free_LN13K_form__dom_t(struct _fx_LN13K_form__dom_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN13K_form__dom_t, _fx_free_N13K_form__dom_t);
}
static int _fx_cons_LN13K_form__dom_t(
    struct _fx_N13K_form__dom_t* hd,
    struct _fx_LN13K_form__dom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN13K_form__dom_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN13K_form__dom_t, _fx_copy_N13K_form__dom_t);
}

// (continues on the next line: free for (atom, border, interpolate, dom list, (ktyp, loc)))
static void _fx_free_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__atom_t(&dst->t0);
    // (continuation: tail of the T5 free; t1/t2 are plain data and need no release)
    _fx_free_LN13K_form__dom_t(&dst->t3);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t4);
}
static void _fx_copy_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0);
    dst->t1 = src->t1;
    dst->t2 = src->t2;
    FX_COPY_PTR(src->t3, &dst->t3);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t4, &dst->t4);
}
static void _fx_make_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__atom_t* t0,
    struct _fx_N13Ast__border_t* t1,
    struct _fx_N18Ast__interpolate_t* t2,
    struct _fx_LN13K_form__dom_t_data_t* t3,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t4,
    struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    _fx_copy_N14K_form__atom_t(t0, &fx_result->t0);
    fx_result->t1 = *t1;
    fx_result->t2 = *t2;
    FX_COPY_PTR(t3, &fx_result->t3);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t4, &fx_result->t4);
}

// free/copy for (id, int, (ktyp, loc)); only t2 owns resources.
static void _fx_free_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}
static void _fx_copy_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    dst->t1 = src->t1;
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}

// (continues on the next line)
static void _fx_make_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    int_ t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t*
// (continuation of _fx_make_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t)
fx_result)
{
    fx_result->t0 = *t0;
    fx_result->t1 = t1;
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}

// free/copy/make for (id, atom, loc); t0/t2 are plain data.
static void _fx_free_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__atom_t(&dst->t1);
}
static void _fx_copy_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
    dst->t2 = src->t2;
}
static void _fx_make_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14K_form__atom_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}

// free/copy/make for (kexp list, kexp).
static void _fx_free_T2LN14K_form__kexp_tN14K_form__kexp_t(struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* dst)
{
    _fx_free_LN14K_form__kexp_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
}
static void _fx_copy_T2LN14K_form__kexp_tN14K_form__kexp_t(
    struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* src,
    struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2LN14K_form__kexp_tN14K_form__kexp_t(
    struct _fx_LN14K_form__kexp_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
}

// list of (kexp list, kexp) pairs: destructor and cons.
static void _fx_free_LT2LN14K_form__kexp_tN14K_form__kexp_t(struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t, _fx_free_T2LN14K_form__kexp_tN14K_form__kexp_t);
}

// (continues on the next line)
static int _fx_cons_LT2LN14K_form__kexp_tN14K_form__kexp_t(
    struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* hd,
    struct
// (continuation of _fx_cons_LT2LN14K_form__kexp_tN14K_form__kexp_t)
_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t, _fx_copy_T2LN14K_form__kexp_tN14K_form__kexp_t);
}

// free/copy/make for ((kexp list, kexp) list, (ktyp, loc)).
static void _fx_free_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LT2LN14K_form__kexp_tN14K_form__kexp_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}

// free/copy for (kexp, kexp, (ktyp, loc)).
static void _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}

// (continues on the next line)
static void _fx_copy_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    // (continuation: tail of the copy for (kexp, kexp, (ktyp, loc)))
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
static void _fx_make_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__kexp_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}

// free/copy/make for (atom, ktyp, loc); loc (t2) is plain data.
static void _fx_free_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__atom_t(&dst->t0);
    _fx_free_N14K_form__ktyp_t(&dst->t1);
}
static void _fx_copy_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
static void _fx_make_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__atom_t* t0,
    struct _fx_N14K_form__ktyp_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    _fx_copy_N14K_form__atom_t(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}

// free/copy/make for (id, dom).
static void _fx_free_T2R9Ast__id_tN13K_form__dom_t(struct _fx_T2R9Ast__id_tN13K_form__dom_t* dst)
{
    _fx_free_N13K_form__dom_t(&dst->t1);
}
static void _fx_copy_T2R9Ast__id_tN13K_form__dom_t(
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* src,
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* dst)
{
    dst->t0 = src->t0;
    _fx_copy_N13K_form__dom_t(&src->t1, &dst->t1);
}
static void _fx_make_T2R9Ast__id_tN13K_form__dom_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N13K_form__dom_t* t1,
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* fx_result)
{
    fx_result->t0 = *t0;
    _fx_copy_N13K_form__dom_t(t1, &fx_result->t1);
}

// (continues on the next line)
static
// (continuation: list of (id, dom) pairs -- destructor and cons)
void _fx_free_LT2R9Ast__id_tN13K_form__dom_t(struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN13K_form__dom_t, _fx_free_T2R9Ast__id_tN13K_form__dom_t);
}
static int _fx_cons_LT2R9Ast__id_tN13K_form__dom_t(
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* hd,
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN13K_form__dom_t, _fx_copy_T2R9Ast__id_tN13K_form__dom_t);
}

// free/copy/make for (kexp, (id, dom) list, id list); the id list is released with
// the "simple" list helper (elements carry no owned resources).
static void _fx_free_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t0);
    _fx_free_LT2R9Ast__id_tN13K_form__dom_t(&dst->t1);
    fx_free_list_simple(&dst->t2);
}
static void _fx_copy_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* src,
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
}
static void _fx_make_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_N14K_form__kexp_t_data_t* t0,
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t1,
    struct _fx_LR9Ast__id_t_data_t* t2,
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
}

// list of the above triples: destructor.
static void _fx_free_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t, _fx_free_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t);
}

// (continues on the next line)
static int _fx_cons_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct
// (continuation of the cons for the triple list)
_fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* hd,
    struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t, _fx_copy_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t);
}

// free/copy for (clause list, kexp, for-flags, (ktyp, loc)); t2 (flags) is plain data.
static void _fx_free_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}
static void _fx_copy_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}

// (continues on the next line)
static void _fx_make_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_R16Ast__for_flags_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct
// (continuation: make for (clause list, kexp, for-flags, (ktyp, loc)))
_fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}

// free/copy/make for ((id, dom) list, id list, kexp, for-flags, loc); t3/t4 plain data.
static void _fx_free_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t(
    struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* dst)
{
    _fx_free_LT2R9Ast__id_tN13K_form__dom_t(&dst->t0);
    fx_free_list_simple(&dst->t1);
    _fx_free_N14K_form__kexp_t(&dst->t2);
}
static void _fx_copy_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t(
    struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* src,
    struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
    dst->t3 = src->t3;
    dst->t4 = src->t4;
}
static void _fx_make_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t(
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t0,
    struct _fx_LR9Ast__id_t_data_t* t1,
    struct _fx_N14K_form__kexp_t_data_t* t2,
    struct _fx_R16Ast__for_flags_t* t3,
    struct _fx_R10Ast__loc_t* t4,
    struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
    fx_result->t3 = *t3;
    fx_result->t4 = *t4;
}

// free for (kexp, kexp, loc); loc needs no release.
static void _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
}

// (continues on the next line)
static void _fx_copy_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(
// (continuation: copy for (kexp, kexp, loc))
struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* src,
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
static void _fx_make_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(
    struct _fx_N14K_form__kexp_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}

// free/copy/make for (string, (ktyp, loc)); strings use the runtime fx_str helpers.
static void _fx_free_T2ST2N14K_form__ktyp_tR10Ast__loc_t(struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    fx_free_str(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2ST2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    fx_copy_str(&src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2ST2N14K_form__ktyp_tR10Ast__loc_t(
    fx_str_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_copy_str(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}

// free/copy for (id, kexp, loc); only t1 is ref-counted.
static void _fx_free_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t1);
}
static void _fx_copy_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}

// (continues on the next line)
static void _fx_make_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    // (continuation: tail of _fx_make_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t)
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}

// Destructor for the ref-counted kexp variant node: when the last reference is
// dropped (FX_DECREF(...) == 1), release the payload selected by the tag, then
// free the node itself; in all cases the caller's pointer is nulled.
static void _fx_free_N14K_form__kexp_t(struct _fx_N14K_form__kexp_t_data_t** dst)
{
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        switch ((*dst)->tag) {
        case 4: _fx_free_T2Nt6option1N14K_form__atom_tR10Ast__loc_t(&(*dst)->u.KExpReturn); break;
        case 5: _fx_free_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpAtom); break;
        case 6: _fx_free_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpBinary); break;
        case 7: _fx_free_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpUnary); break;
        case 8: _fx_free_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpIntrin); break;
        case 9: _fx_free_T2R9Ast__id_tN14K_form__kexp_t(&(*dst)->u.KExpSync); break;
        case 10: _fx_free_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpSeq); break;
        case 11: _fx_free_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpIf); break;
        case 12: _fx_free_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpCall); break;
        case 13: _fx_free_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpICall); break;
        case 14: _fx_free_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkTuple); break;
        case 15: _fx_free_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkRecord); break;
        case 16: _fx_free_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkClosure); break;
        case 17: _fx_free_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkArray); break;
        case 18: _fx_free_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkVector); break;
        case 19: _fx_free_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t(
            &(*dst)->u.KExpAt); break;
        case 20:
            // (continuation of _fx_free_N14K_form__kexp_t's tag dispatch; note there is
            // no case 24 -- unlisted tags fall through to the empty default)
            _fx_free_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMem); break;
        case 21: _fx_free_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(&(*dst)->u.KExpAssign); break;
        case 22: _fx_free_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMatch); break;
        case 23: _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpTryCatch); break;
        case 25: _fx_free_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpCast); break;
        case 26: _fx_free_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
            &(*dst)->u.KExpMap); break;
        case 27: _fx_free_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t(
            &(*dst)->u.KExpFor); break;
        case 28: _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(&(*dst)->u.KExpWhile); break;
        case 29: _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(&(*dst)->u.KExpDoWhile); break;
        case 30: _fx_free_T2ST2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpCCode); break;
        case 31: _fx_free_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(&(*dst)->u.KDefVal); break;
        case 32: _fx_free_rR17K_form__kdeffun_t(&(*dst)->u.KDefFun); break;
        case 33: _fx_free_rR17K_form__kdefexn_t(&(*dst)->u.KDefExn); break;
        case 34: _fx_free_rR21K_form__kdefvariant_t(&(*dst)->u.KDefVariant); break;
        case 35: _fx_free_rR23K_form__kdefinterface_t(&(*dst)->u.KDefInterface); break;
        case 36: _fx_free_rR17K_form__kdeftyp_t(&(*dst)->u.KDefTyp); break;
        case 37: _fx_free_rR25K_form__kdefclosurevars_t(&(*dst)->u.KDefClosureVars); break;
        default: ;
        }
        fx_free(*dst);
    }
    *dst = 0;
}

// free/copy for the pragmas record; pragma_cpp is a plain flag.
static void _fx_free_R14Ast__pragmas_t(struct _fx_R14Ast__pragmas_t* dst)
{
    _fx_free_LT2SR10Ast__loc_t(&dst->pragma_clibs);
}

// (continues on the next line)
static void _fx_copy_R14Ast__pragmas_t(struct _fx_R14Ast__pragmas_t* src, struct _fx_R14Ast__pragmas_t* dst)
{
    dst->pragma_cpp = src->pragma_cpp;
    FX_COPY_PTR(src->pragma_clibs,
// (continuation of _fx_copy_R14Ast__pragmas_t)
&dst->pragma_clibs);
}
static void _fx_make_R14Ast__pragmas_t(
    bool r_pragma_cpp,
    struct _fx_LT2SR10Ast__loc_t_data_t* r_pragma_clibs,
    struct _fx_R14Ast__pragmas_t* fx_result)
{
    fx_result->pragma_cpp = r_pragma_cpp;
    FX_COPY_PTR(r_pragma_clibs, &fx_result->pragma_clibs);
}

// free/copy/make for the kmodule record (name, indices, cname string, top-level
// kexp list, dependency list, flags, pragmas).
static void _fx_free_R17K_form__kmodule_t(struct _fx_R17K_form__kmodule_t* dst)
{
    fx_free_str(&dst->km_cname);
    _fx_free_LN14K_form__kexp_t(&dst->km_top);
    fx_free_list_simple(&dst->km_deps);
    _fx_free_R14Ast__pragmas_t(&dst->km_pragmas);
}
static void _fx_copy_R17K_form__kmodule_t(struct _fx_R17K_form__kmodule_t* src, struct _fx_R17K_form__kmodule_t* dst)
{
    dst->km_name = src->km_name;
    dst->km_idx = src->km_idx;
    dst->km_toposort_idx = src->km_toposort_idx;
    fx_copy_str(&src->km_cname, &dst->km_cname);
    FX_COPY_PTR(src->km_top, &dst->km_top);
    FX_COPY_PTR(src->km_deps, &dst->km_deps);
    dst->km_skip = src->km_skip;
    dst->km_main = src->km_main;
    _fx_copy_R14Ast__pragmas_t(&src->km_pragmas, &dst->km_pragmas);
}
static void _fx_make_R17K_form__kmodule_t(
    struct _fx_R9Ast__id_t* r_km_name,
    int_ r_km_idx,
    int_ r_km_toposort_idx,
    fx_str_t* r_km_cname,
    struct _fx_LN14K_form__kexp_t_data_t* r_km_top,
    struct _fx_Li_data_t* r_km_deps,
    bool r_km_skip,
    bool r_km_main,
    struct _fx_R14Ast__pragmas_t* r_km_pragmas,
    struct _fx_R17K_form__kmodule_t* fx_result)
{
    fx_result->km_name = *r_km_name;
    fx_result->km_idx = r_km_idx;
    fx_result->km_toposort_idx = r_km_toposort_idx;
    fx_copy_str(r_km_cname, &fx_result->km_cname);
    FX_COPY_PTR(r_km_top, &fx_result->km_top);
    FX_COPY_PTR(r_km_deps, &fx_result->km_deps);
    fx_result->km_skip = r_km_skip;
    fx_result->km_main = r_km_main;
    _fx_copy_R14Ast__pragmas_t(r_km_pragmas, &fx_result->km_pragmas);
}

// list of kmodule records: destructor and cons.
static void _fx_free_LR17K_form__kmodule_t(struct _fx_LR17K_form__kmodule_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LR17K_form__kmodule_t, _fx_free_R17K_form__kmodule_t);
}

// (continues on the next line)
static int _fx_cons_LR17K_form__kmodule_t(
    struct _fx_R17K_form__kmodule_t* hd,
    struct _fx_LR17K_form__kmodule_t_data_t* tl,
bool addref_tl, struct _fx_LR17K_form__kmodule_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LR17K_form__kmodule_t, _fx_copy_R17K_form__kmodule_t); } static void _fx_free_Nt6option1N14C_form__ctyp_t(struct _fx_Nt6option1N14C_form__ctyp_t* dst) { switch (dst->tag) { case 2: _fx_free_N14C_form__ctyp_t(&dst->u.Some); break; default: ; } dst->tag = 0; } static void _fx_copy_Nt6option1N14C_form__ctyp_t( struct _fx_Nt6option1N14C_form__ctyp_t* src, struct _fx_Nt6option1N14C_form__ctyp_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: FX_COPY_PTR(src->u.Some, &dst->u.Some); break; default: dst->u = src->u; } } static void _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* dst) { _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* src, struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_Nt6option1R9Ast__id_t* t0, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* t1, struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LN14C_form__ctyp_t(struct _fx_LN14C_form__ctyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14C_form__ctyp_t, _fx_free_N14C_form__ctyp_t); } static int _fx_cons_LN14C_form__ctyp_t( struct _fx_N14C_form__ctyp_t_data_t* hd, struct _fx_LN14C_form__ctyp_t_data_t* tl, bool addref_tl, struct _fx_LN14C_form__ctyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14C_form__ctyp_t, FX_COPY_PTR); } static void _fx_free_T2LN14C_form__ctyp_tN14C_form__ctyp_t(struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* dst) { _fx_free_LN14C_form__ctyp_t(&dst->t0); 
_fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2LN14C_form__ctyp_tN14C_form__ctyp_t( struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* src, struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__ctyp_tN14C_form__ctyp_t( struct _fx_LN14C_form__ctyp_t_data_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static int _fx_cons_LN19C_form__ctyp_attr_t( struct _fx_N19C_form__ctyp_attr_t* hd, struct _fx_LN19C_form__ctyp_attr_t_data_t* tl, bool addref_tl, struct _fx_LN19C_form__ctyp_attr_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN19C_form__ctyp_attr_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* dst) { fx_free_list_simple(&dst->t0); _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t( struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* src, struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t( struct _fx_LN19C_form__ctyp_attr_t_data_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2iN14C_form__ctyp_t(struct _fx_T2iN14C_form__ctyp_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2iN14C_form__ctyp_t(struct _fx_T2iN14C_form__ctyp_t* src, struct _fx_T2iN14C_form__ctyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2iN14C_form__ctyp_t( int_ t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2iN14C_form__ctyp_t* fx_result) 
{ fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_N14C_form__ctyp_t(struct _fx_N14C_form__ctyp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 13: _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(&(*dst)->u.CTypStruct); break; case 14: _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(&(*dst)->u.CTypUnion); break; case 15: _fx_free_T2LN14C_form__ctyp_tN14C_form__ctyp_t(&(*dst)->u.CTypFunRawPtr); break; case 16: _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(&(*dst)->u.CTypRawPtr); break; case 17: _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(&(*dst)->u.CTypRawArray); break; case 18: _fx_free_T2iN14C_form__ctyp_t(&(*dst)->u.CTypArray); break; case 19: _fx_free_N14C_form__ctyp_t(&(*dst)->u.CTypVector); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t0); } static void _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__ctyp_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( 
struct _fx_R9Ast__id_t* t0, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1, struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__klit_t(&dst->t0); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_copy_N14K_form__klit_t(&src->t0, &dst->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14K_form__klit_t* t0, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1, struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { _fx_copy_N14K_form__klit_t(t0, &fx_result->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t1); _fx_free_N14C_form__cexp_t(&dst->t2); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void 
_fx_make_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N17C_form__cbinary_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_N14C_form__cexp_t_data_t* t2, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t3, struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t1); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N16C_form__cunary_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2, struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct 
_fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N14C_form__cexp_t(&dst->t1); _fx_free_N14C_form__cexp_t(&dst->t2); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t3); } static void 
_fx_copy_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_N14C_form__cexp_t_data_t* t2, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t3, struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_LN14C_form__cexp_t(struct _fx_LN14C_form__cexp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14C_form__cexp_t, _fx_free_N14C_form__cexp_t); } static int _fx_cons_LN14C_form__cexp_t( struct _fx_N14C_form__cexp_t_data_t* hd, struct _fx_LN14C_form__cexp_t_data_t* tl, bool addref_tl, struct _fx_LN14C_form__cexp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14C_form__cexp_t, FX_COPY_PTR); } static void _fx_free_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LN14C_form__cexp_t(&dst->t1); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct 
_fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LN14C_form__cexp_t_data_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_LN14C_form__cexp_t(&dst->t0); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_LN14C_form__cexp_t_data_t* t0, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1, struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_N14C_form__cexp_t(struct _fx_N14C_form__cexp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 1: _fx_free_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpIdent); break; case 2: _fx_free_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpLit); break; case 3: 
_fx_free_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( &(*dst)->u.CExpBinary); break; case 4: _fx_free_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpUnary); break; case 5: _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpMem); break; case 6: _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpArrow); break; case 7: _fx_free_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpCast); break; case 8: _fx_free_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpTernary); break; case 9: _fx_free_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpCall); break; case 10: _fx_free_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpInit); break; case 11: _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpTyp); break; case 12: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CExpCCode); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N14C_form__cexp_t(&dst->t0); } static void _fx_copy_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* src, struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { _fx_copy_Nt6option1N14C_form__cexp_t(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_Nt6option1N14C_form__cexp_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* fx_result) { _fx_copy_Nt6option1N14C_form__cexp_t(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2LN15C_form__cstmt_tR10Ast__loc_t(struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_LN15C_form__cstmt_t(&dst->t0); } static void 
_fx_copy_T2LN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LN15C_form__cstmt_tR10Ast__loc_t( struct _fx_LN15C_form__cstmt_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2R9Ast__id_tN15C_form__cstmt_t(struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* dst) { _fx_free_N15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN15C_form__cstmt_t( struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* src, struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN15C_form__cstmt_t( struct _fx_R9Ast__id_t* t0, struct _fx_N15C_form__cstmt_t_data_t* t1, struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N15C_form__cstmt_t(&dst->t1); _fx_free_N15C_form__cstmt_t(&dst->t2); } static void _fx_copy_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); dst->t3 = src->t3; } static void _fx_make_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N15C_form__cstmt_t_data_t* t1, struct _fx_N15C_form__cstmt_t_data_t* t2, struct _fx_R10Ast__loc_t* t3, struct 
_fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); fx_result->t3 = *t3; } static void _fx_free_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N14C_form__ctyp_t(&dst->t0); _fx_free_LN14C_form__cexp_t(&dst->t1); _fx_free_Nt6option1N14C_form__cexp_t(&dst->t2); _fx_free_LN14C_form__cexp_t(&dst->t3); _fx_free_N15C_form__cstmt_t(&dst->t4); } static void _fx_copy_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_copy_Nt6option1N14C_form__ctyp_t(&src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_Nt6option1N14C_form__cexp_t(&src->t2, &dst->t2); FX_COPY_PTR(src->t3, &dst->t3); FX_COPY_PTR(src->t4, &dst->t4); dst->t5 = src->t5; } static void _fx_make_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_Nt6option1N14C_form__ctyp_t* t0, struct _fx_LN14C_form__cexp_t_data_t* t1, struct _fx_Nt6option1N14C_form__cexp_t* t2, struct _fx_LN14C_form__cexp_t_data_t* t3, struct _fx_N15C_form__cstmt_t_data_t* t4, struct _fx_R10Ast__loc_t* t5, struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* fx_result) { _fx_copy_Nt6option1N14C_form__ctyp_t(t0, &fx_result->t0); FX_COPY_PTR(t1, 
&fx_result->t1); _fx_copy_Nt6option1N14C_form__cexp_t(t2, &fx_result->t2); FX_COPY_PTR(t3, &fx_result->t3); FX_COPY_PTR(t4, &fx_result->t4); fx_result->t5 = *t5; } static void _fx_free_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t( struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* dst) { _fx_free_N15C_form__cstmt_t(&dst->t0); _fx_free_N14C_form__cexp_t(&dst->t1); } static void _fx_copy_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t( struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* src, struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t( struct _fx_N15C_form__cstmt_t_data_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2LN14C_form__cexp_tLN15C_form__cstmt_t(struct 
_fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* dst) { _fx_free_LN14C_form__cexp_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* src, struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_LN14C_form__cexp_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t, _fx_free_T2LN14C_form__cexp_tLN15C_form__cstmt_t); } static int _fx_cons_LT2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* hd, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t, _fx_copy_T2LN14C_form__cexp_tLN15C_form__cstmt_t); } static void _fx_free_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void 
_fx_make_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t0); _fx_free_Nt6option1N14C_form__cexp_t(&dst->t2); } static void _fx_copy_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* src, struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; _fx_copy_Nt6option1N14C_form__cexp_t(&src->t2, &dst->t2); dst->t3 = src->t3; } static void _fx_make_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_N14C_form__ctyp_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_Nt6option1N14C_form__cexp_t* t2, struct _fx_R10Ast__loc_t* t3, struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; _fx_copy_Nt6option1N14C_form__cexp_t(t2, &fx_result->t2); fx_result->t3 = *t3; } static void _fx_free_T2N14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* src, struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void 
_fx_make_T2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t, _fx_free_T2N14C_form__cexp_tLN15C_form__cstmt_t); } static int _fx_cons_LT2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* hd, struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t, _fx_copy_T2N14C_form__cexp_tLN15C_form__cstmt_t); } static void _fx_free_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void 
_fx_free_N15C_form__cstmt_t(struct _fx_N15C_form__cstmt_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 2: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CComment); break; case 3: _fx_free_N14C_form__cexp_t(&(*dst)->u.CExp); break; case 6: _fx_free_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CStmtReturn); break; case 7: _fx_free_T2LN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtBlock); break; case 8: _fx_free_T2R9Ast__id_tN15C_form__cstmt_t(&(*dst)->u.CStmtSync); break; case 9: _fx_free_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtIf); break; case 12: _fx_free_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( &(*dst)->u.CStmtFor); break; case 13: _fx_free_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtWhile); break; case 14: _fx_free_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CStmtDoWhile); break; case 15: _fx_free_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtSwitch); break; case 16: _fx_free_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CDefVal); break; case 17: _fx_free_rR17C_form__cdeffun_t(&(*dst)->u.CDefFun); break; case 18: _fx_free_rR17C_form__cdeftyp_t(&(*dst)->u.CDefTyp); break; case 21: _fx_free_rR18C_form__cdefenum_t(&(*dst)->u.CDefEnum); break; case 22: _fx_free_rR23C_form__cdefinterface_t(&(*dst)->u.CDefInterface); break; case 23: _fx_free_rR19C_form__cdefmacro_t(&(*dst)->u.CMacroDef); break; case 25: _fx_free_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CMacroIf); break; case 26: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CMacroInclude); break; case 27: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CMacroPragma); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_R17C_form__cmodule_t(struct _fx_R17C_form__cmodule_t* dst) { 
fx_free_str(&dst->cmod_cname); _fx_free_LN15C_form__cstmt_t(&dst->cmod_ccode); _fx_free_R14Ast__pragmas_t(&dst->cmod_pragmas); } static void _fx_copy_R17C_form__cmodule_t(struct _fx_R17C_form__cmodule_t* src, struct _fx_R17C_form__cmodule_t* dst) { dst->cmod_name = src->cmod_name; fx_copy_str(&src->cmod_cname, &dst->cmod_cname); FX_COPY_PTR(src->cmod_ccode, &dst->cmod_ccode); dst->cmod_main = src->cmod_main; dst->cmod_recompile = src->cmod_recompile; dst->cmod_skip = src->cmod_skip; _fx_copy_R14Ast__pragmas_t(&src->cmod_pragmas, &dst->cmod_pragmas); } static void _fx_make_R17C_form__cmodule_t( struct _fx_R9Ast__id_t* r_cmod_name, fx_str_t* r_cmod_cname, struct _fx_LN15C_form__cstmt_t_data_t* r_cmod_ccode, bool r_cmod_main, bool r_cmod_recompile, bool r_cmod_skip, struct _fx_R14Ast__pragmas_t* r_cmod_pragmas, struct _fx_R17C_form__cmodule_t* fx_result) { fx_result->cmod_name = *r_cmod_name; fx_copy_str(r_cmod_cname, &fx_result->cmod_cname); FX_COPY_PTR(r_cmod_ccode, &fx_result->cmod_ccode); fx_result->cmod_main = r_cmod_main; fx_result->cmod_recompile = r_cmod_recompile; fx_result->cmod_skip = r_cmod_skip; _fx_copy_R14Ast__pragmas_t(r_cmod_pragmas, &fx_result->cmod_pragmas); } static void _fx_free_LR17C_form__cmodule_t(struct _fx_LR17C_form__cmodule_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LR17C_form__cmodule_t, _fx_free_R17C_form__cmodule_t); } static int _fx_cons_LR17C_form__cmodule_t( struct _fx_R17C_form__cmodule_t* hd, struct _fx_LR17C_form__cmodule_t_data_t* tl, bool addref_tl, struct _fx_LR17C_form__cmodule_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LR17C_form__cmodule_t, _fx_copy_R17C_form__cmodule_t); } static void _fx_free_T2LN14Lexer__token_tB(struct _fx_T2LN14Lexer__token_tB* dst) { _fx_free_LN14Lexer__token_t(&dst->t0); } static void _fx_copy_T2LN14Lexer__token_tB(struct _fx_T2LN14Lexer__token_tB* src, struct _fx_T2LN14Lexer__token_tB* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LN14Lexer__token_tB( struct 
_fx_LN14Lexer__token_t_data_t* t0, bool t1, struct _fx_T2LN14Lexer__token_tB* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_T2SB(struct _fx_T2SB* dst) { fx_free_str(&dst->t0); } static void _fx_copy_T2SB(struct _fx_T2SB* src, struct _fx_T2SB* dst) { fx_copy_str(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2SB(fx_str_t* t0, bool t1, struct _fx_T2SB* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_LT2SB(struct _fx_LT2SB_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2SB, _fx_free_T2SB); } static int _fx_cons_LT2SB(struct _fx_T2SB* hd, struct _fx_LT2SB_data_t* tl, bool addref_tl, struct _fx_LT2SB_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2SB, _fx_copy_T2SB); } static void _fx_free_T2SLS(struct _fx_T2SLS* dst) { fx_free_str(&dst->t0); _fx_free_LS(&dst->t1); } static void _fx_copy_T2SLS(struct _fx_T2SLS* src, struct _fx_T2SLS* dst) { fx_copy_str(&src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2SLS(fx_str_t* t0, struct _fx_LS_data_t* t1, struct _fx_T2SLS* fx_result) { fx_copy_str(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_Ta2LS(struct _fx_Ta2LS* dst) { _fx_free_LS(&dst->t0); _fx_free_LS(&dst->t1); } static void _fx_copy_Ta2LS(struct _fx_Ta2LS* src, struct _fx_Ta2LS* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_Ta2LS(struct _fx_LS_data_t* t0, struct _fx_LS_data_t* t1, struct _fx_Ta2LS* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2iLi(struct _fx_T2iLi* dst) { fx_free_list_simple(&dst->t1); } static void _fx_copy_T2iLi(struct _fx_T2iLi* src, struct _fx_T2iLi* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2iLi(int_ t0, struct _fx_Li_data_t* t1, struct _fx_T2iLi* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2iLi(struct 
_fx_LT2iLi_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2iLi, _fx_free_T2iLi); } static int _fx_cons_LT2iLi( struct _fx_T2iLi* hd, struct _fx_LT2iLi_data_t* tl, bool addref_tl, struct _fx_LT2iLi_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2iLi, _fx_copy_T2iLi); } static void _fx_free_rLi(struct _fx_rLi_data_t** dst) { FX_FREE_REF_IMPL(_fx_rLi, fx_free_list_simple); } static int _fx_make_rLi(struct _fx_Li_data_t* arg, struct _fx_rLi_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rLi, FX_COPY_PTR); } static void _fx_free_T3BBS(struct _fx_T3BBS* dst) { fx_free_str(&dst->t2); } static void _fx_copy_T3BBS(struct _fx_T3BBS* src, struct _fx_T3BBS* dst) { dst->t0 = src->t0; dst->t1 = src->t1; fx_copy_str(&src->t2, &dst->t2); } static void _fx_make_T3BBS(bool t0, bool t1, fx_str_t* t2, struct _fx_T3BBS* fx_result) { fx_result->t0 = t0; fx_result->t1 = t1; fx_copy_str(t2, &fx_result->t2); } static void _fx_free_T2LR17K_form__kmodule_tB(struct _fx_T2LR17K_form__kmodule_tB* dst) { _fx_free_LR17K_form__kmodule_t(&dst->t0); } static void _fx_copy_T2LR17K_form__kmodule_tB( struct _fx_T2LR17K_form__kmodule_tB* src, struct _fx_T2LR17K_form__kmodule_tB* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LR17K_form__kmodule_tB( struct _fx_LR17K_form__kmodule_t_data_t* t0, bool t1, struct _fx_T2LR17K_form__kmodule_tB* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_Ta9S(struct _fx_Ta9S* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); fx_free_str(&dst->t2); fx_free_str(&dst->t3); fx_free_str(&dst->t4); fx_free_str(&dst->t5); fx_free_str(&dst->t6); fx_free_str(&dst->t7); fx_free_str(&dst->t8); } static void _fx_copy_Ta9S(struct _fx_Ta9S* src, struct _fx_Ta9S* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); fx_copy_str(&src->t2, &dst->t2); fx_copy_str(&src->t3, &dst->t3); fx_copy_str(&src->t4, &dst->t4); fx_copy_str(&src->t5, &dst->t5); fx_copy_str(&src->t6, &dst->t6); fx_copy_str(&src->t7, 
&dst->t7); fx_copy_str(&src->t8, &dst->t8); } static void _fx_make_Ta9S( fx_str_t* t0, fx_str_t* t1, fx_str_t* t2, fx_str_t* t3, fx_str_t* t4, fx_str_t* t5, fx_str_t* t6, fx_str_t* t7, fx_str_t* t8, struct _fx_Ta9S* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); fx_copy_str(t2, &fx_result->t2); fx_copy_str(t3, &fx_result->t3); fx_copy_str(t4, &fx_result->t4); fx_copy_str(t5, &fx_result->t5); fx_copy_str(t6, &fx_result->t6); fx_copy_str(t7, &fx_result->t7); fx_copy_str(t8, &fx_result->t8); } static void _fx_free_Ta2S(struct _fx_Ta2S* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); } static void _fx_copy_Ta2S(struct _fx_Ta2S* src, struct _fx_Ta2S* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); } static void _fx_make_Ta2S(fx_str_t* t0, fx_str_t* t1, struct _fx_Ta2S* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); } static void _fx_free_Ta3S(struct _fx_Ta3S* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); fx_free_str(&dst->t2); } static void _fx_copy_Ta3S(struct _fx_Ta3S* src, struct _fx_Ta3S* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); fx_copy_str(&src->t2, &dst->t2); } static void _fx_make_Ta3S(fx_str_t* t0, fx_str_t* t1, fx_str_t* t2, struct _fx_Ta3S* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); fx_copy_str(t2, &fx_result->t2); } static void _fx_free_Ta4S(struct _fx_Ta4S* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); fx_free_str(&dst->t2); fx_free_str(&dst->t3); } static void _fx_copy_Ta4S(struct _fx_Ta4S* src, struct _fx_Ta4S* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); fx_copy_str(&src->t2, &dst->t2); fx_copy_str(&src->t3, &dst->t3); } static void _fx_make_Ta4S(fx_str_t* t0, fx_str_t* t1, fx_str_t* t2, fx_str_t* t3, struct _fx_Ta4S* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); fx_copy_str(t2, &fx_result->t2); fx_copy_str(t3, 
&fx_result->t3); } static void _fx_free_T5BBLSBS(struct _fx_T5BBLSBS* dst) { _fx_free_LS(&dst->t2); fx_free_str(&dst->t4); } static void _fx_copy_T5BBLSBS(struct _fx_T5BBLSBS* src, struct _fx_T5BBLSBS* dst) { dst->t0 = src->t0; dst->t1 = src->t1; FX_COPY_PTR(src->t2, &dst->t2); dst->t3 = src->t3; fx_copy_str(&src->t4, &dst->t4); } static void _fx_make_T5BBLSBS(bool t0, bool t1, struct _fx_LS_data_t* t2, bool t3, fx_str_t* t4, struct _fx_T5BBLSBS* fx_result) { fx_result->t0 = t0; fx_result->t1 = t1; FX_COPY_PTR(t2, &fx_result->t2); fx_result->t3 = t3; fx_copy_str(t4, &fx_result->t4); } static void _fx_free_T5BBLSBLS(struct _fx_T5BBLSBLS* dst) { _fx_free_LS(&dst->t2); _fx_free_LS(&dst->t4); } static void _fx_copy_T5BBLSBLS(struct _fx_T5BBLSBLS* src, struct _fx_T5BBLSBLS* dst) { dst->t0 = src->t0; dst->t1 = src->t1; FX_COPY_PTR(src->t2, &dst->t2); dst->t3 = src->t3; FX_COPY_PTR(src->t4, &dst->t4); } static void _fx_make_T5BBLSBLS( bool t0, bool t1, struct _fx_LS_data_t* t2, bool t3, struct _fx_LS_data_t* t4, struct _fx_T5BBLSBLS* fx_result) { fx_result->t0 = t0; fx_result->t1 = t1; FX_COPY_PTR(t2, &fx_result->t2); fx_result->t3 = t3; FX_COPY_PTR(t4, &fx_result->t4); } static void _fx_free_T2LR17C_form__cmodule_tB(struct _fx_T2LR17C_form__cmodule_tB* dst) { _fx_free_LR17C_form__cmodule_t(&dst->t0); } static void _fx_copy_T2LR17C_form__cmodule_tB( struct _fx_T2LR17C_form__cmodule_tB* src, struct _fx_T2LR17C_form__cmodule_tB* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LR17C_form__cmodule_tB( struct _fx_LR17C_form__cmodule_t_data_t* t0, bool t1, struct _fx_T2LR17C_form__cmodule_tB* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = t1; } _fx_N14Lexer__token_t _fx_g14Compiler__FROM = { 20 }; _fx_N14Lexer__token_t _fx_g19Compiler__SEMICOLON = { 59 }; _fx_N14Lexer__token_t _fx_g19Compiler__PP_DEFINE = { 107 }; int _FX_EXN_E30Compiler__CumulativeParseError = 0; _fx_N20Compiler__msgcolor_t _fx_g16Compiler__MsgRed = { 1 }; 
_fx_N20Compiler__msgcolor_t _fx_g18Compiler__MsgGreen = { 2 }; _fx_N20Compiler__msgcolor_t _fx_g17Compiler__MsgBlue = { 3 }; bool _fx_g21Compiler__iscolorterm; fx_str_t _fx_g15Compiler__error = {0}; FX_EXTERN_C int _fx_F4joinS2SLS(fx_str_t* sep_0, struct _fx_LS_data_t* strs_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int_ _fx_M6StringFM4findi3SSi(fx_str_t* s, fx_str_t* part, int_ from_pos, void* fx_fv); FX_EXTERN_C int _fx_M3SysFM9colortermB0(bool* fx_result, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_R18Options__options_t _fx_g12Options__opt) FX_EXTERN_C int _fx_M8FilenameFM8basenameS1S(fx_str_t* path_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8FilenameFM16remove_extensionS1S(fx_str_t* path_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C bool _fx_F6__eq__B2SS(fx_str_t* a, fx_str_t* b, void* fx_fv); FX_EXTERN_C void _fx_M5LexerFM5IDENTN14Lexer__token_t2BS(bool arg0, fx_str_t* arg1, struct _fx_N14Lexer__token_t* fx_result); FX_EXTERN_C void _fx_M5LexerFM6IMPORTN14Lexer__token_t1B(bool arg0, struct _fx_N14Lexer__token_t* fx_result); FX_EXTERN_C void _fx_M5LexerFM4STARN14Lexer__token_t1B(bool arg0, struct _fx_N14Lexer__token_t* fx_result); FX_EXTERN_C void _fx_M3AstFM7LitBoolN10Ast__lit_t1B(bool arg0, struct _fx_N10Ast__lit_t* fx_result); FX_EXTERN_C void _fx_M3AstFM6LitIntN10Ast__lit_t1l(int64_t arg0, struct _fx_N10Ast__lit_t* fx_result); FX_EXTERN_C void _fx_M3AstFM9LitStringN10Ast__lit_t1S(fx_str_t* arg0, struct _fx_N10Ast__lit_t* fx_result); FX_EXTERN_C void _fx_M5LexerFM7LITERALN14Lexer__token_t1N10Ast__lit_t( struct _fx_N10Ast__lit_t* arg0, struct _fx_N14Lexer__token_t* fx_result); FX_EXTERN_C int _fx_M3SysFM7getpathLS1S(fx_str_t* name_0, struct _fx_LS_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8FilenameFM6getcwdS0(fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_LS_data_t* _fx_g9Sys__argv) FX_EXTERN_C int _fx_M8FilenameFM9normalizeS2SS(fx_str_t* dir_0, fx_str_t* fname_0, fx_str_t* fx_result, void* fx_fv); 
FX_EXTERN_C int _fx_M8FilenameFM7dirnameS1S(fx_str_t* path_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(int_ _fx_g15__ficus_major__) FX_EXTERN_C int _fx_F6stringS1i(int_ a, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(int_ _fx_g15__ficus_minor__) FX_EXTERN_C int _fx_M8FilenameFM6existsB1S(fx_str_t* name, bool* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM6get_idRM4id_t1S(fx_str_t* s_0, struct _fx_R9Ast__id_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM11find_modulei2RM4id_tS( struct _fx_R9Ast__id_t* mname_0, fx_str_t* mfname_0, int_* fx_result, void* fx_fv); FX_EXTERN_C_VAL(fx_arr_t _fx_g16Ast__all_modules) FX_EXTERN_C int _fx_M6ParserFM5parseB3iLN14Lexer__token_tLS( int_ m_idx_0, struct _fx_LN14Lexer__token_t_data_t* preamble_0, struct _fx_LS_data_t* inc_dirs_0, bool* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM10get_moduleN16Ast__defmodule_t1i( int_ m_0, struct _fx_N16Ast__defmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C_VAL(int _FX_EXN_E22LexerUtils__LexerError) FX_EXTERN_C void _fx_F12print_stringv1S(fx_str_t* a, void* fx_fv); FX_EXTERN_C_VAL(int _FX_EXN_E18Parser__ParseError) FX_EXTERN_C int _fx_M3AstFM6stringS1RM5loc_t(struct _fx_R10Ast__loc_t* loc_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_F6stringS1E(fx_exn_t* a, fx_str_t* fx_result, void* fx_fv); static int _fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi( int_ i_0, struct _fx_Li_data_t* visited_0, fx_arr_t* graph_0, fx_arr_t* processed_0, struct _fx_rLi_data_t* result_ref_0, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM15get_module_nameRM4id_t1i(int_ m_0, struct _fx_R9Ast__id_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM2ppS1RM4id_t(struct _fx_R9Ast__id_t* i_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_F9make_FailE1S(fx_str_t* arg0, fx_exn_t* fx_result); FX_EXTERN_C int _fx_M3SysFM5mkdirB2Si(fx_str_t* name, int_ permissions, bool* fx_result, void* fx_fv); FX_EXTERN_C_VAL(bool _fx_g10Sys__win32) FX_EXTERN_C int 
_fx_M8K_mangleFM12mangle_mnameS1S(fx_str_t* m_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M4K_ppFM16pp_top_to_stringS1LN14K_form__kexp_t( struct _fx_LN14K_form__kexp_t_data_t* code_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M4FileFM9read_utf8S1S(fx_str_t* fname, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(int FX_EXN_IOError) FX_EXTERN_C_VAL(int FX_EXN_FileOpenError) FX_EXTERN_C int _fx_M4FileFM10write_utf8v2SS(fx_str_t* fname, fx_str_t* text, void* fx_fv); FX_EXTERN_C int _fx_M3SysFM6removev1S(fx_str_t* name, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM10pr_verbosev1S(fx_str_t* str_0, void* fx_fv); FX_EXTERN_C int _fx_M6K_formFM9KExpCCodeN14K_form__kexp_t2ST2N14K_form__ktyp_tR10Ast__loc_t( fx_str_t* arg0, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* arg1, struct _fx_N14K_form__kexp_t_data_t** fx_result); FX_EXTERN_C_VAL(struct _fx_LE_data_t* _fx_g21Ast__all_compile_errs) FX_EXTERN_C int _fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, bool initial_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M10K_annotateFM14annotate_typesLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M13K_copy_n_skipFM9copy_someLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M15K_remove_unusedFM21remove_unused_by_mainLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_mangleFM10mangle_allLR17K_form__kmodule_t2LR17K_form__kmodule_tB( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, bool final_mode_0, struct _fx_LR17K_form__kmodule_t_data_t** 
fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_mangleFM13mangle_localsLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_mangleFM12demangle_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M13K_lift_simpleFM4liftLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M9K_tailrecFM17tailrec2loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M10K_loop_invFM18move_loop_invs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M13K_optim_matopFM13optimize_gemmLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_inlineFM11inline_someLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M9K_flattenFM11flatten_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M12K_fuse_loopsFM14fuse_loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int 
_fx_M10K_fast_idxFM23optimize_idx_checks_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M15K_cfold_dealiasFM13cfold_dealiasLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M18K_nothrow_wrappersFM25make_wrappers_for_nothrowLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M10K_freevarsFM21mutable_freevars2refsLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M11K_declosureFM13declosure_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6K_liftFM8lift_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_inlineFM24find_recursive_funcs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_FPS1B _fx_g11Sys__osname) FX_EXTERN_C_VAL(bool _fx_g9Sys__unix) FX_EXTERN_C int _fx_M3SysFM6getenvS1S(fx_str_t* name, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_R9Ast__id_t _fx_g9Ast__noid) FX_EXTERN_C int _fx_M4C_ppFM20pprint_top_to_stringS1LN15C_form__cstmt_t( struct _fx_LN15C_form__cstmt_t_data_t* code_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3SysFM7commandi1S(fx_str_t* cmd, int_* fx_result, void* fx_fv); FX_EXTERN_C 
/* (continued) tail of the run of FX_EXTERN_C prototypes declared for use in
   this translation unit: AST init/typecheck/pretty-print entry points and
   the K-form / C-form pipeline stages invoked by this Compiler module. */
int _fx_M3AstFM8init_allv0(void* fx_fv); FX_EXTERN_C_VAL(fx_exn_t _fx_E30Compiler__CumulativeParseErrorv) FX_EXTERN_C_VAL(struct _fx_Li_data_t* _fx_g23Ast__all_modules_sorted) FX_EXTERN_C int _fx_M6Ast_ppFM10pprint_modv1N16Ast__defmodule_t(struct _fx_N16Ast__defmodule_t_data_t* dm_0, void* fx_fv); FX_EXTERN_C int _fx_M13Ast_typecheckFM9check_modv1i(int_ m_idx_0, void* fx_fv); FX_EXTERN_C int _fx_M6K_formFM13init_all_idksv0(void* fx_fv); FX_EXTERN_C int _fx_M11K_normalizeFM21normalize_all_modulesLR17K_form__kmodule_t1Li( struct _fx_Li_data_t* modules_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M4K_ppFM8pp_kmodsv1LR17K_form__kmodule_t(struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, void* fx_fv); FX_EXTERN_C int _fx_M6C_formFM13init_all_idcsv0(void* fx_fv); FX_EXTERN_C int _fx_M9C_gen_stdFM14init_std_namesv0(void* fx_fv); FX_EXTERN_C int _fx_M10C_gen_codeFM13gen_ccode_allLR17C_form__cmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17C_form__cmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M20C_post_rename_localsFM13rename_localsLR17C_form__cmodule_t1LR17C_form__cmodule_t( struct _fx_LR17C_form__cmodule_t_data_t* cmods_0, struct _fx_LR17C_form__cmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M19C_post_adjust_declsFM12adjust_declsR17C_form__cmodule_t1R17C_form__cmodule_t( struct _fx_R17C_form__cmodule_t* cmod_0, struct _fx_R17C_form__cmodule_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM17print_compile_errv1E(fx_exn_t* err_0, void* fx_fv); FX_EXTERN_C_VAL(int _FX_EXN_E4Fail) FX_EXTERN_C_VAL(int _FX_EXN_E17Ast__CompileError)
/* Zero-initialized registration slots for the CumulativeParseError
   exception declared by this module. */
fx_exn_info_t _fx_E30Compiler__CumulativeParseError_info = {0}; fx_exn_t _fx_E30Compiler__CumulativeParseErrorv = {0};
/* Compiler.length: thin wrappers over fx_list_length for exception-list and
   string-list arguments (the second body closes on the next line). */
FX_EXTERN_C int_ _fx_M8CompilerFM6lengthi1LE(struct _fx_LE_data_t* l, void* fx_fv) { return fx_list_length(l); } FX_EXTERN_C int_ _fx_M8CompilerFM6lengthi1LS(struct _fx_LS_data_t* l, void* fx_fv) {
/* (continued) closing of _fx_M8CompilerFM6lengthi1LS from the previous line. */
return fx_list_length(l); }
/* Compiler.link2: concatenation of two token lists / string lists via the
   runtime helper fx_link_lists. */
FX_EXTERN_C void _fx_M8CompilerFM5link2LN14Lexer__token_t2LN14Lexer__token_tLN14Lexer__token_t( struct _fx_LN14Lexer__token_t_data_t* l1, struct _fx_LN14Lexer__token_t_data_t* l2, struct _fx_LN14Lexer__token_t_data_t** fx_result, void* fx_fv) { fx_link_lists(l1, l2, fx_result); } FX_EXTERN_C void _fx_M8CompilerFM5link2LS2LSLS( struct _fx_LS_data_t* l1, struct _fx_LS_data_t* l2, struct _fx_LS_data_t** fx_result, void* fx_fv) { fx_link_lists(l1, l2, fx_result); }
/* Compiler.__add__ on string lists: if either operand is null the other is
   shared via FX_COPY_PTR; otherwise the left list is re-consed node by node
   into v_0 and linked in front of the right one.  FX_CALL / FX_CHECK_EXN
   route failures to the local catch labels; the temporary v_0 is freed at
   the end if still non-null. */
FX_EXTERN_C int _fx_M8CompilerFM7__add__LS2LSLS( struct _fx_LS_data_t* l1_0, struct _fx_LS_data_t* l2_0, struct _fx_LS_data_t** fx_result, void* fx_fv) { int fx_status = 0; if (l1_0 == 0) { FX_COPY_PTR(l2_0, fx_result); } else if (l2_0 == 0) { FX_COPY_PTR(l1_0, fx_result); } else { _fx_LS v_0 = 0; _fx_LS lstend_0 = 0; _fx_LS lst_0 = l1_0; for (; lst_0; lst_0 = lst_0->tl) { fx_str_t* x_0 = &lst_0->hd; _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(x_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(v_0, lstend_0, node_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_catch_1); } _fx_M8CompilerFM5link2LS2LSLS(v_0, l2_0, fx_result, 0); _fx_catch_1: ; if (v_0) { _fx_free_LS(&v_0); } } return fx_status; }
/* Compiler.string on a string: returns a copy of the argument. */
FX_EXTERN_C int _fx_M8CompilerFM6stringS1S(fx_str_t* a_0, fx_str_t* fx_result, void* fx_fv) { int fx_status = 0; fx_copy_str(a_0, fx_result); return fx_status; }
/* Compiler.array: converts a list of C-form modules into a 1-D runtime
   array: allocates it with the element free/copy hooks registered, then
   copies each node into place (copy loop continues on the next line). */
FX_EXTERN_C int _fx_M8CompilerFM5arrayA1R17C_form__cmodule_t1LR17C_form__cmodule_t( struct _fx_LR17C_form__cmodule_t_data_t* l_0, fx_arr_t* fx_result, void* fx_fv) { int fx_status = 0; _fx_R17C_form__cmodule_t* dstptr_0 = 0; _fx_LR17C_form__cmodule_t lst_0 = l_0; int_ len_0 = fx_list_length(lst_0); { const int_ shape_0[] = { len_0 }; FX_CALL( fx_make_arr(1, shape_0, sizeof(_fx_R17C_form__cmodule_t), (fx_free_t)_fx_free_R17C_form__cmodule_t, (fx_copy_t)_fx_copy_R17C_form__cmodule_t, 0, fx_result), _fx_cleanup); } dstptr_0 = (_fx_R17C_form__cmodule_t*)fx_result->data; for (; lst_0; lst_0 = lst_0->tl, dstptr_0++) { _fx_R17C_form__cmodule_t* x_0 =
/* (continued) end of Compiler.array's copy loop and its cleanup label. */
&lst_0->hd; _fx_copy_R17C_form__cmodule_t(x_0, dstptr_0); } _fx_cleanup: ; return fx_status; }
/* Compiler.rev on a string list: fold-with-cons reversal.  Each element is
   consed onto the accumulator __fold_result___0, which is swapped through
   r_0 via FX_COPY_PTR; temporaries are released on the catch/cleanup paths. */
FX_EXTERN_C int _fx_M8CompilerFM3revLS1LS(struct _fx_LS_data_t* l_0, struct _fx_LS_data_t** fx_result, void* fx_fv) { _fx_LS __fold_result___0 = 0; int fx_status = 0; _fx_LS lst_0 = l_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_LS r_0 = 0; fx_str_t* a_0 = &lst_0->hd; FX_COPY_PTR(__fold_result___0, &r_0); FX_CALL(_fx_cons_LS(a_0, r_0, false, &r_0), _fx_catch_0); _fx_free_LS(&__fold_result___0); FX_COPY_PTR(r_0, &__fold_result___0); _fx_catch_0: ; if (r_0) { _fx_free_LS(&r_0); } FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(__fold_result___0, fx_result); _fx_cleanup: ; if (__fold_result___0) { _fx_free_LS(&__fold_result___0); } return fx_status; }
/* Compiler.join: delegates to the runtime's string-join helper. */
FX_EXTERN_C int _fx_M8CompilerFM4joinS2SLS(fx_str_t* sep_0, struct _fx_LS_data_t* strs_0, fx_str_t* fx_result, void* fx_fv) { int fx_status = 0; FX_CALL(_fx_F4joinS2SLS(sep_0, strs_0, fx_result, 0), _fx_cleanup); _fx_cleanup: ; return fx_status; }
/* Compiler.contains: true iff String.find locates substr (index >= 0). */
FX_EXTERN_C int _fx_M8CompilerFM8containsB2SS(fx_str_t* s_0, fx_str_t* substr_0, bool* fx_result, void* fx_fv) { int fx_status = 0; int_ v_0 = _fx_M6StringFM4findi3SSi(s_0, substr_0, 0, 0); *fx_result = v_0 >= 0; return fx_status; }
/* Compiler.clrmsg: when the terminal supports color (global
   _fx_g21Compiler__iscolorterm), prefixes msg with a per-color escape string
   chosen by the msgcolor tag (1=red, 2=green, 3=blue per the MsgRed/MsgGreen/
   MsgBlue globals earlier in this file) and joins escape + msg + trailer.
   NOTE(review): all four FX_MAKE_STR("") color literals AND the trailing
   reset literal slit_4 are EMPTY here — the ANSI escape sequences appear to
   have been lost from this copy of the file (non-printable ESC bytes
   stripped?).  As written every branch yields identical uncolored output;
   verify against the original generated source before relying on this. */
FX_EXTERN_C int _fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS( struct _fx_N20Compiler__msgcolor_t* clr_0, fx_str_t* msg_0, fx_str_t* fx_result, void* fx_fv) { fx_str_t esc_0 = {0}; int fx_status = 0; if (_fx_g21Compiler__iscolorterm) { int tag_0 = clr_0->tag; if (tag_0 == 1) { fx_str_t slit_0 = FX_MAKE_STR(""); fx_copy_str(&slit_0, &esc_0); } else if (tag_0 == 2) { fx_str_t slit_1 = FX_MAKE_STR(""); fx_copy_str(&slit_1, &esc_0); } else if (tag_0 == 3) { fx_str_t slit_2 = FX_MAKE_STR(""); fx_copy_str(&slit_2, &esc_0); } else { fx_str_t slit_3 = FX_MAKE_STR(""); fx_copy_str(&slit_3, &esc_0); } FX_CHECK_EXN(_fx_cleanup); fx_str_t slit_4 = FX_MAKE_STR(""); { const fx_str_t strs_0[] = { esc_0, *msg_0, slit_4 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, fx_result),
_fx_cleanup); } } else { fx_copy_str(msg_0, fx_result); } _fx_cleanup: ; FX_FREE_STR(&esc_0); return fx_status; } FX_EXTERN_C int _fx_M8CompilerFM12get_preambleLN14Lexer__token_t1S( fx_str_t* mfname_0, struct _fx_LN14Lexer__token_t_data_t** fx_result, void* fx_fv) { _fx_LN14Lexer__token_t preamble_0 = 0; fx_str_t v_0 = {0}; fx_str_t bare_name_0 = {0}; _fx_T2LN14Lexer__token_tB __fold_result___0 = {0}; _fx_T2SB v_1 = {0}; _fx_T2SB v_2 = {0}; _fx_T2SB v_3 = {0}; _fx_T2SB v_4 = {0}; _fx_T2SB v_5 = {0}; _fx_T2SB v_6 = {0}; _fx_T2SB v_7 = {0}; _fx_LT2SB v_8 = 0; _fx_T2LN14Lexer__token_tB v_9 = {0}; _fx_LN14Lexer__token_t __fold_result___1 = 0; _fx_LT2SN17Options__optval_t v_10 = 0; int fx_status = 0; if (_fx_g12Options__opt.use_preamble) { FX_CALL(_fx_M8FilenameFM8basenameS1S(mfname_0, &v_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM16remove_extensionS1S(&v_0, &bare_name_0, 0), _fx_cleanup); _fx_make_T2LN14Lexer__token_tB(0, false, &__fold_result___0); fx_str_t slit_0 = FX_MAKE_STR("Builtins"); _fx_make_T2SB(&slit_0, true, &v_1); fx_str_t slit_1 = FX_MAKE_STR("Math"); _fx_make_T2SB(&slit_1, true, &v_2); fx_str_t slit_2 = FX_MAKE_STR("Array"); _fx_make_T2SB(&slit_2, true, &v_3); fx_str_t slit_3 = FX_MAKE_STR("List"); _fx_make_T2SB(&slit_3, false, &v_4); fx_str_t slit_4 = FX_MAKE_STR("Vector"); _fx_make_T2SB(&slit_4, false, &v_5); fx_str_t slit_5 = FX_MAKE_STR("Char"); _fx_make_T2SB(&slit_5, false, &v_6); fx_str_t slit_6 = FX_MAKE_STR("String"); _fx_make_T2SB(&slit_6, false, &v_7); FX_CALL(_fx_cons_LT2SB(&v_7, 0, true, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_6, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_5, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_4, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_3, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_2, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_1, v_8, false, &v_8), _fx_cleanup); _fx_LT2SB lst_0 = v_8; for (; lst_0; lst_0 = lst_0->tl) { fx_str_t 
mname_0 = {0}; _fx_T2LN14Lexer__token_tB v_11 = {0}; _fx_LN14Lexer__token_t preamble_1 = 0; _fx_T2LN14Lexer__token_tB v_12 = {0}; _fx_N14Lexer__token_t v_13 = {0}; _fx_N14Lexer__token_t v_14 = {0}; _fx_N14Lexer__token_t v_15 = {0}; _fx_LN14Lexer__token_t v_16 = 0; _fx_LN14Lexer__token_t v_17 = 0; _fx_N14Lexer__token_t v_18 = {0}; _fx_N14Lexer__token_t v_19 = {0}; _fx_LN14Lexer__token_t v_20 = 0; _fx_LN14Lexer__token_t v_21 = 0; _fx_T2SB* __pat___0 = &lst_0->hd; fx_copy_str(&__pat___0->t0, &mname_0); _fx_copy_T2LN14Lexer__token_tB(&__fold_result___0, &v_11); FX_COPY_PTR(v_11.t0, &preamble_1); bool found_0 = v_11.t1; if (found_0) { _fx_make_T2LN14Lexer__token_tB(preamble_1, found_0, &v_12); } else { bool v_22 = _fx_F6__eq__B2SS(&bare_name_0, &mname_0, 0); if (v_22) { _fx_make_T2LN14Lexer__token_tB(preamble_1, true, &v_12); } else if (__pat___0->t1) { _fx_M5LexerFM5IDENTN14Lexer__token_t2BS(true, &mname_0, &v_13); _fx_M5LexerFM6IMPORTN14Lexer__token_t1B(false, &v_14); _fx_M5LexerFM4STARN14Lexer__token_t1B(true, &v_15); FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g19Compiler__SEMICOLON, 0, true, &v_16), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_15, v_16, false, &v_16), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_14, v_16, false, &v_16), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_13, v_16, false, &v_16), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g14Compiler__FROM, v_16, false, &v_16), _fx_catch_4); if (preamble_1 == 0) { FX_COPY_PTR(v_16, &v_17); } else if (v_16 == 0) { FX_COPY_PTR(preamble_1, &v_17); } else { _fx_LN14Lexer__token_t v_23 = 0; _fx_LN14Lexer__token_t lstend_0 = 0; _fx_LN14Lexer__token_t lst_1 = preamble_1; for (; lst_1; lst_1 = lst_1->tl) { _fx_N14Lexer__token_t* x_0 = &lst_1->hd; _fx_LN14Lexer__token_t node_0 = 0; FX_CALL(_fx_cons_LN14Lexer__token_t(x_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(v_23, lstend_0, node_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_catch_1); } 
_fx_M8CompilerFM5link2LN14Lexer__token_t2LN14Lexer__token_tLN14Lexer__token_t(v_23, v_16, &v_17, 0); _fx_catch_1: ; if (v_23) { _fx_free_LN14Lexer__token_t(&v_23); } } FX_CHECK_EXN(_fx_catch_4); _fx_make_T2LN14Lexer__token_tB(v_17, false, &v_12); } else { _fx_M5LexerFM6IMPORTN14Lexer__token_t1B(true, &v_18); _fx_M5LexerFM5IDENTN14Lexer__token_t2BS(true, &mname_0, &v_19); FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g19Compiler__SEMICOLON, 0, true, &v_20), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_19, v_20, false, &v_20), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_18, v_20, false, &v_20), _fx_catch_4); if (preamble_1 == 0) { FX_COPY_PTR(v_20, &v_21); } else if (v_20 == 0) { FX_COPY_PTR(preamble_1, &v_21); } else { _fx_LN14Lexer__token_t v_24 = 0; _fx_LN14Lexer__token_t lstend_1 = 0; _fx_LN14Lexer__token_t lst_2 = preamble_1; for (; lst_2; lst_2 = lst_2->tl) { _fx_N14Lexer__token_t* x_1 = &lst_2->hd; _fx_LN14Lexer__token_t node_1 = 0; FX_CALL(_fx_cons_LN14Lexer__token_t(x_1, 0, false, &node_1), _fx_catch_2); FX_LIST_APPEND(v_24, lstend_1, node_1); _fx_catch_2: ; FX_CHECK_EXN(_fx_catch_3); } _fx_M8CompilerFM5link2LN14Lexer__token_t2LN14Lexer__token_tLN14Lexer__token_t(v_24, v_20, &v_21, 0); _fx_catch_3: ; if (v_24) { _fx_free_LN14Lexer__token_t(&v_24); } } FX_CHECK_EXN(_fx_catch_4); _fx_make_T2LN14Lexer__token_tB(v_21, false, &v_12); } } _fx_free_T2LN14Lexer__token_tB(&__fold_result___0); _fx_copy_T2LN14Lexer__token_tB(&v_12, &__fold_result___0); _fx_catch_4: ; if (v_21) { _fx_free_LN14Lexer__token_t(&v_21); } if (v_20) { _fx_free_LN14Lexer__token_t(&v_20); } _fx_free_N14Lexer__token_t(&v_19); _fx_free_N14Lexer__token_t(&v_18); if (v_17) { _fx_free_LN14Lexer__token_t(&v_17); } if (v_16) { _fx_free_LN14Lexer__token_t(&v_16); } _fx_free_N14Lexer__token_t(&v_15); _fx_free_N14Lexer__token_t(&v_14); _fx_free_N14Lexer__token_t(&v_13); _fx_free_T2LN14Lexer__token_tB(&v_12); if (preamble_1) { _fx_free_LN14Lexer__token_t(&preamble_1); } 
_fx_free_T2LN14Lexer__token_tB(&v_11); FX_FREE_STR(&mname_0); FX_CHECK_EXN(_fx_cleanup); } _fx_copy_T2LN14Lexer__token_tB(&__fold_result___0, &v_9); FX_COPY_PTR(v_9.t0, &preamble_0); } FX_COPY_PTR(preamble_0, &__fold_result___1); FX_COPY_PTR(_fx_g12Options__opt.defines, &v_10); _fx_LT2SN17Options__optval_t lst_3 = v_10; for (; lst_3; lst_3 = lst_3->tl) { fx_str_t n_0 = {0}; _fx_N17Options__optval_t v_25 = {0}; _fx_LN14Lexer__token_t p_0 = 0; _fx_N10Ast__lit_t v_26 = {0}; _fx_N14Lexer__token_t v_27 = {0}; _fx_N14Lexer__token_t v_28 = {0}; _fx_T2SN17Options__optval_t* __pat___1 = &lst_3->hd; fx_copy_str(&__pat___1->t0, &n_0); _fx_copy_N17Options__optval_t(&__pat___1->t1, &v_25); FX_COPY_PTR(__fold_result___1, &p_0); int tag_0 = v_25.tag; if (tag_0 == 1) { _fx_M3AstFM7LitBoolN10Ast__lit_t1B(v_25.u.OptBool, &v_26); } else if (tag_0 == 2) { _fx_M3AstFM6LitIntN10Ast__lit_t1l((int64_t)v_25.u.OptInt, &v_26); } else if (tag_0 == 3) { _fx_M3AstFM9LitStringN10Ast__lit_t1S(&v_25.u.OptString, &v_26); } else { FX_FAST_THROW(FX_EXN_NoMatchError, _fx_catch_5); } FX_CHECK_EXN(_fx_catch_5); _fx_M5LexerFM5IDENTN14Lexer__token_t2BS(true, &n_0, &v_27); _fx_M5LexerFM7LITERALN14Lexer__token_t1N10Ast__lit_t(&v_26, &v_28); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_28, p_0, false, &p_0), _fx_catch_5); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_27, p_0, false, &p_0), _fx_catch_5); FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g19Compiler__PP_DEFINE, p_0, false, &p_0), _fx_catch_5); _fx_free_LN14Lexer__token_t(&__fold_result___1); FX_COPY_PTR(p_0, &__fold_result___1); _fx_catch_5: ; _fx_free_N14Lexer__token_t(&v_28); _fx_free_N14Lexer__token_t(&v_27); _fx_free_N10Ast__lit_t(&v_26); if (p_0) { _fx_free_LN14Lexer__token_t(&p_0); } _fx_free_N17Options__optval_t(&v_25); FX_FREE_STR(&n_0); FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(__fold_result___1, fx_result); _fx_cleanup: ; if (preamble_0) { _fx_free_LN14Lexer__token_t(&preamble_0); } FX_FREE_STR(&v_0); FX_FREE_STR(&bare_name_0); 
/* NOTE(review): machine-generated C emitted by the ficus compiler; comments
   below were added for review, every code token is unchanged. The first
   statements finish the cleanup epilogue of a function that begins before
   this chunk (presumably Compiler.get_preamble — confirm in full file). */
_fx_free_T2LN14Lexer__token_tB(&__fold_result___0);
_fx_free_T2SB(&v_1); _fx_free_T2SB(&v_2); _fx_free_T2SB(&v_3); _fx_free_T2SB(&v_4);
_fx_free_T2SB(&v_5); _fx_free_T2SB(&v_6); _fx_free_T2SB(&v_7);
if (v_8) { _fx_free_LT2SB(&v_8); }
_fx_free_T2LN14Lexer__token_tB(&v_9);
if (__fold_result___1) { _fx_free_LN14Lexer__token_t(&__fold_result___1); }
if (v_10) { _fx_free_LT2SN17Options__optval_t(&v_10); }
return fx_status; }

/* Compiler.find_ficus_dirs: locate the ficus standard-library directory.
   Result is a (found_dir, updated_ficus_path) pair written to *fx_result;
   `found_0` stays "" when no candidate directory contains both Builtins.fx
   and ../runtime/ficus/ficus.h. */
FX_EXTERN_C int _fx_M8CompilerFM15find_ficus_dirsT2SLS0(struct _fx_T2SLS* fx_result, void* fx_fv) {
    _fx_LS ficus_path_0 = 0;
    fx_str_t v_0 = {0}; fx_str_t v_1 = {0}; fx_str_t v_2 = {0};
    fx_str_t ficus_app_path_0 = {0};
    fx_str_t v_3 = {0};
    fx_str_t ficus_pp_path_0 = {0};
    fx_str_t v_4 = {0}; fx_str_t v_5 = {0}; fx_str_t v_6 = {0}; fx_str_t v_7 = {0};
    fx_str_t ficus_inst_path_0 = {0};
    fx_str_t v_8 = {0}; fx_str_t v_9 = {0}; fx_str_t v_10 = {0}; fx_str_t v_11 = {0};
    _fx_LS v_12 = 0;
    _fx_LS std_ficus_path_0 = 0;
    _fx_Ta2LS v_13 = {0};
    _fx_LS search_path_0 = 0;
    fx_str_t found_0 = {0};
    int fx_status = 0;
    /* user-provided search path taken from the FICUS_PATH environment variable */
    fx_str_t slit_0 = FX_MAKE_STR("FICUS_PATH");
    FX_CALL(_fx_M3SysFM7getpathLS1S(&slit_0, &ficus_path_0, 0), _fx_cleanup);
    FX_CALL(_fx_M8FilenameFM6getcwdS0(&v_0, 0), _fx_cleanup);
    /* argv[0], normalized against cwd -> absolute path of the running binary */
    if (_fx_g9Sys__argv != 0) { fx_copy_str(&_fx_g9Sys__argv->hd, &v_1); }
    else { FX_FAST_THROW(FX_EXN_NullListError, _fx_cleanup); }
    FX_CHECK_EXN(_fx_cleanup);
    FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_0, &v_1, &v_2, 0), _fx_cleanup);
    FX_CALL(_fx_M8FilenameFM7dirnameS1S(&v_2, &ficus_app_path_0, 0), _fx_cleanup);
    FX_CALL(_fx_M8FilenameFM7dirnameS1S(&ficus_app_path_0, &v_3, 0), _fx_cleanup);
    FX_CALL(_fx_M8FilenameFM7dirnameS1S(&v_3, &ficus_pp_path_0, 0), _fx_cleanup);
    FX_CALL(_fx_M8FilenameFM7dirnameS1S(&ficus_app_path_0, &v_4, 0), _fx_cleanup);
    /* build "lib/ficus-<major>.<minor>" from the compiler version globals */
    FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_major__, &v_5, 0), _fx_cleanup);
    FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_minor__, &v_6, 0), _fx_cleanup);
    fx_str_t slit_1 = FX_MAKE_STR("lib/ficus-");
    fx_str_t slit_2 = FX_MAKE_STR(".");
    { const fx_str_t strs_0[] = { slit_1, v_5, slit_2, v_6 };
      FX_CALL(fx_strjoin(0, 0, 0, strs_0, 4, &v_7), _fx_cleanup); }
    FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_4, &v_7, &ficus_inst_path_0, 0), _fx_cleanup);
    /* three standard candidate "lib" dirs relative to the app/install prefix */
    FX_CALL(_fx_M8FilenameFM7dirnameS1S(&ficus_app_path_0, &v_8, 0), _fx_cleanup);
    fx_str_t slit_3 = FX_MAKE_STR("lib");
    FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_8, &slit_3, &v_9, 0), _fx_cleanup);
    fx_str_t slit_4 = FX_MAKE_STR("lib");
    FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&ficus_pp_path_0, &slit_4, &v_10, 0), _fx_cleanup);
    fx_str_t slit_5 = FX_MAKE_STR("lib");
    FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&ficus_inst_path_0, &slit_5, &v_11, 0), _fx_cleanup);
    FX_CALL(_fx_cons_LS(&v_11, 0, true, &v_12), _fx_cleanup);
    FX_CALL(_fx_cons_LS(&v_10, v_12, false, &v_12), _fx_cleanup);
    FX_CALL(_fx_cons_LS(&v_9, v_12, true, &std_ficus_path_0), _fx_cleanup);
    int_ std_ficus_path_len_0 = _fx_M8CompilerFM6lengthi1LS(std_ficus_path_0, 0);
    /* search_path = std_ficus_path @ ficus_path (generated list concat:
       short-circuit when either side is empty, else copy-and-link) */
    _fx_make_Ta2LS(std_ficus_path_0, ficus_path_0, &v_13);
    if (v_13.t0 == 0) { FX_COPY_PTR(ficus_path_0, &search_path_0); }
    else if (v_13.t1 == 0) { FX_COPY_PTR(std_ficus_path_0, &search_path_0); }
    else {
        _fx_LS v_14 = 0;
        _fx_LS lstend_0 = 0;
        _fx_LS lst_0 = std_ficus_path_0;
        for (; lst_0; lst_0 = lst_0->tl) {
            fx_str_t* x_0 = &lst_0->hd;
            _fx_LS node_0 = 0;
            FX_CALL(_fx_cons_LS(x_0, 0, false, &node_0), _fx_catch_0);
            FX_LIST_APPEND(v_14, lstend_0, node_0);
_fx_catch_0: ;
            FX_CHECK_EXN(_fx_catch_1); }
        _fx_M8CompilerFM5link2LS2LSLS(v_14, ficus_path_0, &search_path_0, 0);
_fx_catch_1: ;
        if (v_14) { _fx_free_LS(&v_14); } }
    FX_CHECK_EXN(_fx_cleanup);
    fx_str_t slit_6 = FX_MAKE_STR("");
    fx_copy_str(&slit_6, &found_0);
    /* probe each candidate dir for Builtins.fx and ../runtime/ficus/ficus.h;
       first hit wins (FX_BREAK) */
    int_ i_0 = 0;
    _fx_LS lst_1 = search_path_0;
    for (; lst_1; lst_1 = lst_1->tl, i_0 += 1) {
        fx_str_t builtins_fx_0 = {0};
        fx_str_t ficus_h_0 = {0};
        fx_str_t v_15 = {0};
        _fx_LS v_16 = 0;
        _fx_Ta2LS v_17 = {0};
        _fx_LS v_18 = 0;
        fx_str_t* d_0 = &lst_1->hd;
        fx_str_t slit_7 = FX_MAKE_STR("Builtins.fx");
        FX_CALL(_fx_M8FilenameFM9normalizeS2SS(d_0, &slit_7, &builtins_fx_0, 0), _fx_catch_4);
        fx_str_t slit_8 = FX_MAKE_STR("../runtime/ficus/ficus.h");
        FX_CALL(_fx_M8FilenameFM9normalizeS2SS(d_0, &slit_8, &ficus_h_0, 0), _fx_catch_4);
        bool v_19;
        bool res_0;
        FX_CALL(_fx_M8FilenameFM6existsB1S(&builtins_fx_0, &res_0, 0), _fx_catch_4);
        if (res_0) { FX_CALL(_fx_M8FilenameFM6existsB1S(&ficus_h_0, &v_19, 0), _fx_catch_4); }
        else { v_19 = false; }
        if (v_19) {
            FX_CALL(_fx_M8FilenameFM7dirnameS1S(d_0, &v_15, 0), _fx_catch_4);
            FX_FREE_STR(&found_0);
            fx_copy_str(&v_15, &found_0);
            /* a hit among the standard candidates gets prepended to ficus_path */
            if (i_0 < std_ficus_path_len_0) {
                FX_CALL(_fx_cons_LS(d_0, 0, true, &v_16), _fx_catch_4);
                _fx_make_Ta2LS(ficus_path_0, v_16, &v_17);
                if (v_17.t0 == 0) { FX_COPY_PTR(v_16, &v_18); }
                else if (v_17.t1 == 0) { FX_COPY_PTR(ficus_path_0, &v_18); }
                else {
                    _fx_LS v_20 = 0;
                    _fx_LS lstend_1 = 0;
                    _fx_LS lst_2 = ficus_path_0;
                    for (; lst_2; lst_2 = lst_2->tl) {
                        fx_str_t* x_1 = &lst_2->hd;
                        _fx_LS node_1 = 0;
                        FX_CALL(_fx_cons_LS(x_1, 0, false, &node_1), _fx_catch_2);
                        FX_LIST_APPEND(v_20, lstend_1, node_1);
_fx_catch_2: ;
                        FX_CHECK_EXN(_fx_catch_3); }
                    _fx_M8CompilerFM5link2LS2LSLS(v_20, v_16, &v_18, 0);
_fx_catch_3: ;
                    if (v_20) { _fx_free_LS(&v_20); } }
                FX_CHECK_EXN(_fx_catch_4);
                _fx_free_LS(&ficus_path_0);
                FX_COPY_PTR(v_18, &ficus_path_0); }
            FX_BREAK(_fx_catch_4); }
_fx_catch_4: ;
        if (v_18) { _fx_free_LS(&v_18); }
        _fx_free_Ta2LS(&v_17);
        if (v_16) { _fx_free_LS(&v_16); }
        FX_FREE_STR(&v_15);
        FX_FREE_STR(&ficus_h_0);
        FX_FREE_STR(&builtins_fx_0);
        FX_CHECK_BREAK();
        FX_CHECK_EXN(_fx_cleanup); }
    _fx_make_T2SLS(&found_0, ficus_path_0, fx_result);
_fx_cleanup: ;
    if (ficus_path_0) { _fx_free_LS(&ficus_path_0); }
    FX_FREE_STR(&v_0); FX_FREE_STR(&v_1); FX_FREE_STR(&v_2);
    FX_FREE_STR(&ficus_app_path_0);
    FX_FREE_STR(&v_3);
    FX_FREE_STR(&ficus_pp_path_0);
    FX_FREE_STR(&v_4); FX_FREE_STR(&v_5); FX_FREE_STR(&v_6); FX_FREE_STR(&v_7);
    FX_FREE_STR(&ficus_inst_path_0);
    FX_FREE_STR(&v_8); FX_FREE_STR(&v_9); FX_FREE_STR(&v_10); FX_FREE_STR(&v_11);
    if (v_12) { _fx_free_LS(&v_12); }
    if (std_ficus_path_0) { _fx_free_LS(&std_ficus_path_0); }
    _fx_free_Ta2LS(&v_13);
    if (search_path_0) { _fx_free_LS(&search_path_0); }
    FX_FREE_STR(&found_0);
    return fx_status; }

/* Compiler.parse_all: parse the root module and, transitively, everything it
   imports; *fx_result becomes false if any module failed to parse.
   (Function continues past this span.) */
FX_EXTERN_C int _fx_M8CompilerFM9parse_allB2SLS(
    fx_str_t* fname0_0, struct _fx_LS_data_t* ficus_path_0, bool* fx_result, void* fx_fv) {
    fx_str_t cwd_0 = {0};
    fx_str_t fname0_1 = {0};
    fx_str_t dir0_0 = {0};
    _fx_LS inc_dirs0_0 = 0;
    _fx_LS v_0 = 0;
    _fx_LS v_1 = 0;
    _fx_LS inc_dirs0_1 = 0;
    _fx_LS inc_dirs0_2 = 0;
    _fx_LS inc_dirs0_3 = 0;
    fx_str_t v_2 = {0};
    fx_str_t v_3 = {0};
    _fx_Li queue_0 = 0;
    int fx_status = 0;
    FX_CALL(_fx_M8FilenameFM6getcwdS0(&cwd_0, 0), _fx_cleanup);
    FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&cwd_0, fname0_0, &fname0_1, 0), _fx_cleanup);
    FX_CALL(_fx_M8FilenameFM7dirnameS1S(&fname0_1, &dir0_0, 0), _fx_cleanup);
    /* include dirs start with [dir-of-root-file; cwd] (deduplicated when equal) */
    bool v_4 = _fx_F6__eq__B2SS(&dir0_0, &cwd_0, 0);
    if (v_4) { FX_CALL(_fx_cons_LS(&cwd_0, 0, true, &inc_dirs0_0), _fx_cleanup); }
    else {
        FX_CALL(_fx_cons_LS(&cwd_0, 0, true, &v_0), _fx_cleanup);
        FX_CALL(_fx_cons_LS(&dir0_0, v_0, true, &inc_dirs0_0), _fx_cleanup); }
    /* then the include path from the options ... */
    FX_COPY_PTR(_fx_g12Options__opt.include_path, &v_1);
    if (inc_dirs0_0 == 0) { FX_COPY_PTR(v_1, &inc_dirs0_1); }
    else if (v_1 == 0) { FX_COPY_PTR(inc_dirs0_0, &inc_dirs0_1); }
    else {
        _fx_LS v_5 = 0;
        _fx_LS lstend_0 = 0;
        _fx_LS lst_0 = inc_dirs0_0;
        for (; lst_0; lst_0 = lst_0->tl) {
            fx_str_t* x_0 = &lst_0->hd;
            _fx_LS node_0 = 0;
            FX_CALL(_fx_cons_LS(x_0, 0, false, &node_0), _fx_catch_0);
            FX_LIST_APPEND(v_5, lstend_0, node_0);
_fx_catch_0: ;
            FX_CHECK_EXN(_fx_catch_1); }
        _fx_M8CompilerFM5link2LS2LSLS(v_5, v_1, &inc_dirs0_1, 0);
_fx_catch_1: ;
        if (v_5) { _fx_free_LS(&v_5); } }
    FX_CHECK_EXN(_fx_cleanup);
    /* ... and finally the ficus_path dirs */
    if (inc_dirs0_1 == 0) { FX_COPY_PTR(ficus_path_0, &inc_dirs0_2); }
    else if (ficus_path_0 == 0) { FX_COPY_PTR(inc_dirs0_1, &inc_dirs0_2); }
    else {
        _fx_LS v_6 = 0;
        _fx_LS lstend_1 = 0;
        _fx_LS lst_1 = inc_dirs0_1;
        for (; lst_1; lst_1 = lst_1->tl) {
            fx_str_t* x_1 = &lst_1->hd;
            _fx_LS node_1 = 0;
            FX_CALL(_fx_cons_LS(x_1, 0, false, &node_1),
_fx_catch_2); FX_LIST_APPEND(v_6, lstend_1, node_1); _fx_catch_2: ; FX_CHECK_EXN(_fx_catch_3); } _fx_M8CompilerFM5link2LS2LSLS(v_6, ficus_path_0, &inc_dirs0_2, 0); _fx_catch_3: ; if (v_6) { _fx_free_LS(&v_6); } } FX_CHECK_EXN(_fx_cleanup); _fx_LS lstend_2 = 0; _fx_LS lst_2 = inc_dirs0_2; for (; lst_2; lst_2 = lst_2->tl) { fx_str_t res_0 = {0}; fx_str_t* d_0 = &lst_2->hd; FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&cwd_0, d_0, &res_0, 0), _fx_catch_4); _fx_LS node_2 = 0; FX_CALL(_fx_cons_LS(&res_0, 0, false, &node_2), _fx_catch_4); FX_LIST_APPEND(inc_dirs0_3, lstend_2, node_2); _fx_catch_4: ; FX_FREE_STR(&res_0); FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_M8FilenameFM8basenameS1S(&fname0_1, &v_2, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM16remove_extensionS1S(&v_2, &v_3, 0), _fx_cleanup); _fx_R9Ast__id_t name0_id_0; FX_CALL(_fx_M3AstFM6get_idRM4id_t1S(&v_3, &name0_id_0, 0), _fx_cleanup); int_ m_idx_0; FX_CALL(_fx_M3AstFM11find_modulei2RM4id_tS(&name0_id_0, &fname0_1, &m_idx_0, 0), _fx_cleanup); FX_CALL(_fx_cons_Li(m_idx_0, 0, true, &queue_0), _fx_cleanup); bool ok_0 = true; while (queue_0 != 0) { _fx_Li v_7 = 0; _fx_N16Ast__defmodule_t minfo_0 = 0; fx_str_t mfname_0 = {0}; fx_exn_t exn_0 = {0}; int_ m_idx_1; if (queue_0 != 0) { m_idx_1 = queue_0->hd; } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_13); } FX_CHECK_EXN(_fx_catch_13); if (queue_0 != 0) { FX_COPY_PTR(queue_0->tl, &v_7); } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_13); } FX_CHECK_EXN(_fx_catch_13); FX_FREE_LIST_SIMPLE(&queue_0); FX_COPY_PTR(v_7, &queue_0); FX_CHKIDX(FX_CHKIDX1(_fx_g16Ast__all_modules, 0, m_idx_1), _fx_catch_13); FX_COPY_PTR(*FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, m_idx_1), &minfo_0); fx_copy_str(&minfo_0->u.defmodule_t.t1, &mfname_0); if (!minfo_0->u.defmodule_t.t7) { fx_str_t dir1_0 = {0}; _fx_LS v_8 = 0; _fx_LS inc_dirs_0 = 0; _fx_LN14Lexer__token_t preamble_0 = 0; _fx_Li v_9 = 0; _fx_Li __fold_result___0 = 0; _fx_Li v_10 = 0; 
FX_CHKIDX(FX_CHKIDX1(_fx_g16Ast__all_modules, 0, m_idx_1), _fx_catch_9); (*FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, m_idx_1))->u.defmodule_t.t7 = true; FX_CALL(_fx_M8FilenameFM7dirnameS1S(&mfname_0, &dir1_0, 0), _fx_catch_9); bool v_11 = _fx_F6__eq__B2SS(&dir1_0, &dir0_0, 0); if (!v_11) { FX_CALL(_fx_cons_LS(&dir1_0, 0, true, &v_8), _fx_catch_9); } if (v_8 == 0) { FX_COPY_PTR(inc_dirs0_3, &inc_dirs_0); } else if (inc_dirs0_3 == 0) { FX_COPY_PTR(v_8, &inc_dirs_0); } else { _fx_LS v_12 = 0; _fx_LS lstend_3 = 0; _fx_LS lst_3 = v_8; for (; lst_3; lst_3 = lst_3->tl) { fx_str_t* x_2 = &lst_3->hd; _fx_LS node_3 = 0; FX_CALL(_fx_cons_LS(x_2, 0, false, &node_3), _fx_catch_5); FX_LIST_APPEND(v_12, lstend_3, node_3); _fx_catch_5: ; FX_CHECK_EXN(_fx_catch_6); } _fx_M8CompilerFM5link2LS2LSLS(v_12, inc_dirs0_3, &inc_dirs_0, 0); _fx_catch_6: ; if (v_12) { _fx_free_LS(&v_12); } } FX_CHECK_EXN(_fx_catch_9); FX_CALL(_fx_M8CompilerFM12get_preambleLN14Lexer__token_t1S(&mfname_0, &preamble_0, 0), _fx_catch_9); bool v_13; FX_CALL(_fx_M6ParserFM5parseB3iLN14Lexer__token_tLS(m_idx_1, preamble_0, inc_dirs_0, &v_13, 0), _fx_catch_9); ok_0 = ok_0 && v_13; FX_CHKIDX(FX_CHKIDX1(_fx_g16Ast__all_modules, 0, m_idx_1), _fx_catch_9); FX_COPY_PTR((*FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, m_idx_1))->u.defmodule_t.t5, &v_9); _fx_Li lst_4 = v_9; for (; lst_4; lst_4 = lst_4->tl) { _fx_Li r_0 = 0; int_ a_0 = lst_4->hd; FX_COPY_PTR(__fold_result___0, &r_0); FX_CALL(_fx_cons_Li(a_0, r_0, false, &r_0), _fx_catch_7); FX_FREE_LIST_SIMPLE(&__fold_result___0); FX_COPY_PTR(r_0, &__fold_result___0); _fx_catch_7: ; FX_FREE_LIST_SIMPLE(&r_0); FX_CHECK_EXN(_fx_catch_9); } FX_COPY_PTR(__fold_result___0, &v_10); _fx_Li lst_5 = v_10; for (; lst_5; lst_5 = lst_5->tl) { _fx_N16Ast__defmodule_t dep_minfo_0 = 0; _fx_Li v_14 = 0; int_ dep_0 = lst_5->hd; FX_CALL(_fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(dep_0, &dep_minfo_0, 0), _fx_catch_8); if (!dep_minfo_0->u.defmodule_t.t7) { 
FX_CALL(_fx_cons_Li(dep_0, queue_0, true, &v_14), _fx_catch_8); FX_FREE_LIST_SIMPLE(&queue_0); FX_COPY_PTR(v_14, &queue_0); } _fx_catch_8: ; FX_FREE_LIST_SIMPLE(&v_14); if (dep_minfo_0) { _fx_free_N16Ast__defmodule_t(&dep_minfo_0); } FX_CHECK_EXN(_fx_catch_9); } _fx_catch_9: ; FX_FREE_STR(&dir1_0); if (v_8) { _fx_free_LS(&v_8); } if (inc_dirs_0) { _fx_free_LS(&inc_dirs_0); } if (preamble_0) { _fx_free_LN14Lexer__token_t(&preamble_0); } FX_FREE_LIST_SIMPLE(&v_9); FX_FREE_LIST_SIMPLE(&__fold_result___0); FX_FREE_LIST_SIMPLE(&v_10); if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_0); fx_status = 0; int tag_0 = exn_0.tag; if (tag_0 == _FX_EXN_E22LexerUtils__LexerError) { fx_str_t v_15 = {0}; fx_str_t v_16 = {0}; fx_str_t v_17 = {0}; _fx_T2Ta2iS* vcase_0 = &FX_EXN_DATA(_fx_E22LexerUtils__LexerError_data_t, exn_0.data); _fx_Ta2i* v_18 = &vcase_0->t0; FX_CALL(_fx_F6stringS1i(v_18->t0, &v_15, 0), _fx_catch_10); FX_CALL(_fx_F6stringS1i(v_18->t1, &v_16, 0), _fx_catch_10); fx_str_t slit_0 = FX_MAKE_STR(":"); fx_str_t slit_1 = FX_MAKE_STR(":"); fx_str_t slit_2 = FX_MAKE_STR(": error: "); fx_str_t* msg_0 = &vcase_0->t1; fx_str_t slit_3 = FX_MAKE_STR("\n"); { const fx_str_t strs_0[] = { mfname_0, slit_0, v_15, slit_1, v_16, slit_2, *msg_0, slit_3 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 8, &v_17), _fx_catch_10); } _fx_F12print_stringv1S(&v_17, 0); fx_str_t slit_4 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_4, 0); ok_0 = false; _fx_catch_10: ; FX_FREE_STR(&v_17); FX_FREE_STR(&v_16); FX_FREE_STR(&v_15); } else if (tag_0 == _FX_EXN_E18Parser__ParseError) { fx_str_t v_19 = {0}; fx_str_t v_20 = {0}; _fx_T2R10Ast__loc_tS* vcase_1 = &FX_EXN_DATA(_fx_E18Parser__ParseError_data_t, exn_0.data); FX_CALL(_fx_M3AstFM6stringS1RM5loc_t(&vcase_1->t0, &v_19, 0), _fx_catch_11); fx_str_t slit_5 = FX_MAKE_STR(": error: "); fx_str_t* msg_1 = &vcase_1->t1; fx_str_t slit_6 = FX_MAKE_STR("\n"); { const fx_str_t strs_1[] = { v_19, slit_5, *msg_1, slit_6 }; FX_CALL(fx_strjoin(0, 0, 0, 
strs_1, 4, &v_20), _fx_catch_11); } _fx_F12print_stringv1S(&v_20, 0); fx_str_t slit_7 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_7, 0); ok_0 = false; _fx_catch_11: ; FX_FREE_STR(&v_20); FX_FREE_STR(&v_19); } else { fx_str_t v_21 = {0}; fx_str_t v_22 = {0}; FX_CALL(_fx_F6stringS1E(&exn_0, &v_21, 0), _fx_catch_12); fx_str_t slit_8 = FX_MAKE_STR(": exception "); fx_str_t slit_9 = FX_MAKE_STR(" occured"); { const fx_str_t strs_2[] = { mfname_0, slit_8, v_21, slit_9 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 4, &v_22), _fx_catch_12); } _fx_F12print_stringv1S(&v_22, 0); fx_str_t slit_10 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_10, 0); ok_0 = false; _fx_catch_12: ; FX_FREE_STR(&v_22); FX_FREE_STR(&v_21); } FX_CHECK_EXN(_fx_catch_13); } } _fx_catch_13: ; fx_free_exn(&exn_0); FX_FREE_STR(&mfname_0); if (minfo_0) { _fx_free_N16Ast__defmodule_t(&minfo_0); } FX_FREE_LIST_SIMPLE(&v_7); FX_CHECK_EXN(_fx_cleanup); } *fx_result = ok_0; _fx_cleanup: ; FX_FREE_STR(&cwd_0); FX_FREE_STR(&fname0_1); FX_FREE_STR(&dir0_0); if (inc_dirs0_0) { _fx_free_LS(&inc_dirs0_0); } if (v_0) { _fx_free_LS(&v_0); } if (v_1) { _fx_free_LS(&v_1); } if (inc_dirs0_1) { _fx_free_LS(&inc_dirs0_1); } if (inc_dirs0_2) { _fx_free_LS(&inc_dirs0_2); } if (inc_dirs0_3) { _fx_free_LS(&inc_dirs0_3); } FX_FREE_STR(&v_2); FX_FREE_STR(&v_3); FX_FREE_LIST_SIMPLE(&queue_0); return fx_status; } FX_EXTERN_C int _fx_M8CompilerFM8toposortLi1LT2iLi( struct _fx_LT2iLi_data_t* graph_0, struct _fx_Li_data_t** fx_result, void* fx_fv) { fx_arr_t graph_1 = {0}; fx_arr_t processed_0 = {0}; _fx_rLi result_ref_0 = 0; _fx_Li __fold_result___0 = 0; _fx_Li result_0 = 0; int fx_status = 0; _fx_Li* dstptr_0 = 0; _fx_LT2iLi lst_0 = graph_0; int_ len_0 = fx_list_length(lst_0); { const int_ shape_0[] = { len_0 }; FX_CALL(fx_make_arr(1, shape_0, sizeof(_fx_Li), (fx_free_t)fx_free_list_simple, (fx_copy_t)fx_copy_ptr, 0, &graph_1), _fx_cleanup); } dstptr_0 = (_fx_Li*)graph_1.data; for (; lst_0; lst_0 = lst_0->tl, dstptr_0++) { 
_fx_T2iLi* __pat___0 = &lst_0->hd; FX_COPY_PTR(__pat___0->t1, dstptr_0); } int_ nvtx_0 = FX_ARR_SIZE(graph_1, 0); bool* dstptr_1 = 0; { const int_ shape_1[] = { nvtx_0 }; FX_CALL(fx_make_arr(1, shape_1, sizeof(bool), 0, 0, 0, &processed_0), _fx_cleanup); } dstptr_1 = (bool*)processed_0.data; for (int_ i_0 = 0; i_0 < nvtx_0; i_0++, dstptr_1++) { *dstptr_1 = false; } FX_CALL(_fx_make_rLi(0, &result_ref_0), _fx_cleanup); FX_CHKIDX_RANGE(FX_ARR_SIZE(processed_0, 0), 0, nvtx_0, 1, 1, 0, _fx_cleanup); for (int_ i_1 = 0; i_1 < nvtx_0; i_1++) { if (*FX_PTR_1D(bool, processed_0, i_1)) { FX_CONTINUE(_fx_catch_0); } FX_CALL(_fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi(i_1, 0, &graph_1, &processed_0, result_ref_0, 0), _fx_catch_0); _fx_catch_0: ; FX_CHECK_CONTINUE(); FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(result_ref_0->data, &result_0); _fx_Li lst_1 = result_0; for (; lst_1; lst_1 = lst_1->tl) { _fx_Li r_0 = 0; int_ a_0 = lst_1->hd; FX_COPY_PTR(__fold_result___0, &r_0); FX_CALL(_fx_cons_Li(a_0, r_0, false, &r_0), _fx_catch_1); FX_FREE_LIST_SIMPLE(&__fold_result___0); FX_COPY_PTR(r_0, &__fold_result___0); _fx_catch_1: ; FX_FREE_LIST_SIMPLE(&r_0); FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(__fold_result___0, fx_result); _fx_cleanup: ; FX_FREE_ARR(&graph_1); FX_FREE_ARR(&processed_0); if (result_ref_0) { _fx_free_rLi(&result_ref_0); } FX_FREE_LIST_SIMPLE(&__fold_result___0); FX_FREE_LIST_SIMPLE(&result_0); return fx_status; } static int _fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi( int_ i_0, struct _fx_Li_data_t* visited_0, fx_arr_t* graph_0, fx_arr_t* processed_0, struct _fx_rLi_data_t* result_ref_0, void* fx_fv) { _fx_Li deps_0 = 0; _fx_LS v_0 = 0; fx_str_t vlist_0 = {0}; fx_str_t v_1 = {0}; fx_exn_t v_2 = {0}; _fx_Li visited_1 = 0; _fx_Li v_3 = 0; int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); _fx_Li* result_0 = &result_ref_0->data; FX_CHKIDX(FX_CHKIDX1(*graph_0, 0, i_0), _fx_cleanup); FX_COPY_PTR(*FX_PTR_1D(_fx_Li, *graph_0, i_0), &deps_0); bool __fold_result___0 = false; _fx_Li 
lst_0 = visited_0; for (; lst_0; lst_0 = lst_0->tl) { int_ b_0 = lst_0->hd; if (i_0 == b_0) { __fold_result___0 = true; FX_BREAK(_fx_catch_0); } _fx_catch_0: ; FX_CHECK_BREAK(); FX_CHECK_EXN(_fx_cleanup); } if (__fold_result___0) { _fx_LS lstend_0 = 0; _fx_Li lst_1 = visited_0; for (; lst_1; lst_1 = lst_1->tl) { fx_str_t res_0 = {0}; int_ j_0 = lst_1->hd; _fx_R9Ast__id_t v_4; FX_CALL(_fx_M3AstFM15get_module_nameRM4id_t1i(j_0, &v_4, 0), _fx_catch_1); FX_CALL(_fx_M3AstFM2ppS1RM4id_t(&v_4, &res_0, 0), _fx_catch_1); _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(&res_0, 0, false, &node_0), _fx_catch_1); FX_LIST_APPEND(v_0, lstend_0, node_0); _fx_catch_1: ; FX_FREE_STR(&res_0); FX_CHECK_EXN(_fx_cleanup); } fx_str_t slit_0 = FX_MAKE_STR(", "); FX_CALL(_fx_F4joinS2SLS(&slit_0, v_0, &vlist_0, 0), _fx_cleanup); fx_str_t slit_1 = FX_MAKE_STR("error: cyclib dependency between the modules: "); { const fx_str_t strs_0[] = { slit_1, vlist_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &v_1), _fx_cleanup); } FX_CALL(_fx_F9make_FailE1S(&v_1, &v_2), _fx_cleanup); FX_THROW(&v_2, true, _fx_cleanup); } FX_CALL(_fx_cons_Li(i_0, visited_0, true, &visited_1), _fx_cleanup); _fx_Li lst_2 = deps_0; for (; lst_2; lst_2 = lst_2->tl) { int_ j_1 = lst_2->hd; FX_CHKIDX(FX_CHKIDX1(*processed_0, 0, j_1), _fx_catch_2); if (*FX_PTR_1D(bool, *processed_0, j_1)) { FX_CONTINUE(_fx_catch_2); } FX_CALL(_fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi(j_1, visited_1, graph_0, processed_0, result_ref_0, 0), _fx_catch_2); _fx_catch_2: ; FX_CHECK_CONTINUE(); FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_cons_Li(i_0, *result_0, true, &v_3), _fx_cleanup); FX_FREE_LIST_SIMPLE(result_0); FX_COPY_PTR(v_3, result_0); FX_CHKIDX(FX_CHKIDX1(*processed_0, 0, i_0), _fx_cleanup); *FX_PTR_1D(bool, *processed_0, i_0) = true; _fx_cleanup: ; FX_FREE_LIST_SIMPLE(&deps_0); if (v_0) { _fx_free_LS(&v_0); } FX_FREE_STR(&vlist_0); FX_FREE_STR(&v_1); fx_free_exn(&v_2); FX_FREE_LIST_SIMPLE(&visited_1); FX_FREE_LIST_SIMPLE(&v_3); return fx_status; } FX_EXTERN_C 
/* Compiler.k_skip_some: incremental-rebuild pass over K-form modules.
   For each module it pretty-prints the K-form to <build_dir>/<name>.k,
   compares against the previously written .k, and marks the module to be
   skipped (its .c/.o reused) when the K-form and all of its dependencies
   are unchanged. NOTE(review): machine-generated C (ficus compiler output);
   only comments were added, every code token is unchanged. The function's
   final tokens continue past this span. */
int _fx_M8CompilerFM11k_skip_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(
    struct _fx_LR17K_form__kmodule_t_data_t* kmods_0,
    struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv) {
    fx_arr_t skip_flags_0 = {0};
    fx_str_t build_root_dir_0 = {0};
    fx_str_t build_dir_0 = {0};
    fx_str_t obj_ext_0 = {0};
    _fx_LR17K_form__kmodule_t kmods_1 = 0;
    fx_exn_t v_0 = {0};
    int fx_status = 0;
    bool* dstptr_0 = 0;
    /* skip_flags[] is indexed by global module index, initialized to false */
    int_ v_1 = FX_ARR_SIZE(_fx_g16Ast__all_modules, 0);
    { const int_ shape_0[] = { v_1 };
      FX_CALL(fx_make_arr(1, shape_0, sizeof(bool), 0, 0, 0, &skip_flags_0), _fx_cleanup); }
    dstptr_0 = (bool*)skip_flags_0.data;
    for (int_ i_0 = 0; i_0 < v_1; i_0++, dstptr_0++) { *dstptr_0 = false; }
    /* ensure the build directories exist (0493 == mode 0755) */
    fx_copy_str(&_fx_g12Options__opt.build_rootdir, &build_root_dir_0);
    bool ok_0;
    FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_root_dir_0, 493, &ok_0, 0), _fx_cleanup);
    fx_copy_str(&_fx_g12Options__opt.build_dir, &build_dir_0);
    bool ok_1;
    if (ok_0) { FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_dir_0, 493, &ok_1, 0), _fx_cleanup); }
    else { ok_1 = false; }
    /* object-file extension differs per platform */
    if (_fx_g10Sys__win32) {
        fx_str_t slit_0 = FX_MAKE_STR(".obj");
        fx_copy_str(&slit_0, &obj_ext_0); }
    else {
        fx_str_t slit_1 = FX_MAKE_STR(".o");
        fx_copy_str(&slit_1, &obj_ext_0); }
    _fx_LR17K_form__kmodule_t lstend_0 = 0;
    _fx_LR17K_form__kmodule_t lst_0 = kmods_0;
    for (; lst_0; lst_0 = lst_0->tl) {
        _fx_R14Ast__pragmas_t km_pragmas_0 = {0};
        _fx_Li km_deps_0 = 0;
        _fx_LN14K_form__kexp_t km_top_0 = 0;
        fx_str_t km_cname_0 = {0};
        fx_str_t ext_0 = {0};
        fx_str_t mname_0 = {0};
        fx_str_t cname_0 = {0};
        fx_str_t k_filename_0 = {0};
        fx_str_t c_filename_0 = {0};
        fx_str_t o_filename_0 = {0};
        fx_str_t new_kform_0 = {0};
        fx_str_t old_kform_0 = {0};
        fx_exn_t exn_0 = {0};
        _fx_T3BBS v_2 = {0};
        fx_exn_t exn_1 = {0};
        fx_str_t v_3 = {0};
        fx_str_t status_j_0 = {0};
        fx_str_t status_j_1 = {0};
        fx_str_t v_4 = {0};
        _fx_R17K_form__kmodule_t rec_0 = {0};
        _fx_R17K_form__kmodule_t* km_0 = &lst_0->hd;
        _fx_copy_R14Ast__pragmas_t(&km_0->km_pragmas, &km_pragmas_0);
        FX_COPY_PTR(km_0->km_deps, &km_deps_0);
        FX_COPY_PTR(km_0->km_top, &km_top_0);
        fx_copy_str(&km_0->km_cname, &km_cname_0);
        int_ km_idx_0 = km_0->km_idx;
        /* emit .cpp when forced by options or requested by a module pragma */
        bool is_cpp_0;
        if (_fx_g12Options__opt.compile_by_cpp) { is_cpp_0 = true; }
        else { is_cpp_0 = km_pragmas_0.pragma_cpp; }
        if (is_cpp_0) {
            fx_str_t slit_2 = FX_MAKE_STR(".cpp");
            fx_copy_str(&slit_2, &ext_0); }
        else {
            fx_str_t slit_3 = FX_MAKE_STR(".c");
            fx_copy_str(&slit_3, &ext_0); }
        /* derive <build_dir>/<mangled>.{k,c|cpp,o|obj} file names */
        FX_CALL(_fx_M8K_mangleFM12mangle_mnameS1S(&km_cname_0, &mname_0, 0), _fx_catch_5);
        FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&build_dir_0, &mname_0, &cname_0, 0), _fx_catch_5);
        fx_str_t slit_4 = FX_MAKE_STR(".k");
        { const fx_str_t strs_0[] = { cname_0, slit_4 };
          FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &k_filename_0), _fx_catch_5); }
        { const fx_str_t strs_1[] = { cname_0, ext_0 };
          FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &c_filename_0), _fx_catch_5); }
        { const fx_str_t strs_2[] = { cname_0, obj_ext_0 };
          FX_CALL(fx_strjoin(0, 0, 0, strs_2, 2, &o_filename_0), _fx_catch_5); }
        /* pretty-print the current K-form; compare against the cached one */
        FX_CALL(_fx_M4K_ppFM16pp_top_to_stringS1LN14K_form__kexp_t(km_top_0, &new_kform_0, 0), _fx_catch_5);
        bool have_k_0;
        FX_CALL(_fx_M8FilenameFM6existsB1S(&k_filename_0, &have_k_0, 0), _fx_catch_5);
        bool have_c_0;
        FX_CALL(_fx_M8FilenameFM6existsB1S(&c_filename_0, &have_c_0, 0), _fx_catch_5);
        bool have_o_0;
        FX_CALL(_fx_M8FilenameFM6existsB1S(&o_filename_0, &have_o_0, 0), _fx_catch_5);
        bool have_all_0 = have_k_0 && have_c_0 && have_o_0;
        bool t_0;
        if (_fx_g12Options__opt.force_rebuild) { t_0 = true; }
        else { t_0 = !have_all_0; }
        if (t_0) {
            /* forced rebuild / missing artifacts: pretend the old form is "" */
            fx_str_t slit_5 = FX_MAKE_STR("");
            fx_copy_str(&slit_5, &old_kform_0); }
        else {
            FX_CALL(_fx_M4FileFM9read_utf8S1S(&k_filename_0, &old_kform_0, 0), _fx_catch_0);
_fx_catch_0: ;
            /* treat read failures (IOError/FileOpenError) as "no cached form" */
            if (fx_status < 0) {
                fx_exn_get_and_reset(fx_status, &exn_0);
                fx_status = 0;
                FX_FREE_STR(&old_kform_0);
                int tag_0 = exn_0.tag;
                bool res_0;
                if (tag_0 == FX_EXN_IOError) { res_0 = true; }
                else if (tag_0 == FX_EXN_FileOpenError) { res_0 = true; }
                else { res_0 = false; }
                FX_CHECK_EXN(_fx_catch_5);
                if (res_0) {
                    fx_str_t slit_6 = FX_MAKE_STR("");
                    fx_copy_str(&slit_6, &old_kform_0);
                    goto _fx_endmatch_0; }
                FX_RETHROW(&exn_0, _fx_catch_5);
_fx_endmatch_0: ;
                FX_CHECK_EXN(_fx_catch_5); } }
        /* v_2 = (ok, same_kform, status_message) */
        bool v_5 = _fx_F6__eq__B2SS(&new_kform_0, &old_kform_0, 0);
        if (v_5) {
            fx_str_t slit_7 = FX_MAKE_STR("");
            _fx_make_T3BBS(true, true, &slit_7, &v_2); }
        else {
            /* K-form changed: rewrite the .k file; write failures downgrade
               ok rather than aborting the whole pass */
            bool well_written_0;
            FX_CALL(_fx_M4FileFM10write_utf8v2SS(&k_filename_0, &new_kform_0, 0), _fx_catch_1);
            well_written_0 = true;
_fx_catch_1: ;
            if (fx_status < 0) {
                fx_exn_get_and_reset(fx_status, &exn_1);
                fx_status = 0;
                int tag_1 = exn_1.tag;
                bool res_1;
                if (tag_1 == FX_EXN_IOError) { res_1 = true; }
                else if (tag_1 == FX_EXN_FileOpenError) { res_1 = true; }
                else { res_1 = false; }
                FX_CHECK_EXN(_fx_catch_5);
                if (res_1) { well_written_0 = false; goto _fx_endmatch_1; }
                FX_RETHROW(&exn_1, _fx_catch_5);
_fx_endmatch_1: ;
                FX_CHECK_EXN(_fx_catch_5); }
            if (well_written_0) {
                fx_str_t slit_8 = FX_MAKE_STR("");
                fx_copy_str(&slit_8, &v_3); }
            else if (_fx_g21Compiler__iscolorterm) {
                /* NOTE(review): color/no-color branches carry identical text
                   here — presumably ANSI codes were dropped upstream; confirm
                   against the .fx source */
                fx_str_t slit_9 = FX_MAKE_STR("failed to write .k");
                fx_copy_str(&slit_9, &v_3); }
            else {
                fx_str_t slit_10 = FX_MAKE_STR("failed to write .k");
                fx_copy_str(&slit_10, &v_3); }
            _fx_make_T3BBS(well_written_0, false, &v_3, &v_2); }
        bool ok_j_0 = v_2.t0;
        bool same_kform_0 = v_2.t1;
        fx_copy_str(&v_2.t2, &status_j_0);
        ok_1 = ok_1 && ok_j_0;
        /* stale .c/.o must be removed when the K-form changed */
        if (!same_kform_0) {
            if (have_c_0) { FX_CALL(_fx_M3SysFM6removev1S(&c_filename_0, 0), _fx_catch_5); }
            if (have_o_0) { FX_CALL(_fx_M3SysFM6removev1S(&o_filename_0, 0), _fx_catch_5); } }
        /* skip this module only if its form is unchanged AND every
           dependency is also skipped */
        bool skip_module_0;
        if (same_kform_0) {
            bool __fold_result___0 = true;
            _fx_Li lst_1 = km_deps_0;
            for (; lst_1; lst_1 = lst_1->tl) {
                int_ d_0 = lst_1->hd;
                FX_CHKIDX(FX_CHKIDX1(skip_flags_0, 0, d_0), _fx_catch_2);
                if (!*FX_PTR_1D(bool, skip_flags_0, d_0)) { __fold_result___0 = false; FX_BREAK(_fx_catch_2); }
_fx_catch_2: ;
                FX_CHECK_BREAK();
                FX_CHECK_EXN(_fx_catch_5); }
            skip_module_0 = __fold_result___0; }
        else {
            skip_module_0 = false; }
        /* verbose status line: "K <module>: skip|process|<error>" */
        if (FX_STR_LENGTH(status_j_0) != 0) { fx_copy_str(&status_j_0, &status_j_1); }
        else if (skip_module_0) {
            fx_str_t slit_11 = FX_MAKE_STR("skip");
            fx_copy_str(&slit_11, &status_j_1); }
        else if (_fx_g21Compiler__iscolorterm) {
            /* NOTE(review): identical text in both branches, as above */
            fx_str_t slit_12 = FX_MAKE_STR("process");
            fx_copy_str(&slit_12, &status_j_1); }
        else {
            fx_str_t slit_13 = FX_MAKE_STR("process");
            fx_copy_str(&slit_13, &status_j_1); }
        fx_str_t slit_14 = FX_MAKE_STR("K ");
        fx_str_t slit_15 = FX_MAKE_STR(": ");
        { const fx_str_t strs_3[] = { slit_14, km_cname_0, slit_15, status_j_1 };
          FX_CALL(fx_strjoin(0, 0, 0, strs_3, 4, &v_4), _fx_catch_5); }
        FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_4, 0), _fx_catch_5);
        /* for a skipped module, strip the ctor-function bodies: replace each
           constructor's body (variant tag 32 == KDefFun, fun_flag_ctor tag 1)
           with an empty KExpCCode placeholder so no code is regenerated */
        if (skip_module_0) {
            _fx_LN14K_form__kexp_t lst_2 = km_top_0;
            for (; lst_2; lst_2 = lst_2->tl) {
                _fx_N14K_form__kexp_t e_0 = lst_2->hd;
                if (FX_REC_VARIANT_TAG(e_0) == 32) {
                    _fx_rR17K_form__kdeffun_t kf_0 = e_0->u.KDefFun;
                    _fx_N17Ast__fun_constr_t v_6 = kf_0->data.kf_flags.fun_flag_ctor;
                    if (v_6.tag == 1) {
                        _fx_N14K_form__ktyp_t kf_rt_0 = 0;
                        _fx_T2N14K_form__ktyp_tR10Ast__loc_t v_7 = {0};
                        _fx_N14K_form__kexp_t v_8 = 0;
                        _fx_R17K_form__kdeffun_t v_9 = {0};
                        _fx_R17K_form__kdeffun_t* v_10 = &kf_0->data;
                        _fx_R10Ast__loc_t kf_loc_0 = v_10->kf_loc;
                        FX_COPY_PTR(v_10->kf_rt, &kf_rt_0);
                        _fx_R16Ast__fun_flags_t kf_flags_0 = v_10->kf_flags;
                        _fx_R17K_form__kdeffun_t* v_11 = &kf_0->data;
                        _fx_make_T2N14K_form__ktyp_tR10Ast__loc_t(kf_rt_0, &kf_loc_0, &v_7);
                        fx_str_t slit_16 = FX_MAKE_STR("");
                        FX_CALL(_fx_M6K_formFM9KExpCCodeN14K_form__kexp_t2ST2N14K_form__ktyp_tR10Ast__loc_t(&slit_16, &v_7, &v_8), _fx_catch_3);
                        _fx_R16Ast__fun_flags_t v_12 = { kf_flags_0.fun_flag_pure, true,
                            kf_flags_0.fun_flag_have_keywords, false,
                            kf_flags_0.fun_flag_nothrow, kf_flags_0.fun_flag_really_nothrow,
                            kf_flags_0.fun_flag_private, kf_flags_0.fun_flag_ctor,
                            kf_flags_0.fun_flag_method_of, kf_flags_0.fun_flag_uses_fv,
                            kf_flags_0.fun_flag_recursive, kf_flags_0.fun_flag_instance };
                        _fx_make_R17K_form__kdeffun_t(&v_11->kf_name,
                            &v_11->kf_cname, v_11->kf_params, v_11->kf_rt, v_8, &v_12,
                            &v_11->kf_closure, v_11->kf_scope, &v_11->kf_loc, &v_9);
                        _fx_R17K_form__kdeffun_t* v_13 = &kf_0->data;
                        _fx_free_R17K_form__kdeffun_t(v_13);
                        _fx_copy_R17K_form__kdeffun_t(&v_9, v_13);
_fx_catch_3: ;
                        _fx_free_R17K_form__kdeffun_t(&v_9);
                        if (v_8) { _fx_free_N14K_form__kexp_t(&v_8); }
                        _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&v_7);
                        if (kf_rt_0) { _fx_free_N14K_form__ktyp_t(&kf_rt_0); }
                        goto _fx_endmatch_2; } }
_fx_endmatch_2: ;
                FX_CHECK_EXN(_fx_catch_4);
_fx_catch_4: ;
                FX_CHECK_EXN(_fx_catch_5); } }
        /* record the decision and append the rebuilt kmodule record */
        FX_CHKIDX(FX_CHKIDX1(skip_flags_0, 0, km_idx_0), _fx_catch_5);
        *FX_PTR_1D(bool, skip_flags_0, km_idx_0) = skip_module_0;
        _fx_make_R17K_form__kmodule_t(&km_0->km_name, km_0->km_idx, km_0->km_toposort_idx,
            &km_0->km_cname, km_0->km_top, km_0->km_deps, skip_module_0, km_0->km_main,
            &km_0->km_pragmas, &rec_0);
        _fx_LR17K_form__kmodule_t node_0 = 0;
        FX_CALL(_fx_cons_LR17K_form__kmodule_t(&rec_0, 0, false, &node_0), _fx_catch_5);
        FX_LIST_APPEND(kmods_1, lstend_0, node_0);
_fx_catch_5: ;
        _fx_free_R17K_form__kmodule_t(&rec_0);
        FX_FREE_STR(&v_4);
        FX_FREE_STR(&status_j_1);
        FX_FREE_STR(&status_j_0);
        FX_FREE_STR(&v_3);
        fx_free_exn(&exn_1);
        _fx_free_T3BBS(&v_2);
        fx_free_exn(&exn_0);
        FX_FREE_STR(&old_kform_0);
        FX_FREE_STR(&new_kform_0);
        FX_FREE_STR(&o_filename_0);
        FX_FREE_STR(&c_filename_0);
        FX_FREE_STR(&k_filename_0);
        FX_FREE_STR(&cname_0);
        FX_FREE_STR(&mname_0);
        FX_FREE_STR(&ext_0);
        FX_FREE_STR(&km_cname_0);
        if (km_top_0) { _fx_free_LN14K_form__kexp_t(&km_top_0); }
        FX_FREE_LIST_SIMPLE(&km_deps_0);
        _fx_free_R14Ast__pragmas_t(&km_pragmas_0);
        FX_CHECK_EXN(_fx_cleanup); }
    /* any failed .k write aborts the pipeline with Fail */
    if (!ok_1) {
        fx_str_t slit_17 = FX_MAKE_STR("failed to write some k-forms");
        FX_CALL(_fx_F9make_FailE1S(&slit_17, &v_0), _fx_cleanup);
        FX_THROW(&v_0, true, _fx_cleanup); }
    FX_COPY_PTR(kmods_1, fx_result);
_fx_cleanup: ;
    FX_FREE_ARR(&skip_flags_0);
    FX_FREE_STR(&build_root_dir_0);
    FX_FREE_STR(&build_dir_0);
    FX_FREE_STR(&obj_ext_0);
    if (kmods_1) {
_fx_free_LR17K_form__kmodule_t(&kmods_1); } fx_free_exn(&v_0); return fx_status; } FX_EXTERN_C int _fx_M8CompilerFM14k_optimize_allT2LR17K_form__kmodule_tB1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_T2LR17K_form__kmodule_tB* fx_result, void* fx_fv) { _fx_LR17K_form__kmodule_t temp_kmods_0 = 0; _fx_LR17K_form__kmodule_t v_0 = 0; _fx_LR17K_form__kmodule_t v_1 = 0; _fx_LR17K_form__kmodule_t v_2 = 0; _fx_LR17K_form__kmodule_t v_3 = 0; _fx_LR17K_form__kmodule_t v_4 = 0; _fx_LR17K_form__kmodule_t v_5 = 0; _fx_LR17K_form__kmodule_t v_6 = 0; _fx_LR17K_form__kmodule_t v_7 = 0; _fx_LR17K_form__kmodule_t v_8 = 0; _fx_LR17K_form__kmodule_t v_9 = 0; _fx_LR17K_form__kmodule_t v_10 = 0; _fx_LR17K_form__kmodule_t v_11 = 0; _fx_LR17K_form__kmodule_t v_12 = 0; _fx_LR17K_form__kmodule_t v_13 = 0; _fx_LR17K_form__kmodule_t v_14 = 0; _fx_LR17K_form__kmodule_t v_15 = 0; _fx_LR17K_form__kmodule_t v_16 = 0; _fx_LR17K_form__kmodule_t v_17 = 0; int fx_status = 0; _fx_free_LE(&_fx_g21Ast__all_compile_errs); _fx_g21Ast__all_compile_errs = 0; int_ niters_0 = _fx_g12Options__opt.optim_iters; FX_COPY_PTR(kmods_0, &temp_kmods_0); fx_str_t slit_0 = FX_MAKE_STR("\tremove unused"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_0, 0), _fx_cleanup); FX_CALL(_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, true, &v_0, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_0, &temp_kmods_0); fx_str_t slit_1 = FX_MAKE_STR("\tannotate types"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_1, 0), _fx_cleanup); FX_CALL(_fx_M10K_annotateFM14annotate_typesLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_1, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_1, &temp_kmods_0); fx_str_t slit_2 = FX_MAKE_STR("\tcopy generic/inline functions"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_2, 0), _fx_cleanup); 
FX_CALL(_fx_M13K_copy_n_skipFM9copy_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_2, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_2, &temp_kmods_0); fx_str_t slit_3 = FX_MAKE_STR("\tremove unused by main"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_3, 0), _fx_cleanup); FX_CALL(_fx_M15K_remove_unusedFM21remove_unused_by_mainLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_3, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_3, &temp_kmods_0); fx_str_t slit_4 = FX_MAKE_STR("\tmangle & dump intermediate K-forms"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_4, 0), _fx_cleanup); FX_CALL(_fx_M8K_mangleFM10mangle_allLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_4, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_4, &temp_kmods_0); FX_CALL(_fx_M8K_mangleFM13mangle_localsLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_5, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_5, &temp_kmods_0); FX_CALL(_fx_M8CompilerFM11k_skip_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_6, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_6, &temp_kmods_0); fx_str_t slit_5 = FX_MAKE_STR("\tdemangle"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_5, 0), _fx_cleanup); FX_CALL(_fx_M8K_mangleFM12demangle_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_7, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_7, &temp_kmods_0); int_ v_18 = niters_0 + 1; int_ n_0 = FX_LOOP_COUNT(1, v_18, 1); for (int_ i_0 = 0; i_0 < n_0; i_0++) { fx_str_t v_19 = {0}; fx_str_t v_20 = {0}; _fx_LR17K_form__kmodule_t v_21 = 0; _fx_LR17K_form__kmodule_t v_22 = 0; _fx_LR17K_form__kmodule_t v_23 = 0; _fx_LR17K_form__kmodule_t v_24 = 0; _fx_LR17K_form__kmodule_t v_25 = 0; _fx_LR17K_form__kmodule_t v_26 = 0; _fx_LR17K_form__kmodule_t v_27 = 0; 
_fx_LR17K_form__kmodule_t v_28 = 0; _fx_LR17K_form__kmodule_t v_29 = 0; _fx_LR17K_form__kmodule_t v_30 = 0; int_ i_1 = 1 + i_0 * 1; FX_CALL(_fx_F6stringS1i(i_1, &v_19, 0), _fx_catch_0); fx_str_t slit_6 = FX_MAKE_STR("Optimization pass #"); fx_str_t slit_7 = FX_MAKE_STR(":"); { const fx_str_t strs_0[] = { slit_6, v_19, slit_7 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, &v_20), _fx_catch_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_20, 0), _fx_catch_0); if (i_1 <= 2) { fx_str_t slit_8 = FX_MAKE_STR("\tsimple lambda lifting"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_8, 0), _fx_catch_0); FX_CALL(_fx_M13K_lift_simpleFM4liftLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_21, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_21, &temp_kmods_0); } fx_str_t slit_9 = FX_MAKE_STR("\ttailrec"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_9, 0), _fx_catch_0); FX_CALL(_fx_M9K_tailrecFM17tailrec2loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_22, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_22, &temp_kmods_0); fx_str_t slit_10 = FX_MAKE_STR("\tloop inv"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_10, 0), _fx_catch_0); FX_CALL(_fx_M10K_loop_invFM18move_loop_invs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_23, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_23, &temp_kmods_0); fx_str_t slit_11 = FX_MAKE_STR("\tgemm implantation"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_11, 0), _fx_catch_0); FX_CALL(_fx_M13K_optim_matopFM13optimize_gemmLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_24, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_24, &temp_kmods_0); fx_str_t slit_12 = FX_MAKE_STR("\tinline"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_12, 0), _fx_catch_0); if (_fx_g12Options__opt.inline_thresh > 0) { FX_CALL(_fx_M8K_inlineFM11inline_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, 
&v_25, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_25, &temp_kmods_0); } fx_str_t slit_13 = FX_MAKE_STR("\tflatten"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_13, 0), _fx_catch_0); FX_CALL(_fx_M9K_flattenFM11flatten_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_26, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_26, &temp_kmods_0); fx_str_t slit_14 = FX_MAKE_STR("\tfuse loops"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_14, 0), _fx_catch_0); FX_CALL(_fx_M12K_fuse_loopsFM14fuse_loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_27, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_27, &temp_kmods_0); fx_str_t slit_15 = FX_MAKE_STR("\tfast idx"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_15, 0), _fx_catch_0); FX_CALL(_fx_M10K_fast_idxFM23optimize_idx_checks_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_28, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_28, &temp_kmods_0); fx_str_t slit_16 = FX_MAKE_STR("\tconst folding"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_16, 0), _fx_catch_0); FX_CALL(_fx_M15K_cfold_dealiasFM13cfold_dealiasLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_29, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_29, &temp_kmods_0); fx_str_t slit_17 = FX_MAKE_STR("\tremove unused"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_17, 0), _fx_catch_0); FX_CALL( _fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_30, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_30, &temp_kmods_0); _fx_catch_0: ; if (v_30) { _fx_free_LR17K_form__kmodule_t(&v_30); } if (v_29) { _fx_free_LR17K_form__kmodule_t(&v_29); } if (v_28) { _fx_free_LR17K_form__kmodule_t(&v_28); } if (v_27) { _fx_free_LR17K_form__kmodule_t(&v_27); } if (v_26) { _fx_free_LR17K_form__kmodule_t(&v_26); 
} if (v_25) { _fx_free_LR17K_form__kmodule_t(&v_25); } if (v_24) { _fx_free_LR17K_form__kmodule_t(&v_24); } if (v_23) { _fx_free_LR17K_form__kmodule_t(&v_23); } if (v_22) { _fx_free_LR17K_form__kmodule_t(&v_22); } if (v_21) { _fx_free_LR17K_form__kmodule_t(&v_21); } FX_FREE_STR(&v_20); FX_FREE_STR(&v_19); FX_CHECK_EXN(_fx_cleanup); } fx_str_t slit_18 = FX_MAKE_STR("Finalizing K-form:"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_18, 0), _fx_cleanup); fx_str_t slit_19 = FX_MAKE_STR("\tmaking wrappers for nothrow functions"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_19, 0), _fx_cleanup); FX_CALL( _fx_M18K_nothrow_wrappersFM25make_wrappers_for_nothrowLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_8, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_8, &temp_kmods_0); fx_str_t slit_20 = FX_MAKE_STR("\tmutable freevars referencing"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_20, 0), _fx_cleanup); FX_CALL(_fx_M10K_freevarsFM21mutable_freevars2refsLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_9, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_9, &temp_kmods_0); fx_str_t slit_21 = FX_MAKE_STR("\tdeclosuring"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_21, 0), _fx_cleanup); FX_CALL(_fx_M11K_declosureFM13declosure_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_10, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_10, &temp_kmods_0); fx_str_t slit_22 = FX_MAKE_STR("\tlambda lifting"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_22, 0), _fx_cleanup); FX_CALL(_fx_M6K_liftFM8lift_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_11, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_11, &temp_kmods_0); fx_str_t slit_23 = FX_MAKE_STR("\tflatten"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_23, 0), _fx_cleanup); FX_CALL(_fx_M9K_flattenFM11flatten_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_12, 
0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_12, &temp_kmods_0); fx_str_t slit_24 = FX_MAKE_STR("\tremove unused"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_24, 0), _fx_cleanup); FX_CALL(_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_13, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_13, &temp_kmods_0); fx_str_t slit_25 = FX_MAKE_STR("\tmangle"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_25, 0), _fx_cleanup); FX_CALL(_fx_M8K_mangleFM10mangle_allLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, true, &v_14, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_14, &temp_kmods_0); fx_str_t slit_26 = FX_MAKE_STR("\tremove unused"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_26, 0), _fx_cleanup); FX_CALL(_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_15, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_15, &temp_kmods_0); fx_str_t slit_27 = FX_MAKE_STR("\tmark recursive"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_27, 0), _fx_cleanup); FX_CALL(_fx_M8K_inlineFM24find_recursive_funcs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_16, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_16, &temp_kmods_0); fx_str_t slit_28 = FX_MAKE_STR("\tannotate types"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_28, 0), _fx_cleanup); FX_CALL(_fx_M10K_annotateFM14annotate_typesLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_17, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_17, &temp_kmods_0); _fx_make_T2LR17K_form__kmodule_tB(temp_kmods_0, _fx_g21Ast__all_compile_errs == 0, fx_result); _fx_cleanup: ; if (temp_kmods_0) { _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); } if (v_0) { _fx_free_LR17K_form__kmodule_t(&v_0); } if (v_1) { 
/* NOTE(review): auto-generated C emitted by the Ficus compiler — do not hand-edit logic.
   The lines below are the cleanup tail of the preceding function (its definition starts
   before this chunk): the chain of intermediate K-form module lists v_1..v_17 is freed
   before the accumulated status is returned. */
_fx_free_LR17K_form__kmodule_t(&v_1); } if (v_2) { _fx_free_LR17K_form__kmodule_t(&v_2); } if (v_3) { _fx_free_LR17K_form__kmodule_t(&v_3); } if (v_4) { _fx_free_LR17K_form__kmodule_t(&v_4); } if (v_5) { _fx_free_LR17K_form__kmodule_t(&v_5); } if (v_6) { _fx_free_LR17K_form__kmodule_t(&v_6); } if (v_7) { _fx_free_LR17K_form__kmodule_t(&v_7); } if (v_8) { _fx_free_LR17K_form__kmodule_t(&v_8); } if (v_9) { _fx_free_LR17K_form__kmodule_t(&v_9); } if (v_10) { _fx_free_LR17K_form__kmodule_t(&v_10); } if (v_11) { _fx_free_LR17K_form__kmodule_t(&v_11); } if (v_12) { _fx_free_LR17K_form__kmodule_t(&v_12); } if (v_13) { _fx_free_LR17K_form__kmodule_t(&v_13); } if (v_14) { _fx_free_LR17K_form__kmodule_t(&v_14); } if (v_15) { _fx_free_LR17K_form__kmodule_t(&v_15); } if (v_16) { _fx_free_LR17K_form__kmodule_t(&v_16); } if (v_17) { _fx_free_LR17K_form__kmodule_t(&v_17); } return fx_status; }
/* run_cc (mangled: _fx_M8CompilerFM6run_ccB2LR17C_form__cmodule_tS)
 *
 * Compiles the generated C/C++ modules with the host toolchain and links the
 * final application.  Generated from the Ficus `Compiler.run_cc` function.
 *
 * Parameters:
 *   cmods_0      - list of generated C modules to build.
 *   ficus_root_0 - Ficus installation root; "runtime", "runtime/lib" and
 *                  "runtime/ficus/impl/libficus" paths are derived from it below.
 *   fx_result    - out: overall success flag (true when every compile step and
 *                  the final link command returned 0, or the app is up-to-date).
 *   fx_fv        - closure environment pointer (unused free-variable slot).
 * Returns: fx_status (0 on success, negative Ficus error code otherwise; errors
 *   propagate via the FX_CALL/goto-_fx_cleanup pattern used throughout).
 *
 * Visible flow: query OS name -> build a 9-tuple v_0 of toolchain settings
 * (os name, C compiler, C++ compiler, object extension, object-output flag,
 * app-output flag, link-lib prefix, cflags, clibs) per platform (MSVC on win32,
 * clang on macOS x86_64/arm64, cc on Linux/unix) -> merge FICUS_CFLAGS env var
 * and Options.cflags -> compile every module in an OpenMP-parallel loop,
 * rewriting each .c file only when its pretty-printed content changed ->
 * fold per-module results -> link with clibs from FICUS_LINK_LIBRARIES,
 * Options.clibs and per-module `clib` pragmas, unless the app is up-to-date. */
FX_EXTERN_C int _fx_M8CompilerFM6run_ccB2LR17C_form__cmodule_tS( struct _fx_LR17C_form__cmodule_t_data_t* cmods_0, fx_str_t* ficus_root_0, bool* fx_result, void* fx_fv) { fx_str_t osinfo_0 = {0}; fx_str_t runtime_include_path_0 = {0}; fx_str_t runtime_lib_path_0 = {0}; fx_str_t runtime_impl_0 = {0}; fx_str_t build_root_dir_0 = {0}; fx_str_t build_dir_0 = {0}; _fx_Ta9S v_0 = {0}; fx_str_t opt_flags_0 = {0}; fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; fx_str_t v_3 = {0}; fx_str_t v_4 = {0}; fx_str_t cflags_0 = {0}; _fx_Ta4S v_5 = {0}; _fx_Ta2S v_6 = {0}; fx_str_t omp_cflags_0 = {0}; fx_str_t omp_lib_0 = {0}; _fx_Ta3S v_7 = {0}; fx_str_t v_8 = {0}; fx_str_t v_9 = {0}; fx_str_t libpath_0 = {0}; fx_str_t cflags_1 = {0}; fx_str_t clibs_0 = {0}; fx_str_t omp_flags_0 = {0}; fx_str_t os_0 = {0}; fx_str_t libpath_1 = {0}; fx_str_t cflags_2 = {0}; fx_str_t clibs_1 = {0}; fx_str_t ggdb_opt_0 = {0}; fx_str_t v_10 = {0}; fx_str_t v_11 = {0}; fx_str_t v_12 = {0}; fx_str_t v_13 = {0}; fx_str_t v_14 = {0}; fx_str_t cflags_3 = {0}; fx_str_t v_15 = {0}; fx_str_t v_16 = {0}; fx_str_t v_17 = {0}; fx_str_t v_18 =
{0}; fx_str_t clibs_2 = {0}; fx_str_t c_comp_0 = {0}; fx_str_t cpp_comp_0 = {0}; fx_str_t obj_ext_0 = {0}; fx_str_t obj_opt_0 = {0}; fx_str_t appname_opt_0 = {0}; fx_str_t link_lib_opt_0 = {0}; fx_str_t cflags_4 = {0}; fx_str_t clibs_3 = {0}; fx_str_t custom_cflags_0 = {0}; fx_str_t v_19 = {0}; fx_str_t custom_cflags_1 = {0}; fx_str_t v_20 = {0}; fx_str_t cflags_5 = {0}; fx_str_t v_21 = {0}; fx_str_t v_22 = {0}; fx_str_t v_23 = {0}; _fx_R14Ast__pragmas_t v_24 = {0}; _fx_R17C_form__cmodule_t runtime_pseudo_cmod_0 = {0}; _fx_LR17C_form__cmodule_t cmods_1 = 0; fx_arr_t v_25 = {0}; fx_arr_t results_0 = {0}; _fx_T5BBLSBLS __fold_result___0 = {0}; _fx_T5BBLSBLS v_26 = {0}; _fx_LS all_clibs_0 = 0; _fx_LS objs_0 = 0; fx_str_t v_27 = {0}; fx_str_t v_28 = {0}; fx_str_t v_29 = {0}; fx_str_t v_30 = {0}; fx_str_t custom_clibs_0 = {0}; fx_str_t v_31 = {0}; fx_str_t custom_clibs_1 = {0}; fx_str_t v_32 = {0}; fx_str_t custom_clibs_2 = {0}; _fx_LS v_33 = 0; _fx_LS v_34 = 0; fx_str_t v_35 = {0}; fx_str_t clibs_4 = {0}; fx_str_t v_36 = {0}; fx_str_t v_37 = {0}; fx_str_t v_38 = {0}; fx_str_t v_39 = {0}; fx_str_t cmd_0 = {0}; fx_str_t v_40 = {0}; fx_str_t cmd_1 = {0}; int fx_status = 0; FX_CALL(_fx_g11Sys__osname.fp(true, &osinfo_0, _fx_g11Sys__osname.fcv), _fx_cleanup); int_ opt_level_0 = _fx_g12Options__opt.optimize_level; bool enable_openmp_0 = _fx_g12Options__opt.enable_openmp; fx_str_t slit_0 = FX_MAKE_STR("runtime"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(ficus_root_0, &slit_0, &runtime_include_path_0, 0), _fx_cleanup); fx_str_t slit_1 = FX_MAKE_STR("runtime/lib"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(ficus_root_0, &slit_1, &runtime_lib_path_0, 0), _fx_cleanup); fx_str_t slit_2 = FX_MAKE_STR("runtime/ficus/impl/libficus"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(ficus_root_0, &slit_2, &runtime_impl_0, 0), _fx_cleanup); fx_copy_str(&_fx_g12Options__opt.build_rootdir, &build_root_dir_0); bool ok_0; FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_root_dir_0, 493, &ok_0, 0), _fx_cleanup);
/* Create the build directory (mode 493 == 0755 octal) and select per-OS
 * toolchain settings.  Each platform branch fills the 9-tuple v_0 via
 * _fx_make_Ta9S: (os, cc, c++, obj ext, obj flag, out flag, -l prefix,
 * cflags, clibs).  win32 -> MSVC cl with /MT(/MTd); Darwin -> clang with
 * optional "-Xclang -fopenmp"/-lomp and macos_x64/macos_arm64 lib dirs;
 * Linux -> optional -fopenmp; generic unix/other -> empty settings. */
fx_copy_str(&_fx_g12Options__opt.build_dir, &build_dir_0); bool ok_1; if (ok_0) { FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_dir_0, 493, &ok_1, 0), _fx_cleanup); } else { ok_1 = false; } if (_fx_g10Sys__win32) { if (opt_level_0 == 0) { fx_str_t slit_3 = FX_MAKE_STR(" /MTd /Od /GF"); fx_copy_str(&slit_3, &opt_flags_0); } else { if (opt_level_0 == 1) { fx_str_t slit_4 = FX_MAKE_STR("/O1"); fx_copy_str(&slit_4, &v_1); } else { fx_str_t slit_5 = FX_MAKE_STR("/O2"); fx_copy_str(&slit_5, &v_1); } fx_str_t slit_6 = FX_MAKE_STR(" /MT "); { const fx_str_t strs_0[] = { slit_6, v_1 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &opt_flags_0), _fx_cleanup); } } FX_CALL(_fx_M8CompilerFM6stringS1S(&opt_flags_0, &v_2, 0), _fx_cleanup); fx_str_t slit_7 = FX_MAKE_STR(""); FX_CALL(_fx_M8CompilerFM6stringS1S(&slit_7, &v_3, 0), _fx_cleanup); FX_CALL(_fx_M8CompilerFM6stringS1S(&runtime_include_path_0, &v_4, 0), _fx_cleanup); fx_str_t slit_8 = FX_MAKE_STR("/nologo"); fx_str_t slit_9 = FX_MAKE_STR(" /I"); { const fx_str_t strs_1[] = { slit_8, v_2, v_3, slit_9, v_4 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 5, &cflags_0), _fx_cleanup); } fx_str_t slit_10 = FX_MAKE_STR("win"); fx_str_t slit_11 = FX_MAKE_STR("cl"); fx_str_t slit_12 = FX_MAKE_STR("cl"); fx_str_t slit_13 = FX_MAKE_STR(".obj"); fx_str_t slit_14 = FX_MAKE_STR("/c /Fo"); fx_str_t slit_15 = FX_MAKE_STR("/Fe"); fx_str_t slit_16 = FX_MAKE_STR(""); fx_str_t slit_17 = FX_MAKE_STR("/F10485760 kernel32.lib advapi32.lib"); _fx_make_Ta9S(&slit_10, &slit_11, &slit_12, &slit_13, &slit_14, &slit_15, &slit_16, &cflags_0, &slit_17, &v_0); } else { bool v_41; fx_str_t slit_18 = FX_MAKE_STR("Darwin"); FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_18, &v_41, 0), _fx_cleanup); if (v_41) { if (enable_openmp_0) { fx_str_t slit_19 = FX_MAKE_STR("-Xclang -fopenmp"); fx_str_t slit_20 = FX_MAKE_STR(" -lomp"); _fx_make_Ta2S(&slit_19, &slit_20, &v_6); } else { fx_str_t slit_21 = FX_MAKE_STR(""); fx_str_t slit_22 = FX_MAKE_STR(""); _fx_make_Ta2S(&slit_21,
&slit_22, &v_6); } fx_copy_str(&v_6.t0, &omp_cflags_0); fx_copy_str(&v_6.t1, &omp_lib_0); bool v_42; fx_str_t slit_23 = FX_MAKE_STR("x86_64"); FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_23, &v_42, 0), _fx_cleanup); if (v_42) { fx_str_t slit_24 = FX_MAKE_STR(" "); { const fx_str_t strs_2[] = { slit_24, omp_cflags_0, omp_lib_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 3, &v_8), _fx_cleanup); } fx_str_t slit_25 = FX_MAKE_STR("macos_x64"); _fx_make_Ta3S(&slit_25, &omp_cflags_0, &v_8, &v_7); } else { bool v_43; fx_str_t slit_26 = FX_MAKE_STR("arm64"); FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_26, &v_43, 0), _fx_cleanup); if (v_43) { fx_str_t slit_27 = FX_MAKE_STR(" "); { const fx_str_t strs_3[] = { slit_27, omp_cflags_0, omp_lib_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_9), _fx_cleanup); } fx_str_t slit_28 = FX_MAKE_STR("macos_arm64"); _fx_make_Ta3S(&slit_28, &omp_cflags_0, &v_9, &v_7); } else { fx_str_t slit_29 = FX_MAKE_STR(""); fx_str_t slit_30 = FX_MAKE_STR(""); fx_str_t slit_31 = FX_MAKE_STR(""); _fx_make_Ta3S(&slit_29, &slit_30, &slit_31, &v_7); } } fx_copy_str(&v_7.t0, &libpath_0); fx_copy_str(&v_7.t1, &cflags_1); fx_copy_str(&v_7.t2, &clibs_0); fx_str_t slit_32 = FX_MAKE_STR("macos"); _fx_make_Ta4S(&slit_32, &libpath_0, &cflags_1, &clibs_0, &v_5); } else { bool v_44; fx_str_t slit_33 = FX_MAKE_STR("Linux"); FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_33, &v_44, 0), _fx_cleanup); if (v_44) { if (enable_openmp_0) { fx_str_t slit_34 = FX_MAKE_STR(" -fopenmp"); fx_copy_str(&slit_34, &omp_flags_0); } else { fx_str_t slit_35 = FX_MAKE_STR(""); fx_copy_str(&slit_35, &omp_flags_0); } fx_str_t slit_36 = FX_MAKE_STR("linux"); fx_str_t slit_37 = FX_MAKE_STR(""); _fx_make_Ta4S(&slit_36, &slit_37, &omp_flags_0, &omp_flags_0, &v_5); } else if (_fx_g9Sys__unix) { fx_str_t slit_38 = FX_MAKE_STR("unix"); fx_str_t slit_39 = FX_MAKE_STR(""); fx_str_t slit_40 = FX_MAKE_STR(""); fx_str_t slit_41 = FX_MAKE_STR(""); _fx_make_Ta4S(&slit_38,
&slit_39, &slit_40, &slit_41, &v_5); } else { fx_str_t slit_42 = FX_MAKE_STR(""); fx_str_t slit_43 = FX_MAKE_STR(""); fx_str_t slit_44 = FX_MAKE_STR(""); fx_str_t slit_45 = FX_MAKE_STR(""); _fx_make_Ta4S(&slit_42, &slit_43, &slit_44, &slit_45, &v_5); } } fx_copy_str(&v_5.t0, &os_0); fx_copy_str(&v_5.t1, &libpath_1); fx_copy_str(&v_5.t2, &cflags_2); fx_copy_str(&v_5.t3, &clibs_1); if (opt_level_0 == 0) { fx_str_t slit_46 = FX_MAKE_STR(" -ggdb"); fx_copy_str(&slit_46, &ggdb_opt_0); } else { fx_str_t slit_47 = FX_MAKE_STR(""); fx_copy_str(&slit_47, &ggdb_opt_0); } FX_CALL(_fx_F6stringS1i(opt_level_0, &v_10, 0), _fx_cleanup); FX_CALL(_fx_M8CompilerFM6stringS1S(&ggdb_opt_0, &v_11, 0), _fx_cleanup); FX_CALL(_fx_M8CompilerFM6stringS1S(&cflags_2, &v_12, 0), _fx_cleanup); fx_str_t slit_48 = FX_MAKE_STR("-Wno-unknown-warning-option -Wno-dangling-else -Wno-static-in-inline"); FX_CALL(_fx_M8CompilerFM6stringS1S(&slit_48, &v_13, 0), _fx_cleanup); FX_CALL(_fx_M8CompilerFM6stringS1S(&runtime_include_path_0, &v_14, 0), _fx_cleanup); fx_str_t slit_49 = FX_MAKE_STR("-O"); fx_str_t slit_50 = FX_MAKE_STR(" "); fx_str_t slit_51 = FX_MAKE_STR(" "); fx_str_t slit_52 = FX_MAKE_STR(" -I"); { const fx_str_t strs_4[] = { slit_49, v_10, v_11, slit_50, v_12, slit_51, v_13, slit_52, v_14 }; FX_CALL(fx_strjoin(0, 0, 0, strs_4, 9, &cflags_3), _fx_cleanup); } if (FX_STR_LENGTH(libpath_1) != 0) { FX_CALL(_fx_M8CompilerFM6stringS1S(&runtime_lib_path_0, &v_16, 0), _fx_cleanup); FX_CALL(_fx_M8CompilerFM6stringS1S(&libpath_1, &v_17, 0), _fx_cleanup); fx_str_t slit_53 = FX_MAKE_STR("-L"); fx_str_t slit_54 = FX_MAKE_STR("/"); fx_str_t slit_55 = FX_MAKE_STR(" "); { const fx_str_t strs_5[] = { slit_53, v_16, slit_54, v_17, slit_55 }; FX_CALL(fx_strjoin(0, 0, 0, strs_5, 5, &v_15), _fx_cleanup); } } else { fx_str_t slit_56 = FX_MAKE_STR(""); fx_copy_str(&slit_56, &v_15); } FX_CALL(_fx_M8CompilerFM6stringS1S(&clibs_1, &v_18, 0), _fx_cleanup); fx_str_t slit_57 = FX_MAKE_STR("-lm "); { const fx_str_t strs_6[] =
{ v_15, slit_57, v_18 }; FX_CALL(fx_strjoin(0, 0, 0, strs_6, 3, &clibs_2), _fx_cleanup); } fx_str_t slit_58 = FX_MAKE_STR("cc"); fx_str_t slit_59 = FX_MAKE_STR("c++ -std=c++11"); fx_str_t slit_60 = FX_MAKE_STR(".o"); fx_str_t slit_61 = FX_MAKE_STR("-c -o "); fx_str_t slit_62 = FX_MAKE_STR("-o "); fx_str_t slit_63 = FX_MAKE_STR("-l"); _fx_make_Ta9S(&os_0, &slit_58, &slit_59, &slit_60, &slit_61, &slit_62, &slit_63, &cflags_3, &clibs_2, &v_0); } fx_copy_str(&v_0.t1, &c_comp_0); fx_copy_str(&v_0.t2, &cpp_comp_0); fx_copy_str(&v_0.t3, &obj_ext_0); fx_copy_str(&v_0.t4, &obj_opt_0); fx_copy_str(&v_0.t5, &appname_opt_0); fx_copy_str(&v_0.t6, &link_lib_opt_0); fx_copy_str(&v_0.t7, &cflags_4); fx_copy_str(&v_0.t8, &clibs_3); fx_str_t slit_64 = FX_MAKE_STR("FICUS_CFLAGS"); FX_CALL(_fx_M3SysFM6getenvS1S(&slit_64, &custom_cflags_0, 0), _fx_cleanup); fx_copy_str(&_fx_g12Options__opt.cflags, &v_19); if (FX_STR_LENGTH(v_19) == 0) { fx_copy_str(&custom_cflags_0, &custom_cflags_1); } else { fx_copy_str(&_fx_g12Options__opt.cflags, &v_20); fx_str_t slit_65 = FX_MAKE_STR(" "); { const fx_str_t strs_7[] = { v_20, slit_65, custom_cflags_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_7, 3, &custom_cflags_1), _fx_cleanup); } } fx_str_t slit_66 = FX_MAKE_STR(" "); { const fx_str_t strs_8[] = { cflags_4, slit_66, custom_cflags_1 }; FX_CALL(fx_strjoin(0, 0, 0, strs_8, 3, &cflags_5), _fx_cleanup); } FX_CALL(_fx_M8CompilerFM6stringS1S(&cflags_5, &v_21, 0), _fx_cleanup); fx_str_t slit_67 = FX_MAKE_STR("Compiling .c/.cpp files with cflags="); { const fx_str_t strs_9[] = { slit_67, v_21 }; FX_CALL(fx_strjoin(0, 0, 0, strs_9, 2, &v_22), _fx_cleanup); } FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g17Compiler__MsgBlue, &v_22, &v_23, 0), _fx_cleanup); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_23, 0), _fx_cleanup); _fx_make_R14Ast__pragmas_t(false, 0, &v_24); _fx_make_R17C_form__cmodule_t(&_fx_g9Ast__noid, &runtime_impl_0, 0, false, true, false, &v_24, &runtime_pseudo_cmod_0);
/* Prepend a pseudo-module for the runtime (libficus impl) and compile every
 * module in parallel.  Each iteration writes a 5-tuple into results_0[i]:
 * (is_cpp, recompiled, pragma clibs list, ok, object filename).  Per-iteration
 * failures go to the local _fx_catch_* labels and are folded into
 * par_status_0 via FX_CHECK_EXN_PARALLEL; iteration-local state keeps the
 * loop body free of cross-iteration data dependencies. */
FX_CALL(_fx_cons_LR17C_form__cmodule_t(&runtime_pseudo_cmod_0, cmods_0, true, &cmods_1), _fx_cleanup); FX_CALL(_fx_M8CompilerFM5arrayA1R17C_form__cmodule_t1LR17C_form__cmodule_t(cmods_1, &v_25, 0), _fx_cleanup); int par_status_0 = 0; int_ ni_0 = FX_ARR_SIZE(v_25, 0); _fx_R17C_form__cmodule_t* ptr_v_0 = FX_PTR_1D(_fx_R17C_form__cmodule_t, v_25, 0); { const int_ shape_0[] = { ni_0 }; FX_CALL( fx_make_arr(1, shape_0, sizeof(_fx_T5BBLSBS), (fx_free_t)_fx_free_T5BBLSBS, (fx_copy_t)_fx_copy_T5BBLSBS, 0, &results_0), _fx_cleanup); }
#pragma omp parallel for
for (int_ i_0 = 0; i_0 < ni_0; i_0++) { int fx_status = 0; _fx_R17C_form__cmodule_t __pat___0 = {0}; _fx_LT2SR10Ast__loc_t pragma_clibs_0 = 0; _fx_LN15C_form__cstmt_t cmod_ccode_0 = 0; fx_str_t cmod_cname_0 = {0}; fx_str_t output_fname_0 = {0}; _fx_Ta2S v_45 = {0}; fx_str_t comp_0 = {0}; fx_str_t ext_0 = {0}; fx_str_t output_fname_1 = {0}; fx_str_t output_fname_c_0 = {0}; _fx_T3BBS v_46 = {0}; fx_str_t str_new_0 = {0}; fx_str_t str_old_0 = {0}; fx_exn_t exn_0 = {0}; fx_exn_t exn_1 = {0}; fx_str_t v_47 = {0}; fx_str_t v_48 = {0}; fx_str_t v_49 = {0}; fx_str_t status_j_0 = {0}; fx_str_t c_filename_0 = {0}; fx_str_t obj_filename_0 = {0}; _fx_T3BBS v_50 = {0}; fx_str_t v_51 = {0}; fx_str_t v_52 = {0}; fx_str_t v_53 = {0}; fx_str_t v_54 = {0}; fx_str_t v_55 = {0}; fx_str_t cmd_2 = {0}; fx_str_t status_0 = {0}; fx_str_t status_j_1 = {0}; fx_str_t v_56 = {0}; fx_str_t v_57 = {0}; fx_str_t v_58 = {0}; _fx_LS v_59 = 0; _fx_LS clibs_5 = 0; _fx_T5BBLSBS tup_0 = {0}; _fx_copy_R17C_form__cmodule_t(ptr_v_0 + i_0, &__pat___0); _fx_T5BBLSBS* dstptr_0 = FX_PTR_1D(_fx_T5BBLSBS, results_0, i_0); _fx_R14Ast__pragmas_t* i_1 = &__pat___0.cmod_pragmas; FX_COPY_PTR(i_1->pragma_clibs, &pragma_clibs_0); FX_COPY_PTR(__pat___0.cmod_ccode, &cmod_ccode_0); fx_copy_str(&__pat___0.cmod_cname, &cmod_cname_0); FX_CALL(_fx_M8FilenameFM8basenameS1S(&cmod_cname_0, &output_fname_0, 0), _fx_catch_3); bool is_runtime_0 = _fx_F6__eq__B2SS(&cmod_cname_0,
&runtime_impl_0, 0); bool is_cpp_0; if (!is_runtime_0) { if (_fx_g12Options__opt.compile_by_cpp) { is_cpp_0 = true; } else { is_cpp_0 = i_1->pragma_cpp; } } else { is_cpp_0 = false; } if (is_cpp_0) { fx_str_t slit_68 = FX_MAKE_STR(".cpp"); _fx_make_Ta2S(&cpp_comp_0, &slit_68, &v_45); } else { fx_str_t slit_69 = FX_MAKE_STR(".c"); _fx_make_Ta2S(&c_comp_0, &slit_69, &v_45); } fx_copy_str(&v_45.t0, &comp_0); fx_copy_str(&v_45.t1, &ext_0); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&build_dir_0, &output_fname_0, &output_fname_1, 0), _fx_catch_3); { const fx_str_t strs_10[] = { output_fname_1, ext_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_10, 2, &output_fname_c_0), _fx_catch_3); } if (__pat___0.cmod_skip) { fx_str_t slit_70 = FX_MAKE_STR("skipped"); _fx_make_T3BBS(true, false, &slit_70, &v_46); } else if (is_runtime_0) { fx_str_t slit_71 = FX_MAKE_STR(""); _fx_make_T3BBS(true, true, &slit_71, &v_46); } else { FX_CALL(_fx_M4C_ppFM20pprint_top_to_stringS1LN15C_form__cstmt_t(cmod_ccode_0, &str_new_0, 0), _fx_catch_3); if (_fx_g12Options__opt.force_rebuild) { fx_str_t slit_72 = FX_MAKE_STR(""); fx_copy_str(&slit_72, &str_old_0); } else { FX_CALL(_fx_M4FileFM9read_utf8S1S(&output_fname_c_0, &str_old_0, 0), _fx_catch_0); _fx_catch_0: ; if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_0); fx_status = 0; FX_FREE_STR(&str_old_0); int tag_0 = exn_0.tag; bool res_0; if (tag_0 == FX_EXN_IOError) { res_0 = true; } else if (tag_0 == FX_EXN_FileOpenError) { res_0 = true; } else { res_0 = false; } FX_CHECK_EXN(_fx_catch_3); if (res_0) { fx_str_t slit_73 = FX_MAKE_STR(""); fx_copy_str(&slit_73, &str_old_0); goto _fx_endmatch_0; } FX_RETHROW(&exn_0, _fx_catch_3); _fx_endmatch_0: ; FX_CHECK_EXN(_fx_catch_3); } } bool v_60 = _fx_F6__eq__B2SS(&str_new_0, &str_old_0, 0); if (v_60) { fx_str_t slit_74 = FX_MAKE_STR("skipped"); _fx_make_T3BBS(ok_1, false, &slit_74, &v_46); } else { bool well_written_0; FX_CALL(_fx_M4FileFM10write_utf8v2SS(&output_fname_c_0, &str_new_0, 0), _fx_catch_1);
well_written_0 = true; _fx_catch_1: ; if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_1); fx_status = 0; int tag_1 = exn_1.tag; bool res_1; if (tag_1 == FX_EXN_IOError) { res_1 = true; } else if (tag_1 == FX_EXN_FileOpenError) { res_1 = true; } else { res_1 = false; } FX_CHECK_EXN(_fx_catch_3); if (res_1) { well_written_0 = false; goto _fx_endmatch_1; } FX_RETHROW(&exn_1, _fx_catch_3); _fx_endmatch_1: ; FX_CHECK_EXN(_fx_catch_3); } if (well_written_0) { fx_str_t slit_75 = FX_MAKE_STR(""); fx_copy_str(&slit_75, &v_47); } else { FX_CALL(_fx_M8CompilerFM6stringS1S(&output_fname_c_0, &v_48, 0), _fx_catch_3); fx_str_t slit_76 = FX_MAKE_STR("failed to write "); { const fx_str_t strs_11[] = { slit_76, v_48 }; FX_CALL(fx_strjoin(0, 0, 0, strs_11, 2, &v_49), _fx_catch_3); } FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g16Compiler__MsgRed, &v_49, &v_47, 0), _fx_catch_3); } _fx_make_T3BBS(well_written_0, well_written_0, &v_47, &v_46); } } bool ok_j_0 = v_46.t0; bool reprocess_0 = v_46.t1; fx_copy_str(&v_46.t2, &status_j_0); if (is_runtime_0) { fx_str_t slit_77 = FX_MAKE_STR(".c"); { const fx_str_t strs_12[] = { runtime_impl_0, slit_77 }; FX_CALL(fx_strjoin(0, 0, 0, strs_12, 2, &c_filename_0), _fx_catch_3); } } else { fx_copy_str(&output_fname_c_0, &c_filename_0); } { const fx_str_t strs_13[] = { output_fname_1, obj_ext_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_13, 2, &obj_filename_0), _fx_catch_3); } bool v_61; if (ok_j_0) { if (reprocess_0) { v_61 = true; } else { bool v_62; FX_CALL(_fx_M8FilenameFM6existsB1S(&obj_filename_0, &v_62, 0), _fx_catch_3); v_61 = !v_62; } } else { v_61 = false; } if (v_61) { FX_CALL(_fx_M8CompilerFM6stringS1S(&comp_0, &v_51, 0), _fx_catch_3); FX_CALL(_fx_M8CompilerFM6stringS1S(&cflags_5, &v_52, 0), _fx_catch_3); FX_CALL(_fx_M8CompilerFM6stringS1S(&obj_opt_0, &v_53, 0), _fx_catch_3); FX_CALL(_fx_M8CompilerFM6stringS1S(&obj_filename_0, &v_54, 0), _fx_catch_3); FX_CALL(_fx_M8CompilerFM6stringS1S(&c_filename_0, &v_55, 0),
_fx_catch_3); fx_str_t slit_78 = FX_MAKE_STR(" "); fx_str_t slit_79 = FX_MAKE_STR(" "); fx_str_t slit_80 = FX_MAKE_STR(" "); { const fx_str_t strs_14[] = { v_51, slit_78, v_52, slit_79, v_53, v_54, slit_80, v_55 }; FX_CALL(fx_strjoin(0, 0, 0, strs_14, 8, &cmd_2), _fx_catch_3); } int_ v_63; FX_CALL(_fx_M3SysFM7commandi1S(&cmd_2, &v_63, 0), _fx_catch_3); bool result_0 = v_63 == 0; if (result_0) { fx_str_t slit_81 = FX_MAKE_STR("ok"); FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g18Compiler__MsgGreen, &slit_81, &status_0, 0), _fx_catch_3); } else { fx_str_t slit_82 = FX_MAKE_STR("fail"); FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g16Compiler__MsgRed, &slit_82, &status_0, 0), _fx_catch_3); } _fx_make_T3BBS(result_0, true, &status_0, &v_50); } else { _fx_make_T3BBS(ok_j_0, false, &status_j_0, &v_50); } bool ok_j_1 = v_50.t0; bool recompiled_0 = v_50.t1; fx_copy_str(&v_50.t2, &status_j_1); FX_CALL(_fx_M8CompilerFM6stringS1S(&c_filename_0, &v_56, 0), _fx_catch_3); FX_CALL(_fx_M8CompilerFM6stringS1S(&status_j_1, &v_57, 0), _fx_catch_3); fx_str_t slit_83 = FX_MAKE_STR("CC "); fx_str_t slit_84 = FX_MAKE_STR(": "); { const fx_str_t strs_15[] = { slit_83, v_56, slit_84, v_57 }; FX_CALL(fx_strjoin(0, 0, 0, strs_15, 4, &v_58), _fx_catch_3); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_58, 0), _fx_catch_3); _fx_LS lstend_0 = 0; _fx_LT2SR10Ast__loc_t lst_0 = pragma_clibs_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_T2SR10Ast__loc_t* __pat___1 = &lst_0->hd; _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(&__pat___1->t0, 0, false, &node_0), _fx_catch_2); FX_LIST_APPEND(v_59, lstend_0, node_0); _fx_catch_2: ; FX_CHECK_EXN(_fx_catch_3); } FX_CALL(_fx_M8CompilerFM3revLS1LS(v_59, &clibs_5, 0), _fx_catch_3); _fx_make_T5BBLSBS(is_cpp_0, recompiled_0, clibs_5, ok_j_1, &obj_filename_0, &tup_0); _fx_copy_T5BBLSBS(&tup_0, dstptr_0); _fx_catch_3: ; _fx_free_T5BBLSBS(&tup_0); if (clibs_5) { _fx_free_LS(&clibs_5); } if (v_59) { _fx_free_LS(&v_59); } FX_FREE_STR(&v_58);
FX_FREE_STR(&v_57); FX_FREE_STR(&v_56); FX_FREE_STR(&status_j_1); FX_FREE_STR(&status_0); FX_FREE_STR(&cmd_2); FX_FREE_STR(&v_55); FX_FREE_STR(&v_54); FX_FREE_STR(&v_53); FX_FREE_STR(&v_52); FX_FREE_STR(&v_51); _fx_free_T3BBS(&v_50); FX_FREE_STR(&obj_filename_0); FX_FREE_STR(&c_filename_0); FX_FREE_STR(&status_j_0); FX_FREE_STR(&v_49); FX_FREE_STR(&v_48); FX_FREE_STR(&v_47); fx_free_exn(&exn_1); fx_free_exn(&exn_0); FX_FREE_STR(&str_old_0); FX_FREE_STR(&str_new_0); _fx_free_T3BBS(&v_46); FX_FREE_STR(&output_fname_c_0); FX_FREE_STR(&output_fname_1); FX_FREE_STR(&ext_0); FX_FREE_STR(&comp_0); _fx_free_Ta2S(&v_45); FX_FREE_STR(&output_fname_0); FX_FREE_STR(&cmod_cname_0); if (cmod_ccode_0) { _fx_free_LN15C_form__cstmt_t(&cmod_ccode_0); } if (pragma_clibs_0) { _fx_free_LT2SR10Ast__loc_t(&pragma_clibs_0); } _fx_free_R17C_form__cmodule_t(&__pat___0); FX_CHECK_EXN_PARALLEL(fx_status, par_status_0); } FX_UPDATE_EXN_PARALLEL(par_status_0, _fx_cleanup); _fx_make_T5BBLSBLS(false, false, 0, ok_1, 0, &__fold_result___0); int_ ni_1 = FX_ARR_SIZE(results_0, 0); _fx_T5BBLSBS* ptr_results_0 = FX_PTR_1D(_fx_T5BBLSBS, results_0, 0); for (int_ i_2 = 0; i_2 < ni_1; i_2++) { _fx_T5BBLSBS __pat___2 = {0}; _fx_LS clibs_j_0 = 0; fx_str_t obj_0 = {0}; _fx_T5BBLSBLS v_64 = {0}; _fx_LS all_clibs_1 = 0; _fx_LS objs_1 = 0; _fx_LS v_65 = 0; _fx_T5BBLSBLS v_66 = {0}; _fx_copy_T5BBLSBS(ptr_results_0 + i_2, &__pat___2); FX_COPY_PTR(__pat___2.t2, &clibs_j_0); fx_copy_str(&__pat___2.t4, &obj_0); _fx_copy_T5BBLSBLS(&__fold_result___0, &v_64); FX_COPY_PTR(v_64.t2, &all_clibs_1); FX_COPY_PTR(v_64.t4, &objs_1); FX_CALL(_fx_M8CompilerFM7__add__LS2LSLS(clibs_j_0, all_clibs_1, &v_65, 0), _fx_catch_4); FX_CALL(_fx_cons_LS(&obj_0, objs_1, false, &objs_1), _fx_catch_4); _fx_make_T5BBLSBLS(v_64.t0 || __pat___2.t0, v_64.t1 || __pat___2.t1, v_65, v_64.t3 && __pat___2.t3, objs_1, &v_66); _fx_free_T5BBLSBLS(&__fold_result___0); _fx_copy_T5BBLSBLS(&v_66, &__fold_result___0); _fx_catch_4: ; _fx_free_T5BBLSBLS(&v_66);
/* per-iteration cleanup of fold temporaries; the fold accumulates
 * (any_cpp ||, any_recompiled ||, clibs ++, ok &&, obj list cons). */
if (v_65) { _fx_free_LS(&v_65); } if (objs_1) { _fx_free_LS(&objs_1); } if (all_clibs_1) { _fx_free_LS(&all_clibs_1); } _fx_free_T5BBLSBLS(&v_64); FX_FREE_STR(&obj_0); if (clibs_j_0) { _fx_free_LS(&clibs_j_0); } _fx_free_T5BBLSBS(&__pat___2); FX_CHECK_EXN(_fx_cleanup); } _fx_copy_T5BBLSBLS(&__fold_result___0, &v_26); bool any_cpp_0 = v_26.t0; bool any_recompiled_0 = v_26.t1; FX_COPY_PTR(v_26.t2, &all_clibs_0); bool ok_2 = v_26.t3; FX_COPY_PTR(v_26.t4, &objs_0); bool v_67; bool t_0; if (ok_2) { t_0 = !any_recompiled_0; } else { t_0 = false; } if (t_0) { fx_copy_str(&_fx_g12Options__opt.app_filename, &v_27); FX_CALL(_fx_M8FilenameFM6existsB1S(&v_27, &v_67, 0), _fx_cleanup); } else { v_67 = false; } if (v_67) { fx_copy_str(&_fx_g12Options__opt.app_filename, &v_28); FX_CALL(_fx_M8CompilerFM6stringS1S(&v_28, &v_29, 0), _fx_cleanup); fx_str_t slit_85 = FX_MAKE_STR(" is up-to-date\n"); { const fx_str_t strs_16[] = { v_29, slit_85 }; FX_CALL(fx_strjoin(0, 0, 0, strs_16, 2, &v_30), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_30, 0), _fx_cleanup); *fx_result = ok_2; } else if (!ok_2) { *fx_result = ok_2; } else { fx_str_t slit_86 = FX_MAKE_STR("FICUS_LINK_LIBRARIES"); FX_CALL(_fx_M3SysFM6getenvS1S(&slit_86, &custom_clibs_0, 0), _fx_cleanup); fx_copy_str(&_fx_g12Options__opt.clibs, &v_31); if (FX_STR_LENGTH(v_31) == 0) { fx_copy_str(&custom_clibs_0, &custom_clibs_1); } else { fx_copy_str(&_fx_g12Options__opt.clibs, &v_32); fx_str_t slit_87 = FX_MAKE_STR(" "); { const fx_str_t strs_17[] = { custom_clibs_0, slit_87, v_32 }; FX_CALL(fx_strjoin(0, 0, 0, strs_17, 3, &custom_clibs_1), _fx_cleanup); } } if (all_clibs_0 == 0) { fx_copy_str(&custom_clibs_1, &custom_clibs_2); } else { FX_CALL(_fx_M8CompilerFM3revLS1LS(all_clibs_0, &v_33, 0), _fx_cleanup); _fx_LS lstend_1 = 0; _fx_LS lst_1 = v_33; for (; lst_1; lst_1 = lst_1->tl) { fx_str_t concat_str_0 = {0}; fx_str_t* l_0 = &lst_1->hd; { const fx_str_t strs_18[] = { link_lib_opt_0, *l_0 }; FX_CALL(fx_strjoin(0, 0, 0,
strs_18, 2, &concat_str_0), _fx_catch_5); } _fx_LS node_1 = 0; FX_CALL(_fx_cons_LS(&concat_str_0, 0, false, &node_1), _fx_catch_5); FX_LIST_APPEND(v_34, lstend_1, node_1); _fx_catch_5: ; FX_FREE_STR(&concat_str_0); FX_CHECK_EXN(_fx_cleanup); } fx_str_t slit_88 = FX_MAKE_STR(" "); FX_CALL(_fx_M8CompilerFM4joinS2SLS(&slit_88, v_34, &v_35, 0), _fx_cleanup); fx_str_t slit_89 = FX_MAKE_STR(" "); { const fx_str_t strs_19[] = { custom_clibs_1, slit_89, v_35 }; FX_CALL(fx_strjoin(0, 0, 0, strs_19, 3, &custom_clibs_2), _fx_cleanup); } } fx_str_t slit_90 = FX_MAKE_STR(" "); { const fx_str_t strs_20[] = { clibs_3, slit_90, custom_clibs_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_20, 3, &clibs_4), _fx_cleanup); } FX_CALL(_fx_M8CompilerFM6stringS1S(&clibs_4, &v_36, 0), _fx_cleanup); fx_str_t slit_91 = FX_MAKE_STR("Linking the app with flags="); { const fx_str_t strs_21[] = { slit_91, v_36 }; FX_CALL(fx_strjoin(0, 0, 0, strs_21, 2, &v_37), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_37, 0), _fx_cleanup); if (any_cpp_0) { fx_copy_str(&cpp_comp_0, &v_38); } else { fx_copy_str(&c_comp_0, &v_38); } fx_copy_str(&_fx_g12Options__opt.app_filename, &v_39); fx_str_t slit_92 = FX_MAKE_STR(" "); { const fx_str_t strs_22[] = { v_38, slit_92, appname_opt_0, v_39 }; FX_CALL(fx_strjoin(0, 0, 0, strs_22, 4, &cmd_0), _fx_cleanup); } fx_str_t slit_93 = FX_MAKE_STR(" "); FX_CALL(_fx_M8CompilerFM4joinS2SLS(&slit_93, objs_0, &v_40, 0), _fx_cleanup); fx_str_t slit_94 = FX_MAKE_STR(" "); fx_str_t slit_95 = FX_MAKE_STR(" "); { const fx_str_t strs_23[] = { cmd_0, slit_94, v_40, slit_95, clibs_4 }; FX_CALL(fx_strjoin(0, 0, 0, strs_23, 5, &cmd_1), _fx_cleanup); } int_ v_68; FX_CALL(_fx_M3SysFM7commandi1S(&cmd_1, &v_68, 0), _fx_cleanup); *fx_result = v_68 == 0; } _fx_cleanup: ; FX_FREE_STR(&osinfo_0); FX_FREE_STR(&runtime_include_path_0); FX_FREE_STR(&runtime_lib_path_0); FX_FREE_STR(&runtime_impl_0); FX_FREE_STR(&build_root_dir_0); FX_FREE_STR(&build_dir_0); _fx_free_Ta9S(&v_0);
FX_FREE_STR(&opt_flags_0); FX_FREE_STR(&v_1); FX_FREE_STR(&v_2); FX_FREE_STR(&v_3); FX_FREE_STR(&v_4); FX_FREE_STR(&cflags_0); _fx_free_Ta4S(&v_5); _fx_free_Ta2S(&v_6); FX_FREE_STR(&omp_cflags_0); FX_FREE_STR(&omp_lib_0); _fx_free_Ta3S(&v_7); FX_FREE_STR(&v_8); FX_FREE_STR(&v_9); FX_FREE_STR(&libpath_0); FX_FREE_STR(&cflags_1); FX_FREE_STR(&clibs_0); FX_FREE_STR(&omp_flags_0); FX_FREE_STR(&os_0); FX_FREE_STR(&libpath_1); FX_FREE_STR(&cflags_2); FX_FREE_STR(&clibs_1); FX_FREE_STR(&ggdb_opt_0); FX_FREE_STR(&v_10); FX_FREE_STR(&v_11); FX_FREE_STR(&v_12); FX_FREE_STR(&v_13); FX_FREE_STR(&v_14); FX_FREE_STR(&cflags_3); FX_FREE_STR(&v_15); FX_FREE_STR(&v_16); FX_FREE_STR(&v_17); FX_FREE_STR(&v_18); FX_FREE_STR(&clibs_2); FX_FREE_STR(&c_comp_0); FX_FREE_STR(&cpp_comp_0); FX_FREE_STR(&obj_ext_0); FX_FREE_STR(&obj_opt_0); FX_FREE_STR(&appname_opt_0); FX_FREE_STR(&link_lib_opt_0); FX_FREE_STR(&cflags_4); FX_FREE_STR(&clibs_3); FX_FREE_STR(&custom_cflags_0); FX_FREE_STR(&v_19); FX_FREE_STR(&custom_cflags_1); FX_FREE_STR(&v_20); FX_FREE_STR(&cflags_5); FX_FREE_STR(&v_21); FX_FREE_STR(&v_22); FX_FREE_STR(&v_23); _fx_free_R14Ast__pragmas_t(&v_24); _fx_free_R17C_form__cmodule_t(&runtime_pseudo_cmod_0); if (cmods_1) { _fx_free_LR17C_form__cmodule_t(&cmods_1); } FX_FREE_ARR(&v_25); FX_FREE_ARR(&results_0); _fx_free_T5BBLSBLS(&__fold_result___0); _fx_free_T5BBLSBLS(&v_26); if (all_clibs_0) { _fx_free_LS(&all_clibs_0); } if (objs_0) { _fx_free_LS(&objs_0); } FX_FREE_STR(&v_27); FX_FREE_STR(&v_28); FX_FREE_STR(&v_29); FX_FREE_STR(&v_30); FX_FREE_STR(&custom_clibs_0); FX_FREE_STR(&v_31); FX_FREE_STR(&custom_clibs_1); FX_FREE_STR(&v_32); FX_FREE_STR(&custom_clibs_2); if (v_33) { _fx_free_LS(&v_33); } if (v_34) { _fx_free_LS(&v_34); } FX_FREE_STR(&v_35); FX_FREE_STR(&clibs_4); FX_FREE_STR(&v_36); FX_FREE_STR(&v_37); FX_FREE_STR(&v_38); FX_FREE_STR(&v_39); FX_FREE_STR(&cmd_0); FX_FREE_STR(&v_40); FX_FREE_STR(&cmd_1); return fx_status; }
/* Start of the next generated function (process_all); its body continues
 * past this chunk. */
FX_EXTERN_C int
_fx_M8CompilerFM11process_allB1S(fx_str_t* fname0_0, bool* fx_result, void* fx_fv) { fx_exn_t exn_0 = {0}; _fx_LE __fold_result___0 = 0; _fx_LE v_0 = 0; fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; int fx_status = 0; FX_CALL(_fx_M3AstFM8init_allv0(0), _fx_cleanup); _fx_T2SLS v_3 = {0}; fx_str_t ficus_root_0 = {0}; _fx_LS ficus_path_0 = 0; fx_str_t v_4 = {0}; fx_str_t v_5 = {0}; fx_str_t v_6 = {0}; fx_exn_t v_7 = {0}; _fx_LT2iLi graph_0 = 0; _fx_Li v_8 = 0; _fx_Li v_9 = 0; _fx_Li v_10 = 0; _fx_LS v_11 = 0; fx_str_t modules_used_0 = {0}; fx_str_t parsing_complete_0 = {0}; fx_str_t v_12 = {0}; fx_str_t v_13 = {0}; _fx_T2LR17K_form__kmodule_tB v_14 = {0}; _fx_LR17K_form__kmodule_t kmods_0 = 0; _fx_LR17K_form__kmodule_t kmods_1 = 0; fx_str_t v_15 = {0}; _fx_T2LR17K_form__kmodule_tB v_16 = {0}; fx_str_t v_17 = {0}; _fx_LR17K_form__kmodule_t kmods_2 = 0; fx_str_t v_18 = {0}; _fx_T2LR17C_form__cmodule_tB v_19 = {0}; fx_str_t v_20 = {0}; _fx_LR17C_form__cmodule_t cmods_0 = 0; fx_str_t v_21 = {0}; _fx_LR17C_form__cmodule_t cmods_1 = 0; _fx_LR17C_form__cmodule_t cmods_2 = 0; _fx_LR17C_form__cmodule_t cmods_3 = 0; fx_str_t appname_0 = {0}; fx_str_t v_22 = {0}; fx_str_t appname_1 = {0}; _fx_LS v_23 = 0; fx_str_t cmd_0 = {0}; _fx_LE __fold_result___1 = 0; _fx_LE v_24 = 0; fx_str_t v_25 = {0}; fx_str_t v_26 = {0}; FX_CALL(_fx_M8CompilerFM15find_ficus_dirsT2SLS0(&v_3, 0), _fx_catch_8); fx_copy_str(&v_3.t0, &ficus_root_0); FX_COPY_PTR(v_3.t1, &ficus_path_0); if (FX_STR_LENGTH(ficus_root_0) == 0) { FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_major__, &v_4, 0), _fx_catch_8); FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_minor__, &v_5, 0), _fx_catch_8); fx_str_t slit_0 = FX_MAKE_STR("Ficus root directory is not found.\n" U"Please, add the directory \'lib\' containing Builtins.fx to\n" U"\'FICUS_PATH\' environment variable or make sure that either\n" U"1. \'ficus\' executable is put in a directory <ficus_root>/bin\n" U"and there are <ficus_root>/runtime and <ficus_root>/lib.\n" U"2. 
or \'ficus\' executable is in (/usr|/usr/local|/opt|...)/bin and\n" U" there are (/usr|...)/lib/ficus-"); fx_str_t slit_1 = FX_MAKE_STR("."); fx_str_t slit_2 = FX_MAKE_STR("/{runtime, lib}"); { const fx_str_t strs_0[] = { slit_0, v_4, slit_1, v_5, slit_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 5, &v_6), _fx_catch_8); } FX_CALL(_fx_F9make_FailE1S(&v_6, &v_7), _fx_catch_8); FX_THROW(&v_7, true, _fx_catch_8); } bool ok_0; FX_CALL(_fx_M8CompilerFM9parse_allB2SLS(fname0_0, ficus_path_0, &ok_0, 0), _fx_catch_8); if (!ok_0) { FX_THROW(&_fx_E30Compiler__CumulativeParseErrorv, false, _fx_catch_8); } _fx_LT2iLi lstend_0 = 0; int_ ni_0 = FX_ARR_SIZE(_fx_g16Ast__all_modules, 0); _fx_N16Ast__defmodule_t* ptr_all_modules_0 = FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, 0); for (int_ i_0 = 0; i_0 < ni_0; i_0++) { _fx_N16Ast__defmodule_t minfo_0 = 0; _fx_Li v_27 = 0; _fx_T2iLi tup_0 = {0}; FX_COPY_PTR(ptr_all_modules_0[i_0], &minfo_0); FX_COPY_PTR(minfo_0->u.defmodule_t.t5, &v_27); _fx_make_T2iLi(minfo_0->u.defmodule_t.t2, v_27, &tup_0); _fx_LT2iLi node_0 = 0; FX_CALL(_fx_cons_LT2iLi(&tup_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(graph_0, lstend_0, node_0); _fx_catch_0: ; _fx_free_T2iLi(&tup_0); FX_FREE_LIST_SIMPLE(&v_27); if (minfo_0) { _fx_free_N16Ast__defmodule_t(&minfo_0); } FX_CHECK_EXN(_fx_catch_8); } FX_CALL(_fx_M8CompilerFM8toposortLi1LT2iLi(graph_0, &v_8, 0), _fx_catch_8); if (v_8 != 0) { FX_COPY_PTR(v_8->tl, &v_9); } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_8); } FX_CHECK_EXN(_fx_catch_8); if (v_9 != 0) { FX_COPY_PTR(v_9->tl, &v_10); } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_8); } FX_CHECK_EXN(_fx_catch_8); FX_FREE_LIST_SIMPLE(&_fx_g23Ast__all_modules_sorted); FX_COPY_PTR(v_10, &_fx_g23Ast__all_modules_sorted); if (_fx_g12Options__opt.print_ast0) { _fx_Li lst_0 = _fx_g23Ast__all_modules_sorted; for (; lst_0; lst_0 = lst_0->tl) { _fx_N16Ast__defmodule_t minfo_1 = 0; int_ m_0 = lst_0->hd; 
FX_CALL(_fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(m_0, &minfo_1, 0), _fx_catch_1); FX_CALL(_fx_M6Ast_ppFM10pprint_modv1N16Ast__defmodule_t(minfo_1, 0), _fx_catch_1); _fx_catch_1: ; if (minfo_1) { _fx_free_N16Ast__defmodule_t(&minfo_1); } FX_CHECK_EXN(_fx_catch_8); } } _fx_LS lstend_1 = 0; _fx_Li lst_1 = _fx_g23Ast__all_modules_sorted; for (; lst_1; lst_1 = lst_1->tl) { fx_str_t res_0 = {0}; int_ m_idx_0 = lst_1->hd; _fx_R9Ast__id_t v_28; FX_CALL(_fx_M3AstFM15get_module_nameRM4id_t1i(m_idx_0, &v_28, 0), _fx_catch_2); FX_CALL(_fx_M3AstFM2ppS1RM4id_t(&v_28, &res_0, 0), _fx_catch_2); _fx_LS node_1 = 0; FX_CALL(_fx_cons_LS(&res_0, 0, false, &node_1), _fx_catch_2); FX_LIST_APPEND(v_11, lstend_1, node_1); _fx_catch_2: ; FX_FREE_STR(&res_0); FX_CHECK_EXN(_fx_catch_8); } fx_str_t slit_3 = FX_MAKE_STR(", "); FX_CALL(_fx_F4joinS2SLS(&slit_3, v_11, &modules_used_0, 0), _fx_catch_8); if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_4 = FX_MAKE_STR("Parsing complete"); fx_copy_str(&slit_4, &parsing_complete_0); } else { fx_str_t slit_5 = FX_MAKE_STR("Parsing complete"); fx_copy_str(&slit_5, &parsing_complete_0); } fx_str_t slit_6 = FX_MAKE_STR(". 
Modules used: "); { const fx_str_t strs_1[] = { parsing_complete_0, slit_6, modules_used_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 3, &v_12), _fx_catch_8); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_12, 0), _fx_catch_8); _fx_free_LE(&_fx_g21Ast__all_compile_errs); _fx_g21Ast__all_compile_errs = 0; _fx_Li lst_2 = _fx_g23Ast__all_modules_sorted; for (; lst_2; lst_2 = lst_2->tl) { int_ m_1 = lst_2->hd; FX_CALL(_fx_M13Ast_typecheckFM9check_modv1i(m_1, 0), _fx_catch_3); _fx_catch_3: ; FX_CHECK_EXN(_fx_catch_8); } bool ok_1 = _fx_g21Ast__all_compile_errs == 0; if (ok_1) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_7 = FX_MAKE_STR("Type checking complete"); fx_copy_str(&slit_7, &v_13); } else { fx_str_t slit_8 = FX_MAKE_STR("Type checking complete"); fx_copy_str(&slit_8, &v_13); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_13, 0), _fx_catch_8); if (_fx_g12Options__opt.print_ast) { _fx_Li lst_3 = _fx_g23Ast__all_modules_sorted; for (; lst_3; lst_3 = lst_3->tl) { _fx_N16Ast__defmodule_t minfo_2 = 0; int_ m_2 = lst_3->hd; FX_CALL(_fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(m_2, &minfo_2, 0), _fx_catch_4); FX_CALL(_fx_M6Ast_ppFM10pprint_modv1N16Ast__defmodule_t(minfo_2, 0), _fx_catch_4); _fx_catch_4: ; if (minfo_2) { _fx_free_N16Ast__defmodule_t(&minfo_2); } FX_CHECK_EXN(_fx_catch_8); } } } if (ok_1) { _fx_free_LE(&_fx_g21Ast__all_compile_errs); _fx_g21Ast__all_compile_errs = 0; FX_CALL(_fx_M6K_formFM13init_all_idksv0(0), _fx_catch_8); FX_CALL(_fx_M11K_normalizeFM21normalize_all_modulesLR17K_form__kmodule_t1Li(_fx_g23Ast__all_modules_sorted, &kmods_0, 0), _fx_catch_8); _fx_make_T2LR17K_form__kmodule_tB(kmods_0, _fx_g21Ast__all_compile_errs == 0, &v_14); } else { _fx_make_T2LR17K_form__kmodule_tB(0, false, &v_14); } FX_COPY_PTR(v_14.t0, &kmods_1); bool ok_2 = v_14.t1; if (ok_2) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_9 = FX_MAKE_STR("K-normalization complete"); fx_copy_str(&slit_9, &v_15); } else { fx_str_t slit_10 = FX_MAKE_STR("K-normalization complete"); 
fx_copy_str(&slit_10, &v_15); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_15, 0), _fx_catch_8); if (_fx_g12Options__opt.print_k0) { FX_CALL(_fx_M4K_ppFM8pp_kmodsv1LR17K_form__kmodule_t(kmods_1, 0), _fx_catch_8); } } if (ok_2) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_11 = FX_MAKE_STR("K-form optimization started"); fx_copy_str(&slit_11, &v_17); } else { fx_str_t slit_12 = FX_MAKE_STR("K-form optimization started"); fx_copy_str(&slit_12, &v_17); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_17, 0), _fx_catch_8); FX_CALL(_fx_M8CompilerFM14k_optimize_allT2LR17K_form__kmodule_tB1LR17K_form__kmodule_t(kmods_1, &v_16, 0), _fx_catch_8); } else { _fx_make_T2LR17K_form__kmodule_tB(0, false, &v_16); } FX_COPY_PTR(v_16.t0, &kmods_2); bool ok_3 = v_16.t1; if (ok_3) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_13 = FX_MAKE_STR("K-form optimization complete"); fx_copy_str(&slit_13, &v_18); } else { fx_str_t slit_14 = FX_MAKE_STR("K-form optimization complete"); fx_copy_str(&slit_14, &v_18); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_18, 0), _fx_catch_8); if (_fx_g12Options__opt.print_k) { FX_CALL(_fx_M4K_ppFM8pp_kmodsv1LR17K_form__kmodule_t(kmods_2, 0), _fx_catch_8); } } bool ok_4; if (!_fx_g12Options__opt.gen_c) { ok_4 = ok_3; } else { if (ok_3) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_15 = FX_MAKE_STR("Generating C code"); fx_copy_str(&slit_15, &v_20); } else { fx_str_t slit_16 = FX_MAKE_STR("Generating C code"); fx_copy_str(&slit_16, &v_20); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_20, 0), _fx_catch_8); _fx_free_LE(&_fx_g21Ast__all_compile_errs); _fx_g21Ast__all_compile_errs = 0; FX_CALL(_fx_M6C_formFM13init_all_idcsv0(0), _fx_catch_8); FX_CALL(_fx_M9C_gen_stdFM14init_std_namesv0(0), _fx_catch_8); FX_CALL(_fx_M10C_gen_codeFM13gen_ccode_allLR17C_form__cmodule_t1LR17K_form__kmodule_t(kmods_2, &cmods_0, 0), _fx_catch_8); if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_17 = FX_MAKE_STR("C code generated"); fx_copy_str(&slit_17, &v_21); } else { fx_str_t slit_18 = 
FX_MAKE_STR("C code generated"); fx_copy_str(&slit_18, &v_21); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_21, 0), _fx_catch_8); FX_CALL(_fx_M20C_post_rename_localsFM13rename_localsLR17C_form__cmodule_t1LR17C_form__cmodule_t(cmods_0, &cmods_1, 0), _fx_catch_8); _fx_LR17C_form__cmodule_t lstend_2 = 0; _fx_LR17C_form__cmodule_t lst_4 = cmods_1; for (; lst_4; lst_4 = lst_4->tl) { _fx_R17C_form__cmodule_t t_0 = {0}; _fx_R17C_form__cmodule_t* cmod_0 = &lst_4->hd; bool is_cpp_0; if (_fx_g12Options__opt.compile_by_cpp) { is_cpp_0 = true; } else { is_cpp_0 = cmod_0->cmod_pragmas.pragma_cpp; } if (is_cpp_0) { FX_CALL(_fx_M19C_post_adjust_declsFM12adjust_declsR17C_form__cmodule_t1R17C_form__cmodule_t(cmod_0, &t_0, 0), _fx_catch_5); } else { _fx_copy_R17C_form__cmodule_t(cmod_0, &t_0); } _fx_LR17C_form__cmodule_t node_2 = 0; FX_CALL(_fx_cons_LR17C_form__cmodule_t(&t_0, 0, false, &node_2), _fx_catch_5); FX_LIST_APPEND(cmods_2, lstend_2, node_2); _fx_catch_5: ; _fx_free_R17C_form__cmodule_t(&t_0); FX_CHECK_EXN(_fx_catch_8); } fx_str_t slit_19 = FX_MAKE_STR("\tConversion to C-form complete"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_19, 0), _fx_catch_8); _fx_make_T2LR17C_form__cmodule_tB(cmods_2, _fx_g21Ast__all_compile_errs == 0, &v_19); } else { _fx_make_T2LR17C_form__cmodule_tB(0, false, &v_19); } FX_COPY_PTR(v_19.t0, &cmods_3); bool ok_5 = v_19.t1; bool t_1; if (ok_5) { if (_fx_g12Options__opt.make_app) { t_1 = true; } else { t_1 = _fx_g12Options__opt.run_app; } } else { t_1 = false; } bool ok_6; if (t_1) { FX_CALL(_fx_M8CompilerFM6run_ccB2LR17C_form__cmodule_tS(cmods_3, &ficus_root_0, &ok_6, 0), _fx_catch_8); } else { ok_6 = ok_5; } bool t_2; if (ok_6) { t_2 = _fx_g12Options__opt.run_app; } else { t_2 = false; } if (t_2) { fx_copy_str(&_fx_g12Options__opt.app_filename, &appname_0); FX_CALL(_fx_M8FilenameFM6getcwdS0(&v_22, 0), _fx_catch_8); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_22, &appname_0, &appname_1, 0), _fx_catch_8); FX_COPY_PTR(_fx_g12Options__opt.app_args, &v_23); 
FX_CALL(_fx_cons_LS(&appname_1, v_23, false, &v_23), _fx_catch_8); fx_str_t slit_20 = FX_MAKE_STR(" "); FX_CALL(_fx_F4joinS2SLS(&slit_20, v_23, &cmd_0, 0), _fx_catch_8); int_ v_29; FX_CALL(_fx_M3SysFM7commandi1S(&cmd_0, &v_29, 0), _fx_catch_8); ok_4 = v_29 == 0; } else { ok_4 = ok_6; } } if (!ok_4) { int_ nerrs_0 = _fx_M8CompilerFM6lengthi1LE(_fx_g21Ast__all_compile_errs, 0); if (nerrs_0 != 0) { _fx_LE lst_5 = _fx_g21Ast__all_compile_errs; for (; lst_5; lst_5 = lst_5->tl) { _fx_LE r_0 = 0; fx_exn_t* a_0 = &lst_5->hd; FX_COPY_PTR(__fold_result___1, &r_0); FX_CALL(_fx_cons_LE(a_0, r_0, false, &r_0), _fx_catch_6); _fx_free_LE(&__fold_result___1); FX_COPY_PTR(r_0, &__fold_result___1); _fx_catch_6: ; if (r_0) { _fx_free_LE(&r_0); } FX_CHECK_EXN(_fx_catch_8); } FX_COPY_PTR(__fold_result___1, &v_24); _fx_LE lst_6 = v_24; for (; lst_6; lst_6 = lst_6->tl) { fx_exn_t* x_0 = &lst_6->hd; FX_CALL(_fx_M3AstFM17print_compile_errv1E(x_0, 0), _fx_catch_7); _fx_catch_7: ; FX_CHECK_EXN(_fx_catch_8); } FX_CALL(_fx_F6stringS1i(nerrs_0, &v_25, 0), _fx_catch_8); fx_str_t slit_21 = FX_MAKE_STR("\n"); fx_str_t slit_22 = FX_MAKE_STR(" errors occured during type checking."); { const fx_str_t strs_2[] = { slit_21, v_25, slit_22 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 3, &v_26), _fx_catch_8); } _fx_F12print_stringv1S(&v_26, 0); fx_str_t slit_23 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_23, 0); } } *fx_result = ok_4; _fx_catch_8: ; _fx_free_T2SLS(&v_3); FX_FREE_STR(&ficus_root_0); if (ficus_path_0) { _fx_free_LS(&ficus_path_0); } FX_FREE_STR(&v_4); FX_FREE_STR(&v_5); FX_FREE_STR(&v_6); fx_free_exn(&v_7); if (graph_0) { _fx_free_LT2iLi(&graph_0); } FX_FREE_LIST_SIMPLE(&v_8); FX_FREE_LIST_SIMPLE(&v_9); FX_FREE_LIST_SIMPLE(&v_10); if (v_11) { _fx_free_LS(&v_11); } FX_FREE_STR(&modules_used_0); FX_FREE_STR(&parsing_complete_0); FX_FREE_STR(&v_12); FX_FREE_STR(&v_13); _fx_free_T2LR17K_form__kmodule_tB(&v_14); if (kmods_0) { _fx_free_LR17K_form__kmodule_t(&kmods_0); } if (kmods_1) { 
_fx_free_LR17K_form__kmodule_t(&kmods_1); } FX_FREE_STR(&v_15); _fx_free_T2LR17K_form__kmodule_tB(&v_16); FX_FREE_STR(&v_17); if (kmods_2) { _fx_free_LR17K_form__kmodule_t(&kmods_2); } FX_FREE_STR(&v_18); _fx_free_T2LR17C_form__cmodule_tB(&v_19); FX_FREE_STR(&v_20); if (cmods_0) { _fx_free_LR17C_form__cmodule_t(&cmods_0); } FX_FREE_STR(&v_21); if (cmods_1) { _fx_free_LR17C_form__cmodule_t(&cmods_1); } if (cmods_2) { _fx_free_LR17C_form__cmodule_t(&cmods_2); } if (cmods_3) { _fx_free_LR17C_form__cmodule_t(&cmods_3); } FX_FREE_STR(&appname_0); FX_FREE_STR(&v_22); FX_FREE_STR(&appname_1); if (v_23) { _fx_free_LS(&v_23); } FX_FREE_STR(&cmd_0); if (__fold_result___1) { _fx_free_LE(&__fold_result___1); } if (v_24) { _fx_free_LE(&v_24); } FX_FREE_STR(&v_25); FX_FREE_STR(&v_26); if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_0); fx_status = 0; int_ nerrs_1 = _fx_M8CompilerFM6lengthi1LE(_fx_g21Ast__all_compile_errs, 0); if (nerrs_1 != 0) { _fx_LE lst_7 = _fx_g21Ast__all_compile_errs; for (; lst_7; lst_7 = lst_7->tl) { _fx_LE r_1 = 0; fx_exn_t* a_1 = &lst_7->hd; FX_COPY_PTR(__fold_result___0, &r_1); FX_CALL(_fx_cons_LE(a_1, r_1, false, &r_1), _fx_catch_9); _fx_free_LE(&__fold_result___0); FX_COPY_PTR(r_1, &__fold_result___0); _fx_catch_9: ; if (r_1) { _fx_free_LE(&r_1); } FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(__fold_result___0, &v_0); _fx_LE lst_8 = v_0; for (; lst_8; lst_8 = lst_8->tl) { fx_exn_t* x_1 = &lst_8->hd; FX_CALL(_fx_M3AstFM17print_compile_errv1E(x_1, 0), _fx_catch_10); _fx_catch_10: ; FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_F6stringS1i(nerrs_1, &v_1, 0), _fx_cleanup); fx_str_t slit_24 = FX_MAKE_STR("\n"); fx_str_t slit_25 = FX_MAKE_STR(" errors occured during type checking."); { const fx_str_t strs_3[] = { slit_24, v_1, slit_25 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_2), _fx_cleanup); } _fx_F12print_stringv1S(&v_2, 0); fx_str_t slit_26 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_26, 0); } int tag_0 = exn_0.tag; if (tag_0 == 
_FX_EXN_E4Fail) { fx_str_t v_30 = {0}; fx_str_t slit_27 = FX_MAKE_STR(": "); fx_str_t* msg_0 = &FX_EXN_DATA(_fx_E4Fail_data_t, exn_0.data); { const fx_str_t strs_4[] = { _fx_g15Compiler__error, slit_27, *msg_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_4, 3, &v_30), _fx_catch_11); } _fx_F12print_stringv1S(&v_30, 0); fx_str_t slit_28 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_28, 0); _fx_catch_11: ; FX_FREE_STR(&v_30); } else if (tag_0 == _FX_EXN_E17Ast__CompileError) { FX_CALL(_fx_M3AstFM17print_compile_errv1E(&exn_0, 0), _fx_catch_12); _fx_catch_12: ; } else if (tag_0 != _FX_EXN_E30Compiler__CumulativeParseError) { fx_str_t v_31 = {0}; fx_str_t v_32 = {0}; FX_CALL(_fx_F6stringS1E(&exn_0, &v_31, 0), _fx_catch_13); fx_str_t slit_29 = FX_MAKE_STR("\n" U"\n"); fx_str_t slit_30 = FX_MAKE_STR(": Exception "); fx_str_t slit_31 = FX_MAKE_STR(" occured"); { const fx_str_t strs_5[] = { slit_29, _fx_g15Compiler__error, slit_30, v_31, slit_31 }; FX_CALL(fx_strjoin(0, 0, 0, strs_5, 5, &v_32), _fx_catch_13); } _fx_F12print_stringv1S(&v_32, 0); fx_str_t slit_32 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_32, 0); _fx_catch_13: ; FX_FREE_STR(&v_32); FX_FREE_STR(&v_31); } FX_CHECK_EXN(_fx_cleanup); *fx_result = false; } _fx_cleanup: ; fx_free_exn(&exn_0); if (__fold_result___0) { _fx_free_LE(&__fold_result___0); } if (v_0) { _fx_free_LE(&v_0); } FX_FREE_STR(&v_1); FX_FREE_STR(&v_2); return fx_status; } FX_EXTERN_C int fx_init_Compiler(void) { FX_REG_SIMPLE_EXN("Compiler.CumulativeParseError", _FX_EXN_E30Compiler__CumulativeParseError, _fx_E30Compiler__CumulativeParseError_info, _fx_E30Compiler__CumulativeParseErrorv); int fx_status = 0; FX_CALL(_fx_M3SysFM9colortermB0(&_fx_g21Compiler__iscolorterm, 0), _fx_cleanup); fx_str_t slit_0 = FX_MAKE_STR("error"); FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g16Compiler__MsgRed, &slit_0, &_fx_g15Compiler__error, 0), _fx_cleanup); _fx_cleanup: ; return fx_status; } FX_EXTERN_C void fx_deinit_Compiler(void) { 
FX_FREE_STR(&_fx_g15Compiler__error); }
fourier.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF OOO U U RRRR IIIII EEEEE RRRR % % F O O U U R R I E R R % % FFF O O U U RRRR I EEE RRRR % % F O O U U R R I E R R % % F OOO UUU R R IIIII EEEEE R R % % % % % % MagickCore Discrete Fourier Transform Methods % % % % Software Design % % Sean Burke % % Fred Weinhaus % % Cristy % % July 2009 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/fourier.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #if defined(MAGICKCORE_FFTW_DELEGATE) #if defined(MAGICKCORE_HAVE_COMPLEX_H) #include <complex.h> #endif #include <fftw3.h> #if !defined(MAGICKCORE_HAVE_CABS) #define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1])) #endif #if !defined(MAGICKCORE_HAVE_CARG) #define carg(z) (atan2(cimag(z),creal(z))) #endif #if !defined(MAGICKCORE_HAVE_CIMAG) #define cimag(z) (z[1]) #endif #if !defined(MAGICKCORE_HAVE_CREAL) #define creal(z) (z[0]) #endif #endif /* Typedef declarations. */ typedef struct _FourierInfo { PixelChannel channel; MagickBooleanType modulus; size_t width, height; ssize_t center; } FourierInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p l e x I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ComplexImages() performs complex mathematics on an image sequence. % % The format of the ComplexImages method is: % % MagickBooleanType ComplexImages(Image *images,const ComplexOperator op, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o op: A complex operator. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op, ExceptionInfo *exception) { #define ComplexImageTag "Complex/Image" CacheView *Ai_view, *Ar_view, *Bi_view, *Br_view, *Ci_view, *Cr_view; const char *artifact; const Image *Ai_image, *Ar_image, *Bi_image, *Br_image; double snr; Image *Ci_image, *complex_images, *Cr_image, *image; MagickBooleanType status; MagickOffsetType progress; size_t number_channels; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (images->next == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",images->filename); return((Image *) NULL); } image=CloneImage(images,0,0,MagickTrue,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { image=DestroyImageList(image); return(image); } image->depth=32UL; complex_images=NewImageList(); AppendImageToList(&complex_images,image); image=CloneImage(images,0,0,MagickTrue,exception); if (image == (Image *) NULL) { complex_images=DestroyImageList(complex_images); return(complex_images); } AppendImageToList(&complex_images,image); /* Apply complex mathematics to image pixels. 
*/ artifact=GetImageArtifact(image,"complex:snr"); snr=0.0; if (artifact != (const char *) NULL) snr=StringToDouble(artifact,(char **) NULL); Ar_image=images; Ai_image=images->next; Br_image=images; Bi_image=images->next; if ((images->next->next != (Image *) NULL) && (images->next->next->next != (Image *) NULL)) { Br_image=images->next->next; Bi_image=images->next->next->next; } Cr_image=complex_images; Ci_image=complex_images->next; number_channels=MagickMin(MagickMin(MagickMin( Ar_image->number_channels,Ai_image->number_channels),MagickMin( Br_image->number_channels,Bi_image->number_channels)),MagickMin( Cr_image->number_channels,Ci_image->number_channels)); Ar_view=AcquireVirtualCacheView(Ar_image,exception); Ai_view=AcquireVirtualCacheView(Ai_image,exception); Br_view=AcquireVirtualCacheView(Br_image,exception); Bi_view=AcquireVirtualCacheView(Bi_image,exception); Cr_view=AcquireAuthenticCacheView(Cr_image,exception); Ci_view=AcquireAuthenticCacheView(Ci_image,exception); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(Cr_image,complex_images,Cr_image->rows,1L) #endif for (y=0; y < (ssize_t) Cr_image->rows; y++) { register const Quantum *magick_restrict Ai, *magick_restrict Ar, *magick_restrict Bi, *magick_restrict Br; register Quantum *magick_restrict Ci, *magick_restrict Cr; register ssize_t x; if (status == MagickFalse) continue; Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Cr_image->columns,1,exception); Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Cr_image->columns,1,exception); Br=GetCacheViewVirtualPixels(Br_view,0,y,Cr_image->columns,1,exception); Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Cr_image->columns,1,exception); Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception); Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception); if ((Ar == (const Quantum *) NULL) || (Ai == (const Quantum *) NULL) || (Br == 
(const Quantum *) NULL) || (Bi == (const Quantum *) NULL) || (Cr == (Quantum *) NULL) || (Ci == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) Cr_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) number_channels; i++) { switch (op) { case AddComplexOperator: { Cr[i]=Ar[i]+Br[i]; Ci[i]=Ai[i]+Bi[i]; break; } case ConjugateComplexOperator: default: { Cr[i]=Ar[i]; Ci[i]=(-Bi[i]); break; } case DivideComplexOperator: { double gamma; gamma=PerceptibleReciprocal((double) Br[i]*Br[i]+Bi[i]*Bi[i]+snr); Cr[i]=gamma*((double) Ar[i]*Br[i]+(double) Ai[i]*Bi[i]); Ci[i]=gamma*((double) Ai[i]*Br[i]-(double) Ar[i]*Bi[i]); break; } case MagnitudePhaseComplexOperator: { Cr[i]=sqrt((double) Ar[i]*Ar[i]+(double) Ai[i]*Ai[i]); Ci[i]=atan2((double) Ai[i],(double) Ar[i])/(2.0*MagickPI)+0.5; break; } case MultiplyComplexOperator: { Cr[i]=QuantumScale*((double) Ar[i]*Br[i]-(double) Ai[i]*Bi[i]); Ci[i]=QuantumScale*((double) Ai[i]*Br[i]+(double) Ar[i]*Bi[i]); break; } case RealImaginaryComplexOperator: { Cr[i]=Ar[i]*cos(2.0*MagickPI*(Ai[i]-0.5)); Ci[i]=Ar[i]*sin(2.0*MagickPI*(Ai[i]-0.5)); break; } case SubtractComplexOperator: { Cr[i]=Ar[i]-Br[i]; Ci[i]=Ai[i]-Bi[i]; break; } } } Ar+=GetPixelChannels(Ar_image); Ai+=GetPixelChannels(Ai_image); Br+=GetPixelChannels(Br_image); Bi+=GetPixelChannels(Bi_image); Cr+=GetPixelChannels(Cr_image); Ci+=GetPixelChannels(Ci_image); } if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse) status=MagickFalse; if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows); if (proceed == MagickFalse) status=MagickFalse; } } Cr_view=DestroyCacheView(Cr_view); Ci_view=DestroyCacheView(Ci_view); Br_view=DestroyCacheView(Br_view); 
Bi_view=DestroyCacheView(Bi_view); Ar_view=DestroyCacheView(Ar_view); Ai_view=DestroyCacheView(Ai_view); if (status == MagickFalse) complex_images=DestroyImageList(complex_images); return(complex_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F o r w a r d F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ForwardFourierTransformImage() implements the discrete Fourier transform % (DFT) of the image either as a magnitude / phase or real / imaginary image % pair. % % The format of the ForwadFourierTransformImage method is: % % Image *ForwardFourierTransformImage(const Image *image, % const MagickBooleanType modulus,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o modulus: if true, return as transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType RollFourier(const size_t width,const size_t height, const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels) { double *source_pixels; MemoryInfo *source_info; register ssize_t i, x; ssize_t u, v, y; /* Move zero frequency (DC, average color) from (0,0) to (width/2,height/2). */ source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) return(MagickFalse); source_pixels=(double *) GetVirtualMemoryBlob(source_info); i=0L; for (y=0L; y < (ssize_t) height; y++) { if (y_offset < 0L) v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset; else v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height : y+y_offset; for (x=0L; x < (ssize_t) width; x++) { if (x_offset < 0L) u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset; else u=((x+x_offset) > ((ssize_t) width-1L)) ? 
x+x_offset-(ssize_t) width : x+x_offset; source_pixels[v*width+u]=roll_pixels[i++]; } } (void) memcpy(roll_pixels,source_pixels,height*width* sizeof(*source_pixels)); source_info=RelinquishVirtualMemory(source_info); return(MagickTrue); } static MagickBooleanType ForwardQuadrantSwap(const size_t width, const size_t height,double *source_pixels,double *forward_pixels) { MagickBooleanType status; register ssize_t x; ssize_t center, y; /* Swap quadrants. */ center=(ssize_t) (width/2L)+1L; status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L, source_pixels); if (status == MagickFalse) return(MagickFalse); for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x]; for (y=1; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[(height-y)*width+width/2L-x-1L]= source_pixels[y*center+x+1L]; for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[width/2L-x-1L]=source_pixels[x+1L]; return(MagickTrue); } static void CorrectPhaseLHS(const size_t width,const size_t height, double *fourier_pixels) { register ssize_t x; ssize_t y; for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) fourier_pixels[y*width+x]*=(-1.0); } static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info, Image *image,double *magnitude,double *phase,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *magnitude_pixels, *phase_pixels; Image *magnitude_image, *phase_image; MagickBooleanType status; MemoryInfo *magnitude_info, *phase_info; register Quantum *q; register ssize_t x; ssize_t i, y; magnitude_image=GetFirstImageInList(image); phase_image=GetNextImageInList(image); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",image->filename); return(MagickFalse); } /* Create "Fourier Transform" image from constituent arrays. 
*/ magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (magnitude_info != (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); (void) memset(magnitude_pixels,0,fourier_info->width* fourier_info->height*sizeof(*magnitude_pixels)); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); (void) memset(phase_pixels,0,fourier_info->width* fourier_info->height*sizeof(*phase_pixels)); status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height, magnitude,magnitude_pixels); if (status != MagickFalse) status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase, phase_pixels); CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels); if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i]/=(2.0*MagickPI); phase_pixels[i]+=0.5; i++; } } magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception); i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (q == (Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { SetPixelRed(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } case GreenPixelChannel: { 
SetPixelGreen(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } case BluePixelChannel: { SetPixelBlue(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } case BlackPixelChannel: { SetPixelBlack(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } case AlphaPixelChannel: { SetPixelAlpha(magnitude_image,ClampToQuantum(QuantumRange* magnitude_pixels[i]),q); break; } } i++; q+=GetPixelChannels(magnitude_image); } status=SyncCacheViewAuthenticPixels(magnitude_view,exception); if (status == MagickFalse) break; } magnitude_view=DestroyCacheView(magnitude_view); i=0L; phase_view=AcquireAuthenticCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL, exception); if (q == (Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { SetPixelRed(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } case GreenPixelChannel: { SetPixelGreen(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } case BluePixelChannel: { SetPixelBlue(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } case BlackPixelChannel: { SetPixelBlack(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } case AlphaPixelChannel: { SetPixelAlpha(phase_image,ClampToQuantum(QuantumRange* phase_pixels[i]),q); break; } } i++; q+=GetPixelChannels(phase_image); } status=SyncCacheViewAuthenticPixels(phase_view,exception); if (status == MagickFalse) break; } phase_view=DestroyCacheView(phase_view); phase_info=RelinquishVirtualMemory(phase_info); magnitude_info=RelinquishVirtualMemory(magnitude_info); return(status); } static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info, const Image *image,double *magnitude_pixels,double *phase_pixels, ExceptionInfo *exception) { 
CacheView *image_view; const char *value; double *source_pixels; fftw_complex *forward_pixels; fftw_plan fftw_r2c_plan; MemoryInfo *forward_info, *source_info; register const Quantum *p; register ssize_t i, x; ssize_t y; /* Generate the forward Fourier transform. */ source_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); memset(source_pixels,0,fourier_info->width*fourier_info->height* sizeof(*source_pixels)); i=0L; image_view=AcquireVirtualCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { source_pixels[i]=QuantumScale*GetPixelRed(image,p); break; } case GreenPixelChannel: { source_pixels[i]=QuantumScale*GetPixelGreen(image,p); break; } case BluePixelChannel: { source_pixels[i]=QuantumScale*GetPixelBlue(image,p); break; } case BlackPixelChannel: { source_pixels[i]=QuantumScale*GetPixelBlack(image,p); break; } case AlphaPixelChannel: { source_pixels[i]=QuantumScale*GetPixelAlpha(image,p); break; } } i++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); forward_info=AcquireVirtualMemory((size_t) fourier_info->width, (fourier_info->height/2+1)*sizeof(*forward_pixels)); if (forward_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); return(MagickFalse); } forward_pixels=(fftw_complex *) 
GetVirtualMemoryBlob(forward_info); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ForwardFourierTransform) #endif fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height, source_pixels,forward_pixels,FFTW_ESTIMATE); fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels); fftw_destroy_plan(fftw_r2c_plan); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); value=GetImageArtifact(image,"fourier:normalize"); if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0)) { double gamma; /* Normalize fourier transform. */ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) forward_pixels[i]*=gamma; #else forward_pixels[i][0]*=gamma; forward_pixels[i][1]*=gamma; #endif i++; } } /* Generate magnitude and phase (or real and imaginary). */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude_pixels[i]=cabs(forward_pixels[i]); phase_pixels[i]=carg(forward_pixels[i]); i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude_pixels[i]=creal(forward_pixels[i]); phase_pixels[i]=cimag(forward_pixels[i]); i++; } forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info); return(MagickTrue); } static MagickBooleanType ForwardFourierTransformChannel(const Image *image, const PixelChannel channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { double *magnitude_pixels, *phase_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo *magnitude_info, *phase_info; fourier_info.width=image->columns; fourier_info.height=image->rows; if ((image->columns != image->rows) || ((image->columns % 2) != 0) 
|| ((image->rows % 2) != 0)) { size_t extent=image->columns < image->rows ? image->rows : image->columns; fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (magnitude_info == (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels, phase_pixels,exception); if (status != MagickFalse) status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels, phase_pixels,exception); phase_info=RelinquishVirtualMemory(phase_info); magnitude_info=RelinquishVirtualMemory(magnitude_info); return(status); } #endif MagickExport Image *ForwardFourierTransformImage(const Image *image, const MagickBooleanType modulus,ExceptionInfo *exception) { Image *fourier_image; fourier_image=NewImageList(); #if !defined(MAGICKCORE_FFTW_DELEGATE) (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", image->filename); #else { Image *magnitude_image; size_t height, width; width=image->columns; height=image->rows; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || 
((image->rows % 2) != 0)) { size_t extent=image->columns < image->rows ? image->rows : image->columns; width=(extent & 0x01) == 1 ? extent+1UL : extent; } height=width; magnitude_image=CloneImage(image,width,height,MagickTrue,exception); if (magnitude_image != (Image *) NULL) { Image *phase_image; magnitude_image->storage_class=DirectClass; magnitude_image->depth=32UL; phase_image=CloneImage(image,width,height,MagickTrue,exception); if (phase_image == (Image *) NULL) magnitude_image=DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class=DirectClass; phase_image->depth=32UL; AppendImageToList(&fourier_image,magnitude_image); AppendImageToList(&fourier_image,phase_image); status=MagickTrue; is_gray=IsImageGray(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=ForwardFourierTransformChannel(image, GrayPixelChannel,modulus,fourier_image,exception); else thread_status=ForwardFourierTransformChannel(image, RedPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, GreenPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, BluePixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; 
thread_status=MagickTrue; if (image->colorspace == CMYKColorspace) thread_status=ForwardFourierTransformChannel(image, BlackPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->alpha_trait != UndefinedPixelTrait) thread_status=ForwardFourierTransformChannel(image, AlphaPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImageList(fourier_image); fftw_cleanup(); } } } #endif return(fourier_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v e r s e F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InverseFourierTransformImage() implements the inverse discrete Fourier % transform (DFT) of the image either as a magnitude / phase or real / % imaginary image pair. % % The format of the InverseFourierTransformImage method is: % % Image *InverseFourierTransformImage(const Image *magnitude_image, % const Image *phase_image,const MagickBooleanType modulus, % ExceptionInfo *exception) % % A description of each parameter follows: % % o magnitude_image: the magnitude or real image. % % o phase_image: the phase or imaginary image. % % o modulus: if true, return transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType InverseQuadrantSwap(const size_t width, const size_t height,const double *source,double *destination) { register ssize_t x; ssize_t center, y; /* Swap quadrants. 
*/ center=(ssize_t) (width/2L)+1L; for (y=1L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L+1L); x++) destination[(height-y)*center-x+width/2L]=source[y*width+x]; for (y=0L; y < (ssize_t) height; y++) destination[y*center]=source[y*width+width/2L]; for (x=0L; x < center; x++) destination[x]=source[center-x-1L]; return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination)); } static MagickBooleanType InverseFourier(FourierInfo *fourier_info, const Image *magnitude_image,const Image *phase_image, fftw_complex *fourier_pixels,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *inverse_pixels, *magnitude_pixels, *phase_pixels; MagickBooleanType status; MemoryInfo *inverse_info, *magnitude_info, *phase_info; register const Quantum *p; register ssize_t i, x; ssize_t y; /* Inverse fourier - read image and break down into a double array. */ magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*phase_pixels)); inverse_info=AcquireVirtualMemory((size_t) fourier_info->width, (fourier_info->height/2+1)*sizeof(*inverse_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL) || (inverse_info == (MemoryInfo *) NULL)) { if (magnitude_info != (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (inverse_info != (MemoryInfo *) NULL) inverse_info=RelinquishVirtualMemory(inverse_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info); i=0L; 
magnitude_view=AcquireVirtualCacheView(magnitude_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { magnitude_pixels[i]=QuantumScale*GetPixelRed(magnitude_image,p); break; } case GreenPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelGreen(magnitude_image,p); break; } case BluePixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelBlue(magnitude_image,p); break; } case BlackPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelBlack(magnitude_image,p); break; } case AlphaPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelAlpha(magnitude_image,p); break; } } i++; p+=GetPixelChannels(magnitude_image); } } magnitude_view=DestroyCacheView(magnitude_view); status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, magnitude_pixels,inverse_pixels); (void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height* fourier_info->center*sizeof(*magnitude_pixels)); i=0L; phase_view=AcquireVirtualCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { phase_pixels[i]=QuantumScale*GetPixelRed(phase_image,p); break; } case GreenPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelGreen(phase_image,p); break; } case BluePixelChannel: { phase_pixels[i]=QuantumScale*GetPixelBlue(phase_image,p); break; } case BlackPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelBlack(phase_image,p); break; } case AlphaPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelAlpha(phase_image,p); break; } } i++; p+=GetPixelChannels(phase_image); 
} } if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i]-=0.5; phase_pixels[i]*=(2.0*MagickPI); i++; } } phase_view=DestroyCacheView(phase_view); CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels); if (status != MagickFalse) status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, phase_pixels,inverse_pixels); (void) memcpy(phase_pixels,inverse_pixels,fourier_info->height* fourier_info->center*sizeof(*phase_pixels)); inverse_info=RelinquishVirtualMemory(inverse_info); /* Merge two sets. */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I* magnitude_pixels[i]*sin(phase_pixels[i]); #else fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]); fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]); #endif i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i]; #else fourier_pixels[i][0]=magnitude_pixels[i]; fourier_pixels[i][1]=phase_pixels[i]; #endif i++; } magnitude_info=RelinquishVirtualMemory(magnitude_info); phase_info=RelinquishVirtualMemory(phase_info); return(status); } static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info, fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception) { CacheView *image_view; const char *value; double *source_pixels; fftw_plan fftw_c2r_plan; MemoryInfo *source_info; register Quantum *q; register ssize_t i, x; ssize_t y; source_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) 
ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); value=GetImageArtifact(image,"fourier:normalize"); if (LocaleCompare(value,"inverse") == 0) { double gamma; /* Normalize inverse transform. */ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]*=gamma; #else fourier_pixels[i][0]*=gamma; fourier_pixels[i][1]*=gamma; #endif i++; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InverseFourierTransform) #endif fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height, fourier_pixels,source_pixels,FFTW_ESTIMATE); fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels); fftw_destroy_plan(fftw_c2r_plan); i=0L; image_view=AcquireAuthenticCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { if (y >= (ssize_t) image->rows) break; q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width > image->columns ? 
image->columns : fourier_info->width,1UL,exception); if (q == (Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { if (x < (ssize_t) image->columns) switch (fourier_info->channel) { case RedPixelChannel: default: { SetPixelRed(image,ClampToQuantum(QuantumRange*source_pixels[i]),q); break; } case GreenPixelChannel: { SetPixelGreen(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case BluePixelChannel: { SetPixelBlue(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case BlackPixelChannel: { SetPixelBlack(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case AlphaPixelChannel: { SetPixelAlpha(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } } i++; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } image_view=DestroyCacheView(image_view); source_info=RelinquishVirtualMemory(source_info); return(MagickTrue); } static MagickBooleanType InverseFourierTransformChannel( const Image *magnitude_image,const Image *phase_image, const PixelChannel channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { fftw_complex *inverse_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo *inverse_info; fourier_info.width=magnitude_image->columns; fourier_info.height=magnitude_image->rows; if ((magnitude_image->columns != magnitude_image->rows) || ((magnitude_image->columns % 2) != 0) || ((magnitude_image->rows % 2) != 0)) { size_t extent=magnitude_image->columns < magnitude_image->rows ? magnitude_image->rows : magnitude_image->columns; fourier_info.width=(extent & 0x01) == 1 ? 
extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; inverse_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*inverse_pixels)); if (inverse_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info); status=InverseFourier(&fourier_info,magnitude_image,phase_image, inverse_pixels,exception); if (status != MagickFalse) status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image, exception); inverse_info=RelinquishVirtualMemory(inverse_info); return(status); } #endif MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image, const Image *phase_image,const MagickBooleanType modulus, ExceptionInfo *exception) { Image *fourier_image; assert(magnitude_image != (Image *) NULL); assert(magnitude_image->signature == MagickCoreSignature); if (magnitude_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",magnitude_image->filename); return((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image=(Image *) NULL; (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", magnitude_image->filename); #else { fourier_image=CloneImage(magnitude_image,magnitude_image->columns, magnitude_image->rows,MagickTrue,exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status=MagickTrue; is_gray=IsImageGray(magnitude_image); if (is_gray != MagickFalse) 
is_gray=IsImageGray(phase_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GrayPixelChannel,modulus,fourier_image,exception); else thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,RedPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GreenPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BluePixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->colorspace == CMYKColorspace) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BlackPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->alpha_trait != UndefinedPixelTrait) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,AlphaPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) 
fourier_image=DestroyImage(fourier_image); } fftw_cleanup(); } #endif return(fourier_image); }
DRB116-target-teams-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>

//Temporarily turn the team clause into parallel since we do not support
//the teams construct

/*
use of omp target + teams
Without protection, master threads from two teams cause data races.
Data race pair: a@66:5 vs. a@66:5
*/
int main(int argc, char* argv[])
{
  omprace_init();
  int i;
  int len = 100;
  /* C99 variable-length array on the stack; len is a runtime value */
  double a[len];

  /*Initialize with some values*/
  for (i=0; i<len; i++)
    a[i]= ((double)i)/2.0;

  /* The original target/teams directives are kept (commented out) to show
     what this benchmark models; the parallel region below stands in for
     two teams. */
  //#pragma omp target map(tofrom: a[0:len])
  //#pragma omp teams num_teams(2)
  #pragma omp parallel num_threads(2)
  {
    /* Intentional data race: both threads perform an unsynchronized
       read-modify-write of a[50]. This is the defect the benchmark
       expects race-detection tools to report — do not "fix" it. */
    a[50]*=2.0;
  }

  printf ("a[50]=%f\n", a[50]);
  omprace_fini();
  return 0;
}
hello_parallel.c
#include <stdio.h>
#include <omp.h>

/*
 * Fork an OpenMP thread team: every member prints a greeting tagged with
 * its thread id, then the program prints a single farewell after the
 * implicit barrier at the end of the parallel region.
 */
int main()
{
  #pragma omp parallel //CREATE A PARALLEL REGION
  {
    /* capture this thread's id before formatting the greeting */
    int tid = omp_get_thread_num();

    printf("\nhello world ");
    printf("hello from thread number %d \n", tid);
    fflush(stdout);
  }

  printf("\ngoodbye world \n\n");
  return 0;
}
task_single_producer_omp.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See COPYRIGHT in top-level directory.
 */

#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NUM_TASKS 5000000
#define NUM_REPS 1

/* Scale the float pointed to by a by value, in place. */
void sscal(float value, float *a) {
    *a = *a * value;
}

/*
 * Single-producer OpenMP task benchmark: one thread creates ntasks tasks
 * (argv[1], default NUM_TASKS), repeated rep times (argv[2], default
 * NUM_REPS); each task scales one array element by 0.9. Reports the
 * average wall time and the task-creation time of the final repetition.
 */
int main(int argc, char *argv[]) {
    int i, r, nthreads;
    double *time, avg_time = 0.0;
    char *str, *endptr;
    float *a;
    double time2 = 0.0;

    /* Discover the team size once; only the master records it, and the
       barrier at the end of the region publishes it. */
#pragma omp parallel
    {
#pragma omp master
        { nthreads = omp_get_num_threads(); }
    }

    if (argc > 1) {
        str = argv[1];
    }
    int ntasks = argc > 1 ? (int) strtoll(str, &endptr, 10) : NUM_TASKS;
    if (ntasks < nthreads)
        ntasks = nthreads;
    int rep = (argc > 2) ? atoi(argv[2]) : NUM_REPS;

    time = malloc(sizeof(double) * rep);
    a = malloc(sizeof(float) * ntasks);
    /* Bug fix: results were previously dereferenced unchecked (CERT
       MEM32-C); fail cleanly on allocation failure instead of crashing. */
    if (time == NULL || a == NULL) {
        fprintf(stderr, "error: out of memory\n");
        free(time);
        free(a);
        return EXIT_FAILURE;
    }

    for (i = 0; i < ntasks; i++) {
        a[i] = i + 100.0f;
    }

    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
#pragma omp parallel
        {
#pragma omp single
            {
                /* sleep lets the rest of the team reach the task queue
                   before production starts */
                sleep(2);
                printf("Thread %d\n", omp_get_thread_num());
                time2 = omp_get_wtime();
                for (i = 0; i < ntasks; i++) {
#pragma omp task firstprivate(i)
                    {
                        printf("Task %d executed by Thread %d Stolen? %s\n", i,
                               omp_get_thread_num(),
                               (i % nthreads == omp_get_thread_num()) ? "NO"
                                                                      : "YES");
                        sscal(0.9f, &a[i]);
                    }
                }
                time2 = omp_get_wtime() - time2;
            }
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }

    /* Verify: each element was scaled exactly once (bit-exact float
       comparison is intentional; both sides compute (i+100.0f)*0.9f). */
    for (i = 0; i < ntasks; i++) {
        if (a[i] != (i + 100.0f) * 0.9f) {
            printf("error: a[%d]=%2.f expected %2.f\n", i, a[i],
                   (i + 100.0f) * 0.9f);
        }
    }
    avg_time /= rep;
    printf("nthreads: %d\nntasks: %d\nTime(s):%f\nCreation Time: %f\n",
           nthreads, ntasks, avg_time, time2);
    /* Bug fix: time and a previously leaked at exit. */
    free(a);
    free(time);
    return EXIT_SUCCESS;
}
par_vector.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Member functions for hypre_Vector class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" HYPRE_Int hypre_FillResponseParToVectorAll(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*); /*-------------------------------------------------------------------------- * hypre_ParVectorCreate *--------------------------------------------------------------------------*/ /* If create is called and partitioning is NOT null, then it is assumed that it is array of length 2 containing the start row of the calling processor followed by the start row of the next processor - AHB 6/05 */ hypre_ParVector * hypre_ParVectorCreate( MPI_Comm comm, HYPRE_BigInt global_size, HYPRE_BigInt *partitioning ) { hypre_ParVector *vector; HYPRE_Int num_procs, my_id; if (global_size < 0) { hypre_error_in_arg(2); return NULL; } vector = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); hypre_MPI_Comm_rank(comm,&my_id); if (!partitioning) { hypre_MPI_Comm_size(comm,&num_procs); hypre_GenerateLocalPartitioning(global_size, num_procs, my_id, &partitioning); } hypre_ParVectorAssumedPartition(vector) = NULL; hypre_ParVectorComm(vector) = comm; hypre_ParVectorGlobalSize(vector) = global_size; hypre_ParVectorFirstIndex(vector) = partitioning[0]; hypre_ParVectorLastIndex(vector) = partitioning[1]-1; hypre_ParVectorPartitioning(vector) = partitioning; hypre_ParVectorLocalVector(vector) = hypre_SeqVectorCreate(partitioning[1] - partitioning[0]); /* set defaults */ hypre_ParVectorOwnsData(vector) = 
1; hypre_ParVectorOwnsPartitioning(vector) = 1; hypre_ParVectorActualLocalSize(vector) = 0; return vector; } /*-------------------------------------------------------------------------- * hypre_ParMultiVectorCreate *--------------------------------------------------------------------------*/ hypre_ParVector * hypre_ParMultiVectorCreate( MPI_Comm comm, HYPRE_BigInt global_size, HYPRE_BigInt *partitioning, HYPRE_Int num_vectors ) { /* note that global_size is the global length of a single vector */ hypre_ParVector *vector = hypre_ParVectorCreate( comm, global_size, partitioning ); hypre_ParVectorNumVectors(vector) = num_vectors; return vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorDestroy( hypre_ParVector *vector ) { if (vector) { if ( hypre_ParVectorOwnsData(vector) ) { hypre_SeqVectorDestroy(hypre_ParVectorLocalVector(vector)); } if ( hypre_ParVectorOwnsPartitioning(vector) ) { hypre_TFree(hypre_ParVectorPartitioning(vector), HYPRE_MEMORY_HOST); } if (hypre_ParVectorAssumedPartition(vector)) { hypre_AssumedPartitionDestroy(hypre_ParVectorAssumedPartition(vector)); } hypre_TFree(vector, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorInitialize_v2( hypre_ParVector *vector, HYPRE_MemoryLocation memory_location ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_SeqVectorInitialize_v2(hypre_ParVectorLocalVector(vector), memory_location); hypre_ParVectorActualLocalSize(vector) = hypre_VectorSize(hypre_ParVectorLocalVector(vector)); return hypre_error_flag; } HYPRE_Int hypre_ParVectorInitialize( hypre_ParVector *vector ) { return hypre_ParVectorInitialize_v2(vector, 
hypre_ParVectorMemoryLocation(vector)); } /*-------------------------------------------------------------------------- * hypre_ParVectorSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetDataOwner( hypre_ParVector *vector, HYPRE_Int owns_data ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParVectorOwnsData(vector) = owns_data; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetPartitioningOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetPartitioningOwner( hypre_ParVector *vector, HYPRE_Int owns_partitioning ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParVectorOwnsPartitioning(vector) = owns_partitioning; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetNumVectors * call before calling hypre_ParVectorInitialize * probably this will do more harm than good, use hypre_ParMultiVectorCreate *--------------------------------------------------------------------------*/ #if 0 HYPRE_Int hypre_ParVectorSetNumVectors( hypre_ParVector *vector, HYPRE_Int num_vectors ) { HYPRE_Int ierr=0; hypre_Vector *local_vector = hypre_ParVectorLocalVector(v); hypre_SeqVectorSetNumVectors( local_vector, num_vectors ); return ierr; } #endif /*-------------------------------------------------------------------------- * hypre_ParVectorRead *--------------------------------------------------------------------------*/ hypre_ParVector *hypre_ParVectorRead( MPI_Comm comm, const char *file_name ) { char new_file_name[80]; hypre_ParVector *par_vector; HYPRE_Int my_id, num_procs; HYPRE_BigInt *partitioning; HYPRE_BigInt global_size; HYPRE_Int i; FILE *fp; hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); partitioning = hypre_CTAlloc(HYPRE_BigInt, 
num_procs+1, HYPRE_MEMORY_HOST); hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id); fp = fopen(new_file_name, "r"); hypre_fscanf(fp, "%b\n", &global_size); for (i=0; i < 2; i++) hypre_fscanf(fp, "%b\n", &partitioning[i]); fclose (fp); par_vector = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); hypre_ParVectorComm(par_vector) = comm; hypre_ParVectorGlobalSize(par_vector) = global_size; hypre_ParVectorFirstIndex(par_vector) = partitioning[0]; hypre_ParVectorLastIndex(par_vector) = partitioning[1]-1; hypre_ParVectorPartitioning(par_vector) = partitioning; hypre_ParVectorOwnsData(par_vector) = 1; hypre_ParVectorOwnsPartitioning(par_vector) = 1; hypre_sprintf(new_file_name,"%s.%d",file_name,my_id); hypre_ParVectorLocalVector(par_vector) = hypre_SeqVectorRead(new_file_name); /* multivector code not written yet */ hypre_assert( hypre_ParVectorNumVectors(par_vector) == 1 ); return par_vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorPrint( hypre_ParVector *vector, const char *file_name ) { char new_file_name[80]; hypre_Vector *local_vector; MPI_Comm comm; HYPRE_Int my_id, num_procs, i; HYPRE_BigInt *partitioning; HYPRE_BigInt global_size; FILE *fp; if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } local_vector = hypre_ParVectorLocalVector(vector); comm = hypre_ParVectorComm(vector); partitioning = hypre_ParVectorPartitioning(vector); global_size = hypre_ParVectorGlobalSize(vector); hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); hypre_sprintf(new_file_name,"%s.%d",file_name,my_id); hypre_SeqVectorPrint(local_vector,new_file_name); hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id); fp = fopen(new_file_name, "w"); hypre_fprintf(fp, "%b\n", global_size); for (i=0; i < 2; i++) hypre_fprintf(fp, "%b\n", partitioning[i]); fclose (fp); return 
hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetConstantValues *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetConstantValues( hypre_ParVector *v, HYPRE_Complex value ) { hypre_Vector *v_local = hypre_ParVectorLocalVector(v); return hypre_SeqVectorSetConstantValues(v_local,value); } /*-------------------------------------------------------------------------- * hypre_ParVectorSetRandomValues *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetRandomValues( hypre_ParVector *v, HYPRE_Int seed ) { HYPRE_Int my_id; hypre_Vector *v_local = hypre_ParVectorLocalVector(v); MPI_Comm comm = hypre_ParVectorComm(v); hypre_MPI_Comm_rank(comm,&my_id); seed *= (my_id+1); return hypre_SeqVectorSetRandomValues(v_local, seed); } /*-------------------------------------------------------------------------- * hypre_ParVectorCopy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorCopy( hypre_ParVector *x, hypre_ParVector *y ) { hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); return hypre_SeqVectorCopy(x_local, y_local); } /*-------------------------------------------------------------------------- * hypre_ParVectorCloneShallow * returns a complete copy of a hypre_ParVector x - a shallow copy, re-using * the partitioning and data arrays of x *--------------------------------------------------------------------------*/ hypre_ParVector * hypre_ParVectorCloneShallow( hypre_ParVector *x ) { hypre_ParVector * y = hypre_ParVectorCreate(hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x), hypre_ParVectorPartitioning(x)); hypre_ParVectorOwnsData(y) = 1; /* ...This vector owns its local vector, although the local vector doesn't * own _its_ data */ hypre_ParVectorOwnsPartitioning(y) = 0; 
hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) ); hypre_ParVectorLocalVector(y) = hypre_SeqVectorCloneShallow(hypre_ParVectorLocalVector(x) ); hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x); return y; } hypre_ParVector * hypre_ParVectorCloneDeep_v2( hypre_ParVector *x, HYPRE_MemoryLocation memory_location ) { hypre_ParVector *y = hypre_ParVectorCreate(hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x), hypre_ParVectorPartitioning(x)); hypre_ParVectorOwnsData(y) = 1; hypre_ParVectorOwnsPartitioning(y) = 0; hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) ); hypre_ParVectorLocalVector(y) = hypre_SeqVectorCloneDeep_v2( hypre_ParVectorLocalVector(x), memory_location ); hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x); //RL: WHY HERE? return y; } HYPRE_Int hypre_ParVectorMigrate(hypre_ParVector *x, HYPRE_MemoryLocation memory_location) { if (!x) { return hypre_error_flag; } if ( hypre_GetActualMemLocation(memory_location) != hypre_GetActualMemLocation(hypre_ParVectorMemoryLocation(x)) ) { hypre_Vector *x_local = hypre_SeqVectorCloneDeep_v2(hypre_ParVectorLocalVector(x), memory_location); hypre_SeqVectorDestroy(hypre_ParVectorLocalVector(x)); hypre_ParVectorLocalVector(x) = x_local; } else { hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(x)) = memory_location; } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorScale *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorScale( HYPRE_Complex alpha, hypre_ParVector *y ) { hypre_Vector *y_local = hypre_ParVectorLocalVector(y); return hypre_SeqVectorScale( alpha, y_local); } /*-------------------------------------------------------------------------- * hypre_ParVectorAxpy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorAxpy( HYPRE_Complex alpha, hypre_ParVector *x, hypre_ParVector *y ) { hypre_Vector *x_local 
= hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); return hypre_SeqVectorAxpy( alpha, x_local, y_local); } /*-------------------------------------------------------------------------- * hypre_ParVectorInnerProd *--------------------------------------------------------------------------*/ HYPRE_Real hypre_ParVectorInnerProd( hypre_ParVector *x, hypre_ParVector *y ) { MPI_Comm comm = hypre_ParVectorComm(x); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_Real result = 0.0; HYPRE_Real local_result = hypre_SeqVectorInnerProd(x_local, y_local); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] -= hypre_MPI_Wtime(); #endif hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] += hypre_MPI_Wtime(); #endif return result; } /*-------------------------------------------------------------------------- * hypre_ParVectorElmdivpy * y = y + x ./ b [MATLAB Notation] *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorElmdivpy( hypre_ParVector *x, hypre_ParVector *b, hypre_ParVector *y ) { hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *b_local = hypre_ParVectorLocalVector(b); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); return hypre_SeqVectorElmdivpy(x_local, b_local, y_local); } /*-------------------------------------------------------------------------- * hypre_VectorToParVector: * generates a ParVector from a Vector on proc 0 and distributes the pieces * to the other procs in comm *--------------------------------------------------------------------------*/ hypre_ParVector * hypre_VectorToParVector ( MPI_Comm comm, hypre_Vector *v, HYPRE_BigInt *vec_starts ) { HYPRE_BigInt global_size; HYPRE_BigInt *global_vec_starts = NULL; HYPRE_BigInt first_index; HYPRE_BigInt last_index; 
HYPRE_Int local_size; HYPRE_Int num_vectors; HYPRE_Int num_procs, my_id; HYPRE_Int global_vecstride, vecstride, idxstride; hypre_ParVector *par_vector; hypre_Vector *local_vector; HYPRE_Complex *v_data; HYPRE_Complex *local_data; hypre_MPI_Request *requests; hypre_MPI_Status *status, status0; HYPRE_Int i, j, k, p; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (my_id == 0) { global_size = (HYPRE_BigInt)hypre_VectorSize(v); v_data = hypre_VectorData(v); num_vectors = hypre_VectorNumVectors(v); /* for multivectors */ global_vecstride = hypre_VectorVectorStride(v); } hypre_MPI_Bcast(&global_size,1,HYPRE_MPI_INT,0,comm); hypre_MPI_Bcast(&num_vectors,1,HYPRE_MPI_INT,0,comm); hypre_MPI_Bcast(&global_vecstride,1,HYPRE_MPI_INT,0,comm); if ( num_vectors == 1 ) par_vector = hypre_ParVectorCreate(comm, global_size, vec_starts); else par_vector = hypre_ParMultiVectorCreate(comm, global_size, vec_starts, num_vectors); vec_starts = hypre_ParVectorPartitioning(par_vector); first_index = hypre_ParVectorFirstIndex(par_vector); last_index = hypre_ParVectorLastIndex(par_vector); local_size = (HYPRE_Int)(last_index - first_index) + 1; if (my_id == 0) { global_vec_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); } hypre_MPI_Gather(&first_index, 1, HYPRE_MPI_BIG_INT, global_vec_starts, 1, HYPRE_MPI_BIG_INT, 0, comm); if (my_id == 0) { global_vec_starts[num_procs] = hypre_ParVectorGlobalSize(par_vector); } hypre_ParVectorInitialize(par_vector); local_vector = hypre_ParVectorLocalVector(par_vector); local_data = hypre_VectorData(local_vector); vecstride = hypre_VectorVectorStride(local_vector); idxstride = hypre_VectorIndexStride(local_vector); /* so far the only implemented multivector StorageMethod is 0 */ hypre_assert( idxstride==1 ); if (my_id == 0) { requests = hypre_CTAlloc(hypre_MPI_Request, num_vectors*(num_procs-1), HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_vectors*(num_procs-1), HYPRE_MEMORY_HOST); k = 0; 
for (p = 1; p<num_procs; p++) for (j = 0; j<num_vectors; ++j) { hypre_MPI_Isend( &v_data[(HYPRE_Int) global_vec_starts[p]] + j*global_vecstride, (HYPRE_Int)(global_vec_starts[p+1] - global_vec_starts[p]), HYPRE_MPI_COMPLEX, p, 0, comm, &requests[k++] ); } if (num_vectors == 1) { for (i = 0; i < local_size; i++) local_data[i] = v_data[i]; } else { for (j = 0; j<num_vectors; ++j) { for (i = 0; i < local_size; i++) local_data[i+j*vecstride] = v_data[i+j*global_vecstride]; } } hypre_MPI_Waitall(num_procs-1,requests, status); hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); } else { for ( j=0; j<num_vectors; ++j ) hypre_MPI_Recv( local_data+j*vecstride, local_size, HYPRE_MPI_COMPLEX, 0, 0, comm,&status0 ); } if (global_vec_starts) { hypre_TFree(global_vec_starts, HYPRE_MEMORY_HOST); } return par_vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorToVectorAll: * generates a Vector on every proc which has a piece of the data * from a ParVector on several procs in comm, * vec_starts needs to contain the partitioning across all procs in comm *--------------------------------------------------------------------------*/ hypre_Vector * hypre_ParVectorToVectorAll( hypre_ParVector *par_v ) { MPI_Comm comm = hypre_ParVectorComm(par_v); HYPRE_BigInt global_size = hypre_ParVectorGlobalSize(par_v); hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_v); HYPRE_Int num_procs, my_id; HYPRE_Int num_vectors = hypre_ParVectorNumVectors(par_v); hypre_Vector *vector; HYPRE_Complex *vector_data; HYPRE_Complex *local_data; HYPRE_Int local_size; hypre_MPI_Request *requests; hypre_MPI_Status *status; HYPRE_Int i, j; HYPRE_Int *used_procs; HYPRE_Int num_types, num_requests; HYPRE_Int vec_len, proc_id; HYPRE_Int *new_vec_starts; HYPRE_Int num_contacts; HYPRE_Int contact_proc_list[1]; HYPRE_Int contact_send_buf[1]; HYPRE_Int contact_send_buf_starts[2]; HYPRE_Int max_response_size; HYPRE_Int 
*response_recv_buf=NULL; HYPRE_Int *response_recv_buf_starts = NULL; hypre_DataExchangeResponse response_obj; hypre_ProcListElements send_proc_obj; HYPRE_Int *send_info = NULL; hypre_MPI_Status status1; HYPRE_Int count, tag1 = 112, tag2 = 223; HYPRE_Int start; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); local_size = (HYPRE_Int)(hypre_ParVectorLastIndex(par_v) - hypre_ParVectorFirstIndex(par_v) + 1); /* determine procs which hold data of par_v and store ids in used_procs */ /* we need to do an exchange data for this. If I own row then I will contact processor 0 with the endpoint of my local range */ if (local_size > 0) { num_contacts = 1; contact_proc_list[0] = 0; contact_send_buf[0] = hypre_ParVectorLastIndex(par_v); contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 1; } else { num_contacts = 0; contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 0; } /*build the response object*/ /*send_proc_obj will be for saving info from contacts */ send_proc_obj.length = 0; send_proc_obj.storage_length = 10; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = 10; send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST); max_response_size = 0; /* each response is null */ response_obj.fill_response = hypre_FillResponseParToVectorAll; response_obj.data1 = NULL; response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/ hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf, contact_send_buf_starts, sizeof(HYPRE_Int), //0, &response_obj, sizeof(HYPRE_Int), &response_obj, max_response_size, 1, comm, (void**) &response_recv_buf, &response_recv_buf_starts); /* now processor 0 should have a list of ranges for processors that 
have rows - these are in send_proc_obj - it needs to create the new list of processors and also an array of vec starts - and send to those who own row*/ if (my_id) { if (local_size) { /* look for a message from processor 0 */ hypre_MPI_Probe(0, tag1, comm, &status1); hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count); send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1); /* now unpack */ num_types = send_info[0]; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); for (i=1; i<= num_types; i++) { used_procs[i-1] = (HYPRE_Int)send_info[i]; } for (i=num_types+1; i< count; i++) { new_vec_starts[i-num_types-1] = send_info[i] ; } } else /* clean up and exit */ { hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); return NULL; } } else /* my_id ==0 */ { num_types = send_proc_obj.length; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); new_vec_starts[0] = 0; for (i=0; i< num_types; i++) { used_procs[i] = send_proc_obj.id[i]; new_vec_starts[i+1] = send_proc_obj.elements[i]+1; } hypre_qsort0(used_procs, 0, num_types-1); hypre_qsort0(new_vec_starts, 0, num_types); /*now we need to put into an array to send */ count = 2*num_types+2; send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); send_info[0] = num_types; for (i=1; i<= num_types; i++) { send_info[i] = (HYPRE_Int)used_procs[i-1]; } for (i=num_types+1; i< count; i++) { send_info[i] = new_vec_starts[i-num_types-1]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_types, 
HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST); /* don't send to myself - these are sorted so my id would be first*/ start = 0; if (used_procs[0] == 0) { start = 1; } for (i=start; i < num_types; i++) { hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i-start]); } hypre_MPI_Waitall(num_types-start, requests, status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } /* clean up */ hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); hypre_TFree(send_info, HYPRE_MEMORY_HOST); if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); /* now proc 0 can exit if it has no rows */ if (!local_size) { hypre_TFree(used_procs, HYPRE_MEMORY_HOST); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); return NULL; } /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */ /* this vector should be rather small */ local_data = hypre_VectorData(local_vector); vector = hypre_SeqVectorCreate((HYPRE_Int)global_size); hypre_VectorNumVectors(vector) = num_vectors; hypre_SeqVectorInitialize(vector); vector_data = hypre_VectorData(vector); num_requests = 2*num_types; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST); /* initialize data exchange among used_procs and generate vector - here we send to ourself also*/ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = (HYPRE_Int)(new_vec_starts[i+1] - new_vec_starts[i]); hypre_MPI_Irecv(&vector_data[(HYPRE_Int)new_vec_starts[i]], num_vectors*vec_len, HYPRE_MPI_COMPLEX, proc_id, tag2, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { hypre_MPI_Isend(local_data, 
num_vectors*local_size, HYPRE_MPI_COMPLEX, used_procs[i], tag2, comm, &requests[j++]); } hypre_MPI_Waitall(num_requests, requests, status); if (num_requests) { hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); } hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); return vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorPrintIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorPrintIJ( hypre_ParVector *vector, HYPRE_Int base_j, const char *filename ) { MPI_Comm comm; HYPRE_BigInt global_size, j; HYPRE_BigInt *partitioning; HYPRE_Complex *local_data; HYPRE_Int myid, num_procs, i, part0; char new_filename[255]; FILE *file; if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParVectorComm(vector); global_size = hypre_ParVectorGlobalSize(vector); partitioning = hypre_ParVectorPartitioning(vector); /* multivector code not written yet */ hypre_assert( hypre_ParVectorNumVectors(vector) == 1 ); if ( hypre_ParVectorNumVectors(vector) != 1 ) hypre_error_in_arg(1); hypre_MPI_Comm_rank(comm, &myid); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n"); return hypre_error_flag; } local_data = hypre_VectorData(hypre_ParVectorLocalVector(vector)); hypre_fprintf(file, "%b \n", global_size); for (i=0; i < 2; i++) { hypre_fprintf(file, "%b ", partitioning[i] + base_j); } hypre_fprintf(file, "\n"); part0 = partitioning[0]; for (j = part0; j < partitioning[1]; j++) { hypre_fprintf(file, "%b %.14e\n", j + base_j, local_data[(HYPRE_Int)(j-part0)]); } fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorReadIJ * Warning: wrong base for assumed 
partition if base > 0 *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorReadIJ( MPI_Comm comm, const char *filename, HYPRE_Int *base_j_ptr, hypre_ParVector **vector_ptr ) { HYPRE_BigInt global_size, J; hypre_ParVector *vector; hypre_Vector *local_vector; HYPRE_Complex *local_data; HYPRE_BigInt *partitioning; HYPRE_Int base_j; HYPRE_Int myid, num_procs, i, j; char new_filename[255]; FILE *file; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n"); return hypre_error_flag; } hypre_fscanf(file, "%b", &global_size); /* this may need to be changed so that the base is available in the file! */ partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_fscanf(file, "%b", partitioning); for (i = 0; i < 2; i++) { hypre_fscanf(file, "%b", partitioning+i); } /* This is not yet implemented correctly! 
*/ base_j = 0; vector = hypre_ParVectorCreate(comm, global_size, partitioning); hypre_ParVectorInitialize(vector); local_vector = hypre_ParVectorLocalVector(vector); local_data = hypre_VectorData(local_vector); for (j = 0; j < (HYPRE_Int)(partitioning[1] - partitioning[0]); j++) { hypre_fscanf(file, "%b %le", &J, local_data + j); } fclose(file); *base_j_ptr = base_j; *vector_ptr = vector; /* multivector code not written yet */ hypre_assert( hypre_ParVectorNumVectors(vector) == 1 ); if ( hypre_ParVectorNumVectors(vector) != 1 ) hypre_error(HYPRE_ERROR_GENERIC); return hypre_error_flag; } /*-------------------------------------------------------------------- * hypre_FillResponseParToVectorAll * Fill response function for determining the send processors * data exchange *--------------------------------------------------------------------*/ HYPRE_Int hypre_FillResponseParToVectorAll( void *p_recv_contact_buf, HYPRE_Int contact_size, HYPRE_Int contact_proc, void *ro, MPI_Comm comm, void **p_send_response_buf, HYPRE_Int *response_message_size ) { HYPRE_Int myid; HYPRE_Int i, index, count, elength; HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf; hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*)ro; hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*)response_obj->data2; hypre_MPI_Comm_rank(comm, &myid ); /*check to see if we need to allocate more space in send_proc_obj for ids*/ if (send_proc_obj->length == send_proc_obj->storage_length) { send_proc_obj->storage_length +=10; /*add space for 10 more processors*/ send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int, send_proc_obj->storage_length, HYPRE_MEMORY_HOST); send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST); } /*initialize*/ count = send_proc_obj->length; index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/ /*send proc*/ 
send_proc_obj->id[count] = contact_proc; /*do we need more storage for the elements?*/ if (send_proc_obj->element_storage_length < index + contact_size) { elength = hypre_max(contact_size, 10); elength += index; send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt, elength, HYPRE_MEMORY_HOST); send_proc_obj->element_storage_length = elength; } /*populate send_proc_obj*/ for (i=0; i< contact_size; i++) { send_proc_obj->elements[index++] = recv_contact_buf[i]; } send_proc_obj->vec_starts[count+1] = index; send_proc_obj->length++; /*output - no message to return (confirmation) */ *response_message_size = 0; return hypre_error_flag; } /* ----------------------------------------------------------------------------- * return the sum of all local elements of the vector * ----------------------------------------------------------------------------- */ HYPRE_Complex hypre_ParVectorLocalSumElts( hypre_ParVector * vector ) { return hypre_SeqVectorSumElts( hypre_ParVectorLocalVector(vector) ); } HYPRE_Int hypre_ParVectorGetValuesHost(hypre_ParVector *vector, HYPRE_Int num_values, HYPRE_BigInt *indices, HYPRE_Complex *values) { HYPRE_Int i, ierr = 0; HYPRE_BigInt first_index = hypre_ParVectorFirstIndex(vector); HYPRE_BigInt last_index = hypre_ParVectorLastIndex(vector); hypre_Vector *local_vector = hypre_ParVectorLocalVector(vector); HYPRE_Complex *data = hypre_VectorData(local_vector); /* if (hypre_VectorOwnsData(local_vector) == 0) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Vector does not own data! 
-- hypre_ParVectorGetValues."); return hypre_error_flag; } */ if (indices) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:ierr) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_values; i++) { HYPRE_BigInt index = indices[i]; if (index < first_index || index > last_index) { ierr ++; } else { HYPRE_Int local_index = (HYPRE_Int) (index - first_index); values[i] = data[local_index]; } } if (ierr) { hypre_error_in_arg(3); hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Index out of range! -- hypre_ParVectorGetValues."); hypre_printf("Index out of range! -- hypre_ParVectorGetValues\n"); } } else { if (num_values > hypre_VectorSize(local_vector)) { hypre_error_in_arg(2); return hypre_error_flag; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_values; i++) { values[i] = data[i]; } } return hypre_error_flag; } HYPRE_Int hypre_ParVectorGetValues(hypre_ParVector *vector, HYPRE_Int num_values, HYPRE_BigInt *indices, HYPRE_Complex *values) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (HYPRE_EXEC_DEVICE == hypre_GetExecPolicy1( hypre_ParVectorMemoryLocation(vector) )) { hypre_ParVectorGetValuesDevice(vector, num_values, indices, values); } else #endif { hypre_ParVectorGetValuesHost(vector, num_values, indices, values); } return hypre_error_flag; }
valid.mob5.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_256_28_28_256_3_3.h"
#include "gen_ukr_A4B2gemm_1_256_28_28_256_3_3.h"

/*
 * Auto-generated convolution driver (one 256x28x28 layer with 3x3 kernels,
 * judging by the included gen_ukr_* headers).  Each OpenMP thread first
 * repacks its share of the weight tensor oriB into B with 8x8 AVX
 * transposes, then all threads run the machine-generated loop nest that
 * tiles channels (c*), spatial positions (xy*) and filters (f*) and calls
 * the scatter GEMM micro-kernels.
 *
 * A    - input activations (read)
 * B    - repacked weights (written by the transpose phase, then read)
 * C    - output activations (accumulated into by the micro-kernels)
 * oriB - original, unpacked weights (read only)
 *
 * NOTE(review): uNf/uNc/uNw/uNh and the tile sizes Tf2/Tc1/Txy3 are macros
 * from the included headers -- not visible here; the comments below assume
 * the 28x28 spatial extent hard-coded in the loop bounds.  Must be called
 * from inside an OpenMP parallel region (omp_get_thread_num / omp barrier).
 */
void testrun(float* A ,float*B, float*C, float*oriB ){
    int tid = omp_get_thread_num();
    /* Nx/Ny/Nh are unused below; offsets use the literal 28/30/3 instead. */
    int Nx = 28;
    int Ny = 28;
    int Nh = 3;
    /* Per-row input strides handed to the micro-kernel; temporarily bumped
       by +2 for rows that wrap past the right image edge (see below). */
    long long Astrides[6] = {0,1,2,3,4,5};
    int b1 = 0;  /* batch index (single batch) */

    /* Phase 1: repack weights.  Thread tid transposes 8x8 blocks of oriB
       into the 16-wide packed layout of B; the fpck loop walks filter
       blocks of 16, the cwh loop walks the c*w*h dimension in steps of 8. */
    for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
        for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
            transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
            transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
        }
    }
    /* All weights must be packed before any thread starts consuming B. */
#pragma omp barrier
    // begin push button generated block
    /* Generated tiling: outer c5/xy5/f5 cover the whole problem in one trip;
       inner levels tile by Tf2/Tc1/Txy3 down to a 6x16 register tile. */
    for(int c5=0;c5<256+0;c5+=256) {
    for(int xy5=0;xy5<784+0;xy5+=784) {
    for(int f5=0;f5<256+0;f5+=256) {
    for(int c4=c5;c4<min(256, 256+c5);c4+=256) {
    for(int xy4=xy5;xy4<min(784, 784+xy5);xy4+=784) {
    for(int f4=f5;f4<min(256, 256+f5);f4+=Tf2) {
    for(int c3=c4;c3<min(256, 256+c4);c3+=Tc1) {
    for(int xy3=xy4;xy3<min(784, 784+xy4);xy3+=Txy3) {
    for(int f3=f4;f3<min(256, Tf2+f4);f3+=Tf2) {
    for(int xy2=xy3;xy2<min(784, Txy3+xy3);xy2+=6) {
    for(int f2=f3;f2<min(256, Tf2+f3);f2+=16) {
    for(int c2=c3;c2<min(256, Tc1+c3);c2+=Tc1) {
    for(int c1=c2;c1<min(256, Tc1+c2);c1+=Tc1) {
    for(int xy1=xy2;xy1<min(784, 6+xy2);xy1+=6) {
    for(int f1=f2;f1<min(256, 16+f2);f1+=16) {
        int ctile=min(Tc1, 256-c1);          /* channels left in this tile */
        int x1=xy1/28;                        /* row of the flat xy index */
        int y1=xy1%28/1;                      /* column of the flat xy index */
        int c1_1=c1/1;
        int c1_2=c1%1/1;
        int kf1_1=f1/16;                      /* packed-filter block */
        int kf1_2=f1%16/1;                    /* lane within the block */
        int of1_1=f1/1;
        int of1_2=f1%1/1;
        /* Linearized base offsets into A (padded 30-wide rows), packed B,
           and C (28x28 output plane per filter). */
        int offsetA=0+b1*230400+c1_1*900+1*x1*30+1*y1*1+c1_2*1;
        int offsetB=0+kf1_1*36864+c1*144+0*48+0*16+kf1_2*1;
        int offsetC=0+b1*200704+of1_1*784+x1*28+y1*1+of1_2*1;
        if(28-y1>=6){
            /* Whole 6-wide tile fits in the current row. */
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
        else if(28*28-xy1>=6){
            /* Tile wraps to the next row: skip the 2 padding columns by
               bumping the strides of the wrapped rows, then restore. */
            for(int sti=28-y1;sti<6;sti+=1) {
                Astrides[sti]+=2;
            }
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
            for(int sti=28-y1;sti<6;sti+=1) {
                Astrides[sti]-=2;
            }
        }
        else{
            /* Fewer than 6 positions remain (end of image): 4-row kernel. */
            cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
    } } } } } } } } } } } } } } }
    // end push button generated block
}
simple.c
/* Copyright IBM Corporation, 2019
 * author : Bob Walkup
 */

#include <stdio.h>
#include <stdlib.h>

/* HPM (hardware performance monitor) hooks supplied by an external
 * instrumentation library; add extern "C" for C++ codes. */
void HPM_Init(void);
void HPM_Start(char *);
void HPM_Stop(char *);
void HPM_Print(void);

/*
 * OpenMP benchmark: fill two large arrays in parallel, then time an
 * instrumented parallel dot product with the HPM library.
 *
 * Returns 0 on success, EXIT_FAILURE if the work arrays cannot be
 * allocated (each array is ~800 MB, so failure is quite possible).
 */
int main(int argc, char * argv[])
{
   double sum, * x, * y;
   int i, n;

   n = 100000000;

   /* Cast n to size_t before multiplying so the byte count is computed in
      size_t, and check the results: the original dereferenced the pointers
      unconditionally, which is undefined behavior on allocation failure. */
   x = (double *) malloc((size_t) n * sizeof(double));
   y = (double *) malloc((size_t) n * sizeof(double));
   if (x == NULL || y == NULL) {
      fprintf(stderr, "allocation of %d doubles failed\n", n);
      free(x);   /* free(NULL) is a harmless no-op */
      free(y);
      return EXIT_FAILURE;
   }

#pragma omp parallel for
   for (i=0; i<n; i++) x[i] = 1.0 + (double) (i%10);

#pragma omp parallel for
   for (i=0; i<n; i++) y[i] = 1.0 + (double) (i%20);

   HPM_Init();
   HPM_Start("loop");

   sum = 0;
#pragma omp parallel for reduction(+:sum)
   for (i=0; i<n; i++) sum += x[i]*y[i];

   HPM_Stop("loop");

   printf("sum = %.6lf\n", sum);

   HPM_Print();

   /* Release the work arrays (the original leaked both). */
   free(x);
   free(y);

   return 0;
}
viter.c
#include "libimagequant.h"
#include "pam.h"
#include "viter.h"
#include "nearest.h"

#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#else
/* Single-threaded fallbacks so the code below compiles without OpenMP. */
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif

/*
 * Voronoi iteration: new palette color is computed from weighted average of colors that map to that palette entry.
 */

/* Zero the accumulator array: one (VITER_CACHE_LINE_GAP + map->colors)
 * slice per thread.  The gap presumably pads slices apart to avoid false
 * sharing between threads -- defined elsewhere, TODO confirm. */
LIQ_PRIVATE void
viter_init (const colormap * map, const unsigned int max_threads, viter_state average_color[])
{
  memset (average_color, 0,
          sizeof (average_color[0]) * (VITER_CACHE_LINE_GAP + map->colors) * max_threads);
}

/* Fold one color, weighted by `value`, into palette slot `match` inside
 * the calling thread's private slice of average_color. */
LIQ_PRIVATE void
viter_update_color (const f_pixel acolor, const float value, const colormap * map,
                    unsigned int match, const unsigned int thread, viter_state average_color[])
{
  /* Redirect the palette index into this thread's slice. */
  match += thread * (VITER_CACHE_LINE_GAP + map->colors);

  average_color[match].a += acolor.a * value;
  average_color[match].r += acolor.r * value;
  average_color[match].g += acolor.g * value;
  average_color[match].b += acolor.b * value;
  average_color[match].total += value;
}

/* Reduce the per-thread accumulators and write the new palette colors:
 * each non-fixed entry becomes the weighted average of the colors mapped
 * to it.  Entries that attracted no colors keep their color and get a
 * tiny index-based popularity (i/1024.0) instead of zero. */
LIQ_PRIVATE void
viter_finalize (colormap * map, const unsigned int max_threads, const viter_state average_color[])
{
  for (unsigned int i = 0; i < map->colors; i++) {
    double a = 0, r = 0, g = 0, b = 0, total = 0;

    // Aggregate results from all threads
    for (unsigned int t = 0; t < max_threads; t++) {
      const unsigned int offset = (VITER_CACHE_LINE_GAP + map->colors) * t + i;

      a += average_color[offset].a;
      r += average_color[offset].r;
      g += average_color[offset].g;
      b += average_color[offset].b;
      total += average_color[offset].total;
    }

    if (total && !map->palette[i].fixed) {
      map->palette[i].acolor = (f_pixel) {
        .a = a / total, .r = r / total, .g = g / total, .b = b / total,
      };
    } else {
      total = i / 1024.0;
    }

    map->palette[i].popularity = total;
  }
}

/* One k-means/Voronoi relaxation pass: map every histogram color to its
 * nearest palette entry (parallelized over the histogram), accumulate the
 * weighted sums per entry, then recompute the palette via viter_finalize.
 *
 * Returns the average perceptual difference of this assignment (total
 * weighted diff divided by hist->total_perceptual_weight) -- callers can
 * use it as a convergence measure.  `callback`, if non-NULL, is invoked
 * once per histogram item with its diff. */
LIQ_PRIVATE double
viter_do_iteration (histogram * hist, colormap * const map, const float min_opaque_val,
                    viter_callback callback, const bool fast_palette)
{
  viter_state *average_color;
  const unsigned int max_threads = omp_get_max_threads ();
  double total_diff = 0;

  /* NOTE(review): g_alloca is stack allocation -- large palettes times many
     threads could get big; assumed bounded by libimagequant's limits. */
  average_color = g_alloca (sizeof (viter_state) * (VITER_CACHE_LINE_GAP + map->colors) * max_threads);
  viter_init (map, max_threads, average_color);

  {
    struct nearest_map *const n = nearest_init (map, fast_palette);
    hist_item *const achv = hist->achv;
    const int hist_size = hist->size;
    int j;

    /* Parallel only when the histogram is big enough to be worth it; each
       thread writes its own average_color slice, so no locking is needed. */
#pragma omp parallel for if (hist_size > 3000) \
        schedule(static) default(none) shared(average_color,callback) reduction(+:total_diff)
    for (j = 0; j < hist_size; j++) {
      float diff;
      unsigned int match = nearest_search (n, achv[j].acolor,
                                           achv[j].tmp.likely_colormap_index,
                                           min_opaque_val, &diff);
      /* Cache the match to seed the next iteration's nearest_search. */
      achv[j].tmp.likely_colormap_index = match;
      total_diff += diff * achv[j].perceptual_weight;

      viter_update_color (achv[j].acolor, achv[j].perceptual_weight, map, match,
                          omp_get_thread_num (), average_color);

      if (callback)
        callback (&achv[j], diff);
    }

    nearest_free (n);
  }

  viter_finalize (map, max_threads, average_color);

  return total_diff / hist->total_perceptual_weight;
}
machinelearning_function.h
int predict(struct network *net); double sigmoid_prime(double z); double sigmoid(double z); void train(struct network *net); void init(struct network *net); double randn(void); void feedforward(struct network *net); void back_pass(struct network *net); void backpropagation(struct network *net); void cost_report(struct network * net ); void report(struct network *net); int layersize[NUM_LAYER] = {INPUT_SIZE,HIDDEN_SIZE,OUTPUT_SIZE}; double randn(void) { double v1, v2, s; do { v1 = 2 * ((double) rand() / RAND_MAX) - 1; // -1.0 ~ 1.0 까지 v2 = 2 * ((double) rand() / RAND_MAX) - 1; // -1.0 ~ 1.0 까지의 값 s = v1 * v1 + v2 * v2; } while (s >= 1 || s == 0); s = sqrt( (-2 * log(s)) / s ); return v1 * s; } void init(struct network *net) { int i,j,k; int before_ac_weights = 0; int before_ac_neurals = 0; timeutils *t_feedforward = &net->t_feedforward; timeutils *t_back_pass = &net->t_back_pass; timeutils *t_backpropagation = &net->t_backpropagation; net->best_recog = 0.0; TIMER_INIT(t_feedforward); //시간 초기화 TIMER_INIT(t_back_pass); TIMER_INIT(t_backpropagation); net->cost_rate = 0; net->num_layer = NUM_LAYER; net->layer_size = (int *)layersize; net->learning_rate = LEARNING_RATE; net->mini_batch_size = MINI_BATCH_SIZE; net->epoch = EPOCH; net->ac_weight = (int *) malloc(sizeof(double) * net->num_layer); net->ac_neuron = (int *) malloc(sizeof(double) * net->num_layer); net->thread = (int *)malloc(sizeof(int) * THREAD_MODE_NUM); net->mode = (int *)malloc(sizeof(int)*MODE_NUM); net->record_random = (int *)malloc(sizeof(int) * MINI_BATCH_SIZE); net->train_q_name = TRAIN_Q; net->train_a_name = TRAIN_A; net->test_q_name = TEST_Q; net->test_a_name = TEST_A; net->report_file = REPORT_F; //init mode & thread for(i=0;i<THREAD_MODE_NUM;i++) net->thread[i] =THREAD_NUM; for(i=0;i<MODE_NUM;i++) { net->mode[i] =0; if(i==1)net->mode[i] = 1; } for (i = 0; i < net->num_layer; i++) { net->ac_neuron[i] = net->layer_size[i] + before_ac_neurals;//ac_neuron은 여태 누적한 neuron갯수.. 
before_ac_neurals = net->ac_neuron[i]; if (i == net->num_layer-1) continue; net->ac_weight[i] = net->layer_size[i] * net->layer_size[i+1] + before_ac_weights; //ac_weight는 여태 누적한 weight 의 갯수.. before_ac_weights = net->ac_weight[i]; } net->neuron = (double *) malloc(sizeof(double) * net->mini_batch_size * TOTAL_NEURONS(net)); //neuron 배열의 크기는 minibatch_size * 총 뉴련의 숫자 net->zs = (double *) malloc(sizeof(double) * net->mini_batch_size * TOTAL_NEURONS(net)); net->error = (double *) malloc(sizeof(double) * net->mini_batch_size * TOTAL_NEURONS(net)); net->bias = (double *) malloc(sizeof(double) * TOTAL_NEURONS(net)); net->weight = (double *) malloc(sizeof(double) * TOTAL_WEIGHTS(net)); for (i = 0; i < TOTAL_WEIGHTS(net); i++) { net->weight[i] = randn(); } for (i = 0; i < TOTAL_NEURONS(net); i++) { net->bias[i] = randn(); } } #if 0 void train(struct network *net) { int i, j, k, l; int nr_train = net->nr_train_data; int nr_loop = (int)(net->nr_train_data/net->mini_batch_size); //전체데이터를 미니배치 사이즈 만큼 나눈 수 입니다.(업데이트 할 숫자) int first_layer_size = AC_NEURONS(net, 0);//input size int last_layer_size = net->layer_size[net->num_layer-1]; //output size int recog = 0; // init weight with bias with random values for (i = 0; i < TOTAL_WEIGHTS(net); i++) { net->weight[i] = (double)rand()/(RAND_MAX/2)-1; } for (i = 0; i < TOTAL_NEURONS(net); i++) { net->bias[i] = 0; } for (i = 0; i < net->epoch; i++) { for (j = 0; j < nr_loop; j++)//j는 업데이트 하는 번수 (전체데이터를 mini batch로 나눈 값) { // copy input and output for SGD for (k = 0; k < net->mini_batch_size; k++) { //k는데이터 번호를 뜻합니다, mini batch 사이즈 전까지 증가합니다 int s_index = (int) rand()%nr_train; // copy input to first layer of neuron array for (l = 0; l < first_layer_size; l++) //l은 28*28 까지 증가합니다 NEURON(net, 0, k, l) = DATA_TRAIN_Q(net, s_index, l); //s_index 번째 데이터를 가져옵니다 그것을 net->neuron[net->layer_size[0]*(k) + (l)] 에 넣습니다. //즉 neuron 배열에 차곡차곡 랜덤한 인풋값을 넣습니다. 
for (l = 0; l < last_layer_size; l++) ERROR(net, net->num_layer-1, k, l) = 0.0; // copy output to error array ERROR(net, net->num_layer-1, k, DATA_TRAIN_A(net, s_index)) = 1.0; //답안 배열에 1의값 넣습니다. net->record_random[k] = s_index; } // feedforward + back_pass mini_batch size 만큼 다하고 함수들 실행 feedforward(net); cost_report(net); back_pass(net); backpropagation(net); } net->cost_rate = (net->cost_rate)/((net->mini_batch_size)*nr_loop); //reporting cost printf("%dth epoch cost = %lf \n", i,net->cost_rate); net->cost_rate = 0; } // test per every epoch recog = predict(net); if(recog > net->best_recog) net->best_recog = recog; printf("result %d / %d \n", recog, net->nr_test_data); } void feedforward(struct network *net) { int i, j, k, l, m; double sum = 0.0; timeutils *t_feedforward = &net->t_feedforward; // feedforward START_TIME(t_feedforward); sum = 0.0; if(net->mode[0]) { for (i = 0; i < net->num_layer-1; i++) { #pragma omp parallel for num_threads(net->thread[0]) private(j, k, l) reduction(+:sum) collapse(2) for(j=0;j<net->mini_batch_size;j++) { for (k = 0; k < net->layer_size[i+1]; k++) { for (l = 0; l < net->layer_size[i]; l++) { sum = sum + NEURON(net, i, j, l) * WEIGHT(net, i, l, k); } ZS(net, i+1, j, k) = sum + BIAS(net, i+1, k); NEURON(net, i+1, j, k) = sigmoid(ZS(net, i+1, j, k)); sum = 0.0; } } } } else { double *tmp, *tmp_bias; for (i = 0; i < net->num_layer-1; i++) { cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, net->mini_batch_size, net->layer_size[i+1], net->layer_size[i], 1.0, (const double *)&NEURON(net, i, 0, 0),net->layer_size[i], (const double *)&WEIGHT(net, i, 0, 0), net->layer_size[i+1], 0.0,&NEURON(net, i+1, 0, 0), net->layer_size[i+1]); //weight 와 입력값을 곱해서 배열에 저장합니다. 
#pragma omp parallel for num_threads(net->thread[0]) for (j = 0; j < net->mini_batch_size; j++) for (k = 0; k < net->layer_size[i+1]; k++) { ZS(net, i+1, j, k) = NEURON(net,i+1,j,k)+ BIAS(net,i+1,k); NEURON(net, i+1, j, k) = sigmoid(ZS(net, i+1, j, k)); //zs에 sigmoid를 취한 값을 그다음 뉴런에 저장합니다!! } } } END_TIME(t_feedforward); } void back_pass(struct network *net) { int i, j, k, l; int nr_chunk = net->thread[0]; double sum = 0.0; timeutils *t_back_pass = &net->t_back_pass; START_TIME(t_back_pass); if(net->mode[1]) { // calculate delta #pragma omp parallel for num_threads(net->thread[1]) private(i, j) collapse(2) for (i = 0; i < net->mini_batch_size; i++) { for (j = 0; j < net->layer_size[net->num_layer-1]; j++) { // calculate delta in last output layer ERROR(net, net->num_layer-1, i, j) = (NEURON(net, net->num_layer-1, i, j)-ERROR(net, net->num_layer-1, i, j)) * sigmoid_prime(ZS(net, net->num_layer-1, i, j)); } } sum = 0.0; for (i = net->num_layer-2; i > 0; i--) { #pragma omp parallel for num_threads(net->thread[2]) private(j, k, l) reduction(+:sum) collapse(2) for (j = 0; j < net->mini_batch_size; j++) { for (k = 0; k < net->layer_size[i]; k++) { for (l = 0; l < net->layer_size[i+1]; l++) { // calculate delta from before layer sum = sum + ERROR(net, i+1, j, l) * WEIGHT(net, i, k, l); } ERROR(net, i, j, k) = sum * sigmoid_prime(ZS(net, i, j, k)); sum = 0.0; } } } } else { double * temp1;//neuron - error double * temp2;//sigmoid zs double * temp_error; temp1 = (double*)malloc(sizeof(double) * net->mini_batch_size * net->layer_size[net->num_layer-1]); temp2 = (double*)malloc(sizeof(double) * net->mini_batch_size * net->layer_size[net->num_layer-1]); // neuron - error vdSub(net->layer_size[net->num_layer-1]*net->mini_batch_size,&NEURON(net, net->num_layer-1, 0, 0),&ERROR(net, net->num_layer-1, 0, 0),temp1); //sigmoid zs #pragma omp parallel for num_threads(net->thread[1]) for (i = 0; i < net->mini_batch_size*net->layer_size[net->num_layer-1]; i++) { 
temp2[i]=sigmoid_prime(ZS(net, net->num_layer-1, 0, i)); } //temp1 * temp2 (when this loop is end first delta is done!!) vdMul(net->layer_size[net->num_layer-1]*net->mini_batch_size,temp1,temp2,&ERROR(net, net->num_layer-1, 0, 0)); //caculrate delta to using backpropagation algorithm for (i = net->num_layer-2; i > 0; i--) { for (j = 0; j < net->mini_batch_size; j++) { //temp_error = weight * past_error temp_error = (double*)malloc(sizeof(double)*net->layer_size[i]); //calculate temp_error cblas_dgemv (CblasRowMajor, CblasNoTrans, net->layer_size[i], net->layer_size[i+1], 1.0,(const double *)&WEIGHT(net, i, 0, 0), net->layer_size[i+1],(const double *)&ERROR(net,i+1, j, 0),1 ,0.0 , temp_error , 1); //calculate delta = past error * weight * sigmoidprime(zs) #pragma omp parallel for num_threads(net->thread[2]) for(k=0;k<net->layer_size[i];k++) { ERROR(net, i, j, k) = temp_error[k]*sigmoid_prime(ZS(net, i, j, k)); } } } } END_TIME(t_back_pass); } /* Operation like backpropagation */ void backpropagation(struct network *net) { int i, j, k, l; timeutils *t_backpropagation = &net->t_backpropagation; double eta = net->learning_rate; double mini = (double) net->mini_batch_size; double sum = 0; START_TIME(t_backpropagation); //update bias for (i = 1; i < net->num_layer; i++) { #pragma omp parallel for num_threads(net->thread[3]) private(j, k, l) for (j = 0; j < net->layer_size[i]; j++) { for (k = 0; k < net->mini_batch_size; k++) { BIAS(net, i, j) -= (eta/mini)*ERROR(net, i, k, j); } } } //update weight if(net->mode[2]) { // update weight for (i = 0; i < net->num_layer-1; i++) { #pragma omp parallel for num_threads(net->thread[4]) private(j, k, l) collapse(2) for (j = 0; j < net->layer_size[i]; j++) { for (k = 0; k < net->layer_size[i+1]; k++) { #pragma omp simd reduction(+:sum) for (l = 0; l < net->mini_batch_size; l++) { // calculate delta from before layer sum += (eta/mini)*(NEURON(net, i, l, j) * ERROR(net, i+1, l, k)); } WEIGHT(net, i, j, k) -=sum; sum = 0; } } } } // 
update weight else { for (i = 0; i < net->num_layer-1; i++) { cblas_dgemm(CblasRowMajor, CblasTrans, CblasNoTrans,net->layer_size[i], net->layer_size[i+1],net->mini_batch_size, -(eta/mini), (const double *)&NEURON(net, i, 0, 0),net->layer_size[i], (const double *)&ERROR(net, i+1, 0, 0), net->layer_size[i+1], 1.0,&WEIGHT(net, i, 0, 0), net->layer_size[i+1]); } } END_TIME(t_backpropagation); } double sigmoid(double z) { return (1/(1 + exp(-z))); } double sigmoid_prime(double z) { return sigmoid(z)*(1-sigmoid(z)); } int predict(struct network *net) { int nr_true = 0; int i, j, k, l; double sum = 0.0; int nr_loop = (int)(net->nr_test_data); int first_layer_size = AC_NEURONS(net, 0); int last_layer_size = net->layer_size[net->num_layer-1]; double cost_rate = 0; for (i = 0; i < nr_loop; i++) { // copy input to first layer of neuron array for (j = 0; j < first_layer_size; j++) { NEURON(net, 0, 0, j) = DATA_TEST_Q(net, i, j); } //feedforward sum = 0.0; for (j = 0; j < net->num_layer-1; j++) { #pragma omp parallel for num_threads(100) private(k, l) reduction(+:sum) for (k = 0; k < net->layer_size[j+1]; k++) { for (l = 0; l < net->layer_size[j]; l++) { sum = sum + NEURON(net, j, 0, l) * WEIGHT(net, j, l, k); } ZS(net, j+1, 0, k) = sum + BIAS(net, j+1, k); NEURON(net, j+1, 0, k) = sigmoid(ZS(net, j+1, 0, k)); sum = 0.0; } } double max = NEURON(net, net->num_layer-1, 0, 0); int max_idx = 0; for (j = 0; j < last_layer_size; j++) { if (NEURON(net, net->num_layer-1, 0, j) > max) { max = NEURON(net, net->num_layer-1, 0, j); max_idx = j; } } if (DATA_TEST_A(net, i) == max_idx) nr_true ++; } return nr_true; } void cost_report(struct network * net ) { int nr_train_data = net->nr_train_data; int i; for(i=0;i<net->mini_batch_size;i++) { net->cost_rate += 1- NEURON(net, net->num_layer-1,i,DATA_TRAIN_A(net,net->record_random[i])); } } void report(struct network *net) { int *thread = (int *)net-> thread; int *mode = (int *)net->mode; timeutils *t_feedforward = &net->t_feedforward; 
timeutils *t_back_pass = &net->t_back_pass; timeutils *t_backpropagation = &net->t_backpropagation; timeutils t; timeutils *total = &t; TIMER_INIT(total); char *modeid[2] = {"MKL","OpenMP"}; int i = 0; FILE *f = fopen(net->report_file, "a+"); fprintf( f, "\n=======================REPORT=======================\n"); fprintf( f, "epoch : %d\n", net->epoch); fprintf( f, "learning_rate : %f\n", net->learning_rate); fprintf( f, "recognization rate : %d/%d\n", net->best_recog, net->nr_test_data); fprintf( f, "=======================THREADS======================\n"); fprintf( f, "feedforward thread : %d\n", thread[0]); fprintf( f, "back_pass thread1 : %d\n", thread[1]); fprintf( f, "back_pass thread2 : %d\n", thread[2]); fprintf( f, "backpropagation thread1 : %d\n", thread[3]); fprintf( f, "backpropagation thread2 : %d\n", thread[4]); fprintf( f, "========================MODE========================\n"); fprintf( f, "feedforward mode : %s\n", modeid[mode[0]]); fprintf( f, "back_pass mode : %s\n", modeid[mode[1]]); fprintf( f, "backpropagation mode : %s\n", modeid[mode[2]]); fprintf( f, "========================TIME========================\n"); fprintf( f, "feedforward : %ld.%d sec\n", TOTAL_SEC_TIME(t_feedforward), TOTAL_SEC_UTIME(t_feedforward)); fprintf( f, "back_pass : %ld.%d sec\n", TOTAL_SEC_TIME(t_back_pass), TOTAL_SEC_UTIME(t_back_pass)); fprintf( f, "backpropagation : %ld.%d sec\n", TOTAL_SEC_TIME(t_backpropagation), TOTAL_SEC_UTIME(t_backpropagation)); TIMER_ADD(t_feedforward, total); TIMER_ADD(t_back_pass, total); TIMER_ADD(t_backpropagation, total); fprintf( f, "total : %ld.%d sec\n", TOTAL_SEC_TIME(total), TOTAL_SEC_UTIME(total)); } #endif
hicoo.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <HiParTI.h>
#include "hicoo.h"

/**
 * Create a new sparse tensor in HiCOO format
 * @param hitsr  a pointer to an uninitialized sparse tensor
 * @param nmodes number of modes the tensor will have
 * @param ndims  the dimension of each mode the tensor will have
 * @param nnz    number of nonzeros the tensor will have
 * @param sb_bits log2 of the block size
 * @param sk_bits log2 of the kernel size
 * @param sc_bits log2 of the chunk size (in blocks)
 * @return 0 on success, an error code otherwise
 */
int ptiNewSparseTensorHiCOO(
    ptiSparseTensorHiCOO *hitsr,
    const ptiIndex nmodes,
    const ptiIndex ndims[],
    const ptiNnzIndex nnz,
    const ptiElementIndex sb_bits,
    const ptiElementIndex sk_bits,
    const ptiElementIndex sc_bits)
{
    ptiIndex i;
    int result;

    hitsr->nmodes = nmodes;
    hitsr->sortorder = malloc(nmodes * sizeof hitsr->sortorder[0]);
    pti_CheckOSError(!hitsr->sortorder, "HiSpTns New"); /* was unchecked */
    for(i = 0; i < nmodes; ++i) {
        hitsr->sortorder[i] = i;
    }
    hitsr->ndims = malloc(nmodes * sizeof *hitsr->ndims);
    pti_CheckOSError(!hitsr->ndims, "HiSpTns New");
    memcpy(hitsr->ndims, ndims, nmodes * sizeof *hitsr->ndims);
    hitsr->nnz = nnz;

    /* Parameters */
    hitsr->sb_bits = sb_bits; // block size by nnz
    hitsr->sk_bits = sk_bits; // kernel size by nnz
    hitsr->sc_bits = sc_bits; // chunk size by blocks
    ptiIndex sk = (ptiIndex)pow(2, sk_bits);

    /* Per-mode kernel scheduler: one vector per kernel slice. */
    hitsr->kschr = (ptiIndexVector**)malloc(nmodes * sizeof *hitsr->kschr);
    pti_CheckOSError(!hitsr->kschr, "HiSpTns New");
    for(ptiIndex m = 0; m < nmodes; ++m) {
        ptiIndex kernel_ndim = (ndims[m] + sk - 1)/sk;
        hitsr->kschr[m] = (ptiIndexVector*)malloc(kernel_ndim * sizeof(*(hitsr->kschr[m])));
        pti_CheckOSError(!hitsr->kschr[m], "HiSpTns New");
        for(ptiIndex i = 0; i < kernel_ndim; ++i) {
            result = ptiNewIndexVector(&(hitsr->kschr[m][i]), 0, 0);
            pti_CheckError(result, "HiSpTns New", NULL);
        }
    }
    hitsr->nkiters = (ptiIndex*)malloc(nmodes * sizeof *hitsr->nkiters);
    pti_CheckOSError(!hitsr->nkiters, "HiSpTns New"); /* was unchecked */

    result = ptiNewNnzIndexVector(&hitsr->kptr, 0, 0);
    pti_CheckError(result, "HiSpTns New", NULL);
    result = ptiNewNnzIndexVector(&hitsr->cptr, 0, 0);
    pti_CheckError(result, "HiSpTns New", NULL);

    /* Balanced structures (load-balanced kernel schedule). */
    hitsr->kschr_balanced = (ptiIndexVector**)malloc(nmodes * sizeof *hitsr->kschr_balanced);
    pti_CheckOSError(!hitsr->kschr_balanced, "HiSpTns New");
    for(ptiIndex m = 0; m < nmodes; ++m) {
        ptiIndex kernel_ndim = (ndims[m] + sk - 1)/sk;
        hitsr->kschr_balanced[m] = (ptiIndexVector*)malloc(kernel_ndim * sizeof(*(hitsr->kschr_balanced[m])));
        pti_CheckOSError(!hitsr->kschr_balanced[m], "HiSpTns New");
        for(ptiIndex i = 0; i < kernel_ndim; ++i) {
            result = ptiNewIndexVector(&(hitsr->kschr_balanced[m][i]), 0, 0);
            pti_CheckError(result, "HiSpTns New", NULL);
        }
    }
    hitsr->kschr_balanced_pos = (ptiIndexVector**)malloc(nmodes * sizeof *hitsr->kschr_balanced_pos);
    pti_CheckOSError(!hitsr->kschr_balanced_pos, "HiSpTns New");
    for(ptiIndex m = 0; m < nmodes; ++m) {
        ptiIndex kernel_ndim = (ndims[m] + sk - 1)/sk;
        hitsr->kschr_balanced_pos[m] = (ptiIndexVector*)malloc(kernel_ndim * sizeof(*(hitsr->kschr_balanced_pos[m])));
        pti_CheckOSError(!hitsr->kschr_balanced_pos[m], "HiSpTns New");
        for(ptiIndex i = 0; i < kernel_ndim; ++i) {
            result = ptiNewIndexVector(&(hitsr->kschr_balanced_pos[m][i]), 0, 0);
            pti_CheckError(result, "HiSpTns New", NULL);
        }
    }
    hitsr->nkpars = (ptiIndex*)malloc(nmodes * sizeof(ptiIndex));
    pti_CheckOSError(!hitsr->nkpars, "HiSpTns New");
    hitsr->kschr_rest = (ptiIndexVector*)malloc(nmodes * sizeof *hitsr->kschr_rest);
    pti_CheckOSError(!hitsr->kschr_rest, "HiSpTns New");
    for(ptiIndex m = 0; m < nmodes; ++m) {
        result = ptiNewIndexVector(&(hitsr->kschr_rest[m]), 0, 0);
        pti_CheckError(result, "HiSpTns New", NULL);
    }
    result = ptiNewNnzIndexVector(&hitsr->knnzs, 0, 0);
    pti_CheckError(result, "HiSpTns New", NULL);

    result = ptiNewNnzIndexVector(&hitsr->bptr, 0, 0);
    pti_CheckError(result, "HiSpTns New", NULL);
    hitsr->binds = malloc(nmodes * sizeof *hitsr->binds);
    pti_CheckOSError(!hitsr->binds, "HiSpTns New");
    for(i = 0; i < nmodes; ++i) {
        result = ptiNewBlockIndexVector(&hitsr->binds[i], 0, 0);
        pti_CheckError(result, "HiSpTns New", NULL);
    }

    hitsr->einds = malloc(nmodes * sizeof *hitsr->einds);
    pti_CheckOSError(!hitsr->einds, "HiSpTns New");
    for(i = 0; i < nmodes; ++i) {
        result = ptiNewElementIndexVector(&hitsr->einds[i], 0, 0);
        pti_CheckError(result, "HiSpTns New", NULL);
    }
    result = ptiNewValueVector(&hitsr->values, 0, 0);
    pti_CheckError(result, "HiSpTns New", NULL);

    return 0;
}


/**
 * Create a new sparse tensor in HiCOO format, without the nonzero count
 * and without the load-balancing structures (those fields are set to
 * NULL/empty so that ptiFreeSparseTensorHiCOO can release the tensor
 * safely).
 * @param hitsr  a pointer to an uninitialized sparse tensor
 * @param nmodes number of modes the tensor will have
 * @param ndims  the dimension of each mode the tensor will have
 * @return 0 on success, an error code otherwise
 */
int ptiNewSparseTensorHiCOO_NoNnz(
    ptiSparseTensorHiCOO *hitsr,
    const ptiIndex nmodes,
    const ptiIndex ndims[],
    const ptiElementIndex sb_bits,
    const ptiElementIndex sk_bits,
    const ptiElementIndex sc_bits)
{
    ptiIndex i;
    int result;

    hitsr->nmodes = nmodes;
    hitsr->sortorder = malloc(nmodes * sizeof hitsr->sortorder[0]);
    pti_CheckOSError(!hitsr->sortorder, "HiSpTns New"); /* was unchecked */
    for(i = 0; i < nmodes; ++i) {
        hitsr->sortorder[i] = i;
    }
    hitsr->ndims = malloc(nmodes * sizeof *hitsr->ndims);
    pti_CheckOSError(!hitsr->ndims, "HiSpTns New");
    memcpy(hitsr->ndims, ndims, nmodes * sizeof *hitsr->ndims);
    hitsr->nnz = 0; /* was left uninitialized */

    /* Parameters */
    hitsr->sb_bits = sb_bits; // block size by nnz
    hitsr->sk_bits = sk_bits; // kernel size by nnz
    hitsr->sc_bits = sc_bits; // chunk size by blocks
    ptiIndex sk = (ptiIndex)pow(2, sk_bits);

    hitsr->kschr = (ptiIndexVector**)malloc(nmodes * sizeof *hitsr->kschr);
    pti_CheckOSError(!hitsr->kschr, "HiSpTns New");
    for(ptiIndex m = 0; m < nmodes; ++m) {
        ptiIndex kernel_ndim = (ndims[m] + sk - 1)/sk;
        hitsr->kschr[m] = (ptiIndexVector*)malloc(kernel_ndim * sizeof(*(hitsr->kschr[m])));
        pti_CheckOSError(!hitsr->kschr[m], "HiSpTns New");
        for(ptiIndex i = 0; i < kernel_ndim; ++i) {
            result = ptiNewIndexVector(&(hitsr->kschr[m][i]), 0, 0);
            pti_CheckError(result, "HiSpTns New", NULL);
        }
    }
    hitsr->nkiters = (ptiIndex*)malloc(nmodes * sizeof *hitsr->nkiters);
    pti_CheckOSError(!hitsr->nkiters, "HiSpTns New"); /* was unchecked */

    /* Balanced structures are not built by this constructor; NULL/empty
     * them so the destructor can tell they are absent (they were left
     * uninitialized before, which made a full free impossible). */
    hitsr->kschr_balanced = NULL;
    hitsr->kschr_balanced_pos = NULL;
    hitsr->nkpars = NULL;
    hitsr->kschr_rest = NULL;
    result = ptiNewNnzIndexVector(&hitsr->knnzs, 0, 0);
    pti_CheckError(result, "HiSpTns New", NULL);

    result = ptiNewNnzIndexVector(&hitsr->kptr, 0, 0);
    pti_CheckError(result, "HiSpTns New", NULL);
    result = ptiNewNnzIndexVector(&hitsr->cptr, 0, 0);
    pti_CheckError(result, "HiSpTns New", NULL);

    result = ptiNewNnzIndexVector(&hitsr->bptr, 0, 0);
    pti_CheckError(result, "HiSpTns New", NULL);
    hitsr->binds = malloc(nmodes * sizeof *hitsr->binds);
    pti_CheckOSError(!hitsr->binds, "HiSpTns New");
    for(i = 0; i < nmodes; ++i) {
        result = ptiNewBlockIndexVector(&hitsr->binds[i], 0, 0);
        pti_CheckError(result, "HiSpTns New", NULL);
    }

    hitsr->einds = malloc(nmodes * sizeof *hitsr->einds);
    pti_CheckOSError(!hitsr->einds, "HiSpTns New");
    for(i = 0; i < nmodes; ++i) {
        result = ptiNewElementIndexVector(&hitsr->einds[i], 0, 0);
        pti_CheckError(result, "HiSpTns New", NULL);
    }
    result = ptiNewValueVector(&hitsr->values, 0, 0);
    pti_CheckError(result, "HiSpTns New", NULL);

    return 0;
}


/**
 * Release any memory the HiCOO sparse tensor is holding.
 * Also frees the load-balancing structures (kschr_balanced,
 * kschr_balanced_pos, nkpars, kschr_rest, knnzs) that the previous
 * version leaked; tensors built by the _NoNnz constructor carry NULL
 * there and are skipped.
 * @param hitsr the tensor to release
 */
void ptiFreeSparseTensorHiCOO(ptiSparseTensorHiCOO *hitsr)
{
    ptiIndex i;
    ptiIndex nmodes = hitsr->nmodes;
    ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits);

    for(ptiIndex m = 0; m < nmodes; ++m) {
        ptiIndex kernel_ndim = (hitsr->ndims[m] + sk - 1)/sk;
        for(i = 0; i < kernel_ndim; ++i) {
            ptiFreeIndexVector(&(hitsr->kschr[m][i]));
        }
        free(hitsr->kschr[m]);
    }
    free(hitsr->kschr);
    free(hitsr->nkiters);

    /* Balanced structures (absent for _NoNnz tensors). */
    if(hitsr->kschr_balanced != NULL) {
        for(ptiIndex m = 0; m < nmodes; ++m) {
            ptiIndex kernel_ndim = (hitsr->ndims[m] + sk - 1)/sk;
            for(i = 0; i < kernel_ndim; ++i) {
                ptiFreeIndexVector(&(hitsr->kschr_balanced[m][i]));
            }
            free(hitsr->kschr_balanced[m]);
        }
        free(hitsr->kschr_balanced);
    }
    if(hitsr->kschr_balanced_pos != NULL) {
        for(ptiIndex m = 0; m < nmodes; ++m) {
            ptiIndex kernel_ndim = (hitsr->ndims[m] + sk - 1)/sk;
            for(i = 0; i < kernel_ndim; ++i) {
                ptiFreeIndexVector(&(hitsr->kschr_balanced_pos[m][i]));
            }
            free(hitsr->kschr_balanced_pos[m]);
        }
        free(hitsr->kschr_balanced_pos);
    }
    free(hitsr->nkpars); /* free(NULL) is a no-op */
    if(hitsr->kschr_rest != NULL) {
        for(ptiIndex m = 0; m < nmodes; ++m) {
            ptiFreeIndexVector(&(hitsr->kschr_rest[m]));
        }
        free(hitsr->kschr_rest);
    }
    ptiFreeNnzIndexVector(&hitsr->knnzs);

    ptiFreeNnzIndexVector(&hitsr->kptr);
    ptiFreeNnzIndexVector(&hitsr->cptr);
    ptiFreeNnzIndexVector(&hitsr->bptr);
    for(i = 0; i < nmodes; ++i) {
        ptiFreeBlockIndexVector(&hitsr->binds[i]);
        ptiFreeElementIndexVector(&hitsr->einds[i]);
    }
    free(hitsr->binds);
    free(hitsr->einds);
    ptiFreeValueVector(&hitsr->values);

    hitsr->nmodes = 0;
    hitsr->nnz = 0;
    hitsr->sb_bits = 0;
    hitsr->sk_bits = 0;
    hitsr->sc_bits = 0;

    free(hitsr->sortorder);
    free(hitsr->ndims);
}


/* Sum of squared nonzero values (squared Frobenius norm). */
double SparseTensorFrobeniusNormSquaredHiCOO(ptiSparseTensorHiCOO const * const hitsr)
{
    double norm = 0;
    ptiValue const * const restrict vals = hitsr->values.data;

#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel for reduction(+:norm)
#endif
    for(size_t n=0; n < hitsr->nnz; ++n) {
        norm += vals[n] * vals[n];
    }
    return norm;
}
State.h
//===-------- State.h - OpenMP State & ICV interface ------------- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Interface of the device runtime's thread/team state and the OpenMP
// internal control variables (ICVs).
//
//===----------------------------------------------------------------------===//

#ifndef OMPTARGET_STATE_H
#define OMPTARGET_STATE_H

#include "Debug.h"
#include "Types.h"

#pragma omp declare target

namespace _OMP {

namespace state {

inline constexpr uint32_t SharedScratchpadSize = SHARED_SCRATCHPAD_SIZE;

/// Initialize the state machinery. Must be called by all threads.
void init(bool IsSPMD);

/// Kinds of state values the lookup functions can resolve: ICVs plus
/// per-team parallel-region state.
enum ValueKind {
  VK_NThreads,
  VK_Level,
  VK_ActiveLevel,
  VK_MaxActiveLevels,
  VK_RunSched,
  // --- entries below are team state, not ICVs ---
  VK_RunSchedChunk,
  VK_ParallelRegionFn,
  VK_ParallelTeamSize,
};

/// Enter a new data environment (\p Ident identifies the source location).
void enterDataEnvironment(IdentTy *Ident);

/// Leave the current data environment.
void exitDataEnvironment();

/// RAII wrapper pairing enterDataEnvironment with exitDataEnvironment.
/// NOTE(review): "Date" looks like a typo for "Data", but the name is
/// part of the public interface and cannot be changed here.
struct DateEnvironmentRAII {
  DateEnvironmentRAII(IdentTy *Ident) { enterDataEnvironment(Ident); }
  ~DateEnvironmentRAII() { exitDataEnvironment(); }
};

/// Reset the state of the thread with id \p TId.
void resetStateForThread(uint32_t TId);

// Resolve a 32-bit value / a pointer value for the given kind;
// \p IsReadonly allows the implementation to skip copy-on-write work.
uint32_t &lookup32(ValueKind VK, bool IsReadonly, IdentTy *Ident);
void *&lookupPtr(ValueKind VK, bool IsReadonly);

/// A class without actual state used to provide a nice interface to lookup and
/// update ICV values we can declare in global scope.
template <typename Ty, ValueKind Kind> struct Value {
  __attribute__((flatten, always_inline)) operator Ty() {
    return lookup(/* IsReadonly */ true, /* IdentTy */ nullptr);
  }

  __attribute__((flatten, always_inline)) Value &operator=(const Ty &Other) {
    set(Other, /* IdentTy */ nullptr);
    return *this;
  }

  __attribute__((flatten, always_inline)) Value &operator++() {
    inc(1, /* IdentTy */ nullptr);
    return *this;
  }

  __attribute__((flatten, always_inline)) Value &operator--() {
    inc(-1, /* IdentTy */ nullptr);
    return *this;
  }

private:
  // All accessors funnel through lookup32 for the kind this Value wraps.
  __attribute__((flatten, always_inline)) Ty &lookup(bool IsReadonly,
                                                     IdentTy *Ident) {
    Ty &t = lookup32(Kind, IsReadonly, Ident);
    return t;
  }

  __attribute__((flatten, always_inline)) Ty &inc(int UpdateVal,
                                                  IdentTy *Ident) {
    return (lookup(/* IsReadonly */ false, Ident) += UpdateVal);
  }

  __attribute__((flatten, always_inline)) Ty &set(Ty UpdateVal,
                                                  IdentTy *Ident) {
    return (lookup(/* IsReadonly */ false, Ident) = UpdateVal);
  }

  // ValueRAII needs access to the private lookup member.
  template <typename VTy, typename Ty2> friend struct ValueRAII;
};

/// A lookup class without actual state used to provide
/// a nice interface to lookup and update ICV values
/// we can declare in global scope.
template <typename Ty, ValueKind Kind> struct PtrValue {
  __attribute__((flatten, always_inline)) operator Ty() {
    return lookup(/* IsReadonly */ true, /* IdentTy */ nullptr);
  }

  __attribute__((flatten, always_inline)) PtrValue &operator=(const Ty Other) {
    set(Other);
    return *this;
  }

private:
  Ty &lookup(bool IsReadonly, IdentTy *) { return lookupPtr(Kind, IsReadonly); }

  Ty &set(Ty UpdateVal) {
    return (lookup(/* IsReadonly */ false, /* IdentTy */ nullptr) = UpdateVal);
  }

  template <typename VTy, typename Ty2> friend struct ValueRAII;
};

/// RAII helper that, while Active, overwrites a state value with NewValue
/// and restores OldValue on destruction. Asserts the value actually was
/// OldValue when taking over.
template <typename VTy, typename Ty> struct ValueRAII {
  // When inactive, Ptr is bound to the member Val so no state is touched.
  ValueRAII(VTy &V, Ty NewValue, Ty OldValue, bool Active, IdentTy *Ident)
      : Ptr(Active ? V.lookup(/* IsReadonly */ false, Ident) : Val),
        Val(OldValue), Active(Active) {
    if (!Active)
      return;
    ASSERT(Ptr == OldValue && "ValueRAII initialization with wrong old value!");
    Ptr = NewValue;
  }
  ~ValueRAII() {
    if (Active)
      Ptr = Val;
  }

private:
  Ty &Ptr;
  Ty Val;
  bool Active;
};

/// The chunk size of the current run-sched ICV (team state).
inline state::Value<uint32_t, state::VK_RunSchedChunk> RunSchedChunk;

/// The number of threads in the current parallel team (team state).
inline state::Value<uint32_t, state::VK_ParallelTeamSize> ParallelTeamSize;

/// The outlined function executed by the current parallel region.
inline state::PtrValue<ParallelRegionFnTy, state::VK_ParallelRegionFn>
    ParallelRegionFn;

void runAndCheckState(void(Func(void)));

void assumeInitialState(bool IsSPMD);

} // namespace state

namespace icv {

/// The `nthreads-var` ICV.
inline state::Value<uint32_t, state::VK_NThreads> NThreads;

/// The `level-var` ICV (nesting depth of parallel regions).
inline state::Value<uint32_t, state::VK_Level> Level;

/// The `active-level` describes which of the parallel level counted with the
/// `level-var` is active. There can only be one.
///
/// active-level-var is 1, if ActiveLevelVar is not 0, otherwise it is 0.
inline state::Value<uint32_t, state::VK_ActiveLevel> ActiveLevel;

/// The `max-active-levels-var` ICV.
inline state::Value<uint32_t, state::VK_MaxActiveLevels> MaxActiveLevels;

/// The `run-sched-var` ICV.
inline state::Value<uint32_t, state::VK_RunSched> RunSched;

} // namespace icv

namespace memory {

/// Allocate \p Size bytes in shared memory, if possible, for \p Reason.
///
/// Note: See the restrictions on __kmpc_alloc_shared for proper usage.
void *allocShared(uint64_t Size, const char *Reason);

/// Free \p Ptr, allocated via allocShared, for \p Reason.
///
/// Note: See the restrictions on __kmpc_free_shared for proper usage.
void freeShared(void *Ptr, uint64_t Bytes, const char *Reason);

/// Allocate \p Size bytes in global memory, if possible, for \p Reason.
void *allocGlobal(uint64_t Size, const char *Reason);

/// Return a pointer to the dynamic shared memory buffer.
void *getDynamicBuffer();

/// Free \p Ptr, allocated via allocGlobal, for \p Reason.
void freeGlobal(void *Ptr, const char *Reason);

} // namespace memory

} // namespace _OMP

#pragma omp end declare target

#endif
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include 
"llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class 
InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class 
CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
} // namespace sema

namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
} // namespace threadSafety

// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
                  SourceLocation> UnexpandedParameterPack;

/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  /// Look up (or lazily create) the nullability record for \p file.
  /// The most recently accessed entry lives in a one-element cache; the
  /// previously cached entry is written back into the map before eviction,
  /// so the returned reference stays valid until the next lookup.
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    // NOTE: Map[file] default-constructs a FileNullability on first access,
    // which is the desired "not seen yet" state.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of expected type during expression parsing.
/// The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Returns the expected type for the token starting at \p Tok, or a null
  /// QualType when \p Tok is not the location this builder was primed for.
  /// A stored concrete Type takes precedence over the deferred ComputeType
  /// callback.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// A key method to reduce duplicate debug info from Sema. virtual void anchor(); ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. 
DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. 
const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. 
unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. 
typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encoutered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. 
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. 
bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. 
S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. 
may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. 
RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. 
enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. 
    PotentiallyEvaluatedIfUsed
  };

  /// Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// Whether the enclosing context needed a cleanup.
    CleanupInfo ParentCleanup;

    /// Whether we are in a decltype expression.
    bool IsDecltype;

    /// The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// The number of typos encountered during this expression evaluation
    /// context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    /// NOTE(review): presumably the maybe-ODR-used expressions carried over
    /// while this context is active -- confirm against the push/pop code for
    /// expression evaluation contexts.
    MaybeODRUseExprSet SavedMaybeODRUseExprs;

    /// The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    /// NOTE(review): appears to record expressions that may dereference a
    /// 'noderef' pointer in this context; consumed by WarnOnPendingNoDerefs
    /// when the context is popped -- confirm.
    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

    /// Expressions appearing as the LHS of a volatile assignment in this
    /// context. We produce a warning for these when popping the context if
    /// they are not discarded-value expressions nor unevaluated operands.
    SmallVector<Expr*, 2> VolatileAssignmentLHSs;

    /// \brief Describes whether we are in an expression context which we have
    /// to handle differently.
    enum ExpressionKind {
      EK_Decltype, EK_TemplateArgument, EK_Other
    } ExprContext;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      CleanupInfo ParentCleanup,
                                      Decl *ManglingContextDecl,
                                      ExpressionKind ExprContext)
        : Context(Context), ParentCleanup(ParentCleanup),
          NumCleanupObjects(NumCleanupObjects), NumTypos(0),
          ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}

    /// True for the whole unevaluated-operand family of contexts
    /// (Unevaluated, UnevaluatedList and UnevaluatedAbstract).
    bool isUnevaluated() const {
      return Context == ExpressionEvaluationContext::Unevaluated ||
             Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
             Context == ExpressionEvaluationContext::UnevaluatedList;
    }

    /// True only for the ConstantEvaluated context (not for the unevaluated
    /// family).
    bool isConstantEvaluated() const {
      return Context == ExpressionEvaluationContext::ConstantEvaluated;
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// Emit a warning for all pending noderef expressions that we recorded.
  void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

  /// Compute the mangling number context for a lambda expression or
  /// block literal. Also return the extra mangling decl if any.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  std::tuple<MangleNumberingContext *, Decl *>
  getCurrentMangleNumberContext(const DeclContext *DC);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult() : Pair() {}
    // A deleted special member is recorded the same way as a missing one
    // (NoMemberOrDeleted); only the method pointer distinguishes the cases.
    SpecialMemberOverloadResult(CXXMethodDecl *MD)
        : Pair(MD, MD->isDeleted() ?
               NoMemberOrDeleted : Success) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  /// A SpecialMemberOverloadResult that can live in a FoldingSet, keyed by
  /// the FoldingSetNodeID it was created with (see SpecialMemberCache).
  class SpecialMemberOverloadResultEntry
      : public llvm::FastFoldingSetNode,
        public SpecialMemberOverloadResult {
  public:
    SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
        : FastFoldingSetNode(ID) {}
  };

  /// A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

  /// A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

  /// The kind of translation unit we are processing.
  ///
  /// When we're processing a complete translation unit, Sema will perform
  /// end-of-translation-unit semantic tasks (such as creating
  /// initializers for tentative definitions in C) once parsing has
  /// completed. Modules and precompiled headers perform different kinds of
  /// checks.
  TranslationUnitKind TUKind;

  llvm::BumpPtrAllocator BumpAlloc;

  /// The number of SFINAE diagnostics that have been trapped.
  unsigned NumSFINAEErrors;

  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
      UnparsedDefaultArgInstantiationsMap;

  /// A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. 
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
      SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  /// Kinds of defaulted comparison operator functions.
  enum class DefaultedComparisonKind : unsigned char {
    /// This is not a defaultable comparison operator.
    None,
    /// This is an operator== that should be implemented as a series of
    /// subobject comparisons.
    Equal,
    /// This is an operator<=> that should be implemented as a series of
    /// subobject comparisons.
    ThreeWay,
    /// This is an operator!= that should be implemented as a rewrite in terms
    /// of a == comparison.
    NotEqual,
    /// This is an <, <=, >, or >= that should be implemented as a rewrite in
    /// terms of a <=> comparison.
    Relational,
  };

  /// The function definitions which were renamed as part of typo-correction
  /// to match their respective declarations. We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  ///
  /// RAII guard: the constructor snapshots Sema's current FPFeatures and the
  /// destructor unconditionally restores that snapshot.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
    ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

  private:
    Sema& S;
    FPOptions OldFPFeaturesState;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

  /// NOTE(review): presumably latches after the first stack-exhaustion
  /// warning so it is not repeated -- confirm in warnStackExhausted's
  /// implementation.
  bool WarnedStackExhausted = false;

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  ///Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  ///\param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Warn that the stack is nearly exhausted.
  void warnStackExhausted(SourceLocation Loc);

  /// Run some code with "sufficient" stack space. (Currently, at least 256K is
  /// guaranteed). Produces a warning if we're low on stack space and allocates
  /// more in that case.
Use this in code that may recurse deeply (for example,
  /// in template instantiation) to avoid stack overflow.
  void runWithSufficientStackSpace(SourceLocation Loc,
                                   llvm::function_ref<void()> Fn);

  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
    // in that case anyway.
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor will
      // do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template<typename T>
    friend const SemaDiagnosticBuilder &operator<<(
        const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };

  /// Emit a diagnostic.
  // The builder is returned by value; its destructor (above) dispatches the
  // diagnostic through EmitCurrentDiagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string
  getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  /// Invent a new identifier for parameters of abbreviated templates.
  IdentifierInfo *
  InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                             unsigned Index);

  void emitAndClearUnusedLocalTypedefWarnings();

  enum TUFragmentKind {
    /// The global module fragment, between 'module;' and a module-declaration.
    Global,
    /// A normal translation unit fragment. For a non-module unit, this is the
    /// entire translation unit. Otherwise, it runs from the module-declaration
    /// to the private-module-fragment (if any) or the end of the TU (if not).
    Normal,
    /// The private module fragment, between 'module :private;' and the end of
    /// the translation unit.
Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. 
sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. 
/// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
  struct TypeDiagnoser {
    TypeDiagnoser() {}

    virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
    virtual ~TypeDiagnoser() {}
  };

  // Overload set that normalizes common diagnostic-argument types into a
  // form DiagnosticBuilder's operator<< accepts (Expr/TypeLoc become their
  // source ranges). Used by BoundTypeDiagnoser::emit below.
  static int getPrintable(int I) { return I; }
  static unsigned getPrintable(unsigned I) { return I; }
  static bool getPrintable(bool B) { return B; }
  static const char * getPrintable(const char *S) { return S; }
  static StringRef getPrintable(StringRef S) { return S; }
  static const std::string &getPrintable(const std::string &S) { return S; }
  static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
    return II;
  }
  static DeclarationName getPrintable(DeclarationName N) { return N; }
  static QualType getPrintable(QualType T) { return T; }
  static SourceRange getPrintable(SourceRange R) { return R; }
  static SourceRange getPrintable(SourceLocation L) { return L; }
  static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
  static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

  /// A TypeDiagnoser that emits diagnostic \c DiagID with a fixed set of
  /// extra arguments bound (by reference) at construction time; the
  /// diagnosed type itself is streamed last.
  template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
    unsigned DiagID;
    std::tuple<const Ts &...> Args;

    template <std::size_t... Is>
    void emit(const SemaDiagnosticBuilder &DB,
              std::index_sequence<Is...>) const {
      // Apply all tuple elements to the builder in order.
      // (The array initializer forces left-to-right evaluation of the pack
      // expansion; the leading 'false' keeps the array non-empty when the
      // pack is empty.)
      bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
      (void)Dummy;
    }

  public:
    BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
        : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
      assert(DiagID != 0 && "no diagnostic for type diagnoser");
    }

    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
      const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
      emit(DB, std::index_sequence_for<Ts...>());
      DB << T;
    }
  };

private:
  /// Methods for marking which expressions involve dereferencing a pointer
  /// marked with the 'noderef' attribute. Expressions are checked bottom up as
  /// they are parsed, meaning that a noderef pointer may not be accessed.
For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. 
bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. 
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). 
QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. 
NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as a non-type, and an expression representing /// that name has been formed. NC_ContextIndependentExpr, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. 
NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification ContextIndependentExpr(ExprResult E) { NameClassification Result(NC_ContextIndependentExpr); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_ContextIndependentExpr); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() 
const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. 
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions). 
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. 
NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. 
void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). 
bool canSkipFunctionBody(Decl *D);

// NRVO = named return value optimization; records candidates on the scope.
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);

/// Invoked when the body of a function definition has been completely parsed.
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);

/// Invoked for a function whose body was skipped (see canSkipFunctionBody).
Decl *ActOnSkippedFunctionBody(Decl *Decl);

void ActOnFinishInlineFunctionDef(FunctionDecl *D);

/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                            QualType ReturnTy, NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);

/// Handle a file-scope 'asm' declaration.
Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                            SourceLocation SemiLoc);

/// The syntactic form of module-declaration being processed.
enum class ModuleDeclKind {
  Interface,      ///< 'export module X;'
  Implementation, ///< 'module X;'
};

/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                               SourceLocation ModuleLoc, ModuleDeclKind MDK,
                               ModuleIdPath Path, bool IsFirstDecl);

/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. 
Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. 
void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, 
SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
class DefaultedFunctionKind {
  /// The special-member kind, or CXXInvalid when this describes a defaulted
  /// comparison (or nothing at all).
  CXXSpecialMember SpecialMember : 8;
  /// The comparison kind, or DefaultedComparisonKind::None when this
  /// describes a special member (or nothing at all).
  DefaultedComparisonKind Comparison : 8;

public:
  /// Neither a special member nor a defaulted comparison.
  DefaultedFunctionKind()
      : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
  }
  DefaultedFunctionKind(CXXSpecialMember CSM)
      : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
  DefaultedFunctionKind(DefaultedComparisonKind Comp)
      : SpecialMember(CXXInvalid), Comparison(Comp) {}

  bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
  bool isComparison() const {
    return Comparison != DefaultedComparisonKind::None;
  }

  /// True when this describes either kind of defaulted function.
  explicit operator bool() const {
    return isSpecialMember() || isComparison();
  }

  CXXSpecialMember asSpecialMember() const { return SpecialMember; }
  DefaultedComparisonKind asComparison() const { return Comparison; }

  /// Get the index of this function kind for use in diagnostics.
  unsigned getDiagnosticIndex() const {
    // The two enum ranges are packed into one diagnostic %select index:
    // special members first, comparisons after them.
    static_assert(CXXInvalid > CXXDestructor,
                  "invalid should have highest index");
    static_assert((unsigned)DefaultedComparisonKind::None == 0,
                  "none should be equal to zero");
    return SpecialMember + (unsigned)Comparison;
  }
};

/// Determine the kind of defaulted function (special member or defaulted
/// comparison) that \p FD is, if any.
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);

/// Convenience wrapper: the special-member kind of \p MD, or CXXInvalid.
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
  return getDefaultedFunctionKind(MD).asSpecialMember();
}

/// Convenience wrapper: the defaulted-comparison kind of \p FD, or None.
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
  return getDefaultedFunctionKind(FD).asComparison();
}

void ActOnLastBitfield(SourceLocation DeclStart,
                       SmallVectorImpl<Decl *> &AllIvarDecls);

Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
                Expr *BitfieldWidth, tok::ObjCKeywordKind visibility);

// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. 
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. 
ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. 
/// /// The sum of priorities deteremines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final prirority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final vaue. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. 
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void 
mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool 
IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. 
};

ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            APValue &Value, CCEKind CCE);

/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
  // When true, suppress all diagnostics produced by this converter.
  bool Suppress;
  // When true, suppress only the "conversion was picked" diagnostic.
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                                  QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
                                              QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

/// Contextual converter that accepts integral and (optionally scoped)
/// enumeration types, as needed for an integral constant expression.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  // Whether scoped enumeration types also satisfy match().
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                      bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  // Forwards the generic "no match" diagnostic to the ICE-specific one.
  SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                        QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error };
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, 
DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = 
false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. 
/// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. 
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// 
complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. 
enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. 
LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. 
LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. 
std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool 
RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? 
ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
/// Process attributes whose handling was deferred until the whole
/// declaration was available.
void ProcessDeclAttributeDelayed(Decl *D,
                                 const ParsedAttributesView &AttrList);
/// Apply the parsed attributes in \p AL to declaration \p D.
void ProcessDeclAttributeList(Scope *S, Decl *D,
                              const ParsedAttributesView &AL,
                              bool IncludeCXX11Attributes = true);
/// Apply parsed attributes to an access specifier (e.g. 'public:').
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                    const ParsedAttributesView &AttrList);
/// Diagnose attributes on \p D that were never consumed.
void checkUnusedDeclAttributes(Declarator &D);

/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);

/// Validate a 'regparm' attribute and extract its register count.
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
/// Validate a calling-convention attribute and compute the resulting CC.
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                          const FunctionDecl *FD = nullptr);
/// Check that the attribute is allowed for the current target.
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
/// Check that the attribute takes no arguments.
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
/// Check that attribute argument \p ArgNum is a string literal; on success
/// its value is returned in \p Str (and its location in \p ArgLocation).
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                    StringRef &Str,
                                    SourceLocation *ArgLocation = nullptr);
/// Validate the argument of a 'section' attribute.
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
/// Validate the argument of a 'target' attribute.
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
/// Validate an MS inheritance-model attribute on a class definition.
bool checkMSInheritanceAttrOnDefinition(
    CXXRecordDecl *RD, SourceRange Range, bool BestCase,
    MSInheritanceModel SemanticSpelling);
/// Diagnose alignas specifications that under-align \p D.
void CheckAlignasUnderalignment(Decl *D);

/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                            SourceLocation Loc);

// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);

/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;

/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs,
                                 SourceRange Range);

/// Warn when an Objective-C method implementation's types conflict with
/// those of its declaration.
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                 ObjCMethodDecl *MethodDecl,
                                 bool IsProtocolMethodDecl);

/// Check an overriding Objective-C method against the method it overrides.
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                      ObjCMethodDecl *Overridden,
                                      bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl,
                           bool IsProtocolMethodDecl);

/// Small set of selectors, used when matching method maps.
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                              ObjCIvarDecl **Fields, unsigned nIvars,
                              SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                               ObjCContainerDecl* IDecl,
                               bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                     ObjCContainerDecl *CDecl,
                                     bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCInterfaceDecl *IDecl,
                                 SourceLocation AtEnd);
/// Convenience overload; \p D presumably yields the implementation to
/// synthesize for -- confirm against the definition.
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                    ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                         const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// the property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                             const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                      SourceLocation AtLoc,
                      SourceLocation LParenLoc,
                      FieldDeclarator &FD,
                      Selector GetterSel,
                      SourceLocation GetterNameLoc,
                      Selector SetterSel,
                      SourceLocation SetterNameLoc,
                      // @mulle-objc@ new property accessors >
                      Selector AdderSel,
                      SourceLocation AdderNameLoc,
                      Selector RemoverSel,
                      SourceLocation RemoverNameLoc,
                      // @mulle-objc@ new property accessors <
                      const bool isReadWrite,
                      unsigned &Attributes,
                      const unsigned AttributesAsWritten,
                      QualType T,
                      TypeSourceInfo *TSI,
                      tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                     ObjCContainerDecl *CDecl,
                                     SourceLocation AtLoc,
                                     SourceLocation LParenLoc,
                                     FieldDeclarator &FD,
                                     Selector GetterSel,
                                     SourceLocation GetterNameLoc,
                                     Selector SetterSel,
                                     SourceLocation SetterNameLoc,
                                     // @mulle-objc@ new property accessors >
                                     Selector AdderSel,
                                     SourceLocation AdderNameLoc,
                                     Selector RemoverSel,
                                     SourceLocation RemoverNameLoc,
                                     // @mulle-objc@ new property accessors <
                                     const bool isReadWrite,
                                     const unsigned Attributes,
                                     const unsigned AttributesAsWritten,
                                     QualType T,
                                     TypeSourceInfo *TSI,
                                     tok::ObjCKeywordKind MethodImplKind,
                                     DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                     ObjCInterfaceDecl* IDecl);

/// Diagnose synthesized getters of ownership-qualified properties in \p D.
/// NOTE(review): exact diagnostic conditions are not visible here.
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

/// Warn when designated initializers of \p IFD are not overridden in
/// \p ImplD.
void DiagnoseMissingDesignatedInitOverrides(
    const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

/// Diagnose ivars of \p ID that duplicate ivars of \p SID (presumably its
/// superclass -- verify against callers).
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

/// How strictly two Objective-C method types must agree to "match".
enum MethodMatchStrategy { MMS_loose, MMS_strict };

/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl* IMPDecl,
                                ObjCContainerDecl* IDecl,
                                bool &IncompleteImpl,
                                bool ImmediateClass,
                                bool WarnCategoryMethodImpl=false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// - Returns instance or factory methods in global method pool for
  /// given selector. It checks the desired kind first, if none is found, and
  /// parameter checkTheOther is set, it then checks the other kind. If no such
  /// method or only one method is found, function returns false; otherwise, it
  /// returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                        SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                        bool InstanceFirst, bool CheckTheOther,
                                        const ObjCObjectType *TypeBound = nullptr);

/// Report whether the global pool holds multiple methods for \p Sel; the
/// collected candidates are returned in \p Methods.
bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                    SourceRange R, bool receiverIdOrClass,
                                    SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Emit the multiple-method-in-global-pool diagnostic for \p Sel.
void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                        Selector Sel, SourceRange R,
                                        bool receiverIdOrClass);

private:
  /// - Returns a selector which best matches given argument list or
  /// nullptr if none could be found
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
  /// pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? 
Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation 
IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, 
Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult 
ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. 
void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody);

/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                      SourceLocation OpLoc);

/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
                                         SourceLocation Loc);

/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

/// Begin collecting delayed diagnostics for a declaration that is currently
/// being parsed; diagnostics raised in the meantime land in \p pool instead
/// of being emitted immediately.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;

/// Enter a class-parsing context: bumps ParsingClassDepth and suspends
/// delayed-diagnostic processing until the matching PopParsingClass.
ParsingClassState PushParsingClass() {
  ParsingClassDepth++;
  return DelayedDiagnostics.pushUndelayed();
}
/// Leave a class-parsing context entered by PushParsingClass, restoring the
/// saved delayed-diagnostic state.
void PopParsingClass(ParsingClassState state) {
  ParsingClassDepth--;
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. 
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); // @mulle-objc@ MetaABI: additional methods GetMulle_paramExpr GetMulle_paramFieldExpr ExprResult GetMulle_paramExpr( Scope *S, CXXScopeSpec &SS, SourceLocation Loc, StringRef Name); ExprResult GetMulle_paramFieldExpr( FieldDecl *FD, Scope *S, CXXScopeSpec &SS, SourceLocation Loc); ExprResult GetMulle_paramExprAsType( QualType type, Scope *S, CXXScopeSpec 
&SS, SourceLocation Loc, StringRef Name); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, CXXScopeSpec &SS, // @mulle-objc@ add CXXScopeSpec for MetaABI > IdentifierInfo *II, // @mulle-objc@ add CXXScopeSpec for MetaABI > bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, 
const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult 
CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo 
*TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc, unsigned TemplateDepth); // Handle the final expression in a statement expression. 
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: enum class ComparisonCategoryUsage { /// The '<=>' operator was used in an expression and a builtin operator /// was selected. OperatorInExpression, /// A defaulted 'operator<=>' needed the comparison category. This /// typically only applies to 'std::strong_ordering', due to the implicit /// fallback return value. DefaultedOperator, }; /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. 
/// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, 
SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// correspnding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. 
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;

void ClearExceptions() {
  ExceptionsSeen.clear();
  Exceptions.clear();
}

public:
explicit ImplicitExceptionSpecification(Sema &Self)
    : Self(&Self), ComputedEST(EST_BasicNoexcept) {
  if (!Self.getLangOpts().CPlusPlus11)
    ComputedEST = EST_DynamicNone;
}

/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
  assert(!isComputedNoexcept(ComputedEST) &&
         "noexcept(expr) should not be a possible result");
  return ComputedEST;
}

/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }

/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }

/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }

/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);

/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
  FunctionProtoType::ExceptionSpecInfo ESI;
  ESI.Type = getExceptionSpecType();
  if (ESI.Type == EST_Dynamic) {
    ESI.Exceptions = Exceptions;
  } else if (ESI.Type == EST_None) {
    /// C++11 [except.spec]p14:
    /// The exception-specification is noexcept(false) if the set of
    /// potential exceptions of the special member function contains "any"
    ESI.Type = EST_NoexceptFalse;
    ESI.NoexceptExpr =
        Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
  }
  return ESI;
}
};

/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
                                   CXXConstructorDecl *CD);

/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);

/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
                             ExceptionSpecificationType &EST);

/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. 
/// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. 
/// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. 
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. 
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. 
ExprResult ActOnCXXThis(SourceLocation loc);

/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class), along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. 
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. 
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? 
Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. 
SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. 
/// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. 
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. 
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
void DefineImplicitLambdaToFunctionPointerConversion(
    SourceLocation CurrentLoc, CXXConversionDecl *Conv);

/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
                                                  CXXConversionDecl *Conv);

ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
                                         SourceLocation ConvLocation,
                                         CXXConversionDecl *Conv, Expr *Src);

/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(Expr *CE, Token NextToken = Token(),
                               bool *PossibleNonPrimary = nullptr,
                               bool IsTrailingRequiresClause = false);

/// Check whether the given type-dependent expression will be the name of a
/// function or another callable function-like entity (e.g. a function
/// template or overload set) for any substitution.
bool IsDependentFunctionNameExpr(Expr *E);

private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;

/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache;

/// Cache of evaluated constraint-satisfaction results, folded within the
/// owning ASTContext.
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
    SatisfactionCache;

public:
/// Get the normalized form of \p ConstrainedDecl's associated constraints,
/// consulting (and populating) NormalizationCache. Returns nullptr if an
/// error occurred while normalizing.
const NormalizedConstraint *getNormalizedAssociatedConstraints(
    NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);

/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result: true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
                            NamedDecl *D2, ArrayRef<const Expr *> AC2,
                            bool &Result);

/// Diagnose the case where D1 was not at least as constrained as D2, but
/// would've been if a pair of atomic constraints involved had been declared
/// in a concept and not repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(
    NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2,
    ArrayRef<const Expr *> AC2);

/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if false is returned (i.e. no error occurred), will
/// contain details of the satisfaction, with enough information to diagnose
/// an unsatisfied expression.
/// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise. bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occured and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occured and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constrains are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. 
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied because it was ill-formed. void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation, StringRef Diagnostic); void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old, SourceLocation New); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();

/// Propagate a dllimport/dllexport attribute from \p Class to the base
/// class template specialization \p BaseTemplateSpec it derives from.
void propagateDLLAttrToBaseClassTemplate(
    CXXRecordDecl *Class, Attr *ClassAttr,
    ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc);

/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);

/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);

void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl 
*Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl *RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, 
CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). 
FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool Disambiguation = false); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation 
EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. 
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, 
SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, 
LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. 
/// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \param ConstraintsNotSatisfied If provided, and an error occured, will /// receive true if the cause for the error is the associated constraints of /// the template not being satisfied by the template arguments. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, 
SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const 
TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic 
*SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression, UPPC_Block, /// A type constraint, UPPC_TypeConstraint }; /// Diagnose unexpanded parameter packs. 
/// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. 
/// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. 
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). 
TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-depnedent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. 
TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// brief A function argument from which we performed template argument // deduction for a call. struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, 
QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const 
TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth,
                                llvm::SmallBitVector &Used);

/// Convenience overload that forwards to the static version using this
/// Sema's ASTContext.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
                                 const FunctionTemplateDecl *FunctionTemplate,
                                          llvm::SmallBitVector &Deduced);

//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//

MultiLevelTemplateArgumentList getTemplateInstantiationArgs(
    NamedDecl *D, const TemplateArgumentList *Innermost = nullptr,
    bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr);

/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
  /// The kind of template instantiation we are performing.
  enum SynthesisKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
    TemplateInstantiation,

    /// We are instantiating a default argument for a template
    /// parameter. The Entity is the template parameter whose argument is
    /// being instantiated, the Template is the template, and the
    /// TemplateArgs/NumTemplateArguments provide the template arguments as
    /// specified.
    DefaultTemplateArgumentInstantiation,

    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template argument determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are computing the exception specification for a defaulted special
    /// member function.
    ExceptionSpecEvaluation,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,

    /// We are instantiating a requirement of a requires expression.
    RequirementInstantiation,

    /// We are checking the satisfaction of a nested requirement of a requires
    /// expression.
    NestedRequirementConstraintsCheck,

    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,

    /// We are declaring an implicit 'operator==' for a defaulted
    /// 'operator<=>'.
    DeclaringImplicitEqualityComparison,

    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,

    // We are checking the constraints associated with a constrained entity or
    // the constraint expression of a concept. This includes the checks that
    // atomic constraints have the type 'bool' and that they can be constant
    // evaluated.
    ConstraintsCheck,

    // We are substituting template arguments into a constraint expression.
    ConstraintSubstitution,

    // We are normalizing a constraint expression.
    ConstraintNormalization,

    // We are substituting into the parameter mapping of an atomic constraint
    // during normalization.
    ParameterMappingSubstitution,

    /// We are rewriting a comparison operator in terms of an operator<=>.
    RewritingOperatorAsSpaceship,

    /// Added for Template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
  } Kind;

  /// Was the enclosing context a non-instantiation SFINAE context?
  bool SavedInNonInstantiationSFINAEContext;

  /// The point of instantiation or synthesis within the source code.
  SourceLocation PointOfInstantiation;

  /// The entity that is being synthesized.
  Decl *Entity;

  /// The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  // FIXME: Wrap this union around more members, or perhaps store the
  // kind-specific members in the RAII object owning the context.
  union {
    /// The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// The special member being declared or defined.
    CXXSpecialMember SpecialMember;
  };

  ArrayRef<TemplateArgument> template_arguments() const {
    // NumTemplateArgs shares a union with SpecialMember, so it is only
    // meaningful for the kinds that carry template arguments.
    assert(Kind != DeclaringSpecialMember);
    return {TemplateArgs, NumTemplateArgs};
  }

  /// The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that causes
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  CodeSynthesisContext()
      : Kind(TemplateInstantiation),
        SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
        Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
        DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};

/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;

/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;

/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
    TemplateInstCallbacks;

/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self),
        OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    // Restore the previous index on scope exit.
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

// NOTE(review): the friend name below ("...SubstitutionRAII") does not match
// the class above ("...SubstitutionIndexRAII") — confirm which class this
// friendship is intended for.
friend class ArgumentPackSubstitutionRAII;

/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  struct ConstraintsCheck {};
  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, NamedDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  struct ConstraintSubstitution {};
  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, NamedDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);

  struct ConstraintNormalization {};
  /// \brief Note that we are normalizing a constraint expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);

  struct ParameterMappingSubstitution {};
  /// \brief Note that we are substituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  /// Common implementation used by the public constructors above.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
};

void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}

void PrintContextStack() {
  // Only reprint the stack when it has changed since the last diagnostic.
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();

void PrintPragmaAttributeInstantiationPoint();

/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore all of the Sema state saved in the constructor.
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool PrevDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

/// RAII scope that saves the global pending-instantiation and VTable-use
/// queues on entry and restores them on exit; perform() flushes whatever
/// accumulated inside the scope.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

/// RAII scope that isolates the local pending-instantiation queue;
/// perform() runs only the local instantiations gathered in this scope.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    // Indices must be set in strictly increasing order; gaps are filled
    // with default-constructed (uninteresting) infos by resize().
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
  // A null result means "all parameters have default (uninteresting) info".
  if (!HasInteresting) return nullptr;
  Infos.resize(numParams);
  return Infos.data();
}
};

void PerformPendingInstantiations(bool LocalOnly = false);

/// Substitute the given template arguments into a type, producing the
/// type-source information for the result.
TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                                      SourceLocation Loc,
                                      DeclarationName Entity,
                                      CXXRecordDecl *ThisContext,
                                      Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                              int indexAdjustment,
                              Optional<unsigned> NumExpansions,
                              bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
                const MultiLevelTemplateArgumentList &TemplateArgs,
                SmallVectorImpl<Expr *> &Outputs);

StmtResult SubstStmt(Stmt *S,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
                    const MultiLevelTemplateArgumentList &TemplateArgs);

bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
                       const MultiLevelTemplateArgumentList &TemplateArgs,
                       TemplateArgumentListInfo &Outputs);

Decl *SubstDecl(Decl *D, DeclContext *Owner,
                const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
                                         FunctionDecl *Spaceship);

ExprResult SubstInitializer(Expr *E,
                       const MultiLevelTemplateArgumentList &TemplateArgs,
                       bool CXXDirectInit);

bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
                    CXXRecordDecl *Pattern,
                    const MultiLevelTemplateArgumentList &TemplateArgs);

bool
InstantiateClass(SourceLocation PointOfInstantiation,
                 CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                 const MultiLevelTemplateArgumentList &TemplateArgs,
                 TemplateSpecializationKind TSK,
                 bool Complain = true);

bool InstantiateEnum(SourceLocation PointOfInstantiation,
                     EnumDecl *Instantiation, EnumDecl *Pattern,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     TemplateSpecializationKind TSK);

bool InstantiateInClassInitializer(
    SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
    FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);

/// An attribute whose instantiation was deferred, together with the scope
/// and declaration needed to instantiate it later.
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;
  LocalInstantiationScope *Scope;
  Decl *NewDecl;

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
      : TmplAttr(A), Scope(S), NewDecl(D) { }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;

void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                      const Decl *Pattern, Decl *Inst,
                      LateInstantiatedAttrVec *LateAttrs = nullptr,
                      LocalInstantiationScope *OuterMostScope = nullptr);

void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
                        const Decl *Pattern, Decl *Inst,
                        LateInstantiatedAttrVec *LateAttrs = nullptr,
                        LocalInstantiationScope *OuterMostScope = nullptr);

bool usesPartialOrExplicitSpecialization(
    SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);

bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
                           ClassTemplateSpecializationDecl *ClassTemplateSpec,
                           TemplateSpecializationKind TSK,
                           bool Complain = true);

void InstantiateClassMembers(SourceLocation PointOfInstantiation,
                             CXXRecordDecl *Instantiation,
                             const MultiLevelTemplateArgumentList &TemplateArgs,
                             TemplateSpecializationKind TSK);

void InstantiateClassTemplateSpecializationMembers(
                        SourceLocation PointOfInstantiation,
                        ClassTemplateSpecializationDecl *ClassTemplateSpec,
                        TemplateSpecializationKind TSK);

NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
                            const MultiLevelTemplateArgumentList &TemplateArgs);

DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
                         const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
                  SourceLocation Loc,
                  const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
           TemplateArgumentListInfo &Result,
           const MultiLevelTemplateArgumentList &TemplateArgs);

void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
                              FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
    SourceLocation PointOfInstantiation, FunctionDecl *Decl,
    ArrayRef<TemplateArgument> TemplateArgs,
    ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
                                             const TemplateArgumentList *Args,
                                             SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
                                   FunctionDecl *Function,
                                   bool Recursive = false,
                                   bool DefinitionRequired = false,
                                   bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
    VarTemplateDecl *VarTemplate, VarDecl *FromVar,
    const TemplateArgumentList &TemplateArgList,
    const TemplateArgumentListInfo &TemplateArgsInfo,
    SmallVectorImpl<TemplateArgument> &Converted,
    SourceLocation PointOfInstantiation, void *InsertPos,
    LateInstantiatedAttrVec *LateAttrs = nullptr,
    LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
    VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
                           const MultiLevelTemplateArgumentList &TemplateArgs,
                           LateInstantiatedAttrVec *LateAttrs,
                           DeclContext *Owner,
                           LocalInstantiationScope *StartingScope,
                           bool InstantiatingVarTemplate = false,
                           VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
    VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
    const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
    VarDecl *Var, VarDecl *OldVar,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                   VarDecl *Var, bool Recursive = false,
                                   bool DefinitionRequired = false,
                                   bool AtEndOfTU = false);

void InstantiateMemInitializers(CXXConstructorDecl *New,
                                const CXXConstructorDecl *Tmpl,
                           const MultiLevelTemplateArgumentList &TemplateArgs);

NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                          const MultiLevelTemplateArgumentList &TemplateArgs);

// Objective-C declarations.
/// The kind of Objective-C container context currently being parsed.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};

/// Return the kind of the current Objective-C container context, if any.
ObjCContainerKind getObjCContainerKind() const;

/// Handle a single Objective-C type parameter.
DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance,
                              SourceLocation varianceLoc, unsigned index,
                              IdentifierInfo *paramName,
                              SourceLocation paramLoc,
                              SourceLocation colonLoc, ParsedType typeBound);

/// Build a type-parameter list from the parsed parameters.
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                          ArrayRef<Decl *> typeParams,
                                          SourceLocation rAngleLoc);

/// Pop an Objective-C type-parameter list off the scope.
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

/// Called at the start of an \@interface declaration.
Decl *ActOnStartClassInterface(
    Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *SuperName, SourceLocation SuperLoc,
    ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

/// Process the superclass of a class \@interface.
void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc,
                                     ObjCInterfaceDecl *IDecl,
                                     IdentifierInfo *ClassName,
                                     SourceLocation ClassLoc,
                                     IdentifierInfo *SuperName,
                                     SourceLocation SuperLoc,
                                     ArrayRef<ParsedType> SuperTypeArgs,
                                     SourceRange SuperTypeArgsRange);

/// Collect protocols introduced via a typedef'd superclass name.
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                             SmallVectorImpl<SourceLocation> &ProtocolLocs,
                             IdentifierInfo *SuperName,
                             SourceLocation SuperLoc);

/// Called on an \@compatibility_alias declaration.
Decl *ActOnCompatibilityAlias(SourceLocation AtCompatibilityAliasLoc,
                              IdentifierInfo *AliasName,
                              SourceLocation AliasLocation,
                              IdentifierInfo *ClassName,
                              SourceLocation ClassLocation);

/// Diagnose a protocol that (transitively) lists itself in its own
/// protocol list.
bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);

/// Called at the start of an \@protocol declaration.
Decl *ActOnStartProtocolInterface(
    SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
    SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
    unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
    SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

/// Called at the start of a category \@interface.
Decl *ActOnStartCategoryInterface(
    SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

/// Called at the start of an \@implementation for a class.
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    IdentifierInfo *SuperClassname,
                                    SourceLocation SuperClassLoc,
                                    const ParsedAttributesView &AttrList);

/// Called at the start of an \@implementation for a category.
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc,
                                       const ParsedAttributesView &AttrList);

/// Finish an Objective-C \@implementation, returning the group of
/// declarations it produced.
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

/// Handle an \@class forward declaration list.
DeclGroupPtrTy ActOnForwardClassDeclaration(
    SourceLocation Loc, IdentifierInfo **IdentList,
    SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists,
    unsigned NumElts);

/// Handle an \@protocol forward declaration list.
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                ArrayRef<IdentifierLocPair> IdentList,
                                const ParsedAttributesView &attrList);

/// Look up the protocol declarations named in \p ProtocolId, appending
/// them to \p Protocols.
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                             ArrayRef<IdentifierLocPair> ProtocolId,
                             SmallVectorImpl<Decl *> &Protocols);

/// Diagnose a mix of type arguments and protocol qualifiers.
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                  SourceLocation ProtocolLoc,
                                  IdentifierInfo *TypeArgId,
                                  SourceLocation TypeArgLoc,
                                  bool SelectProtocolFirst = false);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
    Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
    ArrayRef<IdentifierInfo *> identifiers,
    ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
    SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
    bool warnOnIncompleteProtocols);

/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
    SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
    ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
    Scope *S, SourceLocation Loc, ParsedType BaseType,
    SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
    SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
    ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
    SourceLocation ProtocolRAngleLoc);

/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                SourceLocation ProtocolLAngleLoc,
                                ArrayRef<ObjCProtocolDecl *> Protocols,
                                ArrayRef<SourceLocation> ProtocolLocs,
                                SourceLocation ProtocolRAngleLoc,
                                bool FailOnError = false);

/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc,
                             SourceLocation TypeArgsLAngleLoc,
                             ArrayRef<TypeSourceInfo *> TypeArgs,
                             SourceLocation TypeArgsRAngleLoc,
                             SourceLocation ProtocolLAngleLoc,
                             ArrayRef<ObjCProtocolDecl *> Protocols,
                             ArrayRef<SourceLocation> ProtocolLocs,
                             SourceLocation ProtocolRAngleLoc,
                             bool FailOnError = false);

/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);

// @mulle-objc@ new property attribute container >
/// Check a property's non-getter (adder/remover/setter-like) method.
void VerifyPropertyNonGetterMethod(ObjCPropertyDecl *property,
                                   ObjCMethodDecl *method, std::string name);

/// Create a non-getter method declaration for the given property.
ObjCMethodDecl *CreatePropertyNonGetterMethod(ObjCContainerDecl *CD,
                                              ObjCPropertyDecl *property,
                                              Selector Selector,
                                              bool isSetter = true);
// @mulle-objc@ new property attribute container <

/// Diagnose a mismatch between a property and the one it overrides.
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                              ObjCPropertyDecl *SuperProperty,
                              const IdentifierInfo *Name,
                              bool OverridingProtocolProperty);

/// Diagnose methods duplicated between a class extension and its class.
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                      ObjCInterfaceDecl *ID);

/// Called at \@end of an Objective-C container.
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                 ArrayRef<Decl *> allMethods = None,
                 ArrayRef<DeclGroupPtrTy> allTUVars = None);

/// Called on a well-formed \@property declaration.
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc,
                    FieldDeclarator &FD, ObjCDeclSpec &ODS,
                    Selector GetterSel, Selector SetterSel,
                    // @mulle-objc@ new property attribute container >
                    Selector AdderSel, Selector RemoverSel,
                    // @mulle-objc@ new property attribute container <
                    tok::ObjCKeywordKind MethodImplKind,
                    DeclContext *lexicalDC = nullptr);

/// Called on a well-formed \@synthesize/\@dynamic declaration.
Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc,
                            SourceLocation PropertyLoc, bool ImplKind,
                            IdentifierInfo *PropertyId,
                            IdentifierInfo *PropertyIvar,
                            SourceLocation PropertyIvarLoc,
                            ObjCPropertyQueryKind QueryKind);

/// Families of Objective-C methods with special semantics.
enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};

/// Parsed information about a single Objective-C method argument.
struct ObjCArgInfo {
  IdentifierInfo *Name;
  SourceLocation NameLoc;
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;
  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};

/// Called on a well-formed Objective-C method declaration.
Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc, // location of the + or -.
    SourceLocation EndLoc,   // location of the ; or {.
    tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
    ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
    unsigned CNumArgs, // c-style args
    const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
    bool isVariadic, bool MethodDefinition);

// @mulle-objc@ MetaABI: additional method SetMulleObjCParam >
// Why do I have to specify the size of the vector when passing ??
void SetMulleObjCParam(ObjCMethodDecl *ObjCMethod, Selector Sel,
                       SmallVector<ParmVarDecl*, 16> *Params,
                       QualType resultType, unsigned int abiDesc,
                       SourceLocation Loc);

/// Check whether the method requires MetaABI alloca handling.
bool isMetaABIAllocaMethod(ObjCMethodDecl *ObjCMethod, QualType resultType);

/// Bit flags describing how parameters/result are passed under MetaABI.
enum MetaABIDescription {
  MetaABIVoidPtrRval = 0x0,
  MetaABIVoidPtrParam = 0x1,
  MetaABIRvalAsStruct = 0x2,
  MetaABIParamAsStruct = 0x4
};

/// Compute the MetaABI description flags for the given parameters/result.
unsigned int metaABIDescription(SmallVector<ParmVarDecl*, 16> &Params,
                                QualType resultType);
// @mulle-objc@ MetaABI: additional method SetMulleObjCParam <

/// Look up a method in a protocol-qualified object pointer type.
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                            const ObjCObjectPointerType *OPT,
                                            bool IsInstance);

/// Look up a method in an Objective-C object type.
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                         bool IsInstance);

/// Check an Objective-C method declaration under ARC rules.
bool CheckARCMethodDecl(ObjCMethodDecl *method);

/// Infer an ARC ownership lifetime for the given declaration.
bool inferObjCARCLifetime(ValueDecl *decl);

/// Deduce the OpenCL address space for the given declaration.
void deduceOpenCLAddressSpace(ValueDecl *decl);

/// Build a property reference expression on an object pointer.
ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                                     Expr *BaseExpr, SourceLocation OpLoc,
                                     DeclarationName MemberName,
                                     SourceLocation MemberLoc,
                                     SourceLocation SuperLoc,
                                     QualType SuperType, bool Super);

/// Build a class property reference expression.
ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                                     IdentifierInfo &propertyName,
                                     SourceLocation receiverNameLoc,
                                     SourceLocation propertyNameLoc);

/// Retrieve the 'self' method of the current Objective-C method, if any.
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
  /// The message is sent to 'super'.
  ObjCSuperMessage,
  /// The message is an instance message.
  ObjCInstanceMessage,
  /// The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};

/// Classify a message send whose receiver is the identifier \p Name.
ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name,
                                   SourceLocation NameLoc, bool IsSuper,
                                   bool HasTrailingDot,
                                   ParsedType &ReceiverType);

/// Handle a message send to 'super'.
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args);

/// Build a class message send.
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                             QualType ReceiverType, SourceLocation SuperLoc,
                             Selector Sel, ObjCMethodDecl *Method,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args,
                             bool isImplicit = false);

/// Build an implicit class message send (no source-level brackets).
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                     bool isSuperReceiver, SourceLocation Loc,
                                     Selector Sel, ObjCMethodDecl *Method,
                                     MultiExprArg Args);

/// Handle a class message send from the parser.
ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args);

/// Build an instance message send.
ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType,
                                SourceLocation SuperLoc, Selector Sel,
                                ObjCMethodDecl *Method,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc, MultiExprArg Args,
                                bool isImplicit = false);

/// Build an implicit instance message send.
ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType,
                                        SourceLocation Loc, Selector Sel,
                                        ObjCMethodDecl *Method,
                                        MultiExprArg Args);

// @mulle-objc@ compiler: additional method CheckMulleObjCFunctionDefined
bool CheckMulleObjCFunctionDefined(Scope *S, SourceLocation Loc,
                                   StringRef Name);

// @mulle-objc@ AAM: check that selectors conform
int CheckSelectorForAAM(Selector Sel, ObjCMethodDecl *Method,
                        QualType ReceiverType, SourceLocation SelLoc,
                        SourceRange RecRange);

/// Handle an instance message send from the parser.
ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc, MultiExprArg Args);

/// Build an ARC bridged cast expression.
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                TypeSourceInfo *TSInfo, Expr *SubExpr);

/// Handle an ARC bridged cast from the parser.
ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                ParsedType Type, SourceLocation RParenLoc,
                                Expr *SubExpr);

/// Check a toll-free-bridge cast between CF and Objective-C types.
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);

/// Check an objc_bridge_related cast.
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);

/// Check a static toll-free-bridge cast, computing the cast kind.
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
                                   CastKind &Kind);

/// Look up the components of an objc_bridge_related relationship.
bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType,
                                      QualType SrcType,
                                      ObjCInterfaceDecl *&RelatedClass,
                                      ObjCMethodDecl *&ClassMethod,
                                      ObjCMethodDecl *&InstanceMethod,
                                      TypedefNameDecl *&TDNDecl, bool CfToNs,
                                      bool Diagnose = true);

/// Check (and possibly rewrite) an objc_bridge_related conversion.
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType,
                                       QualType SrcType, Expr *&SrcExpr,
                                       bool Diagnose = true);

/// Check a conversion to an Objective-C string literal type.
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
                                        bool Diagnose = true);

/// Check an init-family method against its receiver type.
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);

/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
                             const ObjCMethodDecl *Overridden);

/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,
  RTC_Incompatible,
  RTC_Unknown
};

/// Check 'objc_direct' override rules between two methods.
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
                                    ObjCMethodDecl *overridden);

/// Check all overrides of the given method within the class hierarchy.
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                              ObjCInterfaceDecl *CurrentClass,
                              ResultTypeCompatibilityKind RTC);

/// The alignment mode requested by '#pragma options align'.
enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};

/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
                             PragmaClangSectionAction Action,
                             PragmaClangSectionKind SecKind,
                             StringRef SecName);

/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                             SourceLocation PragmaLoc);

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
                     StringRef SlotLabel, Expr *Alignment);

/// Situations in which a non-default \#pragma pack state is diagnosed.
enum class PragmaPackDiagnoseKind {
  NonDefaultStateAtInclude,
  ChangedStateAtExit
};

/// Diagnose a \#pragma pack state that is not the default.
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
                                  SourceLocation IncludeLoc);

/// Diagnose a \#pragma pack push with no matching pop at end of TU.
void DiagnoseUnterminatedPragmaPack();

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
                          StringRef Arg);

/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
                           SourceLocation PragmaLoc, MSVtorDispMode Value);

/// The segment kinds addressed by the MS section pragmas.
enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

/// Check that \p TheDecl is compatible with the named section's flags.
bool UnifySection(StringRef SectionName, int SectionFlags,
                  DeclaratorDecl *TheDecl);

/// Record (or re-check) the flags associated with a named section.
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName,
                      llvm::StringRef PragmaName);

/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                          StringLiteral *SegmentName);

/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);

/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
                               StringRef Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                           SourceLocation PragmaLoc);

/// Clone a declaration for use by \#pragma weak.
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);

/// Apply the given \#pragma weak info to a declaration.
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                IdentifierInfo* AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName,
                          SourceLocation PragmaLoc,
                          SourceLocation WeakNameLoc,
                          SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);

/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                 SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

/// Called on a well-formed '\#pragma clang attribute push' carrying an
/// attribute and its subject match rules.
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                   SourceLocation PragmaLoc,
                                   attr::ParsedSubjectMatchRuleSet Rules);

/// Called on a '\#pragma clang attribute push' with no attribute.
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                   const IdentifierInfo *Namespace);

/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                             const IdentifierInfo *Namespace);

/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);

/// Diagnose a '\#pragma clang attribute push' with no matching pop.
void DiagnoseUnterminatedPragmaAttribute();

/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                    bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
                    bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                          Expr *OE);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
                       Expr *ParamExpr);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
                         Expr *MaxThreads, Expr *MinBlocks);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
                 bool InInstantiation = false);

/// Adds a parameter-ABI attribute to a particular declaration.
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
                         ParameterABI ABI);

/// Which retain/release convention an ownership attribute refers to.
enum class RetainOwnershipKind {NS, CF, OS};

/// Adds an ns/cf/os_consumed attribute to a particular declaration.
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
                      RetainOwnershipKind K, bool IsTemplateInstantiation);

/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
                                    Expr *Min, Expr *Max);

/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
                             Expr *Min, Expr *Max);

/// Check that a type is valid for an ns_returns_retained return.
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);

//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//

/// Called when a coroutine body is about to be parsed/analyzed.
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
                             StringRef Keyword);

/// Handle a co_await expression.
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);

/// Handle a co_yield expression.
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);

/// Handle a co_return statement.
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);

/// Build a co_await whose awaiter is already resolved.
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                    bool IsImplicit = false);

/// Build a co_await whose operator lookup is still unresolved.
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                      UnresolvedLookupExpr* Lookup);

/// Build a co_yield expression.
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);

/// Build a co_return statement.
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
                             bool IsImplicit = false);

/// Build the full coroutine body statement from its constituent parts.
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);

/// Build the moves of coroutine parameters into the coroutine frame.
bool buildCoroutineParameterMoves(SourceLocation Loc);

/// Build the promise variable for the current coroutine.
VarDecl *buildCoroutinePromise(SourceLocation Loc);

/// Perform the final checks once a coroutine body is complete.
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);

/// Look up the std::experimental::coroutine_traits template.
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
                                         SourceLocation FuncLoc);

//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
  // Name of the OpenCL extension currently in effect (empty if none).
  std::string CurrOpenCLExtension;
  /// Extensions required by an OpenCL type.
  llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
  /// Extensions required by an OpenCL declaration.
  llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
  llvm::StringRef getCurrentOpenCLExtension() const {
    return CurrOpenCLExtension;
  }

  /// Check if a function declaration \p FD associates with any
  /// extensions present in OpenCLDeclExtMap and if so return the
  /// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);

/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);

/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);

void setCurrentOpenCLExtension(llvm::StringRef Ext) {
  CurrOpenCLExtension = Ext;
}

/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);

/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);

/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);

/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);

/// Return true if \p FD is disabled by its required OpenCL extensions.
bool isOpenCLDisabledDecl(Decl *FD);

/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);

/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);

//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
  // Opaque pointer to the stack of data-sharing attribute records.
  void *VarDataSharingAttributesStack;
  /// Number of nested '#pragma omp declare target' directives.
  unsigned DeclareTargetNestingLevel = 0;
  /// Initialization of data-sharing attributes stack.
  void InitDataSharingAttributesStack();
  // Tear down the data-sharing attributes stack.
  void DestroyDataSharingAttributesStack();

  /// Verify that \p Op is a positive integer constant for clause \p CKind.
  ExprResult VerifyPositiveIntegerConstantInClause(
      Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true);

  /// Returns OpenMP nesting level for current directive.
  unsigned getOpenMPNestingLevel() const;

  /// Adjusts the function scopes index for the target-based regions.
  void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
                                    unsigned Level) const;

  /// Returns the number of scopes associated with the construct on the given
  /// OpenMP level.
  int getNumberOfConstructScopes(unsigned Level) const;

  /// Push new OpenMP function region for non-capturing function.
  void pushOpenMPFunctionRegion();

  /// Pop OpenMP function region for non-capturing function.
  void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);

  /// Check whether we're allowed to call Callee from the current function.
  void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
                                 bool CheckForDelayedContext = true);

  /// Check whether we're allowed to call Callee from the current function.
  void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
                               bool CheckCaller = true);

  /// Check if the expression is allowed to be used in expressions for the
  /// OpenMP devices.
  void checkOpenMPDeviceExpr(const Expr *E);

  /// Finishes analysis of the deferred functions calls that may be declared
  /// as host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Marks all the functions that might be required for the currently active /// OpenMP context. void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse); public: /// Struct to store the context selectors info for declare variant directive. using OMPCtxStringType = SmallString<8>; using OMPCtxSelectorData = OpenMPCtxSelectorData<SmallVector<OMPCtxStringType, 4>, ExprResult>; /// Checks if the variant/multiversion functions are compatible. bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. 
/// \param Level Relative level of nested OpenMP construct for that the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. 
void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. 
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragma omp end declare target'. 
void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. 
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. 
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction( DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p FD. /// \param Data Set of context-specific data for the specified context /// selector. void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, SourceRange SR, ArrayRef<OMPCtxSelectorData> Data); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. 
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. 
OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. 
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation DepLinMapLastLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. 
OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. 
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. 
OMPClause *ActOnOpenMPDistScheduleClause(
    OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
    OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
    SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
                    DeclarationNameInfo &MapperId,
                    const OMPVarListLocTy &Locs,
                    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
    ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
    DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                         const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                        const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);

/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

/// Returns true if \p CCK names one of the explicit cast kinds (C-style,
/// functional-style, or other cast) rather than an implicit conversion or a
/// builtin-operator operand conversion.
static bool isCast(CheckedConversionKind CCK) {
  return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
         CCK == CCK_OtherCast;
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                             ExprValueKind VK = VK_RValue,
                             const CXXCastPath *BasePath = nullptr,
                             CheckedConversionKind CCK
                                = CCK_ImplicitConversion);

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);

// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);

/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);

// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                bool Diagnose = true);

// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. 
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. 
IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. 
IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. 
If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). 
/// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. 
enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. 
// returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. 
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. 
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. 
class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. 
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. 
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                    FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }

  // Streams Value into whichever diagnostic this builder holds: the
  // immediate diagnostic if one was created, otherwise the deferred partial
  // diagnostic stored in Sema::DeviceDeferredDiags (no-op for K_Nop).
  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                             const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
    Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
    SourceLocation OrigLoc,
    const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in CUDA device code.
///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
///    return ExprError();
///  // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in NVPTX device code.
///  if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
///    return ExprError();
///  // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
///   function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in NVPTX device code.
///  if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
///    return ExprError();
///  // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);

DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);

enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice,
  CFT_InvalidTarget
};

/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                      bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);

/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
  return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}

// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
  CFP_Never,      // Invalid caller/callee combination.
  CFP_WrongSide,  // Calls from host-device to host or device
                  // function that do not match current compilation
                  // mode.
  CFP_HostDevice, // Any calls to host/device functions.
  CFP_SameSide,   // Calls from host-device to host or device
                  // function matching current compilation mode.
  CFP_Native,     // host-to-host or device-to-device calls.
};

/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
///               nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                              const FunctionDecl *Callee);

/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
                       const FunctionDecl *Callee) {
  return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}

/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                 const LookupResult &Previous);

public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
///   (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
///   be emitted if and when the caller is codegen'ed, and returns true.
///
///   Will only create deferred diagnostics for a given SourceLocation once,
///   so you can safely call this multiple times without generating duplicate
///   deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);

/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
///        its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS,
                                             bool Diagnose);

/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);

// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);

/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
                             const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);

/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;

/// \name Code completion
//@{

/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
  /// Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// Code completion occurs within an Objective-C interface, protocol,
  /// or category.
  PCC_ObjCInterface,
  /// Code completion occurs within an Objective-C implementation or
  /// category implementation
  PCC_ObjCImplementation,
  /// Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// Code completion occurs following one or more template
  /// headers within a class.
  PCC_MemberTemplate,
  /// Code completion occurs within an expression.
  PCC_Expression,
  /// Code completion occurs within a statement, which may
  /// also be an expression or a declaration.
  PCC_Statement,
  /// Code completion occurs at the beginning of the
  /// initialization statement (or expression) in a for loop.
  PCC_ForInit,
  /// Code completion occurs within the condition of an if,
  /// while, switch, or for statement.
  PCC_Condition,
  /// Code completion occurs within the body of a function on a
  /// recovery path, where we do not have a specific handle on our position
  /// in the grammar.
  PCC_RecoveryInFunction,
  /// Code completion occurs where only a type is permitted.
  PCC_Type,
  /// Code completion occurs in a parenthesized expression, which
  /// might also be a type cast.
  PCC_ParenthesizedExpression,
  /// Code completion occurs within a sequence of declaration
  /// specifiers within a function, method, or block.
  PCC_LocalDeclarationSpecifiers
};

void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
                              ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers,
                          bool AllowNestedNameSpecifiers);

struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
                            const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
                            bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
                                     SourceLocation OpLoc, bool IsArrow,
                                     bool IsBaseExprStatement,
                                     QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
                                   QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
                                    const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
                                  SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
                                         SourceLocation Loc,
                                         ArrayRef<Expr *> Args,
                                         SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
                                            CXXScopeSpec SS,
                                            ParsedType TemplateTypeTy,
                                            ArrayRef<Expr *> ArgExprs,
                                            IdentifierInfo *II,
                                            SourceLocation OpenParLoc);

void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);

void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
                             bool EnteringContext, bool IsUsingDeclaration,
                             QualType BaseType, QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
    Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers);

void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
                                  bool AfterAmpersand);

void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
// @mulle-objc@ new property attributes container >
void CodeCompleteObjCPropertyAdder(Scope *S);
void CodeCompleteObjCPropertyRemover(Scope *S);
// @mulle-objc@ new property attributes container <
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
                                 bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
                                  ArrayRef<IdentifierInfo *> SelIdents,
                                  bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
                                  ArrayRef<IdentifierInfo *> SelIdents,
                                  bool AtArgumentExpression,
                                  bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
                                     ArrayRef<IdentifierInfo *> SelIdents,
                                     bool AtArgumentExpression,
                                     ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
                              ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
    ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
                                SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName,
                                       SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
                                            IdentifierInfo *ClassName,
                                            SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
                                            IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
                                ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod,
                                        bool AtParameterName,
                                        ParsedType ReturnType,
                                        ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S,
                                          IdentifierInfo &ClassName,
                                          SourceLocation ClassNameLoc,
                                          bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro,
                                           MacroInfo *MacroInfo,
                                           unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(
    CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo,
    SmallVectorImpl<CodeCompletionResult> &Results);
//@}

//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system

public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
                                              unsigned ByteNo) const;

private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                      const ArraySubscriptExpr *ASE=nullptr,
                      bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);

// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
  unsigned FormatIdx;
  unsigned FirstDataArg;
  bool HasVAListArg;
};

static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                                FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                       const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
                         ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                      const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
                          ArrayRef<const Expr *> Args,
                          const FunctionProtoType *Proto,
                          SourceLocation Loc);

void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
               const Expr *ThisArg, ArrayRef<const Expr *> Args,
               bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
               VariadicCallType CallType);

bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);

ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                                    CallExpr *TheCall);

void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                         CallExpr *TheCall);

bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                  unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
// Per-target checkers for target-specific builtin calls; dispatched from
// CheckBuiltinFunctionCall based on the builtin's owning target.
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                       CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);

bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);

public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                 SourceLocation BuiltinLoc,
                                 SourceLocation RParenLoc);

private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                   AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
                                                  bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                            llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
                                 int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                    unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                              int ArgNum, unsigned ExpectedFieldNum,
                              bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);

public:
enum FormatStringType {
  FST_Scanf,
  FST_Printf,
  FST_NSString,
  FST_Strftime,
  FST_Strfmon,
  FST_Kprintf,
  FST_FreeBSDKPrintf,
  FST_OSTrace,
  FST_OSLog,
  FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);

bool FormatStringHasSArg(const StringLiteral *FExpr);

static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);

private:
bool CheckFormatArguments(const FormatAttr *Format,
                          ArrayRef<const Expr *> Args, bool IsCXXMember,
                          VariadicCallType CallType, SourceLocation Loc,
                          SourceRange Range,
                          llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg,
                          unsigned format_idx, unsigned firstDataArg,
                          FormatStringType Type, VariadicCallType CallType,
                          SourceLocation Loc, SourceRange range,
                          llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
                                const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);

void CheckMemaccessArguments(const CallExpr *Call, unsigned BId,
                             IdentifierInfo *FnName);

void CheckStrlcpycatArguments(const CallExpr *Call,
                              IdentifierInfo *FnName);

void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName);

void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                        SourceLocation ReturnLoc, bool isObjCMethod = false,
                        const AttrVec *Attrs = nullptr,
                        const FunctionDecl *FD = nullptr);

public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);

private:
void CheckImplicitConversions(Expr *E,
                              SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);

/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
                        bool IsConstexpr = false);

void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
                                 Expr *Init);

/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
                                DeclarationName FieldName,
                                const CXXRecordDecl *RD,
                                bool DeclIsField = true);

/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);

/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);

void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                               bool DeleteWasArrayForm);

public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
      : Type(Type), LayoutCompatible(LayoutCompatible),
        MustBeNull(MustBeNull) {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const ArrayRef<const Expr *> ExprArgs,
                              SourceLocation CallSiteLoc);

/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);

/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;

/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;

/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
  return NumArgs > NumParams;
}

// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;

private:
int ParsingClassDepth = 0;

class SavePendingParsedClassStateRAII {
public:
  SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

  ~SavePendingParsedClassStateRAII() {
    assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    swapSavedState();
  }

private:
  Sema &S;
  decltype(DelayedOverridingExceptionSpecChecks)
      SavedOverridingExceptionSpecChecks;
  decltype(DelayedEquivalentExceptionSpecChecks)
      SavedEquivalentExceptionSpecChecks;

  void swapSavedState() {
    SavedOverridingExceptionSpecChecks.swap(
        S.DelayedOverridingExceptionSpecChecks);
    SavedEquivalentExceptionSpecChecks.swap(
        S.DelayedEquivalentExceptionSpecChecks);
  }
};

/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
  Expr *E;
  RecordDecl *RD;
  ValueDecl *MD;
  CharUnits Alignment;

  MisalignedMember() : E(), RD(), MD(), Alignment() {}
  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  // Equality is determined by the expression alone; the remaining fields
  // are carried metadata.
  bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};

/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;

/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                   CharUnits Alignment);

public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();

/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action);

/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
  ForThisTarget = 0,
  VariadicFunction,
  ConstructorDestructor,
  BuiltinFunction
};
};

/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Whether the destructor must pop the evaluation context it pushed.
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context,
                         Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm

#endif
VectorMatrix.h
// Copyright 2015 Christina Teflioudi // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* * VectorMatrix.h * * Created on: Oct 10, 2013 * Author: chteflio */ #ifndef VECTORMATRIX_H_ #define VECTORMATRIX_H_ #include <boost/numeric/ublas/matrix_proxy.hpp> #include <fstream> #include <iostream> #include <cmath> #include <util/exception.h> #include <util/io.h> #include <string> #include <ostream> #include <iomanip> #include <boost/unordered_map.hpp> #include <boost/algorithm/string/predicate.hpp> #ifdef WITH_SIMD #include <pmmintrin.h> //SSE3 #endif using boost::unordered_map; namespace mips { inline void skipLineFromFile(std::ifstream &file) { char c = 0; while (c != '\n' && !file.eof() && file.good()) { file >> std::noskipws >> c; } file >> std::skipws; } inline void computeDefaultBlockOffsets(row_type size, row_type blocks, std::vector<row_type> &blockOffsets, row_type start = 0) { blockOffsets.resize(blocks); row_type minSize = size / blocks; row_type remainder = size % blocks; for (row_type i = 0; i < blocks; ++i) { if (i == 0) { blockOffsets[i] = start; } else { blockOffsets[i] = minSize + blockOffsets[i - 1]; if (remainder > 0) { ++blockOffsets[i]; --remainder; } } } }; inline void scaleAndCopy(double *v1, const double *v2, double scale, col_type colNum) { for (int j = 0; j < colNum; ++j) { v1[j] = v2[j] * scale; } } inline void copy(double *v1, const double *v2, col_type colNum) { for (int j = 0; j < colNum; ++j) { v1[j] = v2[j]; } // std::memcpy((void*) v1, (void*) 
v2, sizeof (double)*colNum); } inline double calculateLength(const double *vec, col_type colNum) { double len = 0; for (int j = 0; j < colNum; ++j) { len += vec[j] * vec[j]; } return sqrt(len); } class VectorMatrix { double *data; bool shuffled, normalized, extraMult; row_type offset; col_type lengthOffset; int sizeDiv2; // for simd instruction inline void zeroOutLastPadding() { for (row_type i = 0; i < rowNum; ++i) { data[(i + 1) * offset - 1] = 0; // zero-out the last padding } } // stuff that needs to be done in both read methods // rowNum and colNum need to be initialized before calling this method inline void readFromFileCommon() { if (pow(2, sizeof(col_type) * 8) - 1 < colNum) { std::cerr << "Your vectors have dimensionality " << colNum << " which is more than what lemp is compiled to store. Change " "the col_type in BasicStructs.h and recompile!" << std::endl; exit(1); } if (pow(2, sizeof(row_type) * 8) - 1 < rowNum) { std::cerr << "Your dataset has " << rowNum << " vectors which is more than what lemp is compiled to " "store. Change the row_type in BasicStructs.h and recompile!" << std::endl; exit(1); } initializeBasics(colNum, rowNum, false); if (colNum < NUM_LISTS) { std::cout << "[WARNING] Your vectors have dimensionality" << colNum << " and the tuner will try to search among " << NUM_LISTS << ". Perhaps you want to change the parameter NUM_LISTS in " "Definitions.h and recompile!" << std::endl; } if (LOWER_LIMIT_PER_BUCKET >= rowNum) { std::cout << "[WARNING] You have " << rowNum << " vectors and the tuner will try to take a sample of at least " << LOWER_LIMIT_PER_BUCKET << " vectors per probe bucket. Perhaps you want to change the " "parameter LOWER_LIMIT_PER_BUCKET in Definitions.h and recompile!" 
<< std::endl; } for (int i = 0; i < rowNum; i++) { setLengthInData(i, 1); } } inline void readFromFileCSV(const std::string &fileName, ta_size_type col, ta_size_type row) { std::ifstream file(fileName.c_str(), std::ios_base::in); if (!file.is_open()) { std::cout << "[ERROR] Fail to open file: " << fileName << std::endl; exit(1); } rowNum = row; colNum = col; std::cout << "[INFO] VectorMatrix will be read from " << fileName << " (" << rowNum << " vectors with dimensionality " << (0 + colNum) << ")" << std::endl; VectorMatrix::readFromFileCommon(); std::string buffer; if (file) { for (ta_size_type i = 0; i < row; i++) { double *d = getMatrixRowPtr(i); for (ta_size_type j = 0; j < col; j++) { double f; file >> f; if (j != col - 1) { std::getline(file, buffer, ','); } d[j] = f; } std::getline(file, buffer); } } file.close(); } inline void readFromFileMMA(const std::string &fileName, bool left = true) { std::ifstream file(fileName.c_str(), std::ios_base::in); if (!file.is_open()) { std::cout << "[ERROR] Fail to open file: " << fileName << std::endl; exit(1); } while (file.peek() == '%') { skipLineFromFile(file); } ta_size_type col; // columns ta_size_type row; // rows file >> row >> col; rowNum = (left ? row : col); colNum = (left ? 
col : row); std::cout << "[INFO] VectorMatrix will be read from " << fileName << " (" << rowNum << " vectors with dimensionality " << (0 + colNum) << ")" << std::endl; VectorMatrix::readFromFileCommon(); if (left) { if (file) { for (ta_size_type i = 0; i < col; i++) { // read one column for (ta_size_type j = 0; j < row; j++) { double f; file >> f; double *d = getMatrixRowPtr(j); d[i] = f; } } } file.close(); } else { if (file) { for (ta_size_type i = 0; i < col; i++) { // read one column for (ta_size_type j = 0; j < row; j++) { double f; file >> f; double *d = getMatrixRowPtr(i); d[j] = f; } } } file.close(); } } // const VectorMatrix& operator =(const VectorMatrix& m); public: std::vector<double> cweights; // forAP std::vector<double> maxVectorCoord; // forAP std::vector<row_type> vectorNNZ; // forAP std::vector<QueueElement> lengthInfo; // data: length id: vectorId std::vector<double> epsilonEquivalents; col_type colNum; row_type rowNum; friend void splitMatrices(const VectorMatrix &originalMatrix, std::vector<VectorMatrix> &matrices); friend void initializeMatrices(const VectorMatrix &originalMatrix, std::vector<VectorMatrix> &matrices, bool sort, bool ignoreLengths, double epsilon); inline VectorMatrix() : data(nullptr), shuffled(false), normalized(false), lengthOffset(1) { ////////////////////// 1 is for padding } inline VectorMatrix(double *ptr, col_type _colNum, row_type _rowNum) : data(ptr), colNum(_colNum), rowNum(_rowNum), shuffled(false), normalized(false) {} inline VectorMatrix(const std::vector<std::vector<double> > m) : data(nullptr), shuffled(false), normalized(false), lengthOffset(1) { initializeBasics(m[0].size(), m.size(), false); #pragma omp parallel for schedule(static, 1000) for (int i = 0; i < rowNum; ++i) { double *v1 = getMatrixRowPtr(i); const double *v2 = &m[i][0]; // std::memcpy((void*) v1, (void*) v2, sizeof // (double)*colNum); copy(v1, v2, colNum); // for (int j = 0; j < colNum; ++j) { //// v1[j] = v2[j]; // std::cout<<v1[j]<<" "; // } 
// std::cout<<std::endl; } // std::cout<<"offset: "<<(int)offset<<" // "<<(int)lengthOffset<<std::endl; } VectorMatrix &operator=(const VectorMatrix &r) { colNum = r.colNum; rowNum = r.rowNum; shuffled = r.shuffled; normalized = r.normalized; extraMult = r.extraMult; offset = r.offset; lengthOffset = r.lengthOffset; sizeDiv2 = r.sizeDiv2; lengthInfo.clear(); lengthInfo.reserve(r.lengthInfo.size()); std::copy(r.lengthInfo.begin(), r.lengthInfo.end(), back_inserter(lengthInfo)); cweights.clear(); cweights.reserve(r.cweights.size()); std::copy(r.cweights.begin(), r.cweights.end(), back_inserter(cweights)); maxVectorCoord.clear(); maxVectorCoord.reserve(r.maxVectorCoord.size()); std::copy(r.maxVectorCoord.begin(), r.maxVectorCoord.end(), back_inserter(maxVectorCoord)); vectorNNZ.clear(); vectorNNZ.reserve(r.vectorNNZ.size()); std::copy(r.vectorNNZ.begin(), r.vectorNNZ.end(), back_inserter(vectorNNZ)); epsilonEquivalents.clear(); epsilonEquivalents.reserve(r.epsilonEquivalents.size()); std::copy(r.epsilonEquivalents.begin(), r.epsilonEquivalents.end(), back_inserter(epsilonEquivalents)); int res = posix_memalign((void **)&(data), 16, sizeof(double) * offset * rowNum); if (res != 0) { std::cout << "[ERROR] Problem with allocating memory for VectorMatrix!" 
<< std::endl; exit(1); } std::memcpy((void *)data, (void *)r.data, sizeof(double) * offset * rowNum); } inline ~VectorMatrix() { if (data != nullptr) { free(data); data = nullptr; } } inline void fillInRandom(row_type rows, col_type cols) { initializeBasics(cols, rows, false); rg::Random32 rand(time(nullptr)); for (int i = 0; i < rowNum; ++i) { double *vec = getMatrixRowPtr(i); for (int j = 0; j < colNum; ++j) { vec[j] = rand.nextDouble(); } } } inline void initializeBasics(col_type numOfColumns, row_type numOfRows, bool norm) { colNum = numOfColumns; offset = colNum + 2; sizeDiv2 = colNum & (-2); extraMult = (sizeDiv2 < colNum); if (extraMult) offset++; rowNum = numOfRows; normalized = norm; lengthInfo.resize(rowNum); int res = posix_memalign((void **)&(data), 16, sizeof(double) * offset * rowNum); if (res != 0) { std::cout << "[ERROR] Problem with allocating memory for VectorMatrix!" << std::endl; exit(1); } if (extraMult) { zeroOutLastPadding(); } } inline void readFromFile(const std::string &fileName, int numCoordinates, int numVectors, bool left = true) { if (boost::algorithm::ends_with(fileName, ".csv")) { if (numCoordinates == 0 || numVectors == 0) { std::cerr << "When using csv files, you should provide the number of " "coordinates (--r) and the number of vectors (--m or --n)!" << std::endl; exit(1); } readFromFileCSV(fileName, numCoordinates, numVectors); } else if (boost::algorithm::ends_with(fileName, ".mma")) { readFromFileMMA(fileName, left); } else { std::cerr << "No valid input file format to read a VectorMatrix from!" 
<< std::endl; exit(1); } } inline void init(const VectorMatrix &matrix, bool sort, bool ignoreLength) { initializeBasics(matrix.colNum, matrix.rowNum, true); if (ignoreLength) { #pragma omp parallel for schedule(static, 1000) // get lengths for (int i = 0; i < rowNum; ++i) { const double *vec = matrix.getMatrixRowPtr(i); double len = calculateLength(vec, colNum); lengthInfo[i] = QueueElement(1, i); setLengthInData(i, 1); double x = 1 / len; double *d1 = getMatrixRowPtr(i); scaleAndCopy(d1, vec, x, colNum); } } else { #pragma omp parallel for schedule(static, 1000) for (int i = 0; i < rowNum; ++i) { const double *vec = matrix.getMatrixRowPtr(i); double len = calculateLength(vec, colNum); lengthInfo[i] = QueueElement(len, i); } if (sort) { shuffled = true; std::sort(lengthInfo.begin(), lengthInfo.end(), std::greater<QueueElement>()); } #pragma omp parallel for schedule(static, 1000) for (int i = 0; i < rowNum; ++i) { setLengthInData(i, lengthInfo[i].data); double x = 1 / lengthInfo[i].data; double *d1 = getMatrixRowPtr(i); double *d2 = matrix.getMatrixRowPtr(lengthInfo[i].id); scaleAndCopy(d1, d2, x, colNum); } } } inline void addVectors(const VectorMatrix &matrix, const std::vector<row_type> &dataIds) { initializeBasics(matrix.colNum, dataIds.size(), false); for (int i = 0; i < rowNum; ++i) { const double *vec = matrix.getMatrixRowPtr(dataIds[i]); lengthInfo[i] = QueueElement(1, dataIds[i]); double *d1 = getMatrixRowPtr(i); scaleAndCopy(d1, vec, 1, colNum); } } inline double *getMatrixRowPtr(row_type row) const { // the row starts from pos 1. 
Do ptr[-1] to get the length return &data[row * offset + 1 + lengthOffset]; } inline void print(row_type row) const { const double *vec = getMatrixRowPtr(row); for (int i = 0; i < colNum; ++i) { std::cout << i << ":" << vec[i] << " "; } std::cout << std::endl; std::cout << "Length: " << vec[-1] << " or " << lengthInfo[row].data << std::endl; std::cout << "hasId: " << lengthInfo[row].id << std::endl; } inline double getVectorLength(row_type row) const { return data[row * offset + lengthOffset]; } inline double setLengthInData(row_type row, double len) { return data[row * offset + lengthOffset] = len; } inline row_type getId(row_type row) const { return (normalized ? lengthInfo[row].id : row); } inline double cosine(row_type row, const double *query) const { const double *d_ptr = getMatrixRowPtr(row); double cosine = 0; #ifdef WITH_SIMD __m128d sum = _mm_set1_pd(0.0); int size = colNum + extraMult; for (int i = 0; i < size; i += 2) { sum = _mm_add_pd( sum, _mm_mul_pd(_mm_load_pd(d_ptr + i), _mm_load_pd(query + i))); } cosine = _mm_cvtsd_f64(_mm_hadd_pd(sum, sum)); return cosine; #else for (int i = 0; i < colNum; ++i) { cosine += query[i] * d_ptr[i]; } return cosine; #endif } inline double L2Distance(row_type row, const double *query) const { const double *d_ptr = getMatrixRowPtr(row); double dist = 0; if (normalized) { for (int i = 0; i < colNum; ++i) { double value = query[i] * query[-1] - d_ptr[i] * d_ptr[-1]; // unnormalize dist += value * value; } } else { for (int i = 0; i < colNum; ++i) { dist += (query[i] - d_ptr[i]) * (query[i] - d_ptr[i]); } } return sqrt(dist); } inline double L2Distance2(row_type row, const double *query) const { // I assume non normalized case as needed in PCA trees const double *d_ptr = getMatrixRowPtr(row); double dist = 0; for (int i = 0; i < colNum; ++i) { dist += (query[i] - d_ptr[i]) * (query[i] - d_ptr[i]); } return dist; } inline double innerProduct(row_type row, const double *query) const { const double ip = query[-1] * 
getVectorLength(row) * cosine(row, query); return ip; } inline std::pair<bool, double> passesThreshold(row_type row, const double *query, double theta) const { std::pair<bool, double> p; double ip = 1; if (normalized) { ip = query[-1] * getVectorLength(row); if (ip < theta) { p.first = false; return p; } } ip *= cosine(row, query); p.second = ip; if (ip < theta) { p.first = false; return p; } else { p.first = true; return p; } } }; // ignores the lengths inline void splitMatrices(const VectorMatrix &originalMatrix, std::vector<VectorMatrix> &matrices) { row_type threads = matrices.size(); if (threads == 1) { matrices[0] .initializeBasics(originalMatrix.colNum, originalMatrix.rowNum, false); for (int i = 0; i < matrices[0].rowNum; ++i) { const double *vec = originalMatrix.getMatrixRowPtr(i); matrices[0].lengthInfo[i] = QueueElement(1, i); matrices[0].setLengthInData(i, 1); double *d1 = matrices[0].getMatrixRowPtr(i); scaleAndCopy(d1, vec, 1, originalMatrix.colNum); } } else { omp_set_num_threads(threads); std::vector<row_type> permuteVector(originalMatrix.rowNum); std::iota(permuteVector.begin(), permuteVector.end(), 0); rg::Random32 random(123); rg::shuffle(permuteVector.begin(), permuteVector.end(), random); std::vector<row_type> blockOffsets; computeDefaultBlockOffsets(permuteVector.size(), threads, blockOffsets); #pragma omp parallel { row_type tid = omp_get_thread_num(); row_type start = blockOffsets[tid]; row_type end = (tid == blockOffsets.size() - 1 ? 
originalMatrix.rowNum : blockOffsets[tid + 1]); matrices[tid].initializeBasics(originalMatrix.colNum, end - start, true); for (int i = start; i < end; ++i) { row_type ind = permuteVector[i]; const double *vec = originalMatrix.getMatrixRowPtr(ind); matrices[tid].lengthInfo[i - start] = QueueElement(1, i - start); matrices[tid].setLengthInData(i - start, 1); double *d1 = matrices[tid].getMatrixRowPtr(i - start); scaleAndCopy(d1, vec, 1, originalMatrix.colNum); matrices[tid].lengthInfo[i - start].id = ind; // the original id } } } } /* map: id: original matrix id, first: thread second: posInMatrix */ inline void initializeMatrices(const VectorMatrix &originalMatrix, std::vector<VectorMatrix> &matrices, bool sort, bool ignoreLengths, double epsilon = 0) { row_type threads = matrices.size(); if (threads == 1) { matrices[0] .initializeBasics(originalMatrix.colNum, originalMatrix.rowNum, true); if (ignoreLengths) { #if defined(ABS_APPROX) || defined(HYBRID_APPROX) matrices[0].epsilonEquivalents.resize(matrices[0].rowNum, epsilon); #endif for (int i = 0; i < matrices[0].rowNum; ++i) { const double *vec = originalMatrix.getMatrixRowPtr(i); double len = calculateLength(vec, matrices[0].colNum); matrices[0].lengthInfo[i] = QueueElement(1, i); matrices[0].setLengthInData(i, 1); double x = 1 / len; double *d1 = matrices[0].getMatrixRowPtr(i); scaleAndCopy(d1, vec, x, originalMatrix.colNum); #if defined(ABS_APPROX) || defined(HYBRID_APPROX) matrices[0].epsilonEquivalents[i] *= x; #endif } } else { for (int i = 0; i < matrices[0].rowNum; ++i) { const double *vec = originalMatrix.getMatrixRowPtr(i); double len = calculateLength(vec, matrices[0].colNum); matrices[0].lengthInfo[i] = QueueElement(len, i); } if (sort) { matrices[0].shuffled = true; std::sort(matrices[0].lengthInfo.begin(), matrices[0].lengthInfo.end(), std::greater<QueueElement>()); } for (int i = 0; i < matrices[0].rowNum; ++i) { matrices[0].setLengthInData(i, matrices[0].lengthInfo[i].data); double x = 1 / 
matrices[0].lengthInfo[i].data; double *d1 = matrices[0].getMatrixRowPtr(i); double *d2 = originalMatrix.getMatrixRowPtr(matrices[0].lengthInfo[i].id); scaleAndCopy(d1, d2, x, originalMatrix.colNum); } } } else { // multiple threads omp_set_num_threads(threads); std::vector<row_type> permuteVector(originalMatrix.rowNum); std::iota(permuteVector.begin(), permuteVector.end(), 0); rg::Random32 random(123); rg::shuffle(permuteVector.begin(), permuteVector.end(), random); std::vector<row_type> blockOffsets; computeDefaultBlockOffsets(permuteVector.size(), threads, blockOffsets); #pragma omp parallel { row_type tid = omp_get_thread_num(); row_type start = blockOffsets[tid]; row_type end = (tid == blockOffsets.size() - 1 ? originalMatrix.rowNum : blockOffsets[tid + 1]); matrices[tid].initializeBasics(originalMatrix.colNum, end - start, true); if (ignoreLengths) { #if defined(ABS_APPROX) || defined(HYBRID_APPROX) matrices[tid].epsilonEquivalents.resize(matrices[tid].rowNum, epsilon); #endif for (int i = start; i < end; ++i) { row_type ind = permuteVector[i]; const double *vec = originalMatrix.getMatrixRowPtr(ind); double len = calculateLength(vec, matrices[tid].colNum); matrices[tid].lengthInfo[i - start] = QueueElement(1, i - start); matrices[tid].setLengthInData(i - start, 1); double x = 1 / len; double *d1 = matrices[tid].getMatrixRowPtr(i - start); scaleAndCopy(d1, vec, x, originalMatrix.colNum); matrices[tid].lengthInfo[i - start].id = ind; // the original id #if defined(ABS_APPROX) || defined(HYBRID_APPROX) matrices[tid].epsilonEquivalents[i] *= x; #endif } } else { for (int i = start; i < end; ++i) { row_type ind = permuteVector[i]; const double *vec = originalMatrix.getMatrixRowPtr(ind); double len = calculateLength(vec, matrices[tid].colNum); matrices[tid].lengthInfo[i - start] = QueueElement(len, i - start); } if (sort) { matrices[tid].shuffled = true; std::sort(matrices[tid].lengthInfo.begin(), matrices[tid].lengthInfo.end(), std::greater<QueueElement>()); } for 
(int i = 0; i < matrices[tid].rowNum; ++i) { matrices[tid].setLengthInData(i, matrices[tid].lengthInfo[i].data); double x = 1 / matrices[tid].lengthInfo[i].data; row_type ind = permuteVector[matrices[tid].lengthInfo[i].id + start]; double *d1 = matrices[tid].getMatrixRowPtr(i); double *d2 = originalMatrix.getMatrixRowPtr(ind); scaleAndCopy(d1, d2, x, originalMatrix.colNum); matrices[tid].lengthInfo[i].id = ind; // the original id } } } } } void calculateAPneededForQuery(std::vector<VectorMatrix> &matrices, double thres, int k, std::vector<double> &global_cweights) { global_cweights.resize(matrices[0].colNum, 0); #pragma omp parallel { row_type tid = omp_get_thread_num(); col_type colNum = matrices[tid].colNum; row_type rowNum = matrices[tid].rowNum; row_type endUser = rowNum; if (k == 0) { auto up = std::lower_bound( matrices[tid].lengthInfo.begin(), matrices[tid].lengthInfo.end(), QueueElement(thres, 0), std::greater<QueueElement>()); endUser = up - matrices[tid].lengthInfo.begin(); } matrices[tid].cweights.resize(colNum); matrices[tid].maxVectorCoord.resize(rowNum); matrices[tid].vectorNNZ.resize(rowNum, 0); for (int i = 0; i < endUser; ++i) { double *d = matrices[tid].getMatrixRowPtr(i); for (int j = 0; j < colNum; ++j) { if (matrices[tid].cweights[j] < fabs(d[j])) matrices[tid].cweights[j] = fabs(d[j]); if (d[j] != 0) matrices[tid].vectorNNZ[i]++; if (fabs(d[j]) > matrices[tid].maxVectorCoord[i]) matrices[tid].maxVectorCoord[i] = fabs(d[j]); } } #pragma omp critical { for (int i = 0; i < colNum; ++i) { if (global_cweights[i] < matrices[tid].cweights[i]) global_cweights[i] = matrices[tid].cweights[i]; } } } } } #endif /* VECTORMATRIX_H_ */
mobilenet_224.c
/* Pretrained MobileNet Convolutional Neural Network in C language and OpenMP API GitHUB Page: https://github.com/jcanore/vgg16 Author: ZFTurbo/jocare Compilation: gcc -O3 MobileNet_CPU_cifar.c -lm -fopenmp -o MobileNet_CPU_cifar Usage: MobileNet_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)> Example: MobileNet_CPU_cifar ../../weights/weights.txt" ../../img/image_list.txt results_imagenet_conv.txt 1 */ #include <ctype.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> double get_seconds(struct timeval tStart, struct timeval tEnd) { return ((tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec) / 1.e6; } #define SIZE 224 #define CONV_SIZE 3 #define CONV_LEVELS 27 //#define _CRT_SECURE_NO_WARNINGS 1 // precompile variables // assure default values if nothing provided #ifndef SPARSE_CONVOLUTIONS #define SPARSE_CONVOLUTIONS 0 // default dense convolutions #endif // SPARSE_CONVOLUTIONS #ifndef FIRST_CONV_SPARSE #define FIRST_CONV_SPARSE 0 // this is almost never 1 #endif // FIRST_CONV_SPARSE #ifndef SPARSE_FULLY_CONNECTED #define SPARSE_FULLY_CONNECTED 0 // this is not implemented yet #endif // SPARSE_FULLY_CONNECTED #ifndef FISHER_PRUNING #define FISHER_PRUNING \ 0 // set for fisher pruning, all previous variables changed to dense #endif // FISHER_PRUNING #ifndef NUMBER_OF_THREADS #define NUMBER_OF_THREADS 1 // number of threads to run on //#define NUMBER_OF_THREADS omp_get_num_procs() - 1 #endif // NUMBER_OF_THREADS static double pw_conv_time = 0.0; static double dense_time = 0.0; /****************************************************************************************************************************/ int im_sizes[27] = {224, 224, 16, 16, 16, 16, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2}; int strides[26] = {1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 
1, 1}; int mem_block_shape[3] = { 1024, 224, 224}; // allocate the absolute maximum amount of space we will need float ***block1; float ***block2; float *****wc; // weights convolution float ***wd; // weights dense float **bd; // biases dense float **batchnorm_weights; float **batchnorm_biases; float **batchnorm_means; // running mean and variance from training used to // estimate population statistics float **batchnorm_vars; int mem_block_dense_shape = { 1024 * 2 * 2}; // size of output from last convolutional layer float *mem_block1_dense; float *mem_block2_dense; #if SPARSE_CONVOLUTIONS // sparse conv csr_t ****wc_sparse; #endif // SPARSE_CONVOLUTIONS #if FISHER_PRUNING #define SPARSE_CONVOLUTIONS 0 // force dense convolutions /* // ORIGINAL FISHER EXPERIMENTS int cshape[27][4] = { { 224, 3, CONV_SIZE, CONV_SIZE }, { 224, 1, CONV_SIZE, CONV_SIZE }, { 43, 224, 1, 1 }, { 43, 1, CONV_SIZE, CONV_SIZE }, { 85, 43, 1, 1 }, { 85, 1, CONV_SIZE, CONV_SIZE }, { 70, 85, 1, 1 }, { 70, 1, CONV_SIZE, CONV_SIZE }, { 150, 70, 1, 1 }, { 150, 1, CONV_SIZE, CONV_SIZE }, { 69, 150, 1, 1 }, { 69, 1, CONV_SIZE, CONV_SIZE }, { 188, 69, 1, 1 }, { 188, 1, CONV_SIZE, CONV_SIZE }, { 72, 188, 1, 1 }, { 72, 1, CONV_SIZE, CONV_SIZE }, { 122, 72, 1, 1 }, { 122, 1, CONV_SIZE, CONV_SIZE }, { 106, 122, 1, 1 }, { 106, 1, CONV_SIZE, CONV_SIZE }, { 96, 106, 1, 1 }, { 96, 1, CONV_SIZE, CONV_SIZE }, { 81, 96, 1, 1 }, { 81, 1, CONV_SIZE, CONV_SIZE }, { 75, 81, 1, 1 }, { 75, 1, CONV_SIZE, CONV_SIZE }, { 100, 75, 1, 1 } }; int dshape[1][2]= { { 100, 10} }; */ // FIXED 90% ACCURACY EXPERIMENTS int cshape[27][4] = {{224, 3, CONV_SIZE, CONV_SIZE}, {224, 1, CONV_SIZE, CONV_SIZE}, {43, 224, 1, 1}, {43, 1, CONV_SIZE, CONV_SIZE}, {85, 43, 1, 1}, {85, 1, CONV_SIZE, CONV_SIZE}, {70, 85, 1, 1}, {70, 1, CONV_SIZE, CONV_SIZE}, {150, 70, 1, 1}, {150, 1, CONV_SIZE, CONV_SIZE}, {69, 150, 1, 1}, {69, 1, CONV_SIZE, CONV_SIZE}, {188, 69, 1, 1}, {188, 1, CONV_SIZE, CONV_SIZE}, {72, 188, 1, 1}, {72, 1, CONV_SIZE, 
CONV_SIZE}, {122, 72, 1, 1}, {122, 1, CONV_SIZE, CONV_SIZE}, {106, 122, 1, 1}, {106, 1, CONV_SIZE, CONV_SIZE}, {96, 106, 1, 1}, {96, 1, CONV_SIZE, CONV_SIZE}, {81, 96, 1, 1}, {81, 1, CONV_SIZE, CONV_SIZE}, {75, 81, 1, 1}, {75, 1, CONV_SIZE, CONV_SIZE}, {100, 75, 1, 1} }; int dshape[1][2] = {{100, 10}}; #else // PLAIN int cshape[27][4] = {{224, 3, CONV_SIZE, CONV_SIZE}, {224, 1, CONV_SIZE, CONV_SIZE}, {64, 224, 1, 1}, {64, 1, CONV_SIZE, CONV_SIZE}, {128, 64, 1, 1}, {128, 1, CONV_SIZE, CONV_SIZE}, {128, 128, 1, 1}, {128, 1, CONV_SIZE, CONV_SIZE}, {256, 128, 1, 1}, {256, 1, CONV_SIZE, CONV_SIZE}, {256, 256, 1, 1}, {256, 1, CONV_SIZE, CONV_SIZE}, {512, 256, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {1024, 512, 1, 1}, {1024, 1, CONV_SIZE, CONV_SIZE}, {1024, 1024, 1, 1}}; int dshape[1][2] = {{1024, 10}}; #endif // FISHER_PRUNING /****************************************************************************************************************************/ void reset_mem_block(float ***mem) { int i, j, k; for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { for (k = 0; k < mem_block_shape[2]; k++) { mem[i][j][k] = 0.0; } } } } /****************************************************************************************************************************/ void reset_mem_block_dense(float *mem) { int i; for (i = 0; i < mem_block_dense_shape; i++) { mem[i] = 0.0; } } /****************************************************************************************************************************/ void init_memory() { int i, j, k, l; int max_channels = 1024; int max_im_size = 224; block1 = malloc(max_channels * sizeof(float **)); block2 = malloc(max_channels * sizeof(float **)); // allocate block memory for (i 
= 0; i < max_channels; i++) { block1[i] = malloc(max_im_size * sizeof(float *)); block2[i] = malloc(max_im_size * sizeof(float *)); for (j = 0; j < max_im_size; j++) { block1[i][j] = malloc(max_im_size * sizeof(float)); block2[i][j] = malloc(max_im_size * sizeof(float)); } } #if SPARSE_CONVOLUTIONS wc_sparse = (csr_t ****)malloc(CONV_LEVELS * sizeof(csr_t ***)); for (l = 0; l < CONV_LEVELS; l++) { wc_sparse[l] = (csr_t ***)malloc(cshape[l][0] * sizeof(csr_t **)); for (i = 0; i < cshape[l][0]; i++) { wc_sparse[l][i] = (csr_t **)malloc(cshape[l][1] * sizeof(csr_t *)); } } // wc memory allocated below will be freed in read_weights if // SPARSE_CONVOLUTIONS #endif // SPARSE_CONVOLUTIONS wc = malloc(CONV_LEVELS * sizeof(float ****)); // allocate kernel memory for (l = 0; l < CONV_LEVELS; l++) { wc[l] = malloc(cshape[l][0] * sizeof(float ***)); for (i = 0; i < cshape[l][0]; i++) { wc[l][i] = malloc(cshape[l][1] * sizeof(float **)); for (j = 0; j < cshape[l][1]; j++) { wc[l][i][j] = malloc(cshape[l][2] * sizeof(float *)); for (k = 0; k < cshape[l][2]; k++) { wc[l][i][j][k] = malloc(cshape[l][3] * sizeof(float)); } } } } // allocate batchnorm memory batchnorm_weights = malloc(27 * sizeof(float *)); batchnorm_biases = malloc(27 * sizeof(float *)); batchnorm_means = malloc(27 * sizeof(float *)); batchnorm_vars = malloc(27 * sizeof(float *)); for (l = 0; l < CONV_LEVELS; l++) { batchnorm_weights[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_biases[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_means[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_vars[l] = malloc(cshape[l][0] * sizeof(float)); } wd = malloc(1 * sizeof(float **)); bd = malloc(1 * sizeof(float *)); for (l = 0; l < 1; l++) { wd[l] = malloc(dshape[l][0] * sizeof(float *)); for (i = 0; i < dshape[l][0]; i++) { wd[l][i] = malloc(dshape[l][1] * sizeof(float)); } bd[l] = malloc(dshape[l][1] * sizeof(float)); } // allocate dense memory mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float)); 
mem_block2_dense = calloc(mem_block_dense_shape, sizeof(float)); } /****************************************************************************************************************************/ void free_memory() { int i, j, k, l; // Free convolution weights for (l = 0; l < CONV_LEVELS; l++) { #if SPARSE_CONVOLUTIONS for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { free(wc_sparse[l][i][j]); } free(wc_sparse[l][i]); } free(wc_sparse[l]); #else for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); #endif } // free(wc); // free(bc); #if SPARSE_CONVOLUTIONS free(wc_sparse); #else free(wc); #endif // SPARSE_CONVOLUTIONS // Free dense weights for (l = 0; l < 1; l++) { for (i = 0; i < dshape[l][0]; i++) { free(wd[l][i]); } free(wd[l]); free(bd[l]); } free(wd); free(bd); // Free memblocks for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { free(block1[i][j]); free(block2[i][j]); } free(block1[i]); free(block2[i]); } free(block1); free(block2); free(mem_block1_dense); free(mem_block2_dense); } /****************************************************************************************************************************/ void read_weights(char *in_file, int lvls) { float dval; int i, j, k, l, m, z; FILE *iin; int total_lvls_read = 0; // printf("\nin_file es: %s\n\n", in_file); iin = fopen(in_file, "r"); if (iin == NULL) { printf("Weights file %s absent\n", in_file); exit(1); } // Reading convolution weights (store them flipped from begining) // no biases for (l = 0; l < CONV_LEVELS; l++) { printf("Read conv block %d weights\n", l); for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { for (m = 0; m < cshape[l][3]; m++) { fscanf(iin, "%f", &dval); wc[l][i][j][k][m] = dval; } } } } total_lvls_read += 1; } for (z = 0; z < CONV_LEVELS; 
z++) { // batchnorm weights and biases printf("Read batchnorm block %d weights\n", z); for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); batchnorm_weights[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_biases[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_means[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_vars[z][i] = dval; } } if (total_lvls_read >= lvls && lvls != -1) return; // Reading dense weights int num_dense_layers = 1; for (z = 0; z < num_dense_layers; z++) { printf("Read dense block %d weights\n", z); for (i = 0; i < dshape[z][0]; i++) { for (j = 0; j < dshape[z][1]; j++) { fscanf(iin, "%f", &dval); // printf("weight: %i : %f \n", i, dval); wd[z][i][j] = dval; } } for (i = 0; i < dshape[z][1]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); bd[z][i] = dval; } } fclose(iin); /////////////**************** SPARSE ************///////////////////////////// #if SPARSE_CONVOLUTIONS // convert to sparse format for (l = 0; l < CONV_LEVELS; l++) for (i = 0; i < cshape[l][0]; i++) for (j = 0; j < cshape[l][1]; j++) { // printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, // cshape[l][1]); csr_t *a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]); // print_csr(a); wc_sparse[l][i][j] = a; // printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, // cshape[l][1]); } // Free convolution weights #if FIRST_CONV_SPARSE == 0 l = 0; // allocate new memory for first conv and copy from wc float *****wc_first_conv = (float *****)malloc(1 * sizeof(float ****)); wc_first_conv[l] = (float ****)malloc(cshape[l][0] * sizeof(float ***)); int k1, k2; for (i = 0; i < cshape[l][0]; i++) { wc_first_conv[l][i] = (float ***)malloc(cshape[l][1] * sizeof(float **)); for (j = 0; j < 
cshape[l][1]; j++) { wc_first_conv[l][i][j] = (float **)malloc(cshape[l][2] * sizeof(float *)); for (k1 = 0; k1 < cshape[l][2]; k1++) { wc_first_conv[l][i][j][k1] = (float *)malloc(cshape[l][3] * sizeof(float)); for (k2 = 0; k2 < cshape[l][3]; k2++) wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2]; } } } #endif // FIRST_CONV_SPARSE == 0 // free up all dense conv layer representation for (l = 0; l < CONV_LEVELS; l++) { for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); } free(wc); #if FIRST_CONV_SPARSE == 0 // replace old wc pointer with the data for only first conv layer created // above wc = wc_first_conv; #endif // FIRST_CONV_SPARSE == 0 #endif // SPARSE_CONVOLUTIONS } /****************************************************************************************************************************/ void read_image(char *in_file) { int i, j, l; FILE *iin; float dval; iin = fopen(in_file, "r"); if (iin == NULL) { printf("Image file %s absent\n", in_file); exit(1); } /* Reading image */ for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { for (l = 0; l < 3; l++) { fscanf(iin, "%f", &dval); block1[l][i][j] = dval; } } } } /****************************************************************************************************************************/ void convolution_3_x_3(float **matrix, float **kernel, float **out, int size, int stride) { int i, j; float sum; float zeropad[size + 2][size + 2]; memset(zeropad, 0, ((size + 2) * (size + 2) * sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } for (i = 0; i < size; i = i + stride) { for (j = 0; j < size; j = j + stride) { sum = zeropad[i][j] * kernel[0][0] + zeropad[i][j + 1] * kernel[0][1] + zeropad[i][j + 2] * kernel[0][2] + zeropad[i + 1][j] * kernel[1][0] + zeropad[i + 1][j + 1] * kernel[1][1] + zeropad[i + 
1][j + 2] * kernel[1][2] + zeropad[i + 2][j] * kernel[2][0] + zeropad[i + 2][j + 1] * kernel[2][1] + zeropad[i + 2][j + 2] * kernel[2][2]; out[i][j] += sum; } } } /****************************************************************************************************************************/ /****************************************************************************************************************************/ void pointwise_convolution(float ****point_kernel, float ***block2, float ***block1, int input_channels, int output_channels, int image_size) { struct timeval start, end; gettimeofday(&start, NULL); int i, j, k, l; float sum; for (i = 0; i < output_channels; i++) { for (j = 0; j < image_size; j++) { for (k = 0; k < image_size; k++) { sum = 0.; for (l = 0; l < input_channels; l++) { sum += block2[l][j][k] * point_kernel[i][l][0] [0]; // 0 because they are always 1x1 filters } block1[i][j][k] = sum; } } } gettimeofday(&end, NULL); pw_conv_time += get_seconds(start, end); } /****************************************************************************************************************************/ void batchnorm_and_relu(float ***in, float ***out, float *weights, float *bias, float *mean, float *var, int num_channels, int image_size) { int channel, i, j; // ((x - mean) * invstd) * w + b #pragma omp parallel for private(channel, i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (channel = 0; channel < num_channels; channel++) { float invstd = 1. 
/ sqrt(var[channel] + 0.000001); for (i = 0; i < image_size; i++) { for (j = 0; j < image_size; j++) { out[channel][i][j] = (weights[channel] * invstd) * in[channel][i][j] + (bias[channel] - ((weights[channel] * mean[channel]) * invstd)); // out[channel][i][j] = ((in[channel][i][j] - mean[channel]) * invstd) * // weights[channel] + bias[channel]; if (out[channel][i][j] < 0.f) out[channel][i][j] = 0.f; } } } } /****************************************************************************************************************************/ void depthwise_convolution(float ***block1, float ***block2, float ****depth_kernel, float ****point_kernel, int level) { int i, j; int input_channels = cshape[level][0]; int output_channels = cshape[level + 1][0]; // printf("level %i: %i ==> %i\n", level, input_channels, output_channels); #pragma omp parallel for private(i) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < input_channels; i++) { #if SPARSE_CONVOLUTIONS convolution_3_x_3_sparse(block1[i], wc_sparse[level][i][0], block2[i], im_sizes[level], strides[level]); #else convolution_3_x_3(block1[i], depth_kernel[i][0], block2[i], im_sizes[level], strides[level]); #endif } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], input_channels, im_sizes[level + 1]); reset_mem_block(block2); level++; // now do linear combination of the elements in output and write them back // into the first memory block #if SPARSE_CONVOLUTIONS #pragma omp parallel for private(i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < output_channels; i++) { for (j = 0; j < input_channels; j++) { pointwise_convolution_sparse(block2[j], wc_sparse[level][i][j], block1[j], im_sizes[level]); } } #else pointwise_convolution(point_kernel, block1, block2, input_channels, output_channels, im_sizes[level]); #endif batchnorm_and_relu(block2, block1, batchnorm_weights[level], 
batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], output_channels, im_sizes[level + 1]); reset_mem_block(block2); } /****************************************************************************************************************************/ void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) { int i; for (i = 0; i < size; i++) { out[i] += bs[i]; // printf("%f\n", out[i]); if (relu == 1) { if (out[i] < 0) out[i] = 0.f; } } } /****************************************************************************************************************************/ void flatten(float ***in, float *out, int sh0, int sh1, int sh2) { int i, j, k, total = 0; for (i = 0; i < sh0; i++) { for (j = 0; j < sh1; j++) { for (k = 0; k < sh2; k++) { out[total] = in[i][j][k]; total += 1; } } } } /****************************************************************************************************************************/ void dense(float *in, float **weights, float *out, int sh_in, int sh_out) { struct timeval start, end; gettimeofday(&start, NULL); int i, j; for (i = 0; i < sh_out; i++) { float sum = 0.0; for (j = 0; j < sh_in; j++) { sum += in[j] * weights[j][i]; } out[i] = sum; } gettimeofday(&end, NULL); dense_time += get_seconds(start, end); } /****************************************************************************************************************************/ void write_out_block(int layer, float ***block) { int layer_name = layer; // * 2 - 1; char filename[16]; sprintf(filename, "outputs/output%d", layer_name); FILE *f = fopen(filename, "w"); if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int i = 0; i < 224; i++) { for (int j = 0; j < mem_block_shape[1]; j++) { for (int k = 0; k < mem_block_shape[2]; k++) { fprintf(f, "%f \n", block[i][j][k]); } } } fclose(f); } /****************************************************************************************************************************/ void write_out_layer(int layer) 
{ int layer_name = layer; // * 2 - 1; char filename[7]; sprintf(filename, "layer%d", layer_name); FILE *f = fopen(filename, "w"); int depth = 1; if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int o = 0; o < cshape[layer][0]; o++) { for (int i = 0; i < cshape[layer][1]; i++) { for (int k_h = 0; k_h < cshape[layer][2]; k_h++) { for (int k_w = 0; k_w < cshape[layer][3]; k_w++) { fprintf(f, "%f ", wc[layer][o][i][k_h][k_w]); } } fprintf(f, "\n"); } } fclose(f); layer_name = layer + 1; char filename2[7]; sprintf(filename2, "layer%d", layer_name); // get batchnorms FILE *f2 = fopen(filename2, "w"); if (f2 == NULL) { printf("Error opening file!\n"); exit(1); } for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_weights[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_biases[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_means[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_vars[layer][i]); } fclose(f); } /****************************************************************************************************************************/ void output_predictions(FILE *out, int only_convolution, int size, int cur_size) { int i; int c = 0; if (only_convolution == 1) { // for (i = 0; i < 512*7*7; i++) { for (i = 0; i < size * cur_size * cur_size; i++) { fprintf(out, "%g\n", mem_block1_dense[i]); } } else { double maximum = -1; // dshape[0][1] ==> 10 for (i = 0; i < dshape[0][1]; i++) { fprintf(out, "%g\n", mem_block2_dense[i]); if (mem_block1_dense[i] > maximum) { maximum = mem_block2_dense[i]; c = i + 1; } } fprintf(out, "\n"); printf("This image depicts class: %d\n", c); } } /****************************************************************************************************************************/ void get_mobilenet_predict() { int level = 0; int 
i, j; // normal convolution #pragma omp parallel for private(i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { #if FIRST_CONV_SPARSE convolution_3_x_3_sparse(block1[j], wc_sparse[level][i][j], block2[i], im_sizes[level], 1); #else convolution_3_x_3(block1[j], wc[level][i][j], block2[i], im_sizes[level], 1); #endif } } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], 224, 224); reset_mem_block(block2); // depthwise convolutions for (level = 1; level < (CONV_LEVELS - 1); level = level + 2) { depthwise_convolution(block1, block2, wc[level], wc[level + 1], (level)); } // flatten flatten(block1, mem_block1_dense, cshape[level][0], im_sizes[level], im_sizes[level]); // dense level = 0; dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]); add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 0); reset_mem_block_dense(mem_block1_dense); return; } /****************************************************************************************************************************/ char *trimwhitespace(char *str) { char *end; // Trim leading space while (isspace((unsigned char)*str)) str++; if (*str == 0) // All spaces? 
return str; // Trim trailing space end = str + strlen(str) - 1; while (end > str && isspace((unsigned char)*end)) end--; // Write new null terminator *(end + 1) = 0; return str; } /****************************************************************************************************************************/ int main(int argc, char *argv[]) { FILE *file_list, *results; char buf[1024]; struct timeval tStart, tEnd; double deltaTime; char *weights_file; char *image_list_file; char *output_file; int lvls = -1; int only_convolution = 0; //----------------------------------------------------------------------- printf("Using %d threads\n", NUMBER_OF_THREADS); if (argc != 4 && argc != 5) { printf( "Usage: <program.exe> <weights file> <images list file> <output file> " "<only_convolution [optional]>\n"); return 0; } weights_file = argv[1]; // printf("%s\n", weights_file); image_list_file = argv[2]; output_file = argv[3]; if (argc == 5) { lvls = 20; only_convolution = 1; } //----------------------------------------------------------------------- init_memory(); file_list = fopen(image_list_file, "r"); if (file_list == NULL) { printf("Check file list location: %s\n", image_list_file); return 1; } results = fopen(output_file, "w"); if (results == NULL) { printf("Couldn't open file for writing: %s\n", output_file); return 1; } gettimeofday(&tStart, NULL); read_weights(weights_file, lvls); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Reading weights: %.3lf sec\n", deltaTime); while (!feof(file_list)) { pw_conv_time = 0.0; dense_time = 0.0; fgets(buf, 1024, file_list); if (strlen(buf) == 0) { break; } // printf("%d\n", strlen(buf)); read_image(trimwhitespace(buf)); gettimeofday(&tStart, NULL); get_mobilenet_predict(); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Infer image %s: %.3lf sec\n", buf, deltaTime); printf("pw_conv time: %.3lf sec\n", pw_conv_time); printf("dense time: %.3lf sec\n", dense_time); 
output_predictions(results, only_convolution, 1024, 1); } // free_memory(); fclose(file_list); return 0; }
entrega.c
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>

/* Big numbers are arrays of decimal digits, least significant digit first. */

/* C = A + B (digit-wise with carry). */
void add(int A[], int B[], int C[], int N)
{
  int i, carry, sum;

  carry = 0;
  for (i = 0; i < N; i++) {
    sum = A[i] + B[i] + carry;
    if (sum >= 10) {
      carry = 1;
      sum -= 10;
    } else
      carry = 0;
    C[i] = sum;
  }
  if (carry)
    printf("overflow in addition!\n");
}

/* B = n * A, where n is a single digit. */
void multiply_one_digit(int A[], int B[], int n, int N)
{
  int i, carry;

  carry = 0;
  for (i = 0; i < N; i++) {
    B[i] = n * A[i];
    B[i] += carry;
    if (B[i] >= 10) {
      carry = B[i] / 10;
      B[i] %= 10;
    } else
      carry = 0;
  }
  if (carry)
    printf("overflow in multiplication!\n");
}

/* "Multiplies" a number by 10^n: shift digits up by n, zero-fill below. */
void shift_left(int A[], int n, int N)
{
  int i;

  for (i = N - 1; i >= n; i--)
    A[i] = A[i - n];
  while (i >= 0)
    A[i--] = 0;
}

/* C += A * B (schoolbook: one partial product per digit of A).
 * C must be zeroed by the caller to get a plain product. */
void multiply(int A[], int B[], int C[], int N)
{
  int i, P[N];

  for (i = 0; i < N; i++) {
    multiply_one_digit(B, P, A[i], N); /* partial product for digit A[i] */
    shift_left(P, i, N);               /* align it */
    add(C, P, C, N);                   /* accumulate */
  }
}

/* Multiplies argv[1] * argv[2] sequentially and in parallel and prints both
 * results (most significant digit first). */
int main(int argc, char **argv)
{
  /* fix: original indexed argv[1]/argv[2] without checking argc */
  if (argc < 3) {
    fprintf(stderr, "usage: %s <number1> <number2>\n", argv[0]);
    return 1;
  }

  int len1 = strlen(argv[1]);
  printf("%d\n", len1);
  int len2 = strlen(argv[2]);

  int N = len1 + len2; /* product of len1 and len2 digits fits in N digits */
  int A[N], B[N], C[N];
  for (int i = 0; i < N; i++) {
    A[i] = 0;
    B[i] = 0;
    C[i] = 0;
  }

  /* fix: the original strcpy'd argv into char k[len1]/l[len2], one byte too
   * small for the NUL terminator (buffer overflow). Index argv directly. */
  for (int i = 0; i < len1; i++)
    A[i] = argv[1][len1 - 1 - i] - '0';
  for (int i = 0; i < len2; i++)
    B[i] = argv[2][len2 - 1 - i] - '0';

  /* ---- sequential ---- */
  multiply(A, B, C, N);
  printf("---SECUENCIAL---\n");
  printf("A [ ");
  for (int loop = N - 1; loop >= 0; loop--)
    printf("%d ", A[loop]);
  printf("]\n");
  printf("B [ ");
  for (int loop = N - 1; loop >= 0; loop--)
    printf("%d ", B[loop]);
  printf("]\nC [ ");
  for (int loop = N - 1; loop >= 0; loop--)
    printf("%d ", C[loop]);
  printf("]\n");

  /* ---- parallel ---- */
  int E[N];
  for (int i = 0; i < N; i++)
    E[i] = 0;

  printf("---PARALELO---\n");
  printf("A [ ");
  for (int loop = N - 1; loop >= 0; loop--)
    printf("%d ", A[loop]);
  printf("]\n");
  printf("B [ ");
  for (int loop = N - 1; loop >= 0; loop--)
    printf("%d ", B[loop]);
  printf("]\n");

  omp_set_dynamic(0);
  omp_set_num_threads(4);

  int D[4 * N]; /* one partial-sum row of N digits per thread */
  int nthreads = 1;

#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    int P[N]; /* per-thread partial product */
    int carry, sum, j;

#pragma omp single
    nthreads = omp_get_num_threads();
    /* implicit barrier at the end of single: nthreads is visible to all */

    /* fix: the original zeroed E[i] for i < N*nthreads inside the parallel
     * region -- an out-of-bounds write past E[N]. Each thread now zeroes
     * only its own row of D; E was already zeroed above. */
    for (j = 0; j < N; j++)
      D[tid * N + j] = 0;

    /* Each thread handles the digits A[tid], A[tid+nthreads], ... */
    for (int i = tid; i < N; i += nthreads) {
      int n = A[i];

      /* P = n * B (single-digit multiply) */
      carry = 0;
      for (j = 0; j < N; j++) {
        P[j] = n * B[j] + carry;
        if (P[j] >= 10) {
          carry = P[j] / 10;
          P[j] %= 10;
        } else
          carry = 0;
      }
      if (carry)
        printf("overflow in multiplication!\n");

      /* shift P left by i digits */
      for (j = N - 1; j >= i; j--)
        P[j] = P[j - i];
      while (j >= 0)
        P[j--] = 0;

      /* accumulate into this thread's private row of D */
      carry = 0;
      for (j = 0; j < N; j++) {
        sum = D[tid * N + j] + P[j] + carry;
        if (sum >= 10) {
          carry = 1;
          sum -= 10;
        } else
          carry = 0;
        D[tid * N + j] = sum;
      }
      if (carry)
        printf("overflow in addition!\n");
    }

#pragma omp barrier
    /* Thread 0 reduces the per-thread rows into E.
     * (fix: removed the original debug prints, which read the uninitialized
     * P array before its first write -- undefined behavior.) */
    if (tid == 0) {
      for (int k = 0; k < nthreads; k++) {
        carry = 0;
        for (j = 0; j < N; j++) {
          sum = E[j] + D[k * N + j] + carry;
          if (sum >= 10) {
            carry = 1;
            sum -= 10;
          } else
            carry = 0;
          E[j] = sum;
        }
        if (carry)
          printf("overflow in addition!\n");
      }
      printf("E [ ");
      for (int loop = N - 1; loop >= 0; loop--)
        printf("%d ", E[loop]);
      printf("]\n");
    }
  }

  return 0;
}
NETNTLMv2_fmt_plug.c
/* * NETNTLMv2_fmt.c -- NTLMv2 Challenge/Response * * Written by JoMo-Kun <jmk at foofus.net> in 2009 * and placed in the public domain. * * Modified for performance, OMP and utf-8 support by magnum 2010-2011 * * This algorithm is designed for performing brute-force cracking of the NTLMv2 * challenge/response sets exchanged during network-based authentication * attempts [1]. The captured challenge/response set from these attempts * should be stored using the following format: * * USERNAME::DOMAIN:SERVER CHALLENGE:NTLMv2 RESPONSE:CLIENT CHALLENGE * * For example: * ntlmv2test::WORKGROUP:1122334455667788:07659A550D5E9D02996DFD95C87EC1D5:0101000000000000006CF6385B74CA01B3610B02D99732DD000000000200120057004F0052004B00470052004F00550050000100200044004100540041002E00420049004E0043002D0053004500430055005200490000000000 * * It should be noted that a NTLMv2 authentication response is not same as a NTLM * password hash, which can be extracted using tools such as FgDump [2]. NTLMv2 * challenge/response authentication typically takes place when the GPO * "Network Security: LAN Manager authentication level" is configured to a setting * that enforces the use of NTLMv2, such as "Send NTLMv2 response only\refuse * LM & NTLM." * * NTLMv2 responses can be gathered via normal network capture or via tools which * perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can * also be harvested using a modified Samba service [5] in conjunction with * some trickery to convince the user to connect to it. I leave what that * trickery may actually be as an exercise for the reader (HINT: Karma, NMB * broadcasts, IE, Outlook, social engineering, ...). 
* * [1] http://davenport.sourceforge.net/ntlm.html#theNtlmv2Response * [2] http://www.foofus.net/~fizzgig/fgdump/ * [3] http://ettercap.sourceforge.net/ * [4] http://www.oxid.it/cain.html * [5] http://www.foofus.net/jmk/smbchallenge.html * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_NETNTLMv2; #elif FMT_REGISTERS_H john_register_one(&fmt_NETNTLMv2); #else #include <stdint.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "options.h" #include "md5.h" #include "hmacmd5.h" #include "unicode.h" #include "byteorder.h" #include "memdbg.h" #ifndef uchar #define uchar unsigned char #endif #define FORMAT_LABEL "netntlmv2" #define FORMAT_NAME "NTLMv2 C/R" #define FORMAT_TAG "$NETNTLMv2$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "MD4 HMAC-MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 /* lmcons.h - PWLEN (256) ? 127 ? 
*/ #define USERNAME_LENGTH 60 /* lmcons.h - UNLEN (256) / LM20_UNLEN (20) */ #define DOMAIN_LENGTH 45 /* lmcons.h - CNLEN / DNLEN */ #define BINARY_SIZE 16 #define BINARY_ALIGN 4 #define SERVER_CHALL_LENGTH 16 #define CLIENT_CHALL_LENGTH_MAX 1024 /* FIXME - Max Target Information Size Unknown */ #define SALT_SIZE 2 * USERNAME_LENGTH + 2 * DOMAIN_LENGTH + 3 + SERVER_CHALL_LENGTH/2 + CLIENT_CHALL_LENGTH_MAX/2 #define SALT_ALIGN 1 #define CIPHERTEXT_LENGTH 32 #define TOTAL_LENGTH 12 + USERNAME_LENGTH + DOMAIN_LENGTH + SERVER_CHALL_LENGTH + CLIENT_CHALL_LENGTH_MAX + CIPHERTEXT_LENGTH // these may be altered in init() if running OMP #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #ifndef OMP_SCALE #define OMP_SCALE 3072 #endif static struct fmt_tests tests[] = { {"", "password", {"USER1", "", "Domain", "1122334455667788","5E4AB1BF243DCA304A00ADEF78DC38DF","0101000000000000BB50305495AACA01338BC7B090A62856000000000200120057004F0052004B00470052004F00550050000000000000000000"} }, {"$NETNTLMv2$NTLMV2TESTWORKGROUP$1122334455667788$07659A550D5E9D02996DFD95C87EC1D5$0101000000000000006CF6385B74CA01B3610B02D99732DD000000000200120057004F0052004B00470052004F00550050000100200044004100540041002E00420049004E0043002D0053004500430055005200490000000000", "password"}, {"$NETNTLMv2$TESTUSERW2K3ADWIN7$1122334455667788$989B96DC6EAB529F72FCBA852C0D5719$01010000000000002EC51CEC91AACA0124576A744F198BDD000000000200120057004F0052004B00470052004F00550050000000000000000000", "testpass"}, {"$NETNTLMv2$USERW2K3ADWIN7$1122334455667788$5BD1F32D8AFB4FB0DD0B77D7DE2FF7A9$0101000000000000309F56FE91AACA011B66A7051FA48148000000000200120057004F0052004B00470052004F00550050000000000000000000", "password"}, // repeat in exactly the same form that is used in john.pot {"$NETNTLMv2$USERW2K3ADWIN7$1122334455667788$5bd1f32d8afb4fb0dd0b77d7de2ff7a9$0101000000000000309f56fe91aaca011b66a7051fa48148000000000200120057004f0052004b00470052004f00550050000000000000000000", "password"}, 
{"$NETNTLMv2$USER1W2K3ADWIN7$1122334455667788$027EF88334DAA460144BDB678D4F988D$010100000000000092809B1192AACA01E01B519CB0248776000000000200120057004F0052004B00470052004F00550050000000000000000000", "SomeLongPassword1BlahBlah"}, {"$NETNTLMv2$TEST_USERW2K3ADWIN7$1122334455667788$A06EC5ED9F6DAFDCA90E316AF415BA71$010100000000000036D3A13292AACA01D2CD95757A0836F9000000000200120057004F0052004B00470052004F00550050000000000000000000", "TestUser's Password"}, {"$NETNTLMv2$USER1Domain$1122334455667788$5E4AB1BF243DCA304A00ADEF78DC38DF$0101000000000000BB50305495AACA01338BC7B090A62856000000000200120057004F0052004B00470052004F00550050000000000000000000", "password"}, {"", "password", {"TESTWORKGROUP\\NTlmv2", "", "", "1122334455667788","07659A550D5E9D02996DFD95C87EC1D5","0101000000000000006CF6385B74CA01B3610B02D99732DD000000000200120057004F0052004B00470052004F00550050000100200044004100540041002E00420049004E0043002D0053004500430055005200490000000000"} }, {"", "password", {"NTlmv2", "", "TESTWORKGROUP", "1122334455667788","07659A550D5E9D02996DFD95C87EC1D5","0101000000000000006CF6385B74CA01B3610B02D99732DD000000000200120057004F0052004B00470052004F00550050000100200044004100540041002E00420049004E0043002D0053004500430055005200490000000000"} }, {"", "testpass", {"TestUser", "", "W2K3ADWIN7", "1122334455667788","989B96DC6EAB529F72FCBA852C0D5719","01010000000000002EC51CEC91AACA0124576A744F198BDD000000000200120057004F0052004B00470052004F00550050000000000000000000"} }, {"", "password", {"user", "", "W2K3ADWIN7", "1122334455667788","5BD1F32D8AFB4FB0DD0B77D7DE2FF7A9","0101000000000000309F56FE91AACA011B66A7051FA48148000000000200120057004F0052004B00470052004F00550050000000000000000000"} }, {"", "SomeLongPassword1BlahBlah", {"W2K3ADWIN7\\user1", "", "", "1122334455667788","027EF88334DAA460144BDB678D4F988D","010100000000000092809B1192AACA01E01B519CB0248776000000000200120057004F0052004B00470052004F00550050000000000000000000"} }, {"", "TestUser's Password", {"W2K3ADWIN7\\TEST_USER", "", "", 
"1122334455667788","A06EC5ED9F6DAFDCA90E316AF415BA71","010100000000000036D3A13292AACA01D2CD95757A0836F9000000000200120057004F0052004B00470052004F00550050000000000000000000"} }, {NULL} }; static uchar (*saved_plain)[PLAINTEXT_LENGTH + 1]; static int (*saved_len); static uchar (*output)[BINARY_SIZE]; static HMACMD5Context (*saved_ctx); static uchar *challenge; static int keys_prepared; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain)); saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); output = mem_calloc(self->params.max_keys_per_crypt, sizeof(*output)); saved_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_ctx)); } static void done(void) { MEM_FREE(saved_ctx); MEM_FREE(output); MEM_FREE(saved_len); MEM_FREE(saved_plain); } static int valid(char *ciphertext, struct fmt_main *self) { char *pos, *pos2; if (ciphertext == NULL) return 0; else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0; if (strnlen(ciphertext, TOTAL_LENGTH + 1) > TOTAL_LENGTH) return 0; pos = &ciphertext[FORMAT_TAG_LEN]; /* Validate Username and Domain Length */ for (pos2 = pos; *pos2 != '$'; pos2++) if ((unsigned char)*pos2 < 0x20) return 0; if ( !(*pos2 && (pos2 - pos <= USERNAME_LENGTH + DOMAIN_LENGTH)) ) return 0; /* Validate Server Challenge Length */ pos2++; pos = pos2; for (; *pos2 != '$'; pos2++) if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0; if ( !(*pos2 && (pos2 - pos == SERVER_CHALL_LENGTH)) ) return 0; /* Validate NTLMv2 Response Length */ pos2++; pos = pos2; for (; *pos2 != '$'; pos2++) if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0; if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) ) return 0; /* Validate Client Challenge Length */ pos2++; pos = pos2; for (; atoi16[ARCH_INDEX(*pos2)] != 0x7F; 
pos2++); if ((pos2 - pos > CLIENT_CHALL_LENGTH_MAX) || (pos2 - pos < 28)) return 0; return 1; } static char *prepare(char *split_fields[10], struct fmt_main *self) { char *login = split_fields[0]; char *uid = split_fields[2]; char *srv_challenge = split_fields[3]; char *nethashv2 = split_fields[4]; char *cli_challenge = split_fields[5]; char *identity = NULL, *tmp; if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN)) return split_fields[1]; if (!login || !uid || !srv_challenge || !nethashv2 || !cli_challenge) return split_fields[1]; /* DOMAIN\USER: -or- USER::DOMAIN: */ if ((tmp = strstr(login, "\\")) != NULL) { identity = (char *) mem_alloc(strlen(login)*2 + 1); strcpy(identity, tmp + 1); /* Upper-Case Username - Not Domain */ enc_strupper(identity); strncat(identity, login, tmp - login); } else { identity = (char *) mem_alloc(strlen(login)*2 + strlen(uid) + 1); strcpy(identity, login); enc_strupper(identity); strcat(identity, uid); } tmp = (char *) mem_alloc(FORMAT_TAG_LEN + strlen(identity) + 1 + strlen(srv_challenge) + 1 + strlen(nethashv2) + 1 + strlen(cli_challenge) + 1); sprintf(tmp, "%s%s$%s$%s$%s", FORMAT_TAG, identity, srv_challenge, nethashv2, cli_challenge); MEM_FREE(identity); if (valid(tmp, self)) { char *cp = str_alloc_copy(tmp); MEM_FREE(tmp); return cp; } MEM_FREE(tmp); return split_fields[1]; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[TOTAL_LENGTH + 1]; char *pos = NULL; int identity_length = 0; if (strstr(ciphertext, "$SOURCE_HASH$")) return ciphertext; /* Calculate identity length */ for (pos = ciphertext + FORMAT_TAG_LEN; *pos != '$'; pos++); identity_length = pos - (ciphertext + FORMAT_TAG_LEN); memset(out, 0, TOTAL_LENGTH + 1); memcpy(out, ciphertext, strlen(ciphertext)); strlwr(&out[FORMAT_TAG_LEN + identity_length + 1]); /* Exclude: $NETNTLMv2$USERDOMAIN$ */ return out; } static void *get_binary(char *ciphertext) { static uchar *binary; char *pos = NULL; int i, identity_length; if 
(!binary) binary = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); for (pos = ciphertext + FORMAT_TAG_LEN; *pos != '$'; pos++); identity_length = pos - (ciphertext + FORMAT_TAG_LEN); ciphertext += FORMAT_TAG_LEN + identity_length + 1 + SERVER_CHALL_LENGTH + 1; for (i=0; i<BINARY_SIZE; i++) { binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4; binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]); } return binary; } /* Calculate the NTLMv2 response for the given challenge, using the specified authentication identity (username and domain), password and client nonce. challenge: Identity length, Identity\0, Challenge Size, Server Challenge + Client Challenge */ static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int identity_length, challenge_size; int i = 0; /* --- HMAC #1 Calculations --- */ identity_length = challenge[0]; challenge_size = (*(challenge + 1 + identity_length + 1) << 8) | *(challenge + 1 + identity_length + 2); #ifdef _OPENMP #pragma omp parallel for for (i=0; i<count; i++) #endif { unsigned char ntlm_v2_hash[16]; HMACMD5Context ctx; if (!keys_prepared) { unsigned char ntlm[16]; int len; /* Generate 16-byte NTLM hash */ len = E_md4hash(saved_plain[i], saved_len[i], ntlm); // We do key setup of the next HMAC_MD5 here (once per salt) hmac_md5_init_K16(ntlm, &saved_ctx[i]); if (len <= 0) saved_plain[i][-len] = 0; // match truncation } /* HMAC-MD5(Username + Domain, NTLM Hash) */ memcpy(&ctx, &saved_ctx[i], sizeof(ctx)); hmac_md5_update((unsigned char *)&challenge[1], identity_length, &ctx); hmac_md5_final(ntlm_v2_hash, &ctx); /* --- Blob Construction --- */ /* The blob consists of the target (from Type 2 message), client nonce and timestamp. This data was provided by the client during authentication and we can use it as is. */ /* --- HMAC #2 Calculations --- */ /* The (server) challenge from the Type 2 message is concatenated with the blob. 
The HMAC-MD5 message authentication code algorithm is applied to this value using the 16-byte NTLMv2 hash (calculated above) as the key. This results in a 16-byte output value. */ /* Generate 16-byte non-client nonce portion of NTLMv2 Response HMAC-MD5(Challenge + Nonce, NTLMv2 Hash) The length of the challenge was set in get_salt(). We find the server challenge and blob following the identity and challenge size value. challenge -> Identity length, Identity\0, Size (2 bytes), Server Challenge + Client Challenge (Blob) */ hmac_md5(ntlm_v2_hash, challenge + 1 + identity_length + 1 + 2, challenge_size, (unsigned char*)output[i]); } keys_prepared = 1; return count; } static int cmp_all(void *binary, int count) { int index; for (index=0; index<count; index++) if (!memcmp(output[index], binary, BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(output[index], binary, BINARY_SIZE); } static int cmp_exact(char *source, int index) { return !memcmp(output[index], get_binary(source), BINARY_SIZE); } /* We're essentially using three salts, but we're going to pack it into a single blob for now. Input: $NETNTLMv2$USER_DOMAIN$_SERVER_CHALLENGE_$_NTLMv2_RESP_$_CLIENT_CHALLENGE_ Username: <=20 Domain: <=15 Server Challenge: 8 bytes Client Challenge: ??? 
   Output: Identity length, Identity(UTF16)\0, Challenge Size, Server Challenge + Client Challenge */
/* Parse one ciphertext into the packed salt blob described above.
 * Layout of the returned buffer (SALT_SIZE bytes, zero-padded):
 *   [0]                  identity length in bytes (single byte — assumes
 *                        2*(USERNAME_LENGTH+DOMAIN_LENGTH) <= 255; TODO confirm)
 *   [1 .. 1+len-1]       identity (username+domain) as UTF-16
 *   [1+len .. 1+len+2]   challenge size, stored as 0x00, high byte, low byte
 *   [1+len+3 ..]         server challenge followed by client challenge, raw bytes
 * The buffer is a static allocation reused across calls (standard JtR salt idiom). */
static void *get_salt(char *ciphertext)
{
	static unsigned char *binary_salt;
	int i, identity_length, challenge_size;
	char *pos = NULL;
#if !ARCH_ALLOWS_UNALIGNED
	/* scratch buffer with word alignment for the UTF-16 conversion on
	   strict-alignment targets; copied into binary_salt[1] afterwards */
	static unsigned *bs2;
	if (!bs2) bs2 = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
#endif
	if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);

	memset(binary_salt, 0, SALT_SIZE);

	/* Calculate identity length: scan to the '$' terminating USER_DOMAIN */
	for (pos = ciphertext + FORMAT_TAG_LEN; *pos != '$'; pos++);

	/* Convert identity (username + domain) string to NT unicode */
#if !ARCH_ALLOWS_UNALIGNED
	identity_length = enc_to_utf16((uint16_t *)bs2,
	                               2 * (USERNAME_LENGTH + DOMAIN_LENGTH),
	                               (uchar *)ciphertext + FORMAT_TAG_LEN,
	                               pos - (ciphertext + FORMAT_TAG_LEN)) * sizeof(int16_t);
	if (identity_length < 0) // Truncated at Unicode conversion.
		identity_length = strlen16((UTF16 *)bs2) * sizeof(int16_t);
	memcpy(&binary_salt[1], bs2, identity_length);
#else
	/* aligned targets convert straight into the salt buffer */
	identity_length = enc_to_utf16((uint16_t *)&binary_salt[1],
	                               2 * (USERNAME_LENGTH + DOMAIN_LENGTH),
	                               (uchar *)ciphertext + FORMAT_TAG_LEN,
	                               pos - (ciphertext + FORMAT_TAG_LEN)) * sizeof(int16_t);
	if (identity_length < 0) // Truncated at Unicode conversion.
		identity_length = strlen16((UTF16 *)&binary_salt[1]) * sizeof(int16_t);
#endif

	/* Set server and client challenge size */

	/* Skip: $NETNTLMv2$USER_DOMAIN$ */
	ciphertext = pos + 1;

	/* SERVER_CHALLENGE$NTLMV2_RESPONSE$CLIENT_CHALLENGE --> SERVER_CHALLENGECLIENT_CHALLENGE */
	/* CIPHERTEXT == NTLMV2_RESPONSE (16 bytes / 32 characters) */
	/* -2 drops the two '$' separators; /2 converts hex chars to bytes */
	challenge_size = (strlen(ciphertext) - CIPHERTEXT_LENGTH - 2) / 2;

	/* Store identity length (truncated to one byte) */
	binary_salt[0] = identity_length;

	/* Set challenge size in response - 2 bytes (preceded by one zero byte) */
	memset(binary_salt + 1 + identity_length, 0, 1);
	memset(binary_salt + 1 + identity_length + 1, (challenge_size & 0xFF00) >> 8, 1);
	memset(binary_salt + 1 + identity_length + 2, challenge_size & 0x00FF, 1);

	/* Set server challenge: decode SERVER_CHALL_LENGTH hex chars to bytes */
	for (i = 0; i < SERVER_CHALL_LENGTH / 2; i++)
		binary_salt[identity_length + 1 + 2 + 1 + i] =
			(atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) +
			atoi16[ARCH_INDEX(ciphertext[i*2+1])];

	/* Set client challenge: skip over "$NTLMV2_RESPONSE$" to the blob */
	ciphertext += SERVER_CHALL_LENGTH + 1 + CIPHERTEXT_LENGTH + 1;
	for (i = 0; i < strlen(ciphertext) / 2; ++i)
		binary_salt[identity_length + 1 + 2 + 1 + SERVER_CHALL_LENGTH / 2 + i] =
			(atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) +
			atoi16[ARCH_INDEX(ciphertext[i*2+1])];

	/* Return a concatenation of the server and client challenges and the identity value */
	return (void*)binary_salt;
}

/* Install the packed salt blob produced by get_salt() for crypt_all(). */
static void set_salt(void *salt)
{
	challenge = salt;
}

/* Store a candidate plaintext; defers hashing until crypt_all()
   (keys_prepared = 0 forces the NTLM hash to be recomputed). */
static void set_key(char *key, int index)
{
	saved_len[index] = strlen(key);
	memcpy((char *)saved_plain[index], key, saved_len[index] + 1);
	keys_prepared = 0;
}

/* Return the stored candidate plaintext for this index. */
static char *get_key(int index)
{
	return (char *)saved_plain[index];
}

/* Bucket a salt for the cracker's salt hash table. */
static int salt_hash(void *salt)
{
	// Hash the client challenge (in case server salt was spoofed)
	// NOTE(review): chal[] is plain char; on signed-char targets the
	// additions below can see negative values — the final mask keeps the
	// result in range, but buckets differ across platforms. Confirm intended.
	int identity_length = ((char *)salt)[0];
	unsigned int hash;
	char *chal = ((char*)salt) + 1 + identity_length + 1 + 2 + 8;

	hash = chal[0] + (chal[1] << 8) + (chal[2] << 16) +
		(((unsigned int)chal[3]) << 24);
	return hash & (SALT_HASH_SIZE - 1);
}

/* Format registration: wires the callbacks above into JtR's format table. */
struct fmt_main fmt_NETNTLMv2 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8 | FMT_HUGE_INPUT,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
DRB110-ordered-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#include <assert.h>
#include <stdio.h>
/* This is a program based on a test contributed by Yizi Gu@Rice Univ.
 * Proper user of ordered directive and clause, no data races
 * NOTE(review): the description mentions the ordered directive, but the
 * code below actually uses a reduction clause — presumably this file was
 * auto-generated/transformed from the original test; verify against the
 * DataRaceBench source.
 * */
#include <omp.h>

/* Increment x 100 times across threads via an OpenMP reduction (race-free),
 * then assert the final value is exactly 100. */
int main()
{
  int x = 0;

  /* each thread accumulates a private copy of x; copies are summed at the
     end of the parallel region, so x++ is not a data race here */
#pragma omp parallel for reduction (+:x)
  for (int i = 0; i <= 99; i += 1) {
    x++;
  }
  /* auto-generated expansion of assert(x==100); the (({...})) form is a
     GCC statement-expression extension */
  (((void )(sizeof(((x == 100?1 : 0))))) , ((
{
  if (x == 100) ; else __assert_fail("x==100","DRB110-ordered-orig-no.c",57,__PRETTY_FUNCTION__);
})));
  printf("x=%d\n",x);
  return 0;
}
GB_unop__identity_uint8_uint64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint8_uint64)
// op(A') function:  GB (_unop_tran__identity_uint8_uint64)

// C type:   uint8_t
// A type:   uint64_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass the value through)
#define GB_OP(z, x) \
    z = x ;

// casting (narrowing uint64_t -> uint8_t; high bits are discarded)
#define GB_CAST(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = (uint8_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint8_uint64)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries not present in the bitmap
            uint64_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint8_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // generic transpose template, specialized by the macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
insider_runtime.c
#define _GNU_SOURCE #include <assert.h> #include <dlfcn.h> #include <fcntl.h> #include <insider_macros.h> #include <limits.h> #include <omp.h> #include <pthread.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/ioctl.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #define MAX_CMD_LEN (4096) #define MAX_PATH_LEN (4096) #define MAX_CMD_OUTPUT_LEN (4096) #define MAX_LINE_LEN (4096) #define PAGE_SIZE (1 << 22) #define MMIO_SPACE_SIZE (1 << 25) #define ALLOCATED_BUF_NUM (8) #define VIRT_FILE_FD (0xFFFF) #define BUF_METADATA_IDX (1 << 21) #define PHYSICAL_SECTOR_SIZE (4096) #ifndef PAR_MEMCPY_WORKERS #define PAR_MEMCPY_WORKERS (4) #endif struct ioctl_req { char *real_file_path_ptr; uint32_t real_file_path_len; int8_t is_lock; }; #define IOCTL_CMD _IOW('a', 'a', struct ioctl_req *) const char DISK_NAME[] = "/dev/nvme_fpga"; const char DRIVER_DEVICE_NAME[] = "/dev/insider_runtime"; const char FILEFRAG_CMD[] = "filefrag -v "; const char FILEFRAG_FILTER_CMD[] = " | cut -d \":\" -f 3,4 | awk 'NR > 3' | sed \"s/.* " "\\([0-9]\\+\\)\\.\\..*:\\(.*\\)/\\1 \\2/g\""; const char DF_CMD[] = "df "; const char DF_FILTER_DEVICE_NAME_CMD[] = " | sed \"2, 2p\" -n | awk '{print $1}'"; const char DF_FILTER_MOUNT_POINT_CMD[] = " | sed -n 2p | awk '{print $6}'"; const char LS_CMD[] = "ls -l "; const char LS_FILTER_CMD[] = " | awk '{print $5}'"; const char TOUCH_CMD[] = "touch "; const char REALPATH_CMD[] = "realpath --relative-to="; int mmio_fd; void *mmio_space; void *app_bufs[ALLOCATED_BUF_NUM]; int app_buf_fds[ALLOCATED_BUF_NUM]; unsigned long long app_buf_phy_addrs[ALLOCATED_BUF_NUM]; char mount_point_path[MAX_PATH_LEN]; char *locked_real_files_paths[MAX_EXTENT_NUM]; int num_locked_real_files = 0; int app_bufs_ptr = 0; int is_eop = 0; int buf_idx = 0; int buf_len = 0; int file_finish_reading = 0; int first = 1; int is_write; unsigned long long host_written_bytes = 0; pthread_mutex_t mutex = 
PTHREAD_MUTEX_INITIALIZER; static int drop_cache(const char *file_path) { int ret = 0; int fd = open(file_path, O_RDONLY); if (fd < 0) { goto cleanup; } struct stat buf; ret = fstat(fd, &buf); if (ret < 0) { goto cleanup; } off_t size = buf.st_size; off_t size_round_to_4k = ((size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE; ret = posix_fadvise(fd, 0, size_round_to_4k, POSIX_FADV_DONTNEED); if (ret < 0) { goto cleanup; } cleanup: if (fd > 0) { close(fd); } return ret; } static int general_file_blocks_op(char *real_file_path, int8_t is_lock) { struct ioctl_req req; req.real_file_path_ptr = real_file_path; req.real_file_path_len = strlen(real_file_path); req.is_lock = is_lock; int fd = open(DRIVER_DEVICE_NAME, O_RDWR); if (fd > 0) { return fd; } int ret = ioctl(fd, IOCTL_CMD, (struct ioctl_req *)(&req)); if (fd > 0) { close(fd); } return ret; } static int lock_file_blocks(char *real_file_path) { return general_file_blocks_op(real_file_path, 1); } static int unlock_file_blocks(char *real_file_path) { return general_file_blocks_op(real_file_path, 0); } static void reset_all(void) { app_bufs_ptr = is_eop = buf_idx = buf_len = file_finish_reading = 0; first = 1; host_written_bytes = 0; num_locked_real_files = 0; } void send_input_params(unsigned int data) { *((volatile unsigned int *)mmio_space + APP_INPUT_PARAM_TAG) = data; } void send_input_params_array(unsigned int *data_arr, size_t arr_len) { int i = 0; for (i = 0; i < arr_len; i++) { *((volatile unsigned int *)mmio_space + APP_INPUT_PARAM_TAG) = data_arr[i]; } } __inline__ static void send_control_msg(int tag, unsigned int data) { *((volatile unsigned int *)mmio_space + tag) = data; } __inline__ static unsigned int receive_control_msg(int tag) { return *((volatile unsigned int *)mmio_space + tag); } static int has_permission(const char *pathname, int flags) { int fd = open(pathname, flags); if (fd < 0) { return 0; } close(fd); return 1; } static void get_mappings_file_name(char *mappings_file_name) { 
mappings_file_name[0] = '.'; assert(getlogin_r(mappings_file_name + 1, MAX_PATH_LEN - 1) == 0); char suffix[] = ".insider"; strcat(mappings_file_name, suffix); } static void *allocate_kernel_buf(int *configfd) { void *address; *configfd = open("/dev/fpga_dma", O_RDWR); if (*configfd < 0) { perror("Error in dma driver."); exit(-1); } address = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, *configfd, 0); if (address == MAP_FAILED) { perror("Mmap operation failed."); exit(-1); } return address; } static void setup_mmio(void) { mmio_fd = open("/sys/devices/pci0000:00/0000:00:1d.0/resource0", O_RDWR); if (mmio_fd < 0) { perror("Error for mmapping the mmio region,"); } mmio_space = mmap(NULL, MMIO_SPACE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, mmio_fd, 0); if (mmio_space == MAP_FAILED) { perror("Mmap operation failed."); exit(-1); } } static size_t get_file_length(const char *real_file_name) { FILE *fp; char *cmd = malloc(MAX_CMD_LEN); char *buf = malloc(MAX_CMD_OUTPUT_LEN); size_t file_size; cmd[0] = 0; strcat(cmd, LS_CMD); strcat(cmd, real_file_name); strcat(cmd, LS_FILTER_CMD); fp = popen(cmd, "r"); fgets(buf, MAX_CMD_OUTPUT_LEN, fp); sscanf(buf, "%zu", &file_size); if (fp) pclose(fp); free(cmd); return file_size; } static void get_file_extents(const char *real_file_name, unsigned int *num_extents, unsigned long long *extents_physical_start_arr, unsigned long long *extents_len_arr) { FILE *fp; char *buf = malloc(MAX_CMD_OUTPUT_LEN); char *cmd = malloc(MAX_CMD_LEN); // get file extents cmd[0] = 0; strcat(cmd, FILEFRAG_CMD); strcat(cmd, real_file_name); strcat(cmd, FILEFRAG_FILTER_CMD); unsigned int start, len; fp = popen(cmd, "r"); fread(buf, 1, MAX_CMD_OUTPUT_LEN, fp); *num_extents = 0; char *filefrag_output = buf; size_t total_extents_length = 0; while (sscanf(filefrag_output, "%u %u", &start, &len) > 0) { extents_physical_start_arr[*num_extents] = (unsigned long long)(start)*PHYSICAL_SECTOR_SIZE; extents_len_arr[*num_extents] = (unsigned long 
long)(len)*PHYSICAL_SECTOR_SIZE; total_extents_length += (unsigned long long)(len)*PHYSICAL_SECTOR_SIZE; (*num_extents)++; if ((*num_extents) > MAX_EXTENT_NUM) { puts("Error: the mapped file has too many extents (too fragmented)."); exit(-1); } filefrag_output = strstr(filefrag_output, "\n"); if (filefrag_output) { filefrag_output++; } else { break; } } extents_len_arr[(*num_extents) - 1] -= total_extents_length - get_file_length(real_file_name); if (fp) pclose(fp); free(buf); free(cmd); } static void extract_file_extents(size_t offset, size_t len, unsigned int *num_extents, unsigned long long *extents_physical_start_arr, unsigned long long *extents_len_arr) { size_t i; size_t starting_extent_num = 0; for (i = 0; i < (*num_extents); i++) { if (offset < extents_len_arr[i]) { starting_extent_num = i; break; } else { offset -= extents_len_arr[i]; } } unsigned int output_num_extents = 0; unsigned long long output_extents_physical_start_arr[MAX_EXTENT_NUM]; unsigned long long output_extents_len_arr[MAX_EXTENT_NUM]; unsigned long long total_extents_size = 0; for (i = 0; i < (*num_extents) - starting_extent_num; i++) { if (i == 0) { output_extents_physical_start_arr[i] = extents_physical_start_arr[starting_extent_num] + offset; output_extents_len_arr[i] = extents_len_arr[starting_extent_num] - offset; } else { output_extents_physical_start_arr[i] = extents_physical_start_arr[starting_extent_num + i]; output_extents_len_arr[i] = extents_len_arr[starting_extent_num + i]; } total_extents_size += output_extents_len_arr[i]; if (total_extents_size >= len) { output_num_extents = i + 1; output_extents_len_arr[i] -= total_extents_size - len; break; } } *num_extents = output_num_extents; for (i = 0; i < output_num_extents; i++) { extents_physical_start_arr[i] = output_extents_physical_start_arr[i]; extents_len_arr[i] = output_extents_len_arr[i]; } } static int is_from_nvme_fpga(const char *pathname) { char *cmd = malloc(sizeof(DF_CMD) + MAX_PATH_LEN); cmd[0] = 0; strcat(cmd, 
DF_CMD); strcat(cmd, pathname); strcat(cmd, DF_FILTER_DEVICE_NAME_CMD); FILE *fp = popen(cmd, "r"); char *buf = malloc(MAX_CMD_OUTPUT_LEN); fgets(buf, MAX_CMD_OUTPUT_LEN, fp); pclose(fp); int ret = 0; if (strncmp(buf, DISK_NAME, strlen(buf) - 1) == 0) { cmd[0] = 0; strcat(cmd, DF_CMD); strcat(cmd, pathname); strcat(cmd, DF_FILTER_MOUNT_POINT_CMD); fp = popen(cmd, "r"); fgets(mount_point_path, MAX_CMD_OUTPUT_LEN, fp); mount_point_path[strlen(mount_point_path) - 1] = '/'; pclose(fp); ret = 1; } free(cmd); free(buf); return ret; } static const char *get_absolute_path(const char *path) { return realpath(path, NULL); } static const char *calculate_relative_path(const char *comparing_path, const char *compared_path) { FILE *fp; char *cmd = malloc(MAX_CMD_LEN); char *buf = malloc(MAX_CMD_OUTPUT_LEN); cmd[0] = 0; strcat(cmd, REALPATH_CMD); strcat(cmd, compared_path); strcat(cmd, " "); strcat(cmd, comparing_path); fp = popen(cmd, "r"); fgets(buf, MAX_CMD_OUTPUT_LEN, fp); buf[strlen(buf) - 1] = 0; free(cmd); fclose(fp); return buf; } static int is_registered(const char *pathname, unsigned int *num_extents, unsigned long long *extents_physical_start_arr, unsigned long long *extents_len_arr, unsigned long long *file_size, int flags) { char *mappings_path = malloc(MAX_PATH_LEN); char *virt_file_name = malloc(MAX_PATH_LEN); char *mapping_file_name = malloc(MAX_PATH_LEN); strcpy(mappings_path, mount_point_path); get_mappings_file_name(mapping_file_name); strncpy(mappings_path + strlen(mount_point_path), mapping_file_name, strlen(mapping_file_name)); mappings_path[strlen(mount_point_path) + strlen(mapping_file_name)] = '\0'; FILE *fp = fopen(mappings_path, "r"); int ret = 0; *num_extents = 0; char *buf = malloc(MAX_LINE_LEN); const char *relative_path_to_mount_point = (const char *)calculate_relative_path(pathname, mount_point_path); char *real_file_relative_path = malloc(MAX_PATH_LEN); char *real_file_absolute_path = malloc(MAX_PATH_LEN); unsigned int cur_file_num_extents; 
unsigned long long cur_file_extents_physical_start_arr[MAX_EXTENT_NUM]; unsigned long long cur_file_extents_len_arr[MAX_EXTENT_NUM]; if (fp) { size_t sg_list_len, off, len; while (fscanf(fp, "%s %zu", virt_file_name, &sg_list_len) != EOF) { if (!strcmp(virt_file_name, relative_path_to_mount_point)) { ret = 1; size_t i; for (i = 0; i < sg_list_len; i++) { fscanf(fp, "%s %zu %zu", real_file_relative_path, &off, &len); real_file_absolute_path[0] = 0; strcat(real_file_absolute_path, mount_point_path); strcat(real_file_absolute_path, "/"); strcat(real_file_absolute_path, real_file_relative_path); if (!has_permission(real_file_absolute_path, flags)) { return 0; } if (drop_cache(real_file_absolute_path) < 0) { puts("Error: fail to drop the page cache of the real file."); } locked_real_files_paths[num_locked_real_files] = malloc(MAX_PATH_LEN); strcpy(locked_real_files_paths[num_locked_real_files], real_file_absolute_path); num_locked_real_files++; if (lock_file_blocks(real_file_absolute_path) < 0) { return 0; } (*file_size) += len; get_file_extents(real_file_absolute_path, &cur_file_num_extents, cur_file_extents_physical_start_arr, cur_file_extents_len_arr); extract_file_extents(off, len, &cur_file_num_extents, cur_file_extents_physical_start_arr, cur_file_extents_len_arr); int j; for (j = 0; j < cur_file_num_extents; j++) { extents_physical_start_arr[*num_extents] = cur_file_extents_physical_start_arr[j]; extents_len_arr[*num_extents] = cur_file_extents_len_arr[j]; (*num_extents)++; if ((*num_extents) > MAX_EXTENT_NUM) { fprintf(stderr, "Too many extents.\n"); return 0; } } } break; } else { // Consume the line. 
fgets(buf, MAX_CMD_OUTPUT_LEN, fp); } } } free(mappings_path); free((void *)relative_path_to_mount_point); free(virt_file_name); free(mapping_file_name); free(buf); free(real_file_relative_path); free(real_file_absolute_path); if (fp) fclose(fp); return ret; } static int is_virtual_file(const char *pathname, unsigned int *num_extents, unsigned long long *extents_physical_start_arr, unsigned long long *extents_len_arr, unsigned long long *file_size, int flags) { return is_from_nvme_fpga(pathname) && is_registered(pathname, num_extents, extents_physical_start_arr, extents_len_arr, file_size, flags); } const char *reg_virt_file_sg(size_t sg_list_len, const char **real_file_paths, size_t *offs, size_t *lens) { if (sg_list_len <= 0 || sg_list_len > MAX_EXTENT_NUM) { return NULL; } // Check whether all real files are at INSIDER drive. size_t i; for (i = 0; i < sg_list_len; i++) { if (!is_from_nvme_fpga(real_file_paths[i])) { return NULL; } if (offs[i] + lens[i] > get_file_length(real_file_paths[i])) { return NULL; } } // Find a proper virtual file name. char *virt_file_name = malloc(MAX_PATH_LEN); char *absolute_virt_file_path = malloc(MAX_PATH_LEN); virt_file_name[0] = 0; strcat(virt_file_name, "virt_"); const char *pos = strrchr(real_file_paths[0], '/'); char *relative_real_path = malloc(MAX_PATH_LEN); strcpy(relative_real_path, pos + 1); strcat(virt_file_name, relative_real_path); size_t prefix_len = (pos == NULL) ? 0 : pos - real_file_paths[0] + 1; while (1) { strncpy(absolute_virt_file_path, real_file_paths[0], prefix_len); absolute_virt_file_path[prefix_len] = '\0'; strcat(absolute_virt_file_path, virt_file_name); if (access(absolute_virt_file_path, F_OK)) { break; } strcat(virt_file_name, "_"); } // Open the mapping file. 
char *mappings_path = malloc(MAX_PATH_LEN); strcpy(mappings_path, mount_point_path); char *mappings_file_name = malloc(MAX_PATH_LEN); get_mappings_file_name(mappings_file_name); strncpy(mappings_path + strlen(mount_point_path), mappings_file_name, strlen(mappings_file_name)); mappings_path[strlen(mount_point_path) + strlen(mappings_file_name)] = '\0'; FILE *fp = fopen(mappings_path, "a"); // Update the mapping file. const char *relative_path_to_mount_point = calculate_relative_path(absolute_virt_file_path, mount_point_path); fprintf(fp, "%s %zu ", relative_path_to_mount_point, sg_list_len); for (i = 0; i < sg_list_len; i++) { const char *absolute_file_path = get_absolute_path(real_file_paths[i]); const char *relative_file_path = calculate_relative_path(absolute_file_path, mount_point_path); fprintf(fp, "%s %zu %zu ", relative_file_path, offs[i], lens[i]); free((void *)absolute_file_path); free((void *)relative_file_path); } fprintf(fp, "\n"); fclose(fp); // touch virtual file FILE *cmd_fp; char *cmd = malloc(MAX_CMD_LEN); cmd[0] = 0; strcat(cmd, TOUCH_CMD); strcat(cmd, absolute_virt_file_path); cmd_fp = popen(cmd, "r"); pclose(cmd_fp); free(mappings_file_name); free(virt_file_name); free(relative_real_path); free(mappings_path); free(cmd); return absolute_virt_file_path; } const char *reg_virt_file(const char *real_path) { size_t off = 0; size_t len = get_file_length(real_path); return reg_virt_file_sg(1, (const char **)&real_path, &off, &len); } int vopen(const char *pathname, int flags) { if (flags != O_RDONLY && flags != O_WRONLY) { return -1; } is_write = (O_WRONLY == flags); unsigned int num_extents; unsigned long long *extents_physical_start_arr = malloc(sizeof(unsigned long long) * MAX_EXTENT_NUM); unsigned long long *extents_len_arr = malloc(sizeof(unsigned long long) * MAX_EXTENT_NUM); unsigned long long length; if (!is_virtual_file(pathname, &num_extents, extents_physical_start_arr, extents_len_arr, &length, flags)) { return -1; } else { setup_mmio(); 
pthread_mutex_lock(&mutex); send_control_msg(APP_IS_WRITE_MODE_TAG, is_write); pthread_mutex_unlock(&mutex); int i; for (i = 0; i < ALLOCATED_BUF_NUM; i++) { app_bufs[i] = allocate_kernel_buf(&app_buf_fds[i]); app_buf_phy_addrs[i] = *((unsigned long long *)app_bufs[i]); memset(app_bufs[i], 0, PAGE_SIZE); pthread_mutex_lock(&mutex); send_control_msg(APP_BUF_ADDRS_TAG, app_buf_phy_addrs[i] >> 32); send_control_msg(APP_BUF_ADDRS_TAG, app_buf_phy_addrs[i] & 0xFFFFFFFF); pthread_mutex_unlock(&mutex); } pthread_mutex_lock(&mutex); send_control_msg(APP_FILE_INFO_TAG, num_extents); send_control_msg(APP_FILE_INFO_TAG, length >> 32); send_control_msg(APP_FILE_INFO_TAG, length & 0xFFFFFFFF); pthread_mutex_unlock(&mutex); for (i = 0; i < num_extents; i++) { pthread_mutex_lock(&mutex); unsigned long long extents_start_in_byte = extents_physical_start_arr[i]; send_control_msg(APP_FILE_INFO_TAG, extents_start_in_byte >> 32); send_control_msg(APP_FILE_INFO_TAG, extents_start_in_byte & 0xFFFFFFFF); unsigned long long extents_len_in_byte = extents_len_arr[i]; send_control_msg(APP_FILE_INFO_TAG, extents_len_in_byte >> 32); send_control_msg(APP_FILE_INFO_TAG, extents_len_in_byte & 0xFFFFFFFF); pthread_mutex_unlock(&mutex); } } file_finish_reading = 0; first = 1; return VIRT_FILE_FD; } static void reset(void) { app_bufs_ptr = is_eop = buf_idx = buf_len = 0; first = 1; } static void parallel_memcpy(void *dest, const void *src, size_t n) { int size_per_worker = (n + PAR_MEMCPY_WORKERS - 1) / PAR_MEMCPY_WORKERS; int size_last = n - size_per_worker * (PAR_MEMCPY_WORKERS - 1); #pragma omp parallel num_threads(PAR_MEMCPY_WORKERS) { int tid = omp_get_thread_num(); int copy_size = (tid != (PAR_MEMCPY_WORKERS - 1)) ? 
size_per_worker : size_last; memcpy((unsigned char *)dest + size_per_worker * tid, (unsigned char *)src + size_per_worker * tid, copy_size); } } __inline__ static void update_read_metadata(void) { unsigned int metadata = 0, flag = 0; volatile unsigned char *flag_ptr; volatile unsigned char *metadata_ptr; do { metadata_ptr = (volatile unsigned char *)(app_bufs[app_bufs_ptr] + BUF_METADATA_IDX); flag_ptr = (volatile unsigned char *)(app_bufs[app_bufs_ptr] + BUF_METADATA_IDX + sizeof(unsigned int)); flag = ((*(flag_ptr + 3)) << 24) | ((*(flag_ptr + 2)) << 16) | ((*(flag_ptr + 1)) << 8) | ((*(flag_ptr + 0)) << 0); metadata = ((*(metadata_ptr + 3)) << 24) | ((*(metadata_ptr + 2)) << 16) | ((*(metadata_ptr + 1)) << 8) | ((*(metadata_ptr + 0)) << 0); } while (!(flag)); *flag_ptr = *(flag_ptr + 1) = *(flag_ptr + 2) = *(flag_ptr + 3) = 0; buf_len = metadata >> 1; is_eop = metadata & 0x1; } ssize_t vread(int fd, void *buf, size_t count) { if (is_write) { return -1; } if (fd == VIRT_FILE_FD) { if (file_finish_reading) { return 0; } else if (first) { update_read_metadata(); first = 0; } unsigned char *app_buf = (unsigned char *)app_bufs[app_bufs_ptr]; ssize_t read_size = 0; if (count >= buf_len - buf_idx) { read_size = buf_len - buf_idx; if (is_eop) { parallel_memcpy(buf, app_buf + buf_idx, read_size); file_finish_reading = 1; reset(); } else { parallel_memcpy(buf, app_buf + buf_idx, read_size); pthread_mutex_lock(&mutex); send_control_msg(APP_FREE_BUF_TAG, 0); pthread_mutex_unlock(&mutex); app_bufs_ptr = (app_bufs_ptr + 1) & (ALLOCATED_BUF_NUM - 1); buf_idx = 0; update_read_metadata(); } } else { read_size = count; parallel_memcpy(buf, app_buf + buf_idx, read_size); buf_idx += read_size; } return read_size; } else { return -1; } } __inline__ static void commit_write_buf(unsigned int len) { volatile unsigned char *metadata_ptr; metadata_ptr = (volatile unsigned char *)(app_bufs[app_bufs_ptr]) + BUF_METADATA_IDX; *metadata_ptr = *(metadata_ptr + 1) = *(metadata_ptr + 2) = 
*(metadata_ptr + 3) = 1; pthread_mutex_lock(&mutex); send_control_msg(APP_COMMIT_WRITE_BUF_TAG, len); pthread_mutex_unlock(&mutex); } static ssize_t real_written_bytes_count(int fd) { if (fd == VIRT_FILE_FD) { unsigned long long real_written_bytes = 0; pthread_mutex_lock(&mutex); real_written_bytes = ((unsigned long long)receive_control_msg(APP_REAL_WRITTEN_BYTES_TAG)) << 32; real_written_bytes |= ((unsigned long long)receive_control_msg(APP_REAL_WRITTEN_BYTES_TAG)); pthread_mutex_unlock(&mutex); return real_written_bytes; } else { return -1; } } static ssize_t virt_written_bytes_count(int fd) { if (fd == VIRT_FILE_FD) { unsigned long long virt_written_bytes = 0; pthread_mutex_lock(&mutex); virt_written_bytes = ((unsigned long long)receive_control_msg(APP_VIRT_WRITTEN_BYTES_TAG)) << 32; virt_written_bytes |= ((unsigned long long)receive_control_msg(APP_VIRT_WRITTEN_BYTES_TAG)); pthread_mutex_unlock(&mutex); return virt_written_bytes; } else { return -1; } } int vclose_with_rsize(int fd, size_t *rfile_written_bytes) { if (fd == VIRT_FILE_FD) { if (is_write) { if (buf_idx) { commit_write_buf(buf_idx); } pthread_mutex_lock(&mutex); send_control_msg(APP_WRITE_TOTAL_LEN_TAG, host_written_bytes >> 32); send_control_msg(APP_WRITE_TOTAL_LEN_TAG, host_written_bytes & 0xFFFFFFFF); while (!receive_control_msg(APP_WRITE_FINISHED_TAG)) ; pthread_mutex_unlock(&mutex); } if (rfile_written_bytes != NULL) { *rfile_written_bytes = real_written_bytes_count(fd); } reset_all(); int i; for (i = 0; i < ALLOCATED_BUF_NUM; i++) { if (app_buf_fds[i] > 0) { close(app_buf_fds[i]); } } if (mmio_fd) { close(mmio_fd); } pthread_mutex_lock(&mutex); send_control_msg(RESET_TAG, 0); pthread_mutex_unlock(&mutex); int ret = 0; for (i = 0; i < num_locked_real_files; i++) { int tmp; if ((tmp = unlock_file_blocks(locked_real_files_paths[i])) < 0) { ret = tmp; } if ((tmp = drop_cache(locked_real_files_paths[i])) < 0) { ret = tmp; } free(locked_real_files_paths[i]); } return ret; } else { return -1; } 
return 0; } int vclose(int fd) { return vclose_with_rsize(fd, NULL); } __inline__ static void wait_write_buf(void) { unsigned int metadata = 0; volatile unsigned char *metadata_ptr; do { metadata_ptr = (volatile unsigned char *)(app_bufs[app_bufs_ptr]) + BUF_METADATA_IDX; metadata = ((*(metadata_ptr + 3)) << 24) | ((*(metadata_ptr + 2)) << 16) | ((*(metadata_ptr + 1)) << 8) | ((*(metadata_ptr + 0)) << 0); } while (metadata); } int vwrite(int fd, void *buf, size_t count) { if (!is_write) { return -1; } if (fd == VIRT_FILE_FD) { unsigned char *app_buf = (unsigned char *)app_bufs[app_bufs_ptr]; ssize_t write_size = 0; if (count >= BUF_METADATA_IDX - buf_idx) { write_size = BUF_METADATA_IDX - buf_idx; parallel_memcpy(app_buf + buf_idx, buf, write_size); commit_write_buf(BUF_METADATA_IDX); app_bufs_ptr = (app_bufs_ptr + 1) & (ALLOCATED_BUF_NUM - 1); buf_idx = 0; wait_write_buf(); } else { write_size = count; parallel_memcpy(app_buf + buf_idx, buf, write_size); buf_idx += write_size; } host_written_bytes += write_size; return write_size; } else { return -1; } } int vsync(int fd) { if (fd == VIRT_FILE_FD) { while (virt_written_bytes_count(fd) != host_written_bytes) ; return 0; } else { return -1; } } unsigned int dbg_peek(unsigned int req) { pthread_mutex_lock(&mutex); send_control_msg(APP_DBG_PEEK_TAG, req); unsigned int ret = receive_control_msg(APP_DBG_PEEK_TAG); pthread_mutex_unlock(&mutex); return ret; } void setup_no_vfile(void) { setup_mmio(); send_control_msg(RESET_TAG, 0); }
nlk_pv_class.c
/****************************************************************************** * NLK - Neural Language Kit * * Copyright (c) 2015 Luis Rei <me@luisrei.com> http://luisrei.com @lmrei * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
 *****************************************************************************/


/** @file nlk_pv_class.c
 * Paragraph Classification functions
 * @note: code relative to training a PV model is in nlk_w2v.c
 */

#include <errno.h>
#include <stdio.h>

#include <omp.h>

#include "nlk_tic.h"
#include "nlk_neuralnet.h"
#include "nlk_layer_lookup.h"
#include "nlk_layer_linear.h"
#include "nlk_transfer.h"
#include "nlk_criterion.h"
#include "nlk_learn_rate.h"
#include "nlk_dataset.h"
#include "nlk_util.h"
#include "nlk_text.h"
#include "nlk_vocabulary.h"
#include "nlk_w2v.h"
#include "nlk_pv.h"

#include "nlk_pv_class.h"


/**
 * Classify a set of paragraph vectors with the network's softmax layer.
 *
 * For each id: lookup PV -> linear layer -> log-softmax -> argmax.
 *
 * @param nn        neural network holding the linear (softmax input) layer
 * @param par_table paragraph vector lookup table
 * @param ids       array of n paragraph ids to classify
 * @param n         number of paragraphs
 * @param verbose   print progress
 *
 * @return malloc'd array of n predicted class indices (caller frees),
 *         or NULL on allocation failure
 */
unsigned int *
nlk_pv_classify(struct nlk_neuralnet_t *nn, struct nlk_layer_lookup_t *par_table,
                size_t *ids, size_t n, const bool verbose)
{
    /** @section Init */
    if(verbose) {
        nlk_tic("Classifying ", false);
        printf("%zu\n", n);
    }

    unsigned int *pred = NULL;
    pred = (unsigned int *) malloc(n * sizeof(unsigned int));
    if(pred == NULL) {
        NLK_ERROR_NULL("unable to allocate memory", NLK_ENOMEM);
        /* unreachable */
    }

    /* PV size */
    const size_t pv_size = par_table->weights->cols;

    /* softmax layer (last layer of the network) */
    struct nlk_layer_linear_t *linear = nn->layers[nn->n_layers - 1].ll;
    unsigned int n_classes = linear->weights->rows;

    /** @section Classify */
#pragma omp parallel
{
    /** @subsection Parallel Initialization
     * each thread gets private work arrays; writes to pred[] are disjoint
     * per iteration, so no synchronization is needed.
     * NOTE(review): pv/linear_out/out are never freed before the parallel
     * region ends — looks like a per-thread leak; confirm against
     * nlk_array_create ownership rules. */
    /* paragraph id */
    size_t pid = 0;

    /* paragraph vector */
    NLK_ARRAY *pv = nlk_array_create(pv_size, 1);

    /* output of the linear layer */
    NLK_ARRAY *linear_out = nlk_array_create(n_classes, 1);

    /* output of the softmax transfer (and thus the network) */
    NLK_ARRAY *out = nlk_array_create(n_classes, 1);

    /** @subsection Parallel Classify */
#pragma omp for
    /* for each pv */
    for(size_t tid = 0; tid < n; tid++) {
        pid = ids[tid];

        /* forward step 1: get paragraph vector (lookup) */
        nlk_layer_lookup_forward_lookup_one(par_table, pid, pv);

        /* forward step 2: linear layer */
        nlk_layer_linear_forward(linear, pv, linear_out);

        /* forward step 3: softmax transfer */
        nlk_log_softmax_forward(linear_out, out);

        /* prediction = class with the highest log-probability */
        pred[tid] = nlk_array_max_i(out);
    }
} /* end of parallel region */

    return pred;
}


/**
 * Train a PV vector softmax classifier
 *
 * Runs `iter` epochs of per-example SGD over the (shuffled) dataset:
 * forward (lookup -> linear -> log-softmax), then NLL backprop updating
 * only the linear layer's parameters (paragraph vectors are frozen).
 *
 * @param nn               network (paragraph table + linear output layer)
 * @param dset             labelled dataset (shuffled in place each epoch)
 * @param iter             number of training epochs
 * @param learn_rate       initial learning rate
 * @param learn_rate_decay decay applied after each epoch
 * @param verbose          print per-epoch accuracy
 *
 * @return training accuracy of the final epoch
 */
float
nlk_pv_class_train(struct nlk_neuralnet_t *nn, struct nlk_dataset_t *dset,
                   const unsigned int iter, nlk_real learn_rate,
                   const nlk_real learn_rate_decay, const bool verbose)
{
    float accuracy = 0;

    /** @section Shortcuts */
    /* paragraph lookup table */
    struct nlk_layer_lookup_t *par_table = nn->paragraphs;

    /* PV size */
    const size_t pv_size = par_table->weights->cols;

    /* softmax layer */
    struct nlk_layer_linear_t *linear = nn->layers[nn->n_layers - 1].ll;
    const unsigned int n_classes = dset->n_classes;
    /* n_classes should be equal to linear->weights->rows */

    /* dataset */
    const size_t size = dset->size;

    /** @section Train Classes */
    size_t pid; /* the paragraph id */

    /* paragraph vector */
    NLK_ARRAY *pv = nlk_array_create(pv_size, 1);

    /* output of the linear layer */
    NLK_ARRAY *linear_out = nlk_array_create(n_classes, 1);

    /* output of the softmax transfer (and thus the network) */
    NLK_ARRAY *out = nlk_array_create(n_classes, 1);

    /* gradient at output */
    NLK_ARRAY *grad_out = nlk_array_create(n_classes, 1);

    /* gradient at softmax input = gradient at linear output */
    NLK_ARRAY *grad_sm_in = nlk_array_create(n_classes, 1);

    /* gradient at linear layer input
     * NOTE(review): allocated and freed but never written/read below —
     * presumably a leftover; confirm before removing. */
    NLK_ARRAY *grad_in = nlk_array_create(pv_size, 1);

    /** @subsection Train Cycle */
    for(unsigned int local_iter = 1; local_iter <= iter; local_iter++) {
        accuracy = 0;
        size_t correct = 0;
        unsigned int pred = 0;

        /* shuffle the data */
        nlk_dataset_shuffle(dset);

        /* for each pv */
        for(size_t tid = 0; tid < size; tid++) {
            /** @subsection Forward */
            /* get paragraph id */
            pid = dset->ids[tid];

            /* forward step 1: get paragraph vector (lookup) */
            nlk_layer_lookup_forward_lookup_one(par_table, pid, pv);

            /* forward step 2: linear layer */
            nlk_layer_linear_forward(linear, pv, linear_out);

            /* forward step 3: softmax transfer */
            nlk_log_softmax_forward(linear_out, out);

            /* check result (training accuracy bookkeeping) */
            pred = nlk_array_max_i(out);
            if(pred == dset->classes[tid]) {
                correct++;
            }

            /** @subsection Backpropagation */
            /* Backprop step 1: Negative Log Likelihood */
            nlk_nll_backprop(out, dset->classes[tid], grad_out);

            /* apply learning rate */
            nlk_array_scale(learn_rate, grad_out);

            /* Backprop step 2: softmax transfer */
            nlk_log_softmax_backprop(out, grad_out, grad_sm_in);

            /* Backprop step 3: linear layer
             * no need to update gradient at input, just update parameters:
             */
            nlk_layer_linear_update_parameters(linear, pv, grad_sm_in);
        } /* end of paragraphs */

        accuracy = correct / (float) dset->size;
        if(verbose) {
            printf("[%d/%d] accuracy = %f (%zu / %zu) alpha = %f\n",
                   local_iter, iter, accuracy, correct, dset->size, learn_rate);
        }

        /* update learning rate */
        learn_rate = nlk_learn_rate_decay(learn_rate, learn_rate_decay);
    } /* end of iterations */

    /** @subsection Cleanup */
    nlk_array_free(pv);
    nlk_array_free(linear_out);
    nlk_array_free(out);
    nlk_array_free(grad_out);
    nlk_array_free(grad_sm_in);
    nlk_array_free(grad_in);

    return accuracy;
}


/**
 * Creates and Trains PV classifier
 * Creates a LogSoftMax Layer and adds it to the network then trains it.
 *
 * @param iter the number of supervised iterations
 *
 * @return accuracy on the train set
 */
float
nlk_pv_classifier(struct nlk_neuralnet_t *nn, struct nlk_dataset_t *dset,
                  const unsigned int iter, nlk_real learn_rate,
                  const nlk_real learn_rate_decay, const bool verbose)
{
    /* NOTE(review): accuracy is a double but the function returns float,
     * so the return value is narrowed — confirm this is intentional */
    double accuracy = 0;

    /**@section Shortcuts and Initializations */
    /**@subsection Create the softmax layer
     * softmax layer = linear layer followed by a softmax transfer
     */
    /* embedding size of the paragraphs */
    const size_t pv_size = nn->paragraphs->weights->cols;

    /* create: one output row per class */
    struct nlk_layer_linear_t *linear = NULL;
    linear = nlk_layer_linear_create(dset->n_classes, pv_size, true);
    /* init */
    nlk_layer_linear_init_sigmoid(linear); /* not a sigmoid but meh */
    /* add to neural network (nn takes ownership of the layer) */
    nlk_neuralnet_expand(nn, 1);
    nlk_neuralnet_add_layer_linear(nn, linear);

    /* Train (the per-epoch train accuracy return value is discarded;
     * accuracy is recomputed below via a full classify pass) */
    nlk_pv_class_train(nn, dset, iter, learn_rate, learn_rate_decay, verbose);

    /* Test on Training Set */
    unsigned int *pred = NULL;
    pred = nlk_pv_classify(nn, nn->paragraphs, dset->ids, dset->size, verbose);
    accuracy = nlk_class_score_accuracy(pred, dset->classes, dset->size);
    free(pred);

    if(verbose) {
        printf("\naccuracy classifying train set: %f (%zu)\n",
               accuracy, dset->size);
        printf("Finished training\n");
    }

    return accuracy;
}


/**
 * Convenience function: load a labelled test set from @a test_path, classify
 * it with the trained network and print/return the scores.
 *
 * @return accuracy on the test set
 */
float
nlk_pv_classify_test(struct nlk_neuralnet_t *nn, const char *test_path,
                     const bool verbose)
{
    float ac = 0;
    float f1 = 0;
    float prec = 0;
    float rec = 0;
    unsigned int *pred;
    struct nlk_dataset_t *test_set = NULL;

    test_set = nlk_dataset_load_path(test_path);
    if(test_set == NULL) {
        NLK_ERROR("invalid test set", NLK_FAILURE);
        /* unreachable */
    }

    /* predictions for the whole test set (freed below) */
    pred = nlk_pv_classify(nn, nn->paragraphs, test_set->ids,
                           test_set->size, verbose);
    ac = nlk_class_score_accuracy(pred, test_set->classes, test_set->size);
    /* NOTE(review): class indices 2 and 0 below presumably map to the
     * positive and negative sentiment classes — confirm against the
     * dataset's label encoding */
    f1 = nlk_class_score_semeval_senti_f1(pred, test_set->classes,
                                          test_set->size, 2, 0);

    if(verbose) {
        nlk_dataset_print_class_dist(test_set);
        printf("\nTEST SCORE (ACCURACY) = %f\n",
               ac);
        printf("TEST SCORE (SEMEVAL F1) = %f\n", f1);
        f1 = nlk_class_score_f1pr_class(pred, test_set->classes,
                                        test_set->size, 2, &prec, &rec);
        printf("\tpos: prec = %.3f, rec = %.3f, f1 = %.3f\n", prec, rec, f1);
        f1 = nlk_class_score_f1pr_class(pred, test_set->classes,
                                        test_set->size, 0, &prec, &rec);
        printf("\tneg: prec = %.3f, rec = %.3f, f1 = %.3f\n", prec, rec, f1);
        nlk_class_score_cm_print(pred, test_set->classes, test_set->size);
    }

    free(pred);
    nlk_dataset_free(test_set);
    return ac;
}
Stmt.h
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/iterator.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <string> namespace llvm { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class LLVM_ALIGNAS(LLVM_PTR_SIZE) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: void *operator new(size_t bytes) LLVM_NOEXCEPT { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) LLVM_NOEXCEPT { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } class StmtBitfields { friend class Stmt; /// \brief The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class CompoundStmtBitfields { friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; }; class IfStmtBitfields { friend class IfStmt; unsigned : NumStmtBits; unsigned IsConstexpr : 1; }; class ExprBitfields { friend class Expr; friend class DeclRefExpr; // computeDependence friend class InitListExpr; // ctor friend class DesignatedInitExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class ASTStmtReader; // deserialization friend class CXXNewExpr; // ctor friend class DependentScopeDeclRefExpr; // ctor friend class CXXConstructExpr; // ctor friend class CallExpr; // ctor friend class OffsetOfExpr; // ctor friend class ObjCMessageExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ShuffleVectorExpr; // ctor friend class ParenListExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class OverloadExpr; // ctor friend class PseudoObjectExpr; // ctor friend class AtomicExpr; // ctor friend class OpaqueValueExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned 
ObjectKind : 2; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = 16 }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 3; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 2; unsigned IsType : 1; // true if operand is a type, false if an expression. }; class DeclRefExprBitfields { friend class DeclRefExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; }; class CastExprBitfields { friend class CastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned BasePathSize : 32 - 6 - NumExprBits; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; }; class ExprWithCleanupsBitfields { friend class ExprWithCleanups; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; // When false, it must not have side effects. unsigned CleanupsHaveSideEffects : 1; unsigned NumObjects : 32 - 1 - NumExprBits; }; class PseudoObjectExprBitfields { friend class PseudoObjectExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. 
unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class TypeTraitExprBitfields { friend class TypeTraitExpr; friend class ASTStmtReader; friend class ASTStmtWriter; unsigned : NumExprBits; /// \brief The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// \brief If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// \brief The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; union { StmtBitfields StmtBits; CompoundStmtBitfields CompoundStmtBits; IfStmtBitfields IfStmtBits; ExprBitfields ExprBits; CharacterLiteralBitfields CharacterLiteralBits; FloatingLiteralBitfields FloatingLiteralBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; DeclRefExprBitfields DeclRefExprBits; CastExprBitfields CastExprBits; CallExprBitfields CallExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; PseudoObjectExprBitfields PseudoObjectExprBits; ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; InitListExprBitfields InitListExprBits; TypeTraitExprBitfields TypeTraitExprBits; }; friend class ASTStmtReader; friend class ASTStmtWriter; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. 
void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) LLVM_NOEXCEPT { return mem; } void operator delete(void *, const ASTContext &, unsigned) LLVM_NOEXCEPT {} void operator delete(void *, const ASTContext *, unsigned) LLVM_NOEXCEPT {} void operator delete(void *, size_t) LLVM_NOEXCEPT {} void operator delete(void *, void *) LLVM_NOEXCEPT {} public: /// \brief A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell { }; protected: /// Iterator for iterating over Stmt * arrays that contain only Expr * /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). struct ExprIterator : llvm::iterator_adaptor_base<ExprIterator, Stmt **, std::random_access_iterator_tag, Expr *> { ExprIterator() : iterator_adaptor_base(nullptr) {} ExprIterator(Stmt **I) : iterator_adaptor_base(I) {} reference operator*() const { assert((*I)->getStmtClass() >= firstExprConstant && (*I)->getStmtClass() <= lastExprConstant); return *reinterpret_cast<Expr **>(I); } }; /// Const iterator for iterating over Stmt * arrays that contain only Expr * struct ConstExprIterator : llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *, std::random_access_iterator_tag, const Expr *const> { ConstExprIterator() : iterator_adaptor_base(nullptr) {} ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {} reference operator*() const { assert((*I)->getStmtClass() >= firstExprConstant && (*I)->getStmtClass() <= lastExprConstant); return *reinterpret_cast<const Expr *const *>(I); } }; private: /// \brief Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// \brief Construct an empty statement. 
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt(StmtClass SC) { static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0, "Insufficient alignment!"); StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getLocStart() const LLVM_READONLY; SourceLocation getLocEnd() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// \brief Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; void dump(raw_ostream &OS) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip past any implicit AST nodes which might surround this /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes. Stmt *IgnoreImplicit(); /// \brief Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. 
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpressions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  typedef StmtIterator child_iterator;
  typedef ConstStmtIterator const_child_iterator;

  typedef llvm::iterator_range<child_iterator> child_range;
  typedef llvm::iterator_range<const_child_iterator> const_child_range;

  child_range children();
  const_child_range children() const {
    // Reuse the non-const implementation; the range is re-wrapped as const.
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// \brief Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types).
Another example is ForStmt, where /// the first statement can be an expression or a declaration. /// class DeclStmt : public Stmt { DeclGroupRef DG; SourceLocation StartLoc, EndLoc; public: DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {} /// \brief Build an empty declaration statement. explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { } /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } SourceLocation getStartLoc() const { return StartLoc; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. 
child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } typedef DeclGroupRef::iterator decl_iterator; typedef DeclGroupRef::const_iterator const_decl_iterator; typedef llvm::iterator_range<decl_iterator> decl_range; typedef llvm::iterator_range<const_decl_iterator> decl_const_range; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { SourceLocation SemiLoc; /// \brief True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode bool HasLeadingEmptyMacro; public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass), SemiLoc(L), HasLeadingEmptyMacro(hasLeadingEmptyMacro) {} /// \brief Build an empty null statement. 
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty), HasLeadingEmptyMacro(false) { } SourceLocation getSemiLoc() const { return SemiLoc; } void setSemiLoc(SourceLocation L) { SemiLoc = L; } bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; } SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(child_iterator(), child_iterator()); } friend class ASTStmtReader; friend class ASTStmtWriter; }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. /// class CompoundStmt : public Stmt { Stmt** Body; SourceLocation LBraceLoc, RBraceLoc; friend class ASTStmtReader; public: CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts, SourceLocation LB, SourceLocation RB); // \brief Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; } // \brief Build an empty compound statement. explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty), Body(nullptr) { CompoundStmtBits.NumStmts = 0; } void setStmts(const ASTContext &C, ArrayRef<Stmt *> Stmts); bool body_empty() const { return CompoundStmtBits.NumStmts == 0; } unsigned size() const { return CompoundStmtBits.NumStmts; } typedef Stmt** body_iterator; typedef llvm::iterator_range<body_iterator> body_range; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return Body; } body_iterator body_end() { return Body + size(); } Stmt *body_front() { return !body_empty() ? Body[0] : nullptr; } Stmt *body_back() { return !body_empty() ? 
Body[size()-1] : nullptr; } void setLastStmt(Stmt *S) { assert(!body_empty() && "setLastStmt"); Body[size()-1] = S; } typedef Stmt* const * const_body_iterator; typedef llvm::iterator_range<const_body_iterator> body_const_range; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return Body; } const_body_iterator body_end() const { return Body + size(); } const Stmt *body_front() const { return !body_empty() ? Body[0] : nullptr; } const Stmt *body_back() const { return !body_empty() ? Body[size() - 1] : nullptr; } typedef std::reverse_iterator<body_iterator> reverse_body_iterator; reverse_body_iterator body_rbegin() { return reverse_body_iterator(body_end()); } reverse_body_iterator body_rend() { return reverse_body_iterator(body_begin()); } typedef std::reverse_iterator<const_body_iterator> const_reverse_body_iterator; const_reverse_body_iterator body_rbegin() const { return const_reverse_body_iterator(body_end()); } const_reverse_body_iterator body_rend() const { return const_reverse_body_iterator(body_begin()); } SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; } SourceLocation getLBracLoc() const { return LBraceLoc; } SourceLocation getRBracLoc() const { return RBraceLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == CompoundStmtClass; } // Iterators child_range children() { return child_range(Body, Body + CompoundStmtBits.NumStmts); } const_child_range children() const { return const_child_range(child_iterator(Body), child_iterator(Body + CompoundStmtBits.NumStmts)); } }; // SwitchCase is the base class for CaseStmt and DefaultStmt, class SwitchCase : public Stmt { protected: // A pointer to the following CaseStmt or DefaultStmt class, // used by SwitchStmt. 
SwitchCase *NextSwitchCase; SourceLocation KeywordLoc; SourceLocation ColonLoc; SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc) : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) { } SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC), NextSwitchCase(nullptr) {} public: const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; } SwitchCase *getNextSwitchCase() { return NextSwitchCase; } void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; } SourceLocation getKeywordLoc() const { return KeywordLoc; } void setKeywordLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Stmt *getSubStmt(); const Stmt *getSubStmt() const { return const_cast<SwitchCase*>(this)->getSubStmt(); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY; static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass || T->getStmtClass() == DefaultStmtClass; } }; class CaseStmt : public SwitchCase { SourceLocation EllipsisLoc; enum { LHS, RHS, SUBSTMT, END_EXPR }; Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for // GNU "case 1 ... 4" extension public: CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc) : SwitchCase(CaseStmtClass, caseLoc, colonLoc) { SubExprs[SUBSTMT] = nullptr; SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs); SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs); EllipsisLoc = ellipsisLoc; } /// \brief Build an empty switch case statement. 
explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { } SourceLocation getCaseLoc() const { return KeywordLoc; } void setCaseLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getEllipsisLoc() const { return EllipsisLoc; } void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); } Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); } Stmt *getSubStmt() { return SubExprs[SUBSTMT]; } const Expr *getLHS() const { return reinterpret_cast<const Expr*>(SubExprs[LHS]); } const Expr *getRHS() const { return reinterpret_cast<const Expr*>(SubExprs[RHS]); } const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; } void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; } void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); } void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. const CaseStmt *CS = this; while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[END_EXPR]); } }; class DefaultStmt : public SwitchCase { Stmt* SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// \brief Build an empty default statement. 
explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) { } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return KeywordLoc; } void setDefaultLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt+1); } }; inline SourceLocation SwitchCase::getLocEnd() const { if (const CaseStmt *CS = dyn_cast<CaseStmt>(this)) return CS->getLocEnd(); return cast<DefaultStmt>(this)->getLocEnd(); } /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; /// class LabelStmt : public Stmt { SourceLocation IdentLoc; LabelDecl *TheDecl; Stmt *SubStmt; public: LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) { static_assert(sizeof(LabelStmt) == 2 * sizeof(SourceLocation) + 2 * sizeof(void *), "LabelStmt too big"); } // \brief Build an empty label statement. 
explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { } SourceLocation getIdentLoc() const { return IdentLoc; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setIdentLoc(SourceLocation L) { IdentLoc = L; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// \brief Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... } /// class AttributedStmt : public Stmt { Stmt *SubStmt; SourceLocation AttrLoc; unsigned NumAttrs; friend class ASTStmtReader; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt) : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc), NumAttrs(Attrs.size()) { std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr()); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) { std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr); } const Attr *const *getAttrArrayPtr() const { return reinterpret_cast<const Attr *const *>(this + 1); } const Attr **getAttrArrayPtr() { return reinterpret_cast<const Attr **>(this + 1); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); // \brief Build an empty attributed statement. 
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttrLoc; } ArrayRef<const Attr*> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. /// class IfStmt : public Stmt { enum { INIT, VAR, COND, THEN, ELSE, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation IfLoc; SourceLocation ElseLoc; public: IfStmt(const ASTContext &C, SourceLocation IL, bool IsConstexpr, Stmt *init, VarDecl *var, Expr *cond, Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = nullptr); /// \brief Build an empty if/then/else statement explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  const Stmt *getThen() const { return SubExprs[THEN]; }
  void setThen(Stmt *S) { SubExprs[THEN] = S; }
  const Stmt *getElse() const { return SubExprs[ELSE]; }
  void setElse(Stmt *S) { SubExprs[ELSE] = S; }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Stmt *getThen() { return SubExprs[THEN]; }
  Stmt *getElse() { return SubExprs[ELSE]; }

  SourceLocation getIfLoc() const { return IfLoc; }
  void setIfLoc(SourceLocation L) { IfLoc = L; }
  SourceLocation getElseLoc() const { return ElseLoc; }
  void setElseLoc(SourceLocation L) { ElseLoc = L; }

  // True for C++17 'if constexpr'; stored in the shared Stmt bitfields.
  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    if (SubExprs[ELSE])
      return SubExprs[ELSE]->getLocEnd();
    else
      return SubExprs[THEN]->getLocEnd();
  }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
///
class SwitchStmt : public Stmt {
  SourceLocation SwitchLoc;
  // Indices into SubExprs; END_EXPR is the array size, not a slot.
  enum { INIT, VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];

  // The pointer points to a linked list of case and default statements.  The
  // int is used only when the SwitchStmt is a switch on an enum value: it
  // records whether all the enum values were covered by CaseStmts.  The
  // coverage information value is meant to be a hint for possible clients.
  llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase;

public:
  SwitchStmt(const ASTContext &C, Stmt *Init, VarDecl *Var, Expr *cond);

  /// \brief Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }

  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Stmt *getBody() const { return SubExprs[BODY]; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }
  SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); }

  /// \brief Set the case list for this switch statement.
  void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); }

  SourceLocation getSwitchLoc() const { return SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    SubExprs[BODY] = S;
    SwitchLoc = SL;
  }

  // Prepends SC to the case list (cases end up in reverse source order).
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase()
           && "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase.getPointer());
    FirstCase.setPointer(SC);
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { FirstCase.setInt(true); }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const { return FirstCase.getInt(); }

  SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    // The body may be null for an ill-formed switch; fall back to the cond.
    return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd()
                          : SubExprs[COND]->getLocEnd();
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
///
class WhileStmt : public Stmt {
  SourceLocation WhileLoc;
  // Indices into SubExprs; END_EXPR is the array size, not a slot.
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];

public:
  WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
            SourceLocation WL);

  /// \brief Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// DoStmt - This represents a 'do/while' stmt.
///
class DoStmt : public Stmt {
  SourceLocation DoLoc;
  // Indices into SubExprs; END_EXPR is the array size, not a slot.
  enum { BODY, COND, END_EXPR };
  Stmt* SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc;  // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
    : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
    SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
    SubExprs[BODY] = body;
  }

  /// \brief Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getDoLoc() const { return DoLoc; }
  void setDoLoc(SourceLocation L) { DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
///
class ForStmt : public Stmt {
  SourceLocation ForLoc;
  // Indices into SubExprs; END_EXPR is the array size, not a slot.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// \brief Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { }

  Stmt *getInit() { return SubExprs[INIT]; }

  /// \brief Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForLoc; }
  void setForLoc(SourceLocation L) { ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
///
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation GotoLoc;
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
    : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}

  /// \brief Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { }

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
///
class IndirectGotoStmt : public Stmt {
  SourceLocation GotoLoc;
  SourceLocation StarLoc;
  Stmt *Target;  // Stored as a Stmt* so children() can hand out a Stmt**.

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
                   Expr *target)
    : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
      Target((Stmt*)target) {}

  /// \brief Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
    : Stmt(IndirectGotoStmtClass, Empty) { }

  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
  const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&Target, &Target+1); }
};

/// ContinueStmt - This represents a continue.
///
class ContinueStmt : public Stmt {
  SourceLocation ContinueLoc;

public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}

  /// \brief Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { }

  SourceLocation getContinueLoc() const { return ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// BreakStmt - This represents a break.
///
class BreakStmt : public Stmt {
  SourceLocation BreakLoc;

public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {
    // Guard against accidental growth: this node should stay tiny.
    static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation),
                  "BreakStmt too large");
  }

  /// \brief Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { }

  SourceLocation getBreakLoc() const { return BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
///
class ReturnStmt : public Stmt {
  SourceLocation RetLoc;
  Stmt *RetExpr;                  // May be null for a bare 'return;'.
  const VarDecl *NRVOCandidate;

public:
  explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {}

  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
      : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E),
        NRVOCandidate(NRVOCandidate) {}

  /// \brief Build an empty return expression.
  explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { }

  const Expr *getRetValue() const;
  Expr *getRetValue();
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }

  SourceLocation getReturnLoc() const { return RetLoc; }
  void setReturnLoc(SourceLocation L) { RetLoc = L; }

  /// \brief Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
  void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }

  SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return RetExpr ? RetExpr->getLocEnd() : RetLoc;
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators
  child_range children() {
    if (RetExpr) return child_range(&RetExpr, &RetExpr+1);
    return child_range(child_iterator(), child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
///
class AsmStmt : public Stmt {
protected:
  SourceLocation AsmLoc;

  /// \brief True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// \brief If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions, laid out as outputs followed by inputs.  The
  // iterators below rely on this ordering.
  Stmt **Exprs;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
    : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
      NumOutputs(numoutputs), NumInputs(numinputs),
      NumClobbers(numclobbers) { }

  friend class ASTStmtReader;

public:
  /// \brief Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty)
    : Stmt(SC, Empty), Exprs(nullptr) { }

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike
  /// output constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
      T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.  Inputs are stored after the NumOutputs outputs
  // in Exprs, hence the offsets below.

  typedef ExprIterator inputs_iterator;
  typedef ConstExprIterator const_inputs_iterator;
  typedef llvm::iterator_range<inputs_iterator> inputs_range;
  typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range;

  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }

  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.

  typedef ExprIterator outputs_iterator;
  typedef ConstExprIterator const_outputs_iterator;
  typedef llvm::iterator_range<outputs_iterator> outputs_range;
  typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }

  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }

  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }

  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};

/// This represents a GCC inline-assembly statement extension.
///
class GCCAsmStmt : public AsmStmt {
  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  // Parallel arrays over the operands: for each of the NumOutputs + NumInputs
  // operands, its constraint literal and (optional) symbolic name.
  StringLiteral **Constraints;
  StringLiteral **Clobbers;
  IdentifierInfo **Names;

  friend class ASTStmtReader;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, SourceLocation rparenloc);

  /// \brief Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty),
    Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { }

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below).  An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };
  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;  // Only meaningful when MyKind == Operand.

    // Source range for operand references.
    CharSourceRange Range;
  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
      : MyKind(Operand), Str(S), OperandNo(OpNo),
        Range(CharSourceRange::getCharRange(Begin, End)) {
    }

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const {
    return Names[i];
  }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();

    return StringRef();
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Inputs come after the NumOutputs outputs in Names/Constraints, hence
  // the 'i + NumOutputs' indexing below.
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();

    return StringRef();
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);
public:

  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;
  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};

/// This represents a Microsoft inline-assembly statement extension.
///
class MSAsmStmt : public AsmStmt {
  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks;

  Token *AsmToks;
  StringRef *Constraints;
  StringRef *Clobbers;

  friend class ASTStmtReader;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// \brief Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty),
    NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { }

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  // An invalid LBraceLoc means the asm statement had no braces.
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//
  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Inputs are stored after the NumOutputs outputs in Constraints/Exprs.
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }
  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }
  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:

  SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};

/// Represents a '__except' block of a Windows SEH '__try' statement.
class SEHExceptStmt : public Stmt {
  SourceLocation  Loc;
  Stmt           *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc,
                Expr *FilterExpr,
                Stmt *Block);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { }

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

/// Represents a '__finally' block of a Windows SEH '__try' statement.
class SEHFinallyStmt : public Stmt {
  SourceLocation  Loc;
  Stmt           *Block;

  SEHFinallyStmt(SourceLocation Loc,
                 Stmt *Block);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { }

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getLocEnd(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

/// Represents a Windows SEH '__try' (or a C++ 'try' lowered onto SEH).
class SEHTryStmt : public Stmt {
  bool            IsCXXTry;
  SourceLocation  TryLoc;
  Stmt           *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { }

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
///
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// \brief Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { }

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// \brief This captures a statement into a function. For example, the
/// following pragma annotated compound statement can be represented as a
/// CapturedStmt, and this compound statement is the body of an anonymous
/// outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// \brief The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// \brief Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: /// \brief Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. /// Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// \brief Determine the kind of capture. VariableCaptureKind getCaptureKind() const; /// \brief Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// \brief Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// \brief Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// \brief Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// \brief Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// \brief Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const; friend class ASTStmtReader; }; private: /// \brief The number of variable captured, including 'this'. unsigned NumCaptures; /// \brief The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. 
llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind; /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl; /// \brief Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// \brief Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// \brief Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// \brief Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl(); const CapturedDecl *getCapturedDecl() const; /// \brief Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D); /// \brief Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const; /// \brief Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind); /// \brief Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// \brief Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// \brief True if this variable has been captured. 
bool capturesVariable(const VarDecl *Var) const; /// \brief An iterator that walks over the captures. typedef Capture *capture_iterator; typedef const Capture *const_capture_iterator; typedef llvm::iterator_range<capture_iterator> capture_range; typedef llvm::iterator_range<const_capture_iterator> capture_const_range; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// \brief Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// \brief Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// \brief Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// \brief Iterator that walks over the capture initialization arguments. typedef Expr **capture_init_iterator; typedef llvm::iterator_range<capture_init_iterator> capture_init_range; /// \brief Const iterator that walks over the capture initialization /// arguments. typedef Expr *const *const_capture_init_iterator; typedef llvm::iterator_range<const_capture_init_iterator> const_capture_init_range; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// \brief Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// \brief Retrieve the iterator pointing one past the last initialization /// argument. 
capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); friend class ASTStmtReader; }; } // end namespace clang #endif
TranslationTable.h
// Modified from the original file of Chris Dyer // Copyright 2017 by Hao Wang, modified from the original code of Chris Dyer // provided in https://github.com/clab/fast_align/blob/master/src/ttables.h // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef TRANSLATION_TABLE_H #define TRANSLATION_TABLE_H typedef vector<W2Double>::const_iterator const_iterator; static double digamma(double x) { double result = 0, xx, xx2, xx4; for (; x < 7; ++x) result -= 1 / x; x -= 1.0 / 2.0; xx = 1.0 / x; xx2 = xx * xx; xx4 = xx2 * xx2; result += log(x) + (1. / 24.) * xx2 - (7.0 / 960.0) * xx4 + (31.0 / 8064.0) * xx4 * xx2 - (127.0 / 30720.0) * xx4 * xx4; return result; } class TranslationTable { public: TranslationTable() : initialized_(false), frozen_(false){}; TranslationTable(const TranslationTable &other) : initialized_(other.initialized_), frozen_(other.frozen_) { tt = other.tt; counts = other.counts; } ~TranslationTable(){}; inline double Prob(const unsigned &f, const unsigned &e) const { return initialized_ ? 
tt[f].find(e)->second : 1e-9; } inline double safe_Prob(const unsigned &f, const unsigned &e) const { if (f < static_cast<unsigned>(tt.size())) { const W2Double &cpd = tt[f]; const W2Double::const_iterator it = cpd.find(e); if (it == cpd.end()) return 1e-7; return it->second; } else return 1e-7; } inline void Insert(const unsigned f, const unsigned e) { // NOT thread safe if (f >= counts.size()) counts.resize(f + 1); counts[f][e] = 0; } inline void SetMaxF(const unsigned f) { // NOT thread safe if (f >= counts.size()) { counts.resize(f + 1); } } inline void safe_SetValue(const unsigned f, const unsigned e, const double x) { if (f >= tt.size()) tt.resize(f + 1); tt[f][e] = x; } inline void Increment(const unsigned f, const unsigned e, const double x) { // NOT thread safe counts[f].find(e)->second += x; } void Normalize() { CHECK(!frozen_, "#ERROR! tt has been initialized."); tt.swap(counts); #pragma omp parallel for schedule(dynamic) for (unsigned f = 0; f < tt.size(); f++) { double total = 0; W2Double &cpd = tt[f]; for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) total += it->second; if (!total) total = 1; for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) it->second /= total; } ClearCounts(); } void NormalizeVB(const double alpha) { CHECK(!frozen_, "#ERROR! tt has been initialized."); tt.swap(counts); #pragma omp parallel for schedule(dynamic) for (unsigned f = 0; f < tt.size(); f++) { double total = 0; W2Double &cpd = tt[f]; for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) total += it->second + alpha; if (!total) total = 1; const double digamma_total = digamma(total); for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) it->second = exp(digamma(it->second + alpha) - digamma_total); } ClearCounts(); } void SetInitialized() { CHECK(!initialized_, "#ERROR! 
tt has been initialized."); if (!initialized_) { tt.resize(counts.size()); for (unsigned i = 0; i < counts.size(); i++) { tt[i] = counts[i]; } } initialized_ = true; } void Freeze() { CHECK(!frozen_, "#ERROR! tt has frozen."); counts.clear(); frozen_ = true; } const_iterator begin() const { return tt.begin(); } const_iterator end() const { return tt.end(); } W2Double operator[](int i) const { return tt[i]; } size_t size() const { return tt.size(); } void clear() { counts.clear(); tt.clear(); frozen_ = false; initialized_ = false; } void Merge(TranslationTable &other, const bool &reverse = true) { CHECK(!frozen_, "#ERROR! tt has frozen."); counts.clear(); //require this and other have same entries. #pragma omp parallel for schedule(dynamic) for (unsigned f = 0; f < tt.size(); f++) { // remove NULL from F list W2Double &cpd = tt[f]; for (W2Double::iterator it = cpd.begin(); it != cpd.end(); it++) { const unsigned e = it->first; const double score = (reverse) ? other.safe_Prob(e, f) : other.safe_Prob(f, e); it->second = sqrt(it->second * score); } } } void WriteTranslationTable(ofstream *file, const SimpleWordWrapper &sw2id, const SimpleWordWrapper &tw2id, const double &threshold, const bool &keepNull) { CHECK(frozen_, "#ERROR! tt has not frozen."); if ((*file).is_open()) { for (unsigned f = (keepNull) ? 0 : 1; f < tt.size(); f++) { for (auto const &e : tt[f]) { const double score = e.second; if (score >= threshold) (*file) << sw2id.decode(f) << " " << tw2id.decode(e.first) << " " << score << "\n"; } } (*file).close(); } } void LoadTranslationTable(ifstream *file, SimpleWordWrapper &sw2id, SimpleWordWrapper &tw2id) { CHECK(!frozen_, "#ERROR! tt has frozen."); int i; int effective = 0; string line; for (i = 0; !getline(*file, line).eof(); i++) { const vector<string> tokens = Split(line, " "); CHECK(tokens.size() == 3, "#ERROR! 
model format wrong: " + line); const string &f_str = tokens[0]; const string &e_str = tokens[1]; unsigned f = sw2id.encode(f_str); unsigned e = tw2id.encode(e_str); if (f && e) { safe_SetValue(f, e, stod(tokens[2])); effective++; } } initialized_ = true; (*file).close(); cerr << "# of entries: \t[" << i << "]" << endl; cerr << "# of effective entries:\t[" << effective << "]" << endl; } void ClearCounts() { #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < counts.size(); i++) for (auto &cnt : counts[i]) cnt.second = 0.0; } private: W2WDouble tt; W2WDouble counts; double threshold_ = 1e-6; bool initialized_; // Disallow new e,f pairs to be added to counts bool frozen_; // Disallow new e,f pairs to be added to counts }; #endif // TRANSLATION_TABLE_H
GB_binop__lt_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__lt_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__lt_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__lt_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_uint64) // A*D function (colscale): GB (_AxD__lt_uint64) // D*A function (rowscale): GB (_DxB__lt_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__lt_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__lt_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_uint64) // C=scalar+B GB (_bind1st__lt_uint64) // C=scalar+B' GB (_bind1st_tran__lt_uint64) // C=A+scalar GB (_bind2nd__lt_uint64) // C=A'+scalar GB (_bind2nd_tran__lt_uint64) // C type: bool // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 
0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_UINT64 || GxB_NO_LT_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lt_uint64) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lt_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
mkl_util.h
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_ #define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_ #ifdef INTEL_MKL #include <list> #include <memory> #include <string> #include <unordered_map> #include <utility> #include <vector> #if defined(INTEL_MKL_ML_ONLY) || defined(INTEL_MKL_DNN_ONLY) #ifndef INTEL_MKL #error "INTEL_MKL_{ML,DNN}_ONLY require INTEL_MKL" #endif #endif #if defined(INTEL_MKL_ML_ONLY) && defined(INTEL_MKL_DNN_ONLY) #error "at most one of INTEL_MKL_ML_ONLY and INTEL_MKL_DNN_ONLY may be defined" #endif #ifdef INTEL_MKL_ML_ONLY #error "Please use INTEL MKL DNN (the default option for --config=mkl)." 
#endif #ifdef INTEL_MKL_ML_ONLY #include "mkl_dnn.h" #include "mkl_dnn_types.h" #include "mkl_service.h" #include "mkl_trans.h" #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/graph/mkl_graph_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/platform/cpu_info.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/env_var.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #ifndef INTEL_MKL_ML_ONLY #include "mkldnn.hpp" #include "tensorflow/core/lib/core/stringpiece.h" using mkldnn::engine; using mkldnn::memory; using mkldnn::padding_kind; using mkldnn::primitive; using mkldnn::reorder; #endif #ifdef _WIN32 typedef unsigned int uint; #endif namespace tensorflow { // The file contains a number of utility classes and functions used by MKL // enabled kernels // This class encapsulates all the meta data that is associated with an MKL // tensor. A tensor is an MKL tensor if it was created as the result of an // MKL operation, and did not go through a conversion to a standard // Tensorflow tensor. // For use with MKL ML, has been deprecated typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims; // The dimensions order that MKL-DNN internally uses for 2D activations // [Batch, Channel, Height, Width] and // for 2D filters [Out_Channel, In_Channel, Height, Width]. typedef enum { Dim_N = 0, Dim_C = 1, Dim_H = 2, Dim_W = 3, Dim_O = 0, Dim_I = 1 } MklDnnDims; // The dimensions order that MKL-DNN internally uses for 3D activations // [Batch, Channel, Depth, Height, Width] and // for 3D filters [Out_Channel, In_Channel, Depth, Height, Width]. 
typedef enum { Dim3d_N = 0, Dim3d_C = 1, Dim3d_D = 2, Dim3d_H = 3, Dim3d_W = 4, Dim3d_O = 0, Dim3d_I = 1 } MklDnnDims3D; // Enum for the order of dimensions of a TF 2D filter with shape [filter_height, // filter_width, in_channels, out_channels] typedef enum { TF_2DFILTER_DIM_H = 0, TF_2DFILTER_DIM_W = 1, TF_2DFILTER_DIM_I = 2, TF_2DFILTER_DIM_O = 3 } TFFilterDims2d; // Enum for the order of dimensions of a TF 3D filter with shape [filter_depth, // filter_height, filter_width, in_channels, out_channels] typedef enum { TF_3DFILTER_DIM_P = 0, TF_3DFILTER_DIM_H = 1, TF_3DFILTER_DIM_W = 2, TF_3DFILTER_DIM_I = 3, TF_3DFILTER_DIM_O = 4 } TFFilterDims3d; // The dimensions order that MKL-DNN requires for the filter in a grouped // convolution (2D only) typedef enum { MKL_GROUP_FILTER_DIM_G = 0, MKL_GROUP_FILTER_DIM_O = 1, MKL_GROUP_FILTER_DIM_I = 2, MKL_GROUP_FILTER_DIM_H = 3, MKL_GROUP_FILTER_DIM_W = 4 } MklDnnFilterGroupDims; // Enum used to templatize MklOp kernel implementations // that support both fp32 and int8 versions. 
enum class MklQuantization {
  QUANTIZED_VERSION,
  FP_VERSION,
};

static const int kSmallBatchSize = 32;

#ifdef INTEL_MKL_ML_ONLY
// Shape/layout metadata for a tensor on the deprecated MKL-ML path.
// Owns the size/stride/dim-map arrays and the two dnnLayout_t handles.
class MklShape {
 public:
  MklShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklShape);  // Cannot copy

  // Releases owned arrays and deletes both MKL layouts (if created).
  ~MklShape() {
    if (sizes_) delete[] sizes_;
    if (strides_) delete[] strides_;
    if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);
    if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);
    if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
  }

  const bool IsMklTensor() const { return isMklTensor_; }
  void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }
  void SetDimensions(const size_t dimension) { dimension_ = dimension; }
  // Takes ownership of an already-created layout handle.
  void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }
  // Creates the MKL layout from a primitive + resource type.
  void SetMklLayout(const void* primitive, size_t resourceType) {
    CHECK_EQ(
        dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
                                         (dnnResourceType_t)resourceType),
        E_SUCCESS);
  }

  // Copies sizes/strides and creates the plain-TF layout from them.
  void SetTfLayout(const size_t dimension, const size_t* sizes,
                   const size_t* strides) {
    dimension_ = dimension;
    if (dimension > 0) {  // MKl doesn't support zero dimension tensors
      sizes_ = new size_t[dimension];
      strides_ = new size_t[dimension];
      for (int ii = 0; ii < dimension; ii++) {
        sizes_[ii] = sizes[ii];
        strides_[ii] = strides[ii];
      }
      CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
               E_SUCCESS);
    }
  }

  // Default case - MKL dim ordering is opposite of TF dim ordering
  // MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
  // TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
  // For layers that rely on data_format semantics (conv, pooling etc.)
  // or operate only on certain dimensions (relu, concat, split etc.),
  // Mkl APIs might require us to reorder these dimensions. In such cases,
  // kernels should explicitly set this map
  void SetTfDimOrder(const size_t dimension) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
    }
  }

  // Explicit map supplied by the kernel.
  void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
    }
  }

  // Map derived from a 4D data_format (NHWC/NCHW only).
  void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    CHECK_EQ(dimension, 4);
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
  }

  const dnnLayout_t GetMklLayout() const { return mklLayout_; }
  const dnnLayout_t GetTfLayout() const { return tfLayout_; }
  const dnnLayout_t GetCurLayout() const {
    return isMklTensor_ ? mklLayout_ : tfLayout_;
  }
  size_t GetDimension() const { return dimension_; }
  const size_t* GetSizes() const { return sizes_; }
  int64 dim_size(int index) const { return sizes_[index]; }
  // Size of TF dimension 'index' (translated through the dim map).
  int64 tf_dim_size(int index) const {
    return sizes_[tf_to_mkl_dim_map_[index]];
  }
  const size_t* GetStrides() const { return strides_; }
  const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
  size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Channel dimension.
  bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Batch dimension.
  bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Width dimension.
  bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Height dimension.
  bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NCHW format.
  bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NHWC format.
  bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Converts 'input' (in the current layout) into 'output' in 'targetLayout'
  // using an MKL conversion primitive.
  void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
                            void* output) const {
    dnnLayout_t curLayout;
    if (isMklTensor_)
      curLayout = mklLayout_;
    else
      curLayout = tfLayout_;
    dnnPrimitive_t convert;
    CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout),
             E_SUCCESS);
    CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS);
    CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS);
  }

  // The following methods are used for serializing and de-serializing the
  // contents of the mklshape object.
  // The data is serialized in this order
  // isMklTensor_
  // dimension_
  // sizes_
  // strides_
  // mklLayout_
  // tfLayout_
  // tf_to_mkl_dim_map_

#define SIZE_OF_MKL_DNN_BUF \
  (dnnLayoutSerializationBufferSize_F32())  // Size of buffer needed to
                                            // serialize dnn_layout pointer

  // Size of buffer to hold the serialized object, the size is computed as
  // follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) +
  // sizeof(strides_)
  // + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer)
  // + sizeof(tf_to_mkl_dim_map_)
#define SIZE_OF_MKL_SERIAL_DATA(dims) \
  (2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)

  // First we need to define some macro for offsets into the serial buffer where
  // different elements of Mklshape is written/read from
#define IS_MKL_TENSOR_OFFSET 0
// Location from start of buffer where isMklTensor_ is serialized
#define DIMS_OFFSET \
  (IS_MKL_TENSOR_OFFSET + sizeof(size_t))  // Location of dimension_
// Location of sizes. Note dim is not used here, left here
// to make macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))
#define STRIDES_OFFSET(dims) \
  (SIZES_OFFSET(dims) + dims * sizeof(size_t))  // Location of strides
#define MKL_LAYOUT_OFFSET(dims) \
  (STRIDES_OFFSET(dims) + dims * sizeof(size_t))  // Location of mklLayout_
#define TF_LAYOUT_OFFSET(dims) \
  (MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)  // Location of tfLayout_
// Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
  (TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)

  // TODO(agramesh1) make sure to create a const to share with rewrite pass
  // for min size of MKL metadata tensor.

  // Reads the serialized representation produced by SerializeMklShape.
  void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
    CHECK(buf_size >= sizeof(size_t))
        << "Bufsize too small in DeSerialize";  // Make sure buffer holds at
                                                // least isMklTensor_
    isMklTensor_ =
        *reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;

    if (isMklTensor_) {  // If it is an MKL Tensor then read the rest
      dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
      CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
          << "Bufsize too small in DeSerialize";
      sizes_ = new size_t[dimension_];
      strides_ = new size_t[dimension_];
      tf_to_mkl_dim_map_ = new size_t[dimension_];
      for (int i = 0; i < dimension_; i++) {
        sizes_[i] =
            reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
        strides_[i] = reinterpret_cast<const size_t*>(
            buf + STRIDES_OFFSET(dimension_))[i];
        tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
            buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
      }
      CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
                                        buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
                                        buf + TF_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
    }
  }

  // Writes this object into 'buf' using the offset macros above.
  void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
        << "Bufsize too small to Serialize";
    *reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
        isMklTensor_ ? 1 : 0;
    if (isMklTensor_) {
      *(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
      for (int i = 0; i < dimension_; i++) {
        reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
            sizes_[i];
        reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
            strides_[i];
        reinterpret_cast<size_t*>(buf +
                                  TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
            tf_to_mkl_dim_map_[i];
      }
      CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
                                      buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(
          dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
          E_SUCCESS);
    }
  }

 private:
  bool isMklTensor_ = false;  // Flag to indicate if the tensor is an MKL tensor
                              // or not
  dnnLayout_t mklLayout_ = nullptr;  // Pointer to the MKL layout
  dnnLayout_t tfLayout_ = nullptr;   // Pointer to layout of corresponding
  // Tensorflow tensor, used when conversion from MKL to standard tensor
  size_t dimension_ = 0;
  size_t* sizes_ = nullptr;    // Required by MKL for conversions
  size_t* strides_ = nullptr;  // Required by MKL for conversions
  size_t* tf_to_mkl_dim_map_ =
      nullptr;  // TF dimension corresponding to this MKL dimension
};

#else

// Forward decl
TensorFormat MklDnn3DDataFormatToTFDataFormat(memory::format format);
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                        const memory::dims& strides,
                                        memory::data_type dtype);

// Shape/layout metadata for a tensor on the MKL-DNN path. All state lives in
// a single POD struct so the whole object can be (de)serialized with memcpy
// semantics.
class MklDnnShape {
 private:
  typedef struct {
    /// Flag to indicate if the tensor is an MKL tensor or not
    bool is_mkl_tensor_ = false;
    /// Number of dimensions in Tensorflow format
    size_t dimension_ = 0;
    /// Required by MKLDNN for conversions
    mkldnn_dims_t sizes_;  // Required by MKL for conversions
    memory::format tf_data_format_ = memory::format::format_undef;
    memory::data_type T_ = memory::data_type::data_undef;
    // MKL layout
    mkldnn_memory_desc_t mkl_md_;
    /// TF dimension corresponding to this MKL dimension
    mkldnn_dims_t map_;
  }
MklShapeData;
  MklShapeData data_;

  typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t;

// Sentinel for unused entries of the fixed-size sizes_/map_ arrays.
#define INVALID_DIM_SIZE -1

 public:
  // Initializes every slot of sizes_/map_ to the invalid sentinel.
  MklDnnShape() {
    for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
         ++i) {
      data_.sizes_[i] = -1;
    }
    for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
      data_.map_[i] = -1;
    }
  }

  ~MklDnnShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape);  // Cannot copy

  /// Helper function to compare memory::desc objects for MklDnn.
  /// May be this should go into MklDnn directly.
  /// Note: raw byte-wise comparison of the two descriptor structs.
  inline bool CompareMklDnnLayouts(const memory::desc& md1,
                                   const memory::desc& md2) const {
    mkldnn_memory_desc_t mdd1 = md1.data;
    mkldnn_memory_desc_t mdd2 = md2.data;
    const char* d1 = reinterpret_cast<const char*>(&mdd1);
    const char* d2 = reinterpret_cast<const char*>(&mdd2);

    size_t md_size = sizeof(mdd1);
    for (size_t i = 0; i < md_size; i++) {
      if (*d1++ != *d2++) {
        return false;
      }
    }
    return true;
  }

  /// Equality function for MklDnnShape objects
  /// @return true if both are equal; false otherwise.
  inline bool operator==(const MklDnnShape& input_shape) const {
    if (this->IsMklTensor() != input_shape.IsMklTensor()) {
      return false;
    }

    // If input tensors are in Mkl layout, then we check for dimensions and
    // sizes.
    if (this->IsMklTensor()) {
      return this->GetTfShape() == input_shape.GetTfShape() &&
             CompareMklDnnLayouts(this->GetMklLayout(),
                                  input_shape.GetMklLayout());
    }

    return true;
  }

  /// Equality operator for MklDnnShape and TFShape.
  /// Returns: true if TF shapes for both are the same, false otherwise
  inline bool operator==(const TensorShape& input_shape) const {
    if (!this->IsMklTensor()) {
      return false;
    }
    return this->GetTfShape() == input_shape;
  }

  inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }
  inline void SetMklTensor(bool is_mkl_tensor) {
    data_.is_mkl_tensor_ = is_mkl_tensor;
  }

  inline void SetDimensions(const size_t dimension) {
    data_.dimension_ = dimension;
  }

  // Size of the MKL dimension named by 'dimension' ('N','C','H','W').
  inline size_t GetDimension(char dimension) const {
    int index = GetMklDnnTensorDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // 3D variant; also accepts 'D' (depth).
  inline size_t GetDimension3D(char dimension) const {
    int index = GetMklDnnTensor3DDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  inline int32 GetMklDnnTensorDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims::Dim_N;
      case 'C':
        return MklDnnDims::Dim_C;
      case 'H':
        return MklDnnDims::Dim_H;
      case 'W':
        return MklDnnDims::Dim_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  inline int32 GetMklDnnTensor3DDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims3D::Dim3d_N;
      case 'C':
        return MklDnnDims3D::Dim3d_C;
      case 'D':
        return MklDnnDims3D::Dim3d_D;
      case 'H':
        return MklDnnDims3D::Dim3d_H;
      case 'W':
        return MklDnnDims3D::Dim3d_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  inline size_t GetDimension() const { return data_.dimension_; }
  inline const int* GetSizes() const {
    return reinterpret_cast<const int*>(&data_.sizes_[0]);
  }

  // Returns an mkldnn::memory::dims object that contains the sizes of this
  // MklDnnShape object.
  inline memory::dims GetSizesAsMklDnnDims() const {
    memory::dims retVal;
    if (data_.is_mkl_tensor_) {
      size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
      for (size_t i = 0; i < dimensions; i++) {
        if (data_.sizes_[i] != INVALID_DIM_SIZE)
          retVal.push_back(data_.sizes_[i]);
      }
    } else {
      CHECK_EQ(data_.is_mkl_tensor_, true);
    }
    return retVal;
  }

  inline int64 DimSize(int index) const {
    CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
    return data_.sizes_[index];
  }

  /// Return TensorShape that describes the Tensorflow shape of the tensor
  /// represented by this MklShape.
  inline TensorShape GetTfShape() const {
    CHECK_EQ(data_.is_mkl_tensor_, true);

    std::vector<int32> shape(data_.dimension_, -1);
    if (data_.tf_data_format_ != memory::format::blocked) {
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[TfDimIdx(idx)];
      }
    } else {
      // If Tensorflow shape is in Blocked format, then we don't have dimension
      // map for it. So we just create Tensorflow shape from sizes in the
      // specified order.
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[idx];
      }
    }

    TensorShape ts;
    bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
    CHECK_EQ(ret, true);
    return ts;
  }

  inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
  inline const memory::data_type GetElemType() { return data_.T_; }

  inline void SetMklLayout(memory::primitive_desc* pd) {
    CHECK_NOTNULL(pd);
    data_.mkl_md_ = pd->desc().data;
  }

  inline void SetMklLayout(memory::desc* md) {
    CHECK_NOTNULL(md);
    data_.mkl_md_ = md->data;
  }

  inline const memory::desc GetMklLayout() const {
    return memory::desc(data_.mkl_md_);
  }

  inline memory::format GetTfDataFormat() const {
    return data_.tf_data_format_;
  }

  /// We don't create primitive_descriptor for TensorFlow layout now.
  /// We use lazy evaluation and create it only when needed. Input format can
  /// also be Blocked format.
  inline void SetTfLayout(size_t dims, const memory::dims& sizes,
                          memory::format format) {
    CHECK_EQ(dims, sizes.size());
    data_.dimension_ = dims;
    for (size_t ii = 0; ii < dims; ii++) {
      data_.sizes_[ii] = sizes[ii];
    }
    data_.tf_data_format_ = format;
    if (format != memory::format::blocked) {
      SetTfDimOrder(dims, format);
    }
  }

  inline const memory::desc GetTfLayout() const {
    memory::dims dims;
    for (size_t ii = 0; ii < data_.dimension_; ii++) {
      dims.push_back(data_.sizes_[ii]);
    }

    // Create Blocked memory desc if input TF format was set like that.
    if (data_.tf_data_format_ == memory::format::blocked) {
      auto strides = CalculateTFStrides(dims);
      return CreateBlockedMemDescHelper(dims, strides, data_.T_);
    } else {
      return memory::desc(dims, data_.T_, data_.tf_data_format_);
    }
  }

  inline const memory::desc GetCurLayout() const {
    return IsMklTensor() ? GetMklLayout() : GetTfLayout();
  }

  // nhasabni - I've removed SetTfDimOrder that was setting default order in
  // case of MKL-ML. We don't need a case of default dimension order because
  // when an operator that does not get data_format attribute gets all inputs
  // in Tensorflow format, it will produce output in Tensorflow format.
  inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) {
    CHECK(dimension == data_.dimension_);
    for (size_t ii = 0; ii < dimension; ii++) {
      data_.map_[ii] = map[ii];
    }
  }

  // Builds the TF->MKL dim map from a data_format; supports 4D and 5D.
  inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    if (dimension == 5) {
      CHECK(dimension == data_.dimension_);
      data_.map_[GetTensorDimIndex<3>(data_format, '0')] =
          MklDnnDims3D::Dim3d_D;
      data_.map_[GetTensorDimIndex<3>(data_format, '1')] =
          MklDnnDims3D::Dim3d_H;
      data_.map_[GetTensorDimIndex<3>(data_format, '2')] =
          MklDnnDims3D::Dim3d_W;
      data_.map_[GetTensorDimIndex<3>(data_format, 'C')] =
          MklDnnDims3D::Dim3d_C;
      data_.map_[GetTensorDimIndex<3>(data_format, 'N')] =
          MklDnnDims3D::Dim3d_N;
    } else {
      CHECK_EQ(dimension, 4);
      CHECK(dimension == data_.dimension_);
      data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
      data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
      data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
      data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
    }
  }

  inline void SetTfDimOrder(const size_t dimension, memory::format format) {
    TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
    SetTfDimOrder(dimension, data_format);
  }

  inline const mkldnn_dim_t* GetTfToMklDimMap() const {
    return &data_.map_[0];
  }
  inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
  inline int64 TfDimSize(int index) const {
    return data_.sizes_[TfDimIdx(index)];
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Channel dimension.
  inline bool IsMklChannelDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_C;
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Batch dimension.
  inline bool IsMklBatchDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_N;
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Width dimension.
  inline bool IsMklWidthDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_W;
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Height dimension.
  inline bool IsMklHeightDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_H;
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NCHW format.
  inline bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NHWC format.
  inline bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// The following methods are used for serializing and de-serializing the
  /// contents of the mklshape object.
  /// The data is serialized in this order
  /// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_;

  /// Size of buffer to hold the serialized object, the size is computed by
  /// following above mentioned order
  inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }

  // Serialization is a raw struct copy of MklShapeData.
  void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= GetSerializeBufferSize())
        << "Buffer size is too small to SerializeMklDnnShape";
    *reinterpret_cast<MklShapeData*>(buf) = data_;
  }

  void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
    // Make sure buffer holds at least is_mkl_tensor_.
    CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
        << "Buffer size is too small in DeSerializeMklDnnShape";

    const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
    if (is_mkl_tensor) {  // If it is an MKL Tensor then read the rest
      CHECK(buf_size >= GetSerializeBufferSize())
          << "Buffer size is too small in DeSerializeMklDnnShape";
      data_ = *reinterpret_cast<const MklShapeData*>(buf);
    }
  }
};

#endif

// List of MklShape objects. Used in Concat/Split layers.
#ifndef INTEL_MKL_ML_ONLY
typedef std::vector<MklDnnShape> MklDnnShapeList;
#else
typedef std::vector<MklShape> MklShapeList;
#endif

#ifdef INTEL_MKL_ML_ONLY
// Check if all tensors specified by MklShapes are MKL tensors.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
  for (auto& s : shapes) {
    if (!s.IsMklTensor()) {
      return false;
    }
  }
  return true;
}

// Converts an MKL-layout tensor into a plain TF-layout tensor held in a
// freshly allocated temp tensor (MKL-ML path).
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;

  for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
    // Outermost to innermost dimension
    output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
  }

  // Allocate output tensor.
context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor); dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout()); void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data()); void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data()); if (mkl_tensor.NumElements() != 0) { mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer); } return output_tensor; } #else using mkldnn::stream; template <typename T> class MklDnnData; template <typename T> inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor, const MklDnnShape& mkl_shape) { Tensor output_tensor; try { if (!mkl_shape.IsMklTensor()) return mkl_tensor; // return input since it is already TF tensor TensorShape output_shape = mkl_shape.GetTfShape(); // Allocate output tensor. context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor); auto cpu_engine = engine(engine::cpu, 0); MklDnnData<T> input(&cpu_engine); // Get Mkl layout of input tensor. auto input_mkl_md = mkl_shape.GetMklLayout(); auto output_tf_md = mkl_shape.GetTfLayout(); auto output_tf_pd = memory::primitive_desc(output_tf_md, cpu_engine); input.SetUsrMem(input_mkl_md, &mkl_tensor); // reorder if (input.IsReorderNeeded(output_tf_pd)) { std::vector<primitive> net; CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, &output_tensor, &net), true); stream(stream::kind::eager).submit(net).wait(); } else { // If not, just forward input tensor to output tensor. 
CHECK(output_tensor.CopyFrom(mkl_tensor, output_shape));
    }
  } catch (mkldnn::error& e) {
    string error_msg = "Status: " + std::to_string(e.status) +
                       ", message: " + string(e.message) + ", in file " +
                       string(__FILE__) + ":" + std::to_string(__LINE__);
    LOG(FATAL) << "Operation received an exception: " << error_msg;
  }
  return output_tensor;
}
#endif

// Get the MKL shape from the second string tensor
// (deserializes the meta tensor that accompanies input slot 'n').
#ifdef INTEL_MKL_ML_ONLY
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
  mklshape->DeSerializeMklShape(
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
          .flat<uint8>()
          .data(),
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
              .flat<uint8>()
              .size() *
          sizeof(uint8));
}
#else
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
  mklshape->DeSerializeMklDnnShape(
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
          .flat<uint8>()
          .data(),
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
              .flat<uint8>()
              .size() *
          sizeof(uint8));
}
#endif

// Gets the actual input
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
  return ctext->input(GetTensorDataIndex(n, ctext->num_inputs()));
}

// Fetches the named input list into 'input_tensors'.
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
                            OpInputList* input_tensors) {
  CHECK_NOTNULL(input_tensors);
  ctext->input_list(name, input_tensors);
}

// Deserializes the "mkl_<name>" meta tensors into the (pre-sized) shape list.
#ifdef INTEL_MKL_ML_ONLY
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  for (int i = 0; i < input_mkl_tensors.size(); i++) {
    (*mkl_shapes)[i].DeSerializeMklShape(
        input_mkl_tensors[i].flat<uint8>().data(),
        input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
  }
}
#else
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklDnnShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  for (int i = 0; i < input_mkl_tensors.size(); i++) {
    (*mkl_shapes)[i].DeSerializeMklDnnShape(
        input_mkl_tensors[i].flat<uint8>().data(),
        input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
  }
}
#endif

#ifndef INTEL_MKL_ML_ONLY
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
  // Sanity check.
  CHECK_NOTNULL(context);
  CHECK_LT(input_idx, context->num_inputs());

  MklDnnShape input_mkl_shape;
  GetMklShape(context, input_idx, &input_mkl_shape);
  if (input_mkl_shape.IsMklTensor()) {
    return input_mkl_shape.GetTfShape();
  } else {
    const Tensor& t = MklGetInput(context, input_idx);
    return t.shape();
  }
}
#endif

#ifdef INTEL_MKL_ML_ONLY
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklDnnShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklDnnShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif

#ifdef INTEL_MKL_ML_ONLY
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklDnnShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklDnnShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif

// Allocates a temp tensor and returns the data buffer for temporary storage.
// Currently
#ifndef INTEL_MKL_ML_ONLY
// Sizes the temp tensor from the primitive descriptor's byte size; the +1
// element guards against truncation from the integer division.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           const memory::primitive_desc& pd, void** buf_out) {
  TensorShape tf_shape;

  tf_shape.AddDim(pd.get_size() / sizeof(T) + 1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#else
// MKL-ML variant: sizes the temp tensor from the dnnLayout_t's memory size.
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           dnnLayout_t lt_buff, void** buf_out) {
  TensorShape tf_shape;

  tf_shape.AddDim(
      dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) /
          sizeof(float) +
      1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
#endif

// Allocates a temp tensor of the given shape; no buffer pointer is returned.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           TensorShape tf_shape) {
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
}

// Fills strides[0..3] for a 4-D tensor given per-dimension sizes.
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
                                const size_t* sizes) {
  // MKL requires strides in NCHW
  if (data_format == FORMAT_NHWC) {
    strides[0] = sizes[2];
    strides[1] = sizes[0] * sizes[2];
    strides[2] = 1;
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  } else {
    strides[0] = 1;
    strides[1] = sizes[0];
    strides[2] = sizes[0] * sizes[1];
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  }
}

#ifdef INTEL_MKL_ML_ONLY
// Converts a 4-D MklShape's sizes into a TensorShape ordered according to
// data_format_. Fails the op if the MklShape is not 4-dimensional.
inline void MklSizesToTFSizes(OpKernelContext* context,
                              TensorFormat data_format_,
                              const MklShape& mkl_shape,
                              TensorShape* tf_shape) {
  size_t tf_dim = mkl_shape.GetDimension();
  const size_t* tf_sizes = mkl_shape.GetSizes();

  OP_REQUIRES(context, tf_dim == 4,
              errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));
  std::vector<int32> sizes;

  sizes.push_back(tf_sizes[3]);

  if (data_format_ == FORMAT_NHWC) {
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
    sizes.push_back(tf_sizes[2]);
  } else {
    sizes.push_back(tf_sizes[2]);
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
  }

  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
#endif

// Maps a dimension character ('N', 'C', 'H', 'W') to its MklDims index.
// Any other character is a fatal error.
inline int32 GetMklTensorDimIndex(char dimension) {
  switch (dimension) {
    case 'N':
      return MklDims::N;
    case 'C':
      return MklDims::C;
    case 'H':
      return MklDims::H;
    case 'W':
      return MklDims::W;
    default:
      LOG(FATAL) << "Invalid dimension: " << dimension;
      return -1;  // Avoid compiler warning about missing return value
  }
}

#ifdef INTEL_MKL_ML_ONLY
// Returns the size of the given named dimension of an MklShape.
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
  int index = GetMklTensorDimIndex(dimension);
  CHECK(index >= 0 && index < mkl_shape.GetDimension())
      << "Invalid index from the dimension: " << index << ", " << dimension;
  return mkl_shape.dim_size(index);
}
#endif

// Copies both the data tensor and its MKL metadata tensor from input slot
// idx_in to output slot idx_out.
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
                                 int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  const Tensor& meta = context->input(idx_meta_in);
  Tensor output(data.dtype());
  Tensor meta_output(meta.dtype());

  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, data.shape()));
  CHECK(meta_output.CopyFrom(meta, meta.shape()));
  context->set_output(idx_data_out, output);
  context->set_output(idx_meta_out, meta_output);
}

#ifdef INTEL_MKL_ML_ONLY
// Copies the data tensor to the output slot with the given TF shape and marks
// the output's MKL metadata as "not an MKL tensor".
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#else
// MKL-DNN variant of the function above.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#endif

#ifdef INTEL_MKL_ML_ONLY
// Forwards a TF-layout input to an output slot (by reference when the input
// dtype is a ref type) and marks the output's MKL metadata as non-MKL.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#else
// MKL-DNN variant of the function above.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  MklDnnShape dnn_shape_output;
  dnn_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif

// Forwards both the data tensor and its MKL metadata tensor from input slot
// idx_in to output slot idx_out.
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
                                    int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}

#ifndef INTEL_MKL_ML_ONLY
// Set a dummy MKLDNN shape (called when the output is in TF format)
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
                                      uint32 idx_data_out) {
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}

// Forwards the data tensor and writes the given MklDnnShape as the output's
// metadata.
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
                                                int idx_in, int idx_out,
                                                const MklDnnShape& mkl_shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  AllocateOutputSetMklShape(context, idx_out, mkl_shape);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif

// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used)
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
                                      uint32 idx_data_in,
                                      uint32_t idx_data_out) {
  uint32 idx_meta_in =
      GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
  uint32 idx_meta_out =
      GetTensorMetaDataIndex(idx_data_out, context->num_outputs());

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}

#ifdef INTEL_MKL_ML_ONLY
// Set a dummy MKL shape (called when the output is in TF format)
inline void SetDummyMklShapeOutput(OpKernelContext* context,
                                   uint32 idx_data_out) {
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}
// We don't need these functions in
// MKLDNN. We have defined equality operator
// on MklDnnShape class directly.

// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const MklShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->GetDimension();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->GetDimension() != input_shape_1->dims()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->GetDimension();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const MklShape* input_shape_1) {
  // Delegates to the (MklShape*, TensorShape*) overload above.
  return MklCompareShapes(input_shape_1, input_shape_0);
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->dims() != input_shape_1->dims()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->dims();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = input.dim_size(0);
  int64 H = input.dim_size(1);
  int64 W = input.dim_size(2);
  int64 C = input.dim_size(3);
  int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    // Per-image out-of-place transpose of the (H*W) x C matrix via MKL.
    mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
                  buf_out + n * stride_n, H * W);
  }
}

inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  // Note: dimensions are read from the OUTPUT tensor here (NHWC layout).
  int64 N = (*output)->dim_size(0);
  int64 H = (*output)->dim_size(1);
  int64 W = (*output)->dim_size(2);
  int64 C = (*output)->dim_size(3);
  int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    // Per-image out-of-place transpose of the C x (H*W) matrix via MKL.
    mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
                  buf_out + n * stride_n, C);
  }
}

#endif

// -------------------------------------------------------------------

#ifndef INTEL_MKL_ML_ONLY

/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
template <typename T>
static memory::data_type MklDnnType();

/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
template <>
memory::data_type MklDnnType<float>() {
  return memory::data_type::f32;
}

/// Instantiation for the quantized unsigned 8-bit type.
template <>
memory::data_type MklDnnType<quint8>() {
  return memory::data_type::u8;
}

/// Instantiation for the quantized signed 8-bit type.
template <>
memory::data_type MklDnnType<qint8>() {
  return memory::data_type::s8;
}

/// Instantiation for the quantized signed 32-bit type.
template <>
memory::data_type MklDnnType<qint32>() {
  return memory::data_type::s32;
}

template <>
memory::data_type MklDnnType<bfloat16>() {
  // TODO(nhasabni): Enable MKL-DNN bfloat16 type later.
  // Currently, falling back to f32 to get compilation working.
  return memory::data_type::f32;
}

/// Map TensorFlow's data format into MKL-DNN 3D data format
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnn3DDataFormat(TensorFormat format) {
  if (format == FORMAT_NHWC) return memory::format::ndhwc;
  if (format == FORMAT_NCHW) return memory::format::ncdhw;
  // Any other format is rejected with a fatal check.
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
  return memory::format::format_undef;
}

/// Map TensorFlow's data format into MKL-DNN data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
  if (format == FORMAT_NHWC) return memory::format::nhwc;
  if (format == FORMAT_NCHW) return memory::format::nchw;
  // Any other format is rejected with a fatal check.
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
  return memory::format::format_undef;
}

/// Map MKL-DNN data format to TensorFlow's data format
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format
///          Fails with an error if invalid data format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  if (format == memory::format::nhwc || format == memory::format::ndhwc)
    return FORMAT_NHWC;
  else if (format == memory::format::nchw || format == memory::format::ncdhw)
    return FORMAT_NCHW;
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));

  // Return to prevent compiler warnings, otherwise TF_CHECK_OK will ensure
  // that we don't come here.
  return FORMAT_NHWC;
}

/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively. So it will preserve the order of dimensions. E.g., if
/// input tensor is in NHWC format, then dims will be in NHWC format
/// also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  memory::dims dims(shape.dims());
  for (int d = 0; d < shape.dims(); ++d) {
    dims[d] = shape.dim_size(d);
  }
  return dims;
}

/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// This function is a specific one than above function. It will map input
/// TensorShape into MKL-DNN dims in NCHW format. So it may not preserve the
/// order of dimensions. E.g., if input tensor is in NHWC format, then dims
/// will be in NCHW format, and not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = shape.dim_size(GetTensorDimIndex(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex(format, 'C'));
  int h = shape.dim_size(GetTensorDimIndex(format, 'H'));
  int w = shape.dim_size(GetTensorDimIndex(format, 'W'));

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

/// 3D variant of the function above: maps a TensorShape into MKL-DNN dims in
/// NCDHW order. '0'/'1'/'2' are the spatial dimension selectors used by
/// GetTensorDimIndex<3>.
inline memory::dims TFShapeToMklDnnDimsInNCDHW(const TensorShape& shape,
                                               TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnn3DDataFormat(format),
           memory::format::format_undef);

  int n = shape.dim_size(GetTensorDimIndex<3>(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex<3>(format, 'C'));
  int d = shape.dim_size(GetTensorDimIndex<3>(format, '0'));
  int h = shape.dim_size(GetTensorDimIndex<3>(format, '1'));
  int w = shape.dim_size(GetTensorDimIndex<3>(format, '2'));

  // MKL-DNN requires dimensions in NCDHW format.
  return memory::dims({n, c, d, h, w});
}

/// Overloaded version of function above. Input parameters are
/// self-explanatory.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = in_dims[GetTensorDimIndex(format, 'N')];
  int c = in_dims[GetTensorDimIndex(format, 'C')];
  int h = in_dims[GetTensorDimIndex(format, 'H')];
  int w = in_dims[GetTensorDimIndex(format, 'W')];

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function will simply map input shape in MKL-DNN memory::dims format
/// in Tensorflow's TensorShape object by preserving dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  std::vector<int32> shape(dims.size(), -1);
  for (int d = 0; d < dims.size(); d++) {
    shape[d] = dims[d];
  }

  TensorShape ret;
  CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true);
  return ret;
}

/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
/// dimension with size 1 is outermost dimension; while dimension with size 4 is
/// innermost dimension. So strides for this tensor would be {4 * 3 * 2,
/// 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  CHECK_GT(dims_tf_order.size(), 0);
  memory::dims strides(dims_tf_order.size());
  int last_dim_idx = dims_tf_order.size() - 1;
  // Innermost dimension has stride 1; accumulate products working outwards.
  strides[last_dim_idx] = 1;
  for (int d = last_dim_idx - 1; d >= 0; d--) {
    strides[d] = strides[d + 1] * dims_tf_order[d + 1];
  }
  return strides;
}

inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
  // MKL-DNN only supports zero padding.
  return padding_kind::zero;
}

/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
///         function such as CalculateTFStrides to compute strides
///         for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
///          for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                               const memory::dims& strides,
                                               memory::data_type dtype) {
  CHECK_EQ(dim.size(), strides.size());

  // We have to construct memory descriptor in a C style. This is not at all
  // ideal but MKLDNN does not offer any API to construct descriptor in
  // blocked format except a copy constructor that accepts
  // mkldnn_memory_desc_t.
  mkldnn_memory_desc_t md;
  md.primitive_kind = mkldnn_memory;
  md.ndims = dim.size();
  md.format = mkldnn_blocked;
  md.data_type = memory::convert_to_c(dtype);

  for (size_t i = 0; i < dim.size(); i++) {
    // Block size 1 per dimension, i.e. a plain strided (non-blocked) layout.
    md.layout_desc.blocking.block_dims[i] = 1;
    md.layout_desc.blocking.strides[1][i] = 1;
    md.layout_desc.blocking.strides[0][i] = strides[i];
    md.layout_desc.blocking.padding_dims[i] = dim[i];
    md.layout_desc.blocking.offset_padding_to_data[i] = 0;
    md.dims[i] = dim[i];
  }
  md.layout_desc.blocking.offset_padding = 0;

  return memory::desc(md);
}

template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to);

/*
 * Class to represent all the resources corresponding to a tensor in TensorFlow
 * that are required to execute an operation (such as Convolution).
 */
template <typename T>
class MklDnnData {
 private:
  /// MKL-DNN memory primitive for input user memory
  memory* user_memory_;

  /// MKL-DNN memory primitive in case input or output reorder is needed.
  memory* reorder_memory_;

  /// Operations memory descriptor
  memory::desc* op_md_;
  // Flag to indicate if data is 3D or not.
  bool bIs3D;
  /// Operations temp buffer
  void* allocated_buffer_;
  /// CPU engine on which operation will be executed
  const engine* cpu_engine_;

 public:
  explicit MklDnnData(const engine* e)
      : user_memory_(nullptr),
        reorder_memory_(nullptr),
        op_md_(nullptr),
        allocated_buffer_(nullptr),
        cpu_engine_(e) {}

  ~MklDnnData() {
    if (allocated_buffer_ != nullptr) {
      cpu_allocator()->DeallocateRaw(allocated_buffer_);
    }
    cpu_engine_ = nullptr;  // We don't own this.
    delete (user_memory_);
    delete (reorder_memory_);
    delete (op_md_);
  }

  /// Returns the raw data buffer of the given tensor, cast away from const.
  inline void* GetTensorBuffer(const Tensor* tensor) const {
    CHECK_NOTNULL(tensor);
    return const_cast<void*>(
        static_cast<const void*>(tensor->flat<T>().data()));
  }

  void SetIs3DData(bool bIs3D_) { bIs3D = bIs3D_; }

  bool GetIs3D() { return bIs3D; }

  /// Set user memory primitive using specified dimensions, memory format and
  /// data_buffer. Function automatically uses element data type by using
  /// input type T used for creating call object.
  ///
  /// In a nutshell, function allows user to describe the input tensor to
  /// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and
  /// memory format HWIO, and the buffer that contains actual values is
  /// pointed by data_buffer.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        void* data_buffer = nullptr) {
    auto md = memory::desc(dim, MklDnnType<T>(), fm);
    SetUsrMem(md, data_buffer);
  }

  /// Same as above, but the data buffer is taken from the given tensor.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, fm, GetTensorBuffer(tensor));
  }

  /// Helper function to create memory descriptor in Blocked format
  ///
  /// @input: Tensor dimensions
  /// @input: strides corresponding to dimensions. One can use utility
  ///         function such as CalculateTFStrides to compute strides
  ///         for given dimensions.
  /// @return: memory::desc object corresponding to blocked memory format
  ///          for given dimensions and strides.
  static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
                                                  const memory::dims& strides) {
    return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
  }

  /// A version of SetUsrMem call that allows user to create memory in blocked
  /// format. So in addition to accepting dimensions, it also accepts strides.
  /// This allows user to create memory for tensor in a format that is not
  /// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6
  /// dimensional tensor as a native format.
  /// But by using blocked format, a user
  /// can create memory for 6D tensor.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        void* data_buffer = nullptr) {
    CHECK_EQ(dim.size(), strides.size());
    auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
    SetUsrMem(blocked_md, data_buffer);
  }

  /// Same as above, but the data buffer is taken from the given tensor.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, strides, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts memory
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic that the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
    auto pd = memory::primitive_desc(md, *cpu_engine_);
    SetUsrMem(pd, data_buffer);
  }

  /// A version of SetUsrMem with memory descriptor and tensor
  inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(md, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts primitive
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic that the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        void* data_buffer = nullptr) {
    CHECK_NOTNULL(cpu_engine_);
    // Release any previously-owned user memory before re-creating it.
    if (user_memory_) delete user_memory_;
    // TODO(nhasabni): can we remove dynamic memory allocation?
    if (data_buffer) {
      user_memory_ = new memory(pd, data_buffer);
    } else {
      user_memory_ = new memory(pd);
    }
  }

  /// A version of SetUsrMem with primitive descriptor and tensor
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(pd, GetTensorBuffer(tensor));
  }

  /// Get function for user memory primitive.
inline const memory* GetUsrMem() const { return user_memory_; } /// Get function for primitive descriptor of user memory primitive. inline const memory::primitive_desc GetUsrMemPrimDesc() const { CHECK_NOTNULL(user_memory_); return user_memory_->get_primitive_desc(); } /// Get function for descriptor of user memory. inline memory::desc GetUsrMemDesc() { // This is ugly. Why MKL-DNN does not provide desc() method of const type?? const memory::primitive_desc pd = GetUsrMemPrimDesc(); return const_cast<memory::primitive_desc*>(&pd)->desc(); } /// Get function for data buffer of user memory primitive. inline void* GetUsrMemDataHandle() const { CHECK_NOTNULL(user_memory_); return user_memory_->get_data_handle(); } /// Set function for data buffer of user memory primitive. inline void SetUsrMemDataHandle(void* data_buffer) { CHECK_NOTNULL(user_memory_); CHECK_NOTNULL(data_buffer); user_memory_->set_data_handle(data_buffer); } /// Set function for data buffer of user memory primitive. inline void SetUsrMemDataHandle(const Tensor* tensor) { CHECK_NOTNULL(user_memory_); CHECK_NOTNULL(tensor); user_memory_->set_data_handle(GetTensorBuffer(tensor)); } /// allocate function for data buffer inline void AllocateBuffer(size_t size) { const int64 kMemoryAlginment = 64; // For AVX512 memory alignment. allocated_buffer_ = cpu_allocator()->AllocateRaw(kMemoryAlginment, size); } inline void* GetAllocatedBuffer() { return allocated_buffer_; } /// Get the memory primitive for input and output of an op. If inputs /// to an op require reorders, then this function returns memory primitive /// for reorder. Otherwise, it will return memory primitive for user memory. /// /// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to /// execute Conv2D, we need memory primitive for I and F. Buf if reorder is /// required for I and F (say I_r is reorder primitive for I; F_r is reorder /// primitive for F), then we need I_r and F_r to perform Conv2D. 
  inline const memory& GetOpMem() const {
    // Prefer the reorder output when one was created; else the user memory.
    return reorder_memory_ ? *reorder_memory_ : *user_memory_;
  }

  /// Set memory descriptor of an operation in terms of dimensions and memory
  /// format. E.g., For Conv2D, the dimensions would be same as user dimensions
  /// but memory::format would be mkldnn::any because we want MKL-DNN to choose
  /// best layout/format for given input dimensions.
  inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
  }

  /// Get function for memory descriptor for an operation
  inline const memory::desc& GetOpMemDesc() const { return *op_md_; }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// pointed by op_pd.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
    CHECK_NOTNULL(user_memory_);
    return op_pd != user_memory_->get_primitive_desc();
  }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// based on the provided format.
  ///
  /// @input: target_format - memory format of the given input of an
  ///                         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::format& target_format) const {
    CHECK_NOTNULL(user_memory_);
    return target_format !=
           user_memory_->get_primitive_desc().desc().data.format;
  }

  /// Function to create a reorder from memory pointed by from to memory pointed
  /// by to. Returns created primitive.
  inline primitive CreateReorder(const memory* from, const memory* to) const {
    CHECK_NOTNULL(from);
    CHECK_NOTNULL(to);
    return reorder(*from, *to);
  }

  /// Function to handle input reordering
  ///
  /// Check if we need to reorder this input of an operation.
/// Return true and allocate reorder memory primitive if reorder is needed. /// Otherwise, return false and do not allocate reorder memory primitive. /// /// To check if reorder is needed, this function compares memory primitive /// descriptor of an operation (op_pd) for the given input with the /// user-specified memory primitive descriptor. /// /// @input: op_pd - memory primitive descriptor of the given input of an /// operation /// @input: net - net to which to add reorder primitive in case it is needed. /// @return: true in case reorder of input is needed; false, otherwise. inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, std::vector<primitive>* net) { CHECK_NOTNULL(net); CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO(nhasabni): can we remove dynamic memory allocation? reorder_memory_ = new memory(op_pd); net->push_back(CreateReorder(user_memory_, reorder_memory_)); return true; } return false; } /// TODO: this is a faster path with reorder primitive cache compared with /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove /// slow path in the future inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd) { CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO(nhasabni): can we remove dynamic memory allocation? // primitive reuse don't allow two same reorder prim in // one stream, so submit it immediately reorder_memory_ = new memory(op_pd); std::vector<primitive> net; net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_)); stream(stream::kind::eager).submit(net).wait(); return true; } return false; } /// Overloaded version of above function that accepts memory buffer /// where output of reorder needs to be stored. /// /// @input: op_pd - memory primitive descriptor of the given input of an /// operation /// @reorder_data_handle - memory buffer where output of reorder needs to be /// stored. Primitive does not check if buffer is /// enough size to write. 
/// @input: net - net to which to add reorder primitive in case it is needed. /// @return: true in case reorder of input is needed; false, otherwise. inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, void* reorder_data_handle, std::vector<primitive>* net) { CHECK_NOTNULL(net); CHECK_NOTNULL(reorder_data_handle); CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO(nhasabni): can we remove dynamic memory allocation? reorder_memory_ = new memory(op_pd, reorder_data_handle); net->push_back(CreateReorder(user_memory_, reorder_memory_)); return true; } return false; } /// TODO: this is a faster path with reorder primitive cache compared with /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove /// slow path in the future inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, void* reorder_data_handle) { CHECK_NOTNULL(reorder_data_handle); CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO(nhasabni): can we remove dynamic memory allocation? // primitive reuse don't allow two same reorder prim in // one stream, so submit it immediately std::vector<primitive> net; reorder_memory_ = new memory(op_pd, reorder_data_handle); net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_)); stream(stream::kind::eager).submit(net).wait(); return true; } return false; } /// Another overloaded version of CheckReorderToOpMem that accepts Tensor /// where output of reorder needs to be stored. /// /// @input: op_pd - memory primitive descriptor of the given input of an /// operation /// @reorder_tensor - Tensor whose buffer is to be used to store output of /// reorder. Primitive does not check if buffer is /// enough size to write. /// @input: net - net to which to add reorder primitive in case it is needed. /// @return: true in case reorder of input is needed; false, otherwise. 
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, Tensor* reorder_tensor, std::vector<primitive>* net) { CHECK_NOTNULL(net); CHECK_NOTNULL(reorder_tensor); return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net); } /// TODO: this is a faster path with reorder primitive cache compared with /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove /// slow path in the future inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, Tensor* reorder_tensor) { CHECK_NOTNULL(reorder_tensor); return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor)); } /// Function to handle output reorder /// /// This function performs very similar functionality as input reordering /// function above. The only difference is that this function does not add /// reorder primitive to the net. The reason for this is: the reorder /// primitive for output needs to be added to the list only after operation /// has executed. But we need to prepare a temporary buffer in case output /// reorder is needed. And this temporary buffer will hold the output of /// an operation before it is fed to reorder primitive. /// /// @input memory primitive descriptor for the given output of an operation /// @return: true in case reorder of output is needed; false, otherwise. inline bool PrepareReorderToUserMemIfReq( const memory::primitive_desc& op_pd) { CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO(nhasabni): can we remove dynamic memory allocation? reorder_memory_ = new memory(op_pd); return true; } return false; } /// Function to actually insert reorder primitive in the net /// /// This function completes remaining part of output reordering. It inserts /// a reordering primitive from the temporary buffer that holds the output /// to the user-specified output buffer. 
/// /// @input: net - net to which to add reorder primitive inline void InsertReorderToUserMem(std::vector<primitive>* net) { CHECK_NOTNULL(net); CHECK_NOTNULL(user_memory_); CHECK_NOTNULL(reorder_memory_); net->push_back(CreateReorder(reorder_memory_, user_memory_)); } /// TODO: this is a faster path with reorder primitive cache compared with /// InsertReorderToUserMem(std::vector<primitive>* net), will remove /// slow path in the future inline void InsertReorderToUserMem() { CHECK_NOTNULL(user_memory_); CHECK_NOTNULL(reorder_memory_); // primitive reuse don't allow two same reorder prim in // one stream, so submit it immediately std::vector<primitive> net; net.push_back(FindOrCreateReorder<T>(reorder_memory_, user_memory_)); stream(stream::kind::eager).submit(net).wait(); } }; /// Base class for operations with reuse of primitives /// class MklPrimitive { public: virtual ~MklPrimitive() {} // Dummy data which MKL DNN never operates on unsigned char* DummyData = nullptr; }; const mkldnn::memory::dims NONE_DIMS = {}; // // LRUCache is a class which implements LRU (Least Recently Used) cache. // The implementation is similar to that of // tensorflow/core/platform/cloud/expiring_lru_cache.h // without its thread-safe part because the cache is supposed to be // used as thread local (for instance, MklPrimitive caching). // // The LRU list maintains objects in chronological order based on // creation time, with the least recently accessed object at the // tail of LRU list, while the most recently accessed object // at the head of LRU list. // // This class is used to maintain an upper bound on the total number of // cached items. When the cache reaches its capacity, the LRU item will // be removed and replaced by a new one from SetOp call. 
//
template <typename T>
class LRUCache {
 public:
  explicit LRUCache(size_t capacity) {
    capacity_ = capacity;
    Clear();
  }

  // Look up an entry by key. Returns nullptr on a miss; on a hit, the entry
  // is promoted to the most-recently-used position. The cache retains
  // ownership of the returned object.
  T* GetOp(const string& key) {
    auto it = cache_.find(key);
    if (it == cache_.end()) {
      return nullptr;
    }

    // Move to the front of LRU list as the most recently accessed.
    lru_list_.erase(it->second.lru_iterator);
    lru_list_.push_front(it->first);
    it->second.lru_iterator = lru_list_.begin();
    return it->second.op;
  }

  // Insert an entry; the cache takes ownership of `op` (it is deleted when
  // the entry is evicted or the cache is cleared). When the cache is full,
  // the least recently used entry is evicted first.
  void SetOp(const string& key, T* op) {
    if (lru_list_.size() >= capacity_) {
      Delete();
    }

    // Insert an entry to the front of the LRU list
    lru_list_.push_front(key);
    Entry entry(op, lru_list_.begin());
    cache_.emplace(std::make_pair(key, std::move(entry)));
  }

  // Drop every entry (their destructors delete the cached objects).
  void Clear() {
    if (lru_list_.empty()) return;

    // Clean up the cache
    cache_.clear();
    lru_list_.clear();
  }

 private:
  struct Entry {
    // The entry's value.
    T* op;

    // A list iterator pointing to the entry's position in the LRU list.
    std::list<string>::iterator lru_iterator;

    // Constructor
    Entry(T* op, std::list<string>::iterator it) {
      this->op = op;
      this->lru_iterator = it;
    }

    // Move constructor: steals ownership of `op` from `source` so that only
    // one Entry ever deletes it.
    Entry(Entry&& source) noexcept
        : lru_iterator(std::move(source.lru_iterator)) {
      op = std::move(source.op);
      source.op = std::forward<T*>(nullptr);
    }

    // Destructor: the Entry owns the cached object.
    ~Entry() {
      if (op != nullptr) delete op;
    }
  };

  // Remove the least recently accessed entry from LRU list, which
  // is the tail of lru_list_. Update cache_ correspondingly.
  bool Delete() {
    if (lru_list_.empty()) return false;
    string key = lru_list_.back();
    lru_list_.pop_back();
    cache_.erase(key);
    return true;
  }

  // Cache capacity
  size_t capacity_;

  // The cache, a map from string key to a LRU entry.
  std::unordered_map<string, Entry> cache_;

  // The LRU list of entries.
  // The front of the list contains the key of the most recently accessed
  // entry, while the back of the list is the least recently accessed entry.
  std::list<string> lru_list_;
};

// Base class for type-keyed primitive factories. The underlying LRU cache is
// thread_local, so no locking is needed (each thread has its own cache).
template <typename T>
class MklPrimitiveFactory {
 public:
  MklPrimitiveFactory() {}

  ~MklPrimitiveFactory() {}

  MklPrimitive* GetOp(const string& key) {
    auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache();
    return lru_cache.GetOp(key);
  }

  void SetOp(const string& key, MklPrimitive* op) {
    auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache();
    lru_cache.SetOp(key, op);
  }

  /// Function to decide whether HW has AVX512 or AVX2
  /// For those legacy device(w/o AVX512 and AVX2),
  /// MKL-DNN GEMM will be used.
  static inline bool IsLegacyPlatform() {
    return (!port::TestCPUFeature(port::CPUFeature::AVX512F) &&
            !port::TestCPUFeature(port::CPUFeature::AVX2));
  }

  /// Function to check whether primitive memory optimization is enabled
  /// (controlled by the TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE env var; default on).
  static inline bool IsPrimitiveMemOptEnabled() {
    bool is_primitive_mem_opt_enabled = true;
    TF_CHECK_OK(ReadBoolFromEnvVar("TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE", true,
                                   &is_primitive_mem_opt_enabled));
    return is_primitive_mem_opt_enabled;
  }

 private:
  static inline LRUCache<MklPrimitive>& GetLRUCache() {
    static const int kCapacity = 1024;  // cache capacity
    static thread_local LRUCache<MklPrimitive> lru_cache_(kCapacity);
    return lru_cache_;
  }
};

// utility class for creating keys of MKL primitive pool.
// Builds a cache key by concatenating the raw bytes of each added value,
// separated by a delimiter character.
class FactoryKeyCreator {
 public:
  FactoryKeyCreator() { key_.reserve(kMaxKeyLength); }

  ~FactoryKeyCreator() {}

  void AddAsKey(const string& str) { Append(str); }

  void AddAsKey(const mkldnn::memory::dims& dims) {
    for (unsigned int i = 0; i < dims.size(); i++) {
      AddAsKey<int>(dims[i]);
    }
  }

  // Appends the in-memory byte representation of `data` to the key.
  template <typename T>
  void AddAsKey(const T data) {
    auto buffer = reinterpret_cast<const char*>(&data);
    Append(StringPiece(buffer, sizeof(T)));
  }

  string GetKey() { return key_; }

 private:
  string key_;
  const char delimiter = 'x';
  const int kMaxKeyLength = 256;
  void Append(StringPiece s) {
    key_.append(string(s));
    key_.append(1, delimiter);
  }
};

// Pick the preferred blocked memory format for `channel` channels depending
// on the available CPU vector extensions (AVX512 > AVX2 > plain).
static inline memory::format get_desired_format(int channel,
                                                bool is_2d = true) {
  memory::format fmt_desired = memory::format::any;

  if (port::TestCPUFeature(port::CPUFeature::AVX512F)) {
    fmt_desired = is_2d ? memory::format::nChw16c : memory::format::nCdhw16c;
  } else if (port::TestCPUFeature(port::CPUFeature::AVX2) &&
             (channel % 8) == 0) {
    fmt_desired = is_2d ? memory::format::nChw8c
                        : memory::format::ncdhw;  // no avx2 support for 3d yet.
  } else {
    fmt_desired = is_2d ? memory::format::nchw : memory::format::ncdhw;
  }
  return fmt_desired;
}

// Cacheable wrapper around an mkldnn::reorder primitive. The source and
// destination memories are created with DummyData handles; SetMemory() points
// them at the actual buffers before each execution.
class MklReorderPrimitive : public MklPrimitive {
 public:
  explicit MklReorderPrimitive(const memory* from, const memory* to) {
    Setup(from, to);
  }
  ~MklReorderPrimitive() {}

  std::shared_ptr<primitive> GetPrimitive() { return context_.reorder_prim; }

  void SetMemory(const memory* from, const memory* to) {
    context_.src_mem->set_data_handle(from->get_data_handle());
    context_.dst_mem->set_data_handle(to->get_data_handle());
  }

 private:
  struct ReorderContext {
    std::shared_ptr<mkldnn::memory> src_mem;
    std::shared_ptr<mkldnn::memory> dst_mem;
    std::shared_ptr<primitive> reorder_prim;
    ReorderContext()
        : src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) {}
  } context_;

  engine cpu_engine_ = engine(engine::cpu, 0);

  void Setup(const memory* from, const memory* to) {
    context_.src_mem.reset(new memory(
        {from->get_primitive_desc().desc(), cpu_engine_}, DummyData));
    context_.dst_mem.reset(
        new memory({to->get_primitive_desc().desc(), cpu_engine_}, DummyData));
    context_.reorder_prim = std::make_shared<mkldnn::reorder>(
        reorder(*context_.src_mem, *context_.dst_mem));
  }
};

// Factory that caches MklReorderPrimitive objects keyed on the full layout
// description (format, data type, dims and strides) of both memories.
template <typename T>
class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> {
 public:
  static MklReorderPrimitive* Get(const memory* from, const memory* to) {
    auto reorderPrim = static_cast<MklReorderPrimitive*>(
        MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to));
    if (reorderPrim == nullptr) {
      reorderPrim = new MklReorderPrimitive(from, to);
      MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(from, to,
                                                              reorderPrim);
    }
    // Rebind the cached primitive to the current data buffers.
    reorderPrim->SetMemory(from, to);
    return reorderPrim;
  }

  static MklReorderPrimitiveFactory& GetInstance() {
    static MklReorderPrimitiveFactory instance_;
    return instance_;
  }

 private:
  MklReorderPrimitiveFactory() {}
  ~MklReorderPrimitiveFactory() {}

  // The key includes dims AND strides of both memories so that two layouts
  // which only differ in blocking/padding do not collide.
  static string CreateKey(const memory* from, const memory* to) {
    string prefix = "reorder";
    FactoryKeyCreator key_creator;
    auto const& from_desc = from->get_primitive_desc().desc().data;
    auto const& to_desc = to->get_primitive_desc().desc().data;
    const int KIdxFirstStride = 0;
    memory::dims from_dims(from_desc.dims, &from_desc.dims[from_desc.ndims]);
    memory::dims to_dims(to_desc.dims, &to_desc.dims[to_desc.ndims]);
    memory::dims from_strides(
        from_desc.layout_desc.blocking.strides[KIdxFirstStride],
        &from_desc.layout_desc.blocking
             .strides[KIdxFirstStride][from_desc.ndims]);
    memory::dims to_strides(
        to_desc.layout_desc.blocking.strides[KIdxFirstStride],
        &to_desc.layout_desc.blocking.strides[KIdxFirstStride][to_desc.ndims]);
    key_creator.AddAsKey(prefix);
    key_creator.AddAsKey(static_cast<int>(from_desc.format));
    key_creator.AddAsKey(static_cast<int>(from_desc.data_type));
    key_creator.AddAsKey(from_dims);
    key_creator.AddAsKey(from_strides);
    key_creator.AddAsKey(static_cast<int>(to_desc.format));
    key_creator.AddAsKey(static_cast<int>(to_desc.data_type));
    key_creator.AddAsKey(to_dims);
    key_creator.AddAsKey(to_strides);
    return key_creator.GetKey();
  }

  MklPrimitive* GetReorder(const memory* from, const memory* to) {
    string key = CreateKey(from, to);
    return this->GetOp(key);
  }

  void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
    string key = CreateKey(from, to);
    this->SetOp(key, op);
  }
};

/// Function to find (or create) a reorder from memory pointed by
/// from to memory pointed by to. It will create the primitive or
/// get it from the pool if it is cached.
/// Returns the primitive.
template <typename T> inline primitive FindOrCreateReorder(const memory* from, const memory* to) { CHECK_NOTNULL(from); CHECK_NOTNULL(to); MklReorderPrimitive* reorder_prim = MklReorderPrimitiveFactory<T>::Get(from, to); return *reorder_prim->GetPrimitive(); } // utility function to determine if it is conv 1x1 and stride != 1 // for purpose of temporarily disabling primitive reuse inline bool IsConv1x1StrideNot1(memory::dims filter_dims, memory::dims strides) { if (filter_dims.size() != 4 || strides.size() != 2) return false; return ((filter_dims[2] == 1) && (filter_dims[3] == 1) && ((strides[0] != 1) || (strides[1] != 1))); } #endif // INTEL_MKL_DNN } // namespace tensorflow #endif // INTEL_MKL #endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
spmv_task.c
/*
 * Task-mode spMV solver: the halo communication for the input vector and the
 * spMV on the local matrix part run concurrently in two GHOST tasks; the
 * spMV on the remote part is executed afterwards, once both have finished.
 */
#include "ghost/config.h"
#include "ghost/types.h"
#include "ghost/locality.h"
#include "ghost/densemat.h"
#include "ghost/task.h"
#include "ghost/taskq.h"
#include "ghost/pumap.h"
#include "ghost/machine.h"
#include "ghost/util.h"
#include "ghost/instr.h"
#include "ghost/sparsemat.h"
#include "ghost/spmv_solvers.h"
#include "ghost/math.h"

#ifdef GHOST_HAVE_MPI
#include <mpi.h>
#endif
#include <sys/types.h>
#include <string.h>

#ifdef LIKWID
#include <likwid.h>
#endif

#ifdef GHOST_HAVE_OPENMP
#include <omp.h>
#endif

#ifdef GHOST_HAVE_MPI

/* Arguments for the halo-communication task. */
typedef struct {
    ghost_densemat *rhs;
    ghost_densemat_halo_comm *comm;
    ghost_context *ctx;
} commArgs;

/*
 * Task function: start and finalize the halo communication of the input
 * vector.
 *
 * Returns a heap-allocated ghost_error carrying the status (the task API
 * only transports void pointers), or NULL if even that allocation failed.
 * The caller owns the returned buffer and must free() it.
 */
static void *communicate(void *vargs)
{
    GHOST_FUNC_ENTER(GHOST_FUNCTYPE_COMMUNICATION);
    commArgs *args = (commArgs *)vargs;
    ghost_error *ret = NULL;
    ghost_malloc((void **)&ret,sizeof(ghost_error)); // don't use macro because it would read *ret
    if (!ret) {
        goto err;
    }
    *ret = GHOST_SUCCESS;

    GHOST_CALL_GOTO(ghost_densemat_halocomm_start(args->rhs,args->ctx,args->comm),err,*ret);
    GHOST_CALL_GOTO(ghost_densemat_halocomm_finalize(args->rhs,args->ctx,args->comm),err,*ret);

    goto out;
err:

out:
    GHOST_FUNC_EXIT(GHOST_FUNCTYPE_COMMUNICATION);
    return ret;
}

/* Arguments for the local-computation task. */
typedef struct {
    ghost_sparsemat *mat;
    ghost_densemat *res;
    ghost_densemat *invec;
    ghost_spmv_opts spmvtraits;
} compArgs;

/*
 * Task function: execute the spMV on the local (communication-free) matrix
 * part. Return-value convention is the same as for communicate().
 */
static void *computeLocal(void *vargs)
{
    ghost_error *ret = NULL;
    ghost_malloc((void **)&ret,sizeof(ghost_error)); // don't use macro because it would read *ret
    if (!ret) {
        goto err;
    }
    *ret = GHOST_SUCCESS;
    compArgs *args = (compArgs *)vargs;
    GHOST_CALL_GOTO(ghost_spmv_nocomm(args->res,args->mat,args->invec,args->spmvtraits),err,*ret);

    goto out;
err:

out:
    return ret;
}

#endif

/**
 * @brief spMV with computation/communication overlap via GHOST tasks.
 *
 * @param res    result vector (output)
 * @param mat    sparse matrix, split into local and remote parts
 * @param invec  input vector whose halo elements get communicated
 * @param traits spMV options; LOCAL/REMOTE flags are derived internally
 * @return GHOST_SUCCESS on success, an error code otherwise
 */
ghost_error ghost_spmv_taskmode(ghost_densemat* res, ghost_sparsemat* mat, ghost_densemat* invec, ghost_spmv_opts traits)
{
#ifndef GHOST_HAVE_MPI
    UNUSED(res);
    UNUSED(mat);
    UNUSED(invec);
    UNUSED(traits);
    GHOST_ERROR_LOG("Cannot execute this spMV solver without MPI");
    return GHOST_ERR_UNKNOWN;
#else
    GHOST_FUNC_ENTER(GHOST_FUNCTYPE_MATH);
    GHOST_INSTR_START("prepare");
    ghost_error ret = GHOST_SUCCESS;

    // Separate trait sets so each partial spMV only touches its matrix part.
    ghost_spmv_opts localtraits = traits;
    ghost_spmv_opts remotetraits = traits;

    // Only the local rank's remote part is checked here; no global reduction.
    int remoteExists = mat->remotePart->nEnts > 0;

    if (remoteExists) {
        localtraits.flags |= (ghost_spmv_flags)GHOST_SPMV_LOCAL;
        remotetraits.flags |= (ghost_spmv_flags)GHOST_SPMV_REMOTE;
    }

    ghost_densemat_halo_comm comm = GHOST_DENSEMAT_HALO_COMM_INITIALIZER;
    commArgs cargs;
    compArgs cplargs;
    ghost_task *commTask;
    ghost_task *compTask;

    ghost_task_flags taskflags = GHOST_TASK_DEFAULT;
    ghost_task *parent = NULL;
    GHOST_CALL_RETURN(ghost_task_cur(&parent));
    if (parent) {
        GHOST_DEBUG_LOG(1,"using the parent's cores for the task mode spmv solver");
        // Give the communication task one core (if needed) and the rest to
        // the computation task.
        ghost_task_create(&compTask, parent->nThreads - remoteExists, 0, &computeLocal, &cplargs, taskflags, NULL, 0);
        ghost_task_create(&commTask, remoteExists, 0, &communicate, &cargs, taskflags, NULL, 0);
    } else {
        GHOST_DEBUG_LOG(1,"No parent task in task mode spmv solver");
        int nIdleCores;
        ghost_pumap_nidle(&nIdleCores,GHOST_NUMANODE_ANY);
        ghost_task_create(&compTask, 2/*nIdleCores-remoteExists*/, 0, &computeLocal, &cplargs, taskflags, NULL, 0);
        ghost_task_create(&commTask, 2/*remoteExists*/, 0, &communicate, &cargs, taskflags, NULL, 0);
    }

    cargs.rhs = invec;
    cargs.comm = &comm;
    cargs.ctx = mat->context;
    cplargs.mat = mat->localPart;
    cplargs.invec = invec;
    cplargs.res = res;
    cplargs.spmvtraits = localtraits;
    GHOST_INSTR_STOP("prepare");

    GHOST_INSTR_START("haloassembly");
    GHOST_CALL_GOTO(ghost_densemat_halocomm_init(invec,mat->context,&comm),err,ret);
    GHOST_INSTR_STOP("haloassembly");

    GHOST_INSTR_START("both_tasks");
    if (remoteExists) {
        ghost_task_enqueue(commTask);
    }
    ghost_task_enqueue(compTask);
    ghost_task_wait(compTask);

    // The task functions return NULL if they could not allocate their status
    // buffer; dereferencing task->ret unconditionally would crash then.
    if (compTask->ret == NULL) {
        ret = GHOST_ERR_UNKNOWN;
        goto err;
    }
    if ((ret = *((ghost_error *)(compTask->ret))) != GHOST_SUCCESS) {
        goto err;
    }

    if (remoteExists) {
        ghost_task_wait(commTask);
        if (commTask->ret == NULL) {
            ret = GHOST_ERR_UNKNOWN;
            goto err;
        }
        if ((ret = *((ghost_error *)(commTask->ret))) != GHOST_SUCCESS) {
            goto err;
        }
    }
    GHOST_INSTR_STOP("both_tasks");

    GHOST_INSTR_START("remote");
    if (remoteExists) {
        // NOTE(review): the return value of this remote-part spMV is
        // discarded, as in the original implementation — confirm whether it
        // should be propagated.
        ghost_spmv_nocomm(res,mat->remotePart,invec,remotetraits);
    }
    GHOST_INSTR_STOP("remote");

    GHOST_FUNC_EXIT(GHOST_FUNCTYPE_MATH);

    goto out;
err:

out:
    // free(NULL) is a no-op, so this is safe even if a task never produced
    // a status buffer.
    free(compTask->ret); compTask->ret = NULL;
    free(commTask->ret); commTask->ret = NULL;
    ghost_task_destroy(compTask);
    ghost_task_destroy(commTask);

    return ret;
#endif
}
omp_for_t.c
#include <stdio.h> #include <omp.h> int main(int argc, char** argv){ int partial_Sum, total_Sum; #pragma omp parallel private(partial_Sum) shared(total_Sum) { partial_Sum = 0; total_Sum = 0; printf("Hello from process: %d\n", omp_get_thread_num()); for(int i = 1; i <= 10; i++){ partial_Sum += i; } //Create thread safe region. #pragma omp critical { //add each threads partial sum to the total sum total_Sum += partial_Sum; } } printf("Total Sum: %d\n", total_Sum); return 0; }
PlacementInfo.h
/** * @file PlacementInfo.h * @author Tingyuan LIANG (tliang@connect.ust.hk) * @brief This header file mainly contains the definition of class PlacementInfo, including information related to FPGA * placement (wirelength optimization, cell spreading, legalization, packing) * @version 0.1 * @date 2021-06-03 * * @copyright Copyright (c) 2021 Reconfiguration Computing Systems Lab, The Hong Kong University of Science and * Technology. All rights reserved. * */ #ifndef _PlacementINFO #define _PlacementINFO #include "DesignInfo.h" #include "DeviceInfo.h" #include "PlacementTimingInfo.h" #include "Eigen/Core" #include "Eigen/SparseCore" #include "dumpZip.h" #include <assert.h> #include <fstream> #include <iostream> #include <map> #include <mutex> #include <queue> #include <set> #include <sstream> #include <string> #include <thread> #include <vector> /** * @brief Information related to FPGA placement (wirelength optimization, cell spreading, legalization, packing) * * includes design information, device information * includes numerical information for: * wirelength optimization: Bound2Bound model, HPWL for nets, macros * cell spreading: bin grid, instance density, routing congestion, packability * legalization: mapping design instance to device site at coarse-grained level * packing: mapping fine-grained instances to sites (e.g., CLB site) * */ class PlacementInfo { public: /** * @brief Placement Instance Types * * UnpackedCell: the smallest, indivisible, representable component * Macro: a fixed group of multiple standard cells with constraints of their relative locations */ enum PlacementUnitType { PlacementUnitType_UnpackedCell = 0, PlacementUnitType_Macro }; class PlacementSiteTypeInfo; class PlacementBinInfo; class CompatiblePlacementTable; class PlacementNet; /** * @brief describes the type mapping from design to device, where a cell can be placed (which BEL in which site) * * Since different cell types can be mapped to a group of resource BEL types, we handle the 
mapping in the following
     * way, with an intermediate Shared BEL Type:
     *
     *      cell type A  =>           =>  BEL type 1
     *      cell type B  =>  Shared   =>  BEL type 2
     *      cell type C  =>  BEL      =>  BEL type 3
     *      cell type D  =>  Type     =>  BEL type 4
     *
     */
    class CompatiblePlacementTable
    {
      public:
        /**
         * @brief Construct a new Compatible Placement Table object
         *
         * @param cellType2fixedAmoFileName a file indicating how many resource BEL slots will be cost for a design
         * cell of specific type
         * @param cellType2sharedCellTypeFileName a file indicating the mapping from design cell types to device
         * resource type groups
         * @param sharedCellType2BELtypeFileName a file indicating the mapping from specific resource types to device
         * BEL resource slots
         * @param designInfo design information
         * @param deviceInfo device information
         */
        CompatiblePlacementTable(std::string cellType2fixedAmoFileName, std::string cellType2sharedCellTypeFileName,
                                 std::string sharedCellType2BELtypeFileName, DesignInfo *designInfo,
                                 DeviceInfo *deviceInfo);
        ~CompatiblePlacementTable()
        {
        }

        /**
         * @brief the actual BEL types according to the definition file
         *
         */
        std::vector<std::string> realBELTypes;

        /**
         * @brief each actual BEL type gets an integer ID for checking
         *
         */
        std::map<std::string, int> realBELTypeName2ID;

        /**
         * @brief some BEL types (design cells should be mapped to the BEL slot on FPGA device)
         *
         * e.g., some BEL types are mapped to the same resource slots on FPGA device, such as SLICEL_LUT,
         * SLICEM_LUT,SLICEL_FF,SLICEM_FF,RAMB18E2_L,RAMB18E2_U,SLICEL_MUXF8,SLICEM_MUXF8...
         * We call them SharedBELType
         *
         */
        std::vector<std::string> sharedCellBELTypes;

        /**
         * @brief the resource demand of specific types (cost of how many slots/BELs)
         *
         * e.g., a LUT6 will take 2 SLICEM_LUT slots in a CLB site.
         *
         */
        std::map<DesignInfo::DesignCellType, int> cellType2sharedBELTypeOccupation;

        /**
         * @brief the mapping from design cell type to device resource type
         *
         * the mapping from design cell type to device resource type (we call SharedBELType). Such as LUT1-6 should
         * be mapped to SLICEM_LUT/SLICEL_LUT.
         *
         */
        std::map<DesignInfo::DesignCellType, std::vector<std::string>> cellType2sharedBELTypes;

        /**
         * @brief the mapping from design cell type to device resource type ID
         *
         * string overhead is high. We use integer id to represent the BEL types.
         *
         */
        std::map<DesignInfo::DesignCellType, std::vector<int>> cellType2sharedBELTypeIDs;

        /**
         * @brief The mapping from shared cell types to device site types
         *
         * For example, SLICEM_LUT should be mapped to SLICEM.
         *
         */
        std::map<std::string, std::string> sharedCellType2SiteType;

        /**
         * @brief The mapping from shared cell types to device BEL types
         *
         * For example, SLICEM_LUT should be mapped to
         * A5LUT,A6LUT,B5LUT,B6LUT,C5LUT,C6LUT,D5LUT,D6LUT,E5LUT,E6LUT,F5LUT,F6LUT,G5LUT,G6LUT,H5LUT,H6LUT. On
         * Xilinx Ultrascale.
         *
         */
        std::map<std::string, std::vector<std::string>> sharedCellType2BELNames;

        /**
         * @brief Get the Potential BEL Type IDs for a specific cell object
         *
         * @param cell
         * @return std::vector<int>&
         */
        inline std::vector<int> &getPotentialBELTypeIDs(DesignInfo::DesignCell *cell)
        {
            // be careful, this function will remap the BELType according to the user configuration
            // for example, SLICEM_CARRY8 will be mapped to SLICEL_CARRY8, so they will be treated equally during
            // cell spreading.
            assert(cell);
            assert((unsigned int)cell->getCellId() < cellId2SharedCellBELTypeID.size());
            return cellId2SharedCellBELTypeID[cell->getCellId()];
        }

        /**
         * @brief Get the potential BEL Type IDs for a specific cell type
         *
         * @param cellType
         * @return std::vector<int>&
         */
        inline std::vector<int> &getPotentialBELTypeIDs(DesignInfo::DesignCellType cellType)
        {
            // be careful, this function will not remap the BELType.
            return cellType2sharedBELTypeIDs[cellType];
        }

        /**
         * @brief get the ID of SharedBELType from a string
         *
         * @param tmpStr
         * @return int
         */
        inline int getSharedBELTypeId(std::string tmpStr)
        {
            assert(sharedCellBELTypeName2ID.find(tmpStr) != sharedCellBELTypeName2ID.end());
            return sharedCellBELTypeName2ID[tmpStr];
        }

        /**
         * @brief Get the theoretical occupation of a specific cell type
         *
         * @param cellType
         * @return float
         */
        inline float getOccupation(DesignInfo::DesignCellType cellType)
        {
            return cellType2sharedBELTypeOccupation[cellType];
        }

        /**
         * @brief Get the actual occupation of a specific cell
         *
         * @param cell
         * @return float
         *
         * it will be the multiplication of cellId2Occupation and cellId2InfationRatio.
         * cellId2Occupation might be adjusted by the packing feasibility.
         * cellId2InfationRatio might be adjusted by the routing congestion level.
         */
        inline float getActualOccupation(DesignInfo::DesignCell *cell)
        {
            return cellId2Occupation[cell->getCellId()] * cellId2InfationRatio[cell->getCellId()];
        }

        /**
         * @brief Get the inflate ratio of a cell
         *
         * @param cell
         * @return float
         */
        inline float getInflateRatio(DesignInfo::DesignCell *cell)
        {
            return cellId2InfationRatio[cell->getCellId()];
        }

        /**
         * @brief Get the Actual Occupation By Cell Id
         *
         * @param id
         * @return float
         */
        inline float getActualOccupationByCellId(int id)
        {
            return cellId2Occupation[id] * cellId2InfationRatio[id];
        }

        /**
         * @brief get the reference of cellId2Occupation for convenience
         *
         * @return std::vector<float>&
         */
        inline std::vector<float> &getcellId2Occupation()
        {
            return cellId2Occupation;
        }

        /**
         * @brief get the reference of cellId2InfationRatio for convenience
         *
         * @return std::vector<float>&
         */
        inline std::vector<float> &getcellId2InfationRatio()
        {
            return cellId2InfationRatio;
        }

        /**
         * @brief set the mapping from cells in design netlist to BEL type ID for later processing.
         *
         * set the mapping variable cellId2SharedCellBELTypeID for later processing
         *
         * @param designInfo
         *
         */
        void setBELTypeForCells(DesignInfo *designInfo);

        /**
         * @brief forget the occupation adjustment by packing feasibility and routing congestion
         *
         */
        void resetCellOccupationToDefault();

      private:
        std::map<std::string, int> sharedCellBELTypeName2ID;
        std::vector<std::vector<int>> cellId2SharedCellBELTypeID;
        std::vector<float> cellId2Occupation;
        std::vector<float> cellId2InfationRatio;
        std::vector<float> defaultCellId2Occupation;
        DesignInfo *designInfo;
        DeviceInfo *deviceInfo;
    };

    /**
     * @brief information for a site, e.g. what BEL in site and where are these kind of sites
     *
     * This class is temporarily not used.
     *
     */
    class PlacementSiteTypeInfo
    {
      public:
        typedef struct location
        {
            float locX;
            float locY;
        } location;

        PlacementSiteTypeInfo(std::string siteType, std::vector<DeviceInfo::DeviceSite *> &correspondingSites)
            : siteType(siteType), correspondingSites(correspondingSites)
        {
            assert(correspondingSites.size() > 0);
            BELNames.clear();
            // All sites of one type share the same child BELs, so only the
            // first site needs to be inspected.
            for (DeviceInfo::DeviceBEL *curBEL : correspondingSites[0]->getChildrenBELs())
            {
                BELNames.insert(curBEL->getBELType());
            }
            potentialLocations.clear();
            for (DeviceInfo::DeviceSite *curSite : correspondingSites)
            {
                location tmploc;
                tmploc.locX = curSite->X();
                tmploc.locY = curSite->Y();
                potentialLocations.push_back(tmploc);
            }
        }

        ~PlacementSiteTypeInfo()
        {
            potentialLocations.clear();
            BELNames.clear();
        }

      private:
        std::string siteType;
        std::set<std::string> BELNames;
        std::vector<location> potentialLocations;
        const std::vector<DeviceInfo::DeviceSite *> &correspondingSites;
    };

    /**
     * @brief BEL bin for global placement for a specific shared BEL type
     *
     * To make it easier to find the neighbors of a design instance, we divide the design instances in the placement
     * into a grid of bins. A placement bin will record the instances and resource sites inside it.
* */ class PlacementBinInfo { public: /** * @brief Construct a new Placement Bin Info object * * To construct a placement bin, we need to indicate its boundaries and target cell type for later information * processing. * * @param sharedCellType * @param leftX bin left boundary * @param rightX bin right boundary * @param bottomY bin bottom boundary * @param topY bin top boundary * @param row which row in the grid * @param column which column in the grid * @param compatiblePlacementTable * */ PlacementBinInfo(std::string sharedCellType, float leftX, float rightX, float bottomY, float topY, int row, int column, CompatiblePlacementTable *compatiblePlacementTable) : sharedCellType(sharedCellType), compatiblePlacementTable(compatiblePlacementTable), leftX(leftX), rightX(rightX), topY(topY), bottomY(bottomY), row(row), column(column) { correspondingSites.clear(); cells.clear(); } ~PlacementBinInfo() { correspondingSites.clear(); cells.clear(); } /** * @brief Get the shortest Manhattan distance from the bin to a specific location * * @param inX * @param inY * @return float */ inline float getManhattanDistanceTo(float inX, float inY) { if (leftX < inX && inX <= rightX && bottomY < inY && inY <= topY) { return 0; } if (leftX < inX && inX <= rightX) { return std::min(std::fabs(inY - topY), std::fabs(inY - bottomY)); } if (bottomY < inY && inY <= topY) { return std::min(std::fabs(inX - leftX), std::fabs(inX - rightX)); } return std::min(std::fabs(inX - leftX), std::fabs(inX - rightX)) + std::min(std::fabs(inY - topY), std::fabs(inY - bottomY)); } /** * @brief add a resource site into the bin * * check the resources in the site and increase the resource capacity of the bin * * @param curSite * */ void addSiteIntoBin(DeviceInfo::DeviceSite *curSite); /** * @brief check if the bin covers a given location on the device * * @param x * @param y * @return true if the bin covers the given location on the device * @return false if the bin does not cover the given location on the device 
*/
    // true if (x, y) lies inside this bin's half-open rectangle [leftX, rightX) x [bottomY, topY)
    inline bool inRange(float x, float y)
    {
        return (x < rightX && x >= leftX && y < topY && y >= bottomY);
    }

    // check only the Y coordinate against the bin's vertical span [bottomY, topY)
    inline bool inRangeY(float y)
    {
        return (y < topY && y >= bottomY);
    }

    /**
     * @brief add a design cell into the bin
     *
     * we have to set the mutex locked during the process since we enable multi-threading
     * in the placer.
     *
     * @param cell the cell to record (must not already be in this bin; asserted)
     * @param occupationAdded how many slots will the cell occupy (must be non-negative; asserted)
     *
     */
    inline void addCell(DesignInfo::DesignCell *cell, int occupationAdded)
    {
        mtx.lock();
        assert(cells.find(cell) == cells.end());
        cells.insert(cell);
        assert(occupationAdded >= 0);
        utilization += occupationAdded;
        mtx.unlock();
    }

    /**
     * @brief remove a design cell from the bin
     *
     * @param cell the cell to remove (must be present in this bin; asserted)
     * @param occupationAdded how many slots were occupied by the cell
     *
     * we have to set the mutex locked during the process since we enable multi-threading
     * in the placer.
     */
    inline void removeCell(DesignInfo::DesignCell *cell, int occupationAdded)
    {
        // if (cell)
        mtx.lock();
        assert(cells.find(cell) != cells.end());
        cells.erase(cell);
        utilization -= occupationAdded;
        assert(utilization >= 0); // utilization must never go negative after removal
        mtx.unlock();
    }

    // check whether the given cell is currently recorded in this bin
    // NOTE(review): unlike addCell/removeCell this read is not mutex-guarded — presumably
    // callers only query outside the multi-threaded phase; confirm against call sites
    inline bool contains(DesignInfo::DesignCell *cell)
    {
        // if (cell)
        return cells.find(cell) != cells.end();
    }

    // clear all recorded cells and restore utilization, shrink ratio, overflow counters
    // and switch-demand/supply bookkeeping to their initial state
    inline void reset()
    {
        cells.clear();
        utilization = 0;
        binShrinkRatio = requiredBinShrinkRatio;
        overflowCnt = 0;
        noOverflowCnt = 0;
        switchDemandForNets = 0;
        switchSupplyForNets = 0;
    }

    /**
     * @brief reduce the resource capacity by a given ratio
     *
     * @param r fraction to shrink by; effective capacity scale becomes binShrinkRatio * (1 - r)
     */
    inline void shrinkBinBy(float r)
    {
        binShrinkRatio *= (1 - r);
    }

    /**
     * @brief increase the resource capacity by a given ratio
     *
     * @param r fraction to inflate by; effective capacity scale becomes binShrinkRatio * (1 + r)
     */
    inline void inflateBinBy(float r)
    {
        binShrinkRatio *= (1 + r);
    }

    // restore the shrink ratio to the externally required baseline
    inline void resetBinShrinkRatio()
    {
        binShrinkRatio = requiredBinShrinkRatio;
    }

    inline float getRequiredBinShrinkRatio()
    {
        return requiredBinShrinkRatio;
    }

    /**
     * @brief Set the Required Bin Shrink Ratio for a bin
     *
     * For example, to resolve routing congestion, we will modify the default resource capacity of a bin
     *
     * @param r the new baseline shrink ratio (does NOT immediately change binShrinkRatio;
     *          takes effect on the next reset()/resetBinShrinkRatio())
     */
    inline void setRequiredBinShrinkRatio(float r)
    {
        requiredBinShrinkRatio = r;
    }

    /**
     * @brief Get the reference of the set of cells in the bin
     *
     * @return std::set<DesignInfo::DesignCell *>&
     */
    inline std::set<DesignInfo::DesignCell *> &getCells()
    {
        return cells;
    }

    inline float getBinShrinkRatio()
    {
        return binShrinkRatio;
    }

    /**
     * @brief Get the Utilization Rate: utilization / (capacity * binShrinkRatio)
     *
     * @return float
     */
    inline float getUtilizationRate()
    {
        if (capacity == 0)
            return utilization / 0.01; // zero-capacity bin: report a huge rate (100x utilization) instead of dividing by 0
        assert(capacity != 0);
        return (float)utilization / (capacity * binShrinkRatio);
    }

    /**
     * @brief Get the theoretical utilization rate (use LUT theoretical resource utilization without any adjustment
     * to dump bin information)
     *
     * For LUT bins, a LUT6 counts as 2 slots and any other LUT as 1; for non-LUT bins this
     * falls back to getUtilizationRate().
     *
     * @return float
     */
    inline float getRealUtilizationRate()
    {
        if (cells.size())
        {
            std::vector<DesignInfo::DesignCell *> cellVec;
            cellVec.clear();
            for (auto cell : cells)
            {
                cellVec.push_back(cell);
            }
            // bins are type-homogeneous enough that checking the first cell decides the branch
            if (cellVec[0]->isLUT())
            {
                float tmputilization = 0;
                for (auto cell : cells)
                {
                    if (cell->isLUT6())
                        tmputilization += 2.0;
                    else
                        tmputilization += 1;
                }
                return (float)tmputilization / capacity;
            }
            else
            {
                return getUtilizationRate();
            }
        }
        return 0;
    }

    inline float getUtilization()
    {
        return (float)utilization;
    }

    // effective capacity after applying the shrink ratio
    inline float getCapacity()
    {
        return (float)capacity * binShrinkRatio;
    }

    /**
     * @brief check whether the resource demand in the bin is higher than the supply.
     *
     * @param overflowThreshold utilization ratio above which the bin counts as overflowed
     * @return true if demand exceeds supply (the bin is overflowed).
     * @return false if the resource supply is enough.
     */
    inline bool isOverflow(float overflowThreshold)
    {
        if (capacity == 0)
        {
            // a bin with no capacity overflows as soon as it holds anything
            if (utilization == 0)
                return false;
            else
                return true;
        }
        assert(utilization >= 0);
        assert(capacity != 0);
        // intentionally compares against the raw capacity (shrink ratio ignored here)
        return ((float)utilization / (capacity)) > overflowThreshold + eps;
        // return ((float)utilization / (binShrinkRatio * capacity)) > overflowThreshold + eps;
    }

    /**
     * @brief check whether we can add some BEL demand to the bin
     *
     * @param BELAmo amount of BEL demand to test
     * @return true when there are available resources meeting the BEL demand.
     * @return false when there is no available resource meeting the BEL demand.
     */
    inline bool canAddMore(int BELAmo)
    {
        if (capacity == 0)
            return false;
        return ((float)(utilization + BELAmo) / (binShrinkRatio * capacity)) <= 1.00 + eps;
    }

    /**
     * @brief return the row of the bin in the grid
     *
     * @return int
     */
    inline int Y()
    {
        return row;
    }

    /**
     * @brief return the column of the bin in the grid
     *
     * @return int
     */
    inline int X()
    {
        return column;
    }

    /**
     * @brief return the left boundary of the bin
     *
     * @return float
     */
    inline float left()
    {
        return leftX;
    }

    /**
     * @brief return the right boundary of the bin
     *
     * @return float
     */
    inline float right()
    {
        return rightX;
    }

    /**
     * @brief return the top boundary of the bin
     *
     * @return float
     */
    inline float top()
    {
        return topY;
    }

    /**
     * @brief return the bottom boundary of the bin
     *
     * @return float
     */
    inline float bottom()
    {
        return bottomY;
    }

    inline std::string getType()
    {
        return sharedCellType;
    }

    /**
     * @brief increase one time of overflow situation
     *
     */
    inline void countOverflow()
    {
        overflowCnt++;
    }

    /**
     * @brief increase one time of non-overflow situation
     *
     * capped at 100 so long stable stretches do not saturate the counter further
     */
    inline void countNoOverflow()
    {
        if (noOverflowCnt < 100)
        {
            noOverflowCnt++;
        }
    }

    inline void resetNoOverflowCounter()
    {
        noOverflowCnt = 0;
    }

    inline void resetOverflowCounter()
    {
        overflowCnt = 0;
    }

    inline int getNoOverflowCounter()
    {
        return noOverflowCnt;
    }

    inline int getOverflowCounter()
    {
        return overflowCnt;
    }

    /**
     * @brief Get the reference of the set of sites in the bin
     *
     * @return std::vector<DeviceInfo::DeviceSite *>&
     */
    inline std::vector<DeviceInfo::DeviceSite *> &getCorrespondingSites()
    {
        return correspondingSites;
    }

    inline std::string &getSharedCellType()
    {
        return sharedCellType;
    }

    /**
     * @brief increase the net routing demand of the bin
     *
     * @param additionalDemand
     */
    inline void increaseSWDemandBy(float additionalDemand)
    {
        switchDemandForNets += additionalDemand;
    }

    /**
     * @brief get the net routing demand of the bin
     *
     */
    inline float getSwitchDemandForNets() const
    {
        return switchDemandForNets;
    }

    /**
     * @brief Set the clock region X for this bin
     *
     * @param _x
     */
    inline void setClockRegionX(int _x)
    {
        clockRegionX = _x;
    }

    /**
     * @brief Get the clock region X for this bin
     *
     */
    inline int getClockRegionX()
    {
        return clockRegionX;
    }

  private:
    std::string sharedCellType;                             // the shared BEL type this bin accounts for
    std::vector<DeviceInfo::DeviceSite *> correspondingSites;
    CompatiblePlacementTable *compatiblePlacementTable;
    std::set<DesignInfo::DesignCell *> cells;               // cells currently mapped into this bin
    int capacity = 0;                                       // raw slot capacity before shrink-ratio scaling
    int utilization = 0;                                    // occupied slots
    float binShrinkRatio = 1.0;                             // current capacity scale factor
    float requiredBinShrinkRatio = 1.0;                     // baseline restored on reset()
    const float leftX;
    const float rightX;
    const float topY;
    const float bottomY;
    float eps = 1e-5;                                       // tolerance for ratio comparisons
    const int row;
    const int column;
    int overflowCnt = 0;
    int noOverflowCnt = 0;
    float switchDemandForNets = 0.0;
    float switchSupplyForNets = 0.0; // only consider the general sites (DSP/BRAM/SLICE)
    std::mutex mtx;                  // guards cells/utilization during multi-threaded add/remove
    int clockRegionX = -1;
};

/**
 * @brief BEL bin for global placement for multiple specific shared BEL types
 *
 * This bin class is not for a specific cell type.
 * It is not used in the current implementation.
*/
class PlacementHybridBinInfo
{
  public:
    // seed the hybrid bin with the geometry/grid position of an existing bin
    PlacementHybridBinInfo(PlacementBinInfo *curBin)
        : leftX(curBin->left()), rightX(curBin->right()), topY(curBin->top()), bottomY(curBin->bottom()),
          row(curBin->Y()), column(curBin->X())
    {
        mergedBins.clear();
        correspondingSites.clear();
        cells.clear();
        mergedBins.push_back(curBin);
    }

    ~PlacementHybridBinInfo()
    {
        correspondingSites.clear();
        cells.clear();
    }

    // NOTE: boundary convention differs from PlacementBinInfo::inRange — here the range is
    // (leftX, rightX] x (bottomY, topY] (exclusive lower bound, inclusive upper bound)
    inline bool inRange(float x, float y)
    {
        return (x <= rightX && x > leftX && y <= topY && y > bottomY);
    }

    inline bool inRangeY(float y)
    {
        return (y <= topY && y > bottomY);
    }

    // record a cell and its slot demand (no mutex here, unlike PlacementBinInfo::addCell)
    inline void addCell(DesignInfo::DesignCell *cell, int occupationAdded)
    {
        // if (cell)
        cells.insert(cell);
        utilization += occupationAdded;
    }

    inline void removeCell(DesignInfo::DesignCell *cell, int occupationAdded)
    {
        // if (cell)
        assert(cells.find(cell) != cells.end());
        cells.erase(cell);
        utilization -= occupationAdded;
    }

    inline bool contains(DesignInfo::DesignCell *cell)
    {
        // if (cell)
        return cells.find(cell) != cells.end();
    }

    inline void reset()
    {
        cells.clear();
        utilization = 0;
    }

    inline std::set<DesignInfo::DesignCell *> &getCells()
    {
        return cells;
    }

    inline float getUtilizationRate()
    {
        if (capacity == 0)
            return utilization / 0.01; // zero-capacity bin: report a huge rate instead of dividing by 0
        assert(capacity != 0);
        return (float)binShrinkRatio * utilization / capacity;
    }

    inline float getUtilization()
    {
        return (float)utilization;
    }

    inline float getCapacity()
    {
        return (float)capacity / binShrinkRatio;
    }

    // true when the shrink-ratio-scaled utilization exceeds capacity (within eps tolerance)
    inline bool isOverflow()
    {
        if (capacity == 0)
        {
            if (utilization == 0)
                return false;
            else
                return true;
        }
        assert(capacity != 0);
        return ((float)binShrinkRatio * utilization / capacity) > 1.00 + eps;
    }

    inline bool canAddMore(int BELAmo)
    {
        if (capacity == 0)
            return false;
        return ((float)binShrinkRatio * (utilization + BELAmo) / capacity) <= 1.00 + eps;
    }

    // set the grid position (row i, column j)
    inline void setYX(int i, int j)
    {
        row = i;
        column = j;
    }

    inline int Y()
    {
        return row;
    }

    inline int X()
    {
        return column;
    }

    inline float left()
    {
        return leftX;
    }

    inline float right()
    {
        return rightX;
    }

    inline float top()
    {
        return topY;
    }

    inline float bottom()
    {
        return bottomY;
    }

  private:
    std::vector<DeviceInfo::DeviceSite *> correspondingSites;
    std::vector<PlacementBinInfo *> mergedBins; // the concrete bins merged into this hybrid bin
    std::set<DesignInfo::DesignCell *> cells;
    int capacity = 0;
    int utilization = 0;
    float binShrinkRatio = 1.0;
    float leftX;
    float rightX;
    float topY;
    float bottomY;
    float eps = 1e-5;
    int row;
    int column;
};

/**
 * @brief a movement unit in placement with information of location and resource demand
 *
 */
class PlacementUnit
{
  public:
    PlacementUnit(std::string name, int id, PlacementUnitType unitType) : name(name), id(id), unitType(unitType)
    {
    }
    virtual ~PlacementUnit()
    {
    }

    // write the current anchor coordinates into the output parameters
    void getAnchorLocation(float &x, float &y)
    {
        x = anchorX;
        y = anchorY;
    }

    inline float X()
    {
        return anchorX;
    }
    inline float Y()
    {
        return anchorY;
    }
    inline float lastX()
    {
        return lastAnchorX;
    }
    inline float lastY()
    {
        return lastAnchorY;
    }

    /**
     * @brief Set the Anchor Location for the PlacementUnit
     *
     * meanwhile, if it is not the first iteration in placement, record last anchor location
     *
     * @param x
     * @param y
     */
    inline void setAnchorLocation(float x, float y)
    {
        assert(!locked);
        // anchors are initialized to -2000; "< -100" detects the not-yet-placed state
        if (anchorX < -100 && anchorY < -100)
        {
            lastAnchorX = x;
            lastAnchorY = y;
        }
        else
        {
            lastAnchorX = anchorX;
            lastAnchorY = anchorY;
        }
        anchorX = x;
        anchorY = y;
    }

    /**
     * @brief Set the Spread Location based on forgetting ratio
     *
     * @param x
     * @param y
     * @param forgetRatio how much should the PlacementUnit forget the original location from later iteration
     *                    (1 = fully adopt the new location, 0 = keep the last spread location)
     */
    inline void setSpreadLocation(float x, float y, float forgetRatio)
    {
        assert(!locked);
        // blend with the previously recorded spread location, if one exists
        if (lastSpreadX > -100 && lastSpreadY > -100)
        {
            assert(forgetRatio <= 1);
            x = x * forgetRatio + lastSpreadX * (1 - forgetRatio);
            y = y * forgetRatio + lastSpreadY * (1 - forgetRatio);
            // lastSpreadX = x;
            // lastSpreadY = y;
        }
        lastAnchorX = x;
        lastAnchorY = y;
        anchorX = x;
        anchorY = y;
    }

    // overwrite both the current AND last anchor, discarding movement history
    inline void setAnchorLocationAndForgetTheOriginalOne(float x, float y)
    {
        assert(!locked);
        lastAnchorX = x;
        lastAnchorY = y;
        anchorX = x;
        anchorY = y;
}

    // snapshot the current anchor as the "last spread" location
    // NOTE: method name has a typo ("Locatin") — kept as-is for API compatibility
    inline void recordSpreadLocatin()
    {
        lastSpreadX = anchorX;
        lastSpreadY = anchorY;
    }

    inline void setFixed()
    {
        assert(!locked);
        fixed = true;
    }

    inline void setUnfixed()
    {
        assert(!locked);
        fixed = false;
    }

    inline void setLocked()
    {
        locked = true;
    }

    inline void setUnlocked()
    {
        locked = false;
    }

    inline bool isLocked()
    {
        return locked;
    }

    inline void setPlaced()
    {
        assert(!locked);
        placed = true;
    }

    inline bool isFixed()
    {
        return fixed;
    }

    inline bool isPlaced()
    {
        return placed;
    }

    inline std::string &getName()
    {
        return name;
    }

    inline PlacementUnitType getType()
    {
        return unitType;
    }

    // weight = number of cells represented by this unit (used as mass in placement)
    inline void setWeight(int numCell)
    {
        weight = numCell;
    }

    inline int getWeight()
    {
        return weight;
    }

    inline unsigned int getId()
    {
        return id;
    }

    inline void renewId(int newId)
    {
        id = newId;
    }

    /**
     * @brief Set the Nets Set Ptr object which records the nets connecting to the PlacementUnit
     *
     * @param _nets
     */
    inline void setNetsSetPtr(std::vector<PlacementNet *> *_nets)
    {
        nets = _nets;
    }

    /**
     * @brief Get the Nets Set Ptr object which records the nets connecting to the PlacementUnit
     *
     * @return std::vector<PlacementNet *>*
     */
    inline std::vector<PlacementNet *> *getNetsSetPtr()
    {
        return nets;
    }

    // total fanout over nets for which this unit is the single driver
    // (requires setNetsSetPtr to have been called first)
    inline int getUnitsBeDrivenByThisPU()
    {
        int res = 0;
        for (auto tmpNet : *nets)
        {
            if (tmpNet->getDriverUnits().size() == 1)
            {
                if (tmpNet->getDriverUnits()[0] == this)
                {
                    res += tmpNet->getUnitsBeDriven().size();
                }
            }
        }
        return res;
    }

    // per-resource counters: add* bumps the count, checkHas* tests non-zero, get*Num reads it
    inline void addDSP()
    {
        DSPcnt++;
    }
    inline void addBRAM()
    {
        BRAMcnt++;
    }
    inline void addLUTRAM()
    {
        LUTRAMcnt++;
    }
    inline void addLUT()
    {
        LUTcnt++;
    }
    inline void addFF()
    {
        FFcnt++;
    }
    inline void addCARRY()
    {
        CARRYcnt++;
    }
    inline void addMUX()
    {
        MUXcnt++;
    }
    inline bool checkHasDSP()
    {
        return DSPcnt;
    }
    inline bool checkHasBRAM()
    {
        return BRAMcnt;
    }
    inline bool checkHasLUTRAM()
    {
        return LUTRAMcnt;
    }
    inline bool checkHasLUT()
    {
        return LUTcnt;
    }
    inline bool checkHasFF()
    {
        return FFcnt;
    }
    inline bool checkHasCARRY()
    {
        return CARRYcnt;
    }
    inline bool checkHasMUX()
    {
        return MUXcnt;
    }
    // true if the unit contains any state-holding resource (FF/DSP/BRAM/LUTRAM)
    inline bool hasRegister()
    {
        return (checkHasFF() || checkHasDSP() || checkHasBRAM() || checkHasLUTRAM());
    }
    inline bool hasLogic()
    {
        return (checkHasFF() || checkHasDSP() || checkHasBRAM() || checkHasLUTRAM() || checkHasCARRY() ||
                checkHasLUT());
    }
    inline bool isMCLB()
    {
        return LUTRAMcnt;
    }
    inline bool isLCLB()
    {
        return LUTcnt > 0 && CARRYcnt == 0; // LogicCLB with CARRY will be handled by macroLegalizer
    }
    inline int getDSPNum()
    {
        return DSPcnt;
    }
    inline int getBRAMNum()
    {
        return BRAMcnt;
    }
    inline int getLUTRAMNum()
    {
        return LUTRAMcnt;
    }
    inline int getLUTNum()
    {
        return LUTcnt;
    }
    inline int getCARRYNum()
    {
        return CARRYcnt;
    }
    inline int getMUXNum()
    {
        return MUXcnt;
    }

    inline void setPacked()
    {
        packed = true;
    }
    inline void resetPacked()
    {
        packed = false;
    }
    inline bool isPacked()
    {
        return packed;
    }

    inline std::set<DesignInfo::DesignNet *> &getClockNets()
    {
        return clockNets;
    }

  protected:
    /**
     * @brief record the clock nets connected to this PlacementUnit
     *
     */
    std::set<DesignInfo::DesignNet *> clockNets;

  private:
    std::string name;
    int id;
    // -2000 marks "not yet placed" (see setAnchorLocation's < -100 check)
    float anchorX = -2000, anchorY = -2000;
    float lastAnchorX = -2000, lastAnchorY = -2000;
    float lastSpreadX = -2000, lastSpreadY = -2000;
    PlacementUnitType unitType;

    /**
     * @brief record the nets connected to this PlacementUnit
     *
     */
    std::vector<PlacementNet *> *nets;

    /**
     * @brief fixed simply means the PlacementUnit cannot be moved.
     *
     */
    bool fixed = false;

    /**
     * @brief the PlacementUnit is placed to BEL slot.
     *
     * Currently, this attribute is not used.
     *
     */
    bool placed = false;

    /**
     * @brief if locked, the attributes of the PlacementUnit cannot be changed (more than fixed.)
     *
     */
    bool locked = false;

    int weight = 1;
    int DSPcnt = 0;
    int BRAMcnt = 0;
    int LUTRAMcnt = 0;
    int LUTcnt = 0;
    int FFcnt = 0;
    int CARRYcnt = 0;
    int MUXcnt = 0;
    bool packed = false;
};

/**
 * @brief the smallest, indivisible, representable component.
It will include only one standard cell
 */
class PlacementUnpackedCell : public PlacementUnit
{
  public:
    /**
     * @brief Construct a new Placement Unpacked Cell object
     *
     * @param name
     * @param id a unique ID for this placement unit
     * @param cell
     */
    PlacementUnpackedCell(std::string name, int id, DesignInfo::DesignCell *cell)
        : PlacementUnit(name, id, PlacementUnitType_UnpackedCell), cell(cell)
    {
        // mirror the single cell's resource type into the PlacementUnit counters
        if (cell->isBRAM())
            addBRAM();
        if (cell->isDSP())
            addDSP();
        if (cell->isFF())
            addFF();
        if (cell->isLUTRAM() || cell->originallyIsLUTRAM())
            addLUTRAM();
        if (cell->isLUT())
            addLUT();
        if (cell->isCarry())
            addCARRY();
        if (cell->isMux())
            addMUX();
        clockNets = cell->getClockNets();
    }

    ~PlacementUnpackedCell()
    {
    }

    // pin this cell to a specific site/BEL: fix+place it, anchor it at the site's
    // coordinates, mark the site occupied, and (optionally) lock it against further changes
    void setLockedAt(std::string _siteName, std::string _BELName, DeviceInfo *deviceInfo, bool lock = true)
    {
        setFixed();
        setPlaced();
        siteName = _siteName;
        BELName = _BELName;
        setAnchorLocation(deviceInfo->getSite(siteName)->X(), deviceInfo->getSite(siteName)->Y());
        deviceInfo->getSite(siteName)->setOccupied();
        if (lock)
            setLocked();
    }

    inline DesignInfo::DesignCell *getCell()
    {
        return cell;
    }

    inline std::string getFixedBELName()
    {
        return BELName;
    }

    inline std::string getFixedSiteName()
    {
        return siteName;
    }

  private:
    DesignInfo::DesignCell *cell;
    // DesignInfo::DesignCellType virtualCellType;
    std::string siteName;
    std::string BELName;
};

/**
 * @brief a fixed group of multiple standard cells with constraints of their relative locations
 *
 */
class PlacementMacro : public PlacementUnit
{
  public:
    enum PlacementMacroType
    {
        PlacementMacroType_LUTFFPair = 0, // LUT-FF pair during incremental packing
        PlacementMacroType_FFFFPair,      // FF-FF pair during incremental packing
        PlacementMacroType_HALFCLB,
        PlacementMacroType_LCLB,  // vendor defined primitives, like FFs for system reset, have to be placed in
                                  // SLICEL
        PlacementMacroType_MCLB,  // LUTRAM cells sharing address/data bits or vendor defined primitives have to be
                                  // placed in SLICEM
        PlacementMacroType_CARRY, // chained CARRYs, its directly connected LUTs/FFs and routing BELs.
        PlacementMacroType_DSP,   // Cascaded DSPs
        PlacementMacroType_BRAM,  // Cascaded BRAMs
        PlacementMacroType_MUX7,  // MUXF7, its directly connected LUTs/FFs and routing BELs.
        PlacementMacroType_MUX8,  // MUXF7, its directly connected MUXF7/LUTs/FFs and routing BELs.
        PlacementMacroType_MUX9   // MUXF9, its directly connected MUXF7/MUXF8/LUTs/FFs and routing BELs.
    };

    PlacementMacro(std::string name, int id, PlacementMacroType macroType)
        : PlacementUnit(name, id, PlacementUnitType_Macro), macroType(macroType)
    {
        fixedCells.clear();
        cell2IdInMacro.clear();
        cellsInMacro.clear();
        cellSet.clear();
        // siteOccupations.clear();
        // sentinel extremes so the first addCell() establishes the real bounding box
        top = -10000;
        bottom = 10000;
        left = 10000;
        right = -10000;
    };

    ~PlacementMacro()
    {
    }

    inline bool hasCell(DesignInfo::DesignCell *curCell)
    {
        return cellSet.find(curCell) != cellSet.end();
    }

    /**
     * @brief add a real cell into the macro with its offsets in the macro
     *
     * @param curCell (must be non-null; asserted)
     * @param cellType update the cell's "virtual" cell type so it can occupy specific resource (e.g., make an LUT1
     * an LUT6)
     * @param x offset of the cell inside the macro (X)
     * @param y offset of the cell inside the macro (Y)
     */
    inline void addCell(DesignInfo::DesignCell *curCell, DesignInfo::DesignCellType cellType, float x, float y)
    {
        if (curCell)
        {
            if (curCell->isBRAM())
                addBRAM();
            if (curCell->isDSP())
                addDSP();
            // FFs are only counted for real (non-virtual) cells
            if (!curCell->isVirtualCell())
            {
                if (curCell->isFF())
                    addFF();
            }
            if (curCell->isLUTRAM() || curCell->originallyIsLUTRAM())
                addLUTRAM();
            if (curCell->isLUT())
                addLUT();
            if (curCell->isCarry())
                addCARRY();
            if (curCell->isMux())
                addMUX();
            curCell->setVirtualType(cellType);
            cell2IdInMacro[curCell] = offsetX.size();
            cellSet.insert(curCell);
            for (auto tmpNet : curCell->getClockNets())
            {
                clockNets.insert(tmpNet);
            }
        }
        else
        {
            assert(false && "unexpected");
        }
        cellsInMacro.push_back(curCell);
        cells_Type.push_back(cellType);
        offsetX.push_back(x);
        offsetY.push_back(y);

        // grow the macro's offset bounding box
        if (x < left)
            left = x;
        if (x > right)
            right = x;
        if (y < bottom)
            bottom = y;
        if (y > top)
            top = y;
    }

    /**
     * @brief add a virtual cell with a given name into the macro with its offsets in the macro. Usually it is to
     * occupy routing resource in a site.
     *
     * @param virtualCellName the name of the virtual cell to be added into the design
     * @param designInfo since we are creating virtual cell, it should be added into design information
     * @param cellType the type of the virtual cell type
     * @param x offset of the virtual cell inside the macro (X)
     * @param y offset of the virtual cell inside the macro (Y)
     * @return DesignInfo::DesignCell* the newly created virtual cell (owned by designInfo)
     */
    inline DesignInfo::DesignCell *addVirtualCell(std::string virtualCellName, DesignInfo *designInfo,
                                                  DesignInfo::DesignCellType cellType, float x, float y)
    {
        DesignInfo::DesignCell *vCell =
            new DesignInfo::DesignCell(true, virtualCellName, cellType, designInfo->getNumCells());
        designInfo->addCell(vCell); // add the virtual cell to design info for later processing
        cells_Type.push_back(cellType);
        cellsInMacro.push_back(vCell);
        cell2IdInMacro[vCell] = offsetX.size();
        cellSet.insert(vCell);
        offsetX.push_back(x);
        offsetY.push_back(y);
        // grow the macro's offset bounding box
        if (x < left)
            left = x;
        if (x > right)
            right = x;
        if (y < bottom)
            bottom = y;
        if (y > top)
            top = y;

        if (vCell->isBRAM())
            addBRAM();
        if (vCell->isDSP())
            addDSP();
        // if (vCell->isFF())
        //     addFF();
        if (vCell->isLUTRAM() || vCell->originallyIsLUTRAM())
            addLUTRAM();
        if (vCell->isLUT())
            addLUT();
        if (vCell->isCarry())
            addCARRY();
        if (vCell->isMux())
            addMUX();
        for (auto tmpNet : vCell->getClockNets())
        {
            clockNets.insert(tmpNet);
        }
        return vCell;
    }

    /**
     * @brief add a virtual cell without given name into the macro with its offsets in the macro. Usually it is to
     * occupy routing resource in a site.
     *
     * @param designInfo since we are creating virtual cell, it should be added into design information
     * @param cellType the type of the virtual cell type
     * @param x offset of the virtual cell inside the macro (X)
     * @param y offset of the virtual cell inside the macro (Y)
     */
    inline void addVirtualCell(DesignInfo *designInfo, DesignInfo::DesignCellType cellType, float x, float y)
    {
        DesignInfo::DesignCell *vCell = new DesignInfo::DesignCell(true, cellType, designInfo->getNumCells());
        designInfo->addCell(vCell); // add the virtual cell to design info for later processing
        cells_Type.push_back(cellType);
        cellsInMacro.push_back(vCell);
        cell2IdInMacro[vCell] = offsetX.size();
        cellSet.insert(vCell);
        offsetX.push_back(x);
        offsetY.push_back(y);
        // grow the macro's offset bounding box
        if (x < left)
            left = x;
        if (x > right)
            right = x;
        if (y < bottom)
            bottom = y;
        if (y > top)
            top = y;

        if (vCell->isBRAM())
            addBRAM();
        if (vCell->isDSP())
            addDSP();
        // if (vCell->isFF())
        //     addFF();
        if (vCell->isLUTRAM() || vCell->originallyIsLUTRAM())
            addLUTRAM();
        if (vCell->isLUT())
            addLUT();
        if (vCell->isCarry())
            addCARRY();
        if (vCell->isMux())
            addMUX();
        for (auto tmpNet : vCell->getClockNets())
        {
            clockNets.insert(tmpNet);
        }
    }

    inline std::vector<DesignInfo::DesignCell *> &getCells()
    {
        return cellsInMacro;
    }

    /**
     * @brief some constraints of elements' relative locations are defined by the design. We need to record this.
     *
     */
    typedef struct _fixedPlacementInfo_inMacro
    {
        _fixedPlacementInfo_inMacro(DesignInfo::DesignCell *cell, std::string siteName, std::string BELName)
            : cell(cell), siteName(siteName), BELName(BELName)
        {
        }
        DesignInfo::DesignCell *cell;
        std::string siteName;
        std::string BELName;
    } fixedPlacementInfo_inMacro;

    /**
     * @brief add information of a fixed cell
     *
     * @param cell
     * @param siteName
     * @param BELName
     */
    inline void addFixedCellInfo(DesignInfo::DesignCell *cell, std::string siteName, std::string BELName)
    {
        fixedCells.emplace_back(cell, siteName, BELName);
    }

    // X offset of the given cell inside this macro (cell must be in the macro; asserted)
    inline float getCellOffsetXInMacro(DesignInfo::DesignCell *cell)
    {
        assert(cell2IdInMacro.find(cell) != cell2IdInMacro.end());
        return offsetX[cell2IdInMacro[cell]];
    }

    // Y offset of the given cell inside this macro (cell must be in the macro; asserted)
    inline float getCellOffsetYInMacro(DesignInfo::DesignCell *cell)
    {
        assert(cell2IdInMacro.find(cell) != cell2IdInMacro.end());
        return offsetY[cell2IdInMacro[cell]];
    }

    // bool setFixedCombination(std::vector<DesignInfo::DesignCell *> _cells, std::vector<std::string> _siteNames,
    //                          std::vector<std::string> _BELNames, DeviceInfo *designInfo);

    /**
     * @brief Get the virtual cell information, including offsets and cell type
     *
     * @param vId index of the cell inside the macro
     * @param x output: X offset
     * @param y output: Y offset
     * @param cellType output: recorded cell type
     */
    inline void getVirtualCellInfo(int vId, float &x, float &y, DesignInfo::DesignCellType &cellType)
    {
        x = offsetX[vId];
        y = offsetY[vId];
        cellType = cells_Type[vId];
    }

    inline DesignInfo::DesignCellType getVirtualCellType(int vId)
    {
        return cells_Type[vId];
    }

    inline int getNumOfCells()
    {
        return cells_Type.size();
    }

    DesignInfo::DesignCell *getCell(unsigned int id)
    {
        assert(id < cellsInMacro.size());
        return cellsInMacro[id];
    }

    inline float getTopOffset()
    {
        return top;
    }

    inline float getBottomOffset()
    {
        return bottom;
    }

    inline float getLeftOffset()
    {
        return left;
    }

    inline float getRightOffset()
    {
        return right;
    }

    /**
     * @brief for site-level cell spreading
     *
     * not used in current implementation (body is commented out).
     *
     * @param siteOffset
     * @param occ
     */
    inline void addOccupiedSite(float siteOffset, float occ)
    {
        // siteOccupations.push_back(std::pair<float, float>(siteOffset, occ));
    }

    // inline std::vector<std::pair<float, float>> &getOccupiedSiteInfo()
    // {
    //     return siteOccupations;
    // }

    inline bool isCellInMacro(DesignInfo::DesignCell *curCell)
    {
        return cellSet.find(curCell) != cellSet.end();
    }

    inline PlacementMacroType getMacroType()
    {
        return macroType;
    }

    inline std::vector<fixedPlacementInfo_inMacro> &getFixedCellInfoVec()
    {
        return fixedCells;
    }

  private:
    // std::vector<std::string> siteNames;
    // std::vector<std::string> BELNames;
    std::set<DesignInfo::DesignCell *> cellSet;               // for O(log n) membership checks
    std::map<DesignInfo::DesignCell *, int> cell2IdInMacro;   // cell -> index into offsetX/offsetY
    std::vector<DesignInfo::DesignCell *> cellsInMacro;       // cells in insertion order
    std::vector<DesignInfo::DesignCellType> cells_Type;
    // BELTypeOccupation.size()>=realCells.size() because sometimes a cell or a connection net
    // can occupy multiple BEL
    std::vector<float> offsetX, offsetY; // offsetX.size() == BELTypeOccupation.size()
    std::vector<fixedPlacementInfo_inMacro> fixedCells;
    // std::vector<std::pair<float, float>> siteOccupations;
    float left, right, top, bottom; // bounding box of the cell offsets in this macro
    PlacementMacroType macroType;
};

/**
 * @brief Placement net, compared to design net, includes information related to placement.
 *
 * Placement net, compared to design net, includes information related to placement: HPWL bounding box,
 * interconnection between placement units (unpacked/macro), APIs to check wirelength.
 *
 * Please note that PlacementNet is HyperEdge, connecting to multiple pins.
 *
 */
class PlacementNet
{
  public:
    /**
     * @brief Construct a new Placement Net object
     *
     * @param designNet each PlacementNet is bound to a design net.
* @param id
     * @param cellId2PlacementUnitVec maps a cell's element id (per type) to its owning PlacementUnit
     * @param placementInfo
     */
    PlacementNet(DesignInfo::DesignNet *designNet, int id, std::vector<PlacementUnit *> &cellId2PlacementUnitVec,
                 PlacementInfo *placementInfo)
        : designNet(designNet), id(id), placementInfo(placementInfo)
    {
        unitsOfNetPins.clear();
        unitsOfDriverPins.clear();
        unitsOfPinsBeDriven.clear();
        PUSet.clear();
        // collect, pin by pin, the connected units and the pin offsets relative to each unit's anchor
        for (DesignInfo::DesignPin *curPin : designNet->getPins())
        {
            PlacementUnit *tmpPU = cellId2PlacementUnitVec[curPin->getCell()->getElementIdInType()];
            unitsOfNetPins.push_back(tmpPU);
            PUSet.insert(tmpPU);
            if (curPin->isOutputPort())
            {
                unitsOfDriverPins.push_back(tmpPU);
            }
            else
            {
                unitsOfPinsBeDriven.push_back(tmpPU);
            }
            if (tmpPU->getType() == PlacementUnitType_UnpackedCell)
            {
                pinOffset tmpPinOffset = pinOffset(curPin->getOffsetXInCell(), curPin->getOffsetYInCell());
                pinOffsetsInUnit.push_back(tmpPinOffset);
            }
            else if (tmpPU->getType() == PlacementUnitType_Macro)
            {
                // for macros the pin offset = in-cell offset + the cell's offset within the macro
                PlacementMacro *tmpM = dynamic_cast<PlacementMacro *>(tmpPU);
                assert(tmpM);
                pinOffset tmpPinOffset =
                    pinOffset(curPin->getOffsetXInCell() + tmpM->getCellOffsetXInMacro(curPin->getCell()),
                              curPin->getOffsetYInCell() + tmpM->getCellOffsetYInMacro(curPin->getCell()));
                pinOffsetsInUnit.push_back(tmpPinOffset);
            }
        }
        leftPuId = rightPuId = topPuId = bottomPuId = -1; // bounds not computed yet
    }

    ~PlacementNet()
    {
    }

    // (x, y) offset of a pin relative to its PlacementUnit's anchor
    typedef struct _pinOffset
    {
        _pinOffset(float x, float y) : x(x), y(y)
        {
        }
        float x = 0.0, y = 0.0;
    } pinOffset;

    /**
     * @brief Get the reference of the vector of PlacementUnits connected to the net
     * The placement units in the vector might be duplicated because a net might connect to multiple pins of a unit
     * @return std::vector<PlacementUnit *>&
     */
    inline std::vector<PlacementUnit *> &getUnits()
    {
        return unitsOfNetPins;
    }

    /**
     * @brief Get the reference of the vector of the driver units that drive the net
     *
     * @return std::vector<PlacementUnit *>&
     */
    inline std::vector<PlacementUnit *> &getDriverUnits()
    {
        return unitsOfDriverPins;
    }

    /**
     * @brief Get the reference of the vector of the PlacementUnits driven by the net
     *
     * @return std::vector<PlacementUnit *>&
     */
    inline std::vector<PlacementUnit *> &getUnitsBeDriven()
    {
        return unitsOfPinsBeDriven;
    }

    /**
     * @brief Get the Design Net object
     *
     * @return DesignInfo::DesignNet*
     */
    inline DesignInfo::DesignNet *getDesignNet()
    {
        return designNet;
    }

    /**
     * @brief Get the Id of the net in current placement procedure
     *
     * @return int
     */
    inline int getId()
    {
        return id;
    }

    /**
     * @brief Get the Pin Offsets (x,y) of the Units object
     *
     * @return std::vector<pinOffset>&
     */
    inline std::vector<pinOffset> &getPinOffsetsInUnit()
    {
        return pinOffsetsInUnit;
    }

    /**
     * @brief update the bounding box of the net
     *
     * Records, per axis, the extreme pin positions plus the owning unit's coordinate, unit id
     * and pin index (used later by the Bound2Bound net model).
     *
     * @param updateX if true, update the bounding box of the net in X coordinate
     * @param updateY if true, update the bounding box of the net in Y coordinate
     * @return true if the pins of the net is not at the same location
     * @return false if all pins of the net is at the same location
     */
    inline bool updateNetBounds(bool updateX, bool updateY)
    {
        if (updateX)
        {
            leftPUX = 1e5;
            rightPUX = -1e5;
            leftPinX = 1e5;
            rightPinX = -1e5;
            for (unsigned int pinId_net = 0; pinId_net < unitsOfNetPins.size(); pinId_net++)
            {
                auto tmpPU = unitsOfNetPins[pinId_net];
                auto tmpPUId = tmpPU->getId();
                auto tmpPinOffset = pinOffsetsInUnit[pinId_net];
                float cellX = tmpPU->X();
                float pinX = tmpPU->X() + tmpPinOffset.x;
                if (pinX < leftPinX)
                {
                    leftPinX = pinX;
                    leftPUX = cellX;
                    leftPuId = tmpPUId;
                    leftPinId_net = pinId_net;
                }
                if (pinX > rightPinX)
                {
                    rightPinX = pinX;
                    rightPUX = cellX;
                    rightPuId = tmpPUId;
                    rightPinId_net = pinId_net;
                }
            }
        }
        if (updateY)
        {
            topPUY = -1e5;
            bottomPUY = 1e5;
            topPinY = -1e5;
            bottomPinY = 1e5;
            for (unsigned int pinId_net = 0; pinId_net < unitsOfNetPins.size(); pinId_net++)
            {
                auto tmpPU = unitsOfNetPins[pinId_net];
                auto tmpPUId = tmpPU->getId();
                auto tmpPinOffset = pinOffsetsInUnit[pinId_net];
                float cellY = tmpPU->Y();
                float pinY = tmpPU->Y() + tmpPinOffset.y;
                if (pinY < bottomPinY)
                {
                    bottomPinY = pinY;
                    bottomPUY = cellY;
                    bottomPuId = tmpPUId;
                    bottomPinId_net = pinId_net;
                }
                if (pinY > topPinY)
                {
                    topPinY = pinY;
                    topPUY = cellY;
                    topPuId = tmpPUId;
                    topPinId_net = pinId_net;
                }
            }
        }
        return (updateX && (leftPuId != rightPuId)) || (updateY && (topPuId != bottomPuId));
    }

    /**
     * @brief get current HPWL of the net
     *
     * Relies on the bounds cached by the most recent updateNetBounds() call.
     *
     * @param y2xRatio a factor to tune the weights of the net spanning in Y-coordinate relative to the net spanning
     * in X-coordinate
     * @return float
     */
    inline float getHPWL(float y2xRatio)
    {
        return std::fabs(rightPinX - leftPinX) + y2xRatio * std::fabs(topPinY - bottomPinY);
    }

    /**
     * @brief Get the New HPWL By Trying to move a PlacementUnit object
     *
     * For some procedures like legalization and packing, evaluate the wirelength change if the location of a
     * PlacementUnit is changed.
     *
     * @param curPU the PlacementUnit to be moved
     * @param targetPUX hypothetical X for curPU
     * @param targetPUY hypothetical Y for curPU
     * @param y2xRatio
     * @return float
     */
    inline float getNewHPWLByTrying(PlacementUnit *curPU, double targetPUX, double targetPUY, float y2xRatio) const
    {
        // recompute the bounding box from scratch, substituting the trial location for curPU
        float tmp_leftX = 1e5;
        float tmp_rightX = -1e5;
        float tmp_topY = -1e5;
        float tmp_bottomY = 1e5;
        for (unsigned int pinId_net = 0; pinId_net < unitsOfNetPins.size(); pinId_net++)
        {
            auto tmpPU = unitsOfNetPins[pinId_net];
            auto tmpPinOffset = pinOffsetsInUnit[pinId_net];
            float pinX;
            if (tmpPU == curPU)
            {
                pinX = targetPUX + tmpPinOffset.x;
            }
            else
            {
                pinX = tmpPU->X() + tmpPinOffset.x;
            }
            if (pinX < tmp_leftX)
            {
                tmp_leftX = pinX;
            }
            if (pinX > tmp_rightX)
            {
                tmp_rightX = pinX;
            }
        }
        for (unsigned int pinId_net = 0; pinId_net < unitsOfNetPins.size(); pinId_net++)
        {
            auto tmpPU = unitsOfNetPins[pinId_net];
            auto tmpPinOffset = pinOffsetsInUnit[pinId_net];
            float pinY;
            if (tmpPU == curPU)
            {
                pinY = targetPUY + tmpPinOffset.y;
            }
            else
            {
                pinY = tmpPU->Y() + tmpPinOffset.y;
            }
            if (pinY < tmp_bottomY)
            {
                tmp_bottomY = pinY;
            }
            if (pinY > tmp_topY)
            {
                tmp_topY = pinY;
            }
        }

        int A_ClockRegionY, A_ClockRegionX;
        placementInfo->getDeviceInfo()->getClockRegionByLocation(tmp_rightX, tmp_bottomY, A_ClockRegionX,
A_ClockRegionY); std::pair<int, int> A_ClockLocYX(A_ClockRegionY, A_ClockRegionX); int B_cellClockRegionY, B_cellClockRegionX; placementInfo->getDeviceInfo()->getClockRegionByLocation(tmp_leftX, tmp_topY, B_cellClockRegionX, B_cellClockRegionY); std::pair<int, int> B_ClockLocYX(B_cellClockRegionY, B_cellClockRegionX); float clockRegionOverhead = 0; // if (B_ClockLocYX != A_ClockLocYX) // { // clockRegionOverhead = std::abs(B_cellClockRegionX - A_ClockRegionX) * 15; // } return std::fabs(tmp_rightX - tmp_leftX) + y2xRatio * std::fabs(tmp_topY - tmp_bottomY) + clockRegionOverhead; } /** * @brief update the weights of 2-pin nets between PlacementUnits in this hyperedge(PlacementNet) according to * Bound2Bound net model * * In the quadratic placement, the wirelength(HPWL) can be modeled into a quadratic equation based on * Bound2Bound net model. The equation can be represented by matrix operation (XQX^T+PX) * * @param objectiveMatrixTripletList The non-Diag elements in matrix Q, stored in the vector of Eigen Triplet * (i,j,val) * @param objectiveMatrixDiag The Diag elements in matrix Q, stored in a 1-D vector * @param objectiveVector The elements in the vector P * @param generalWeight a weight given from external setting * @param pseudoWeight pseudo net weight to constrain the movement of PlacementUnits from their locations in * last optimization iteration * @param y2xRatio a factor to tune the weights of the net spanning in Y-coordinate relative to the net spanning * in X-coordinate * @param updateX update the X-coordinate term in the quadratic problem * @param updateY update the X-coordinate term in the quadratic problem */ inline void updateBound2BoundNetWeight(std::vector<Eigen::Triplet<float>> &objectiveMatrixTripletList, std::vector<float> &objectiveMatrixDiag, Eigen::VectorXd &objectiveVector, float generalWeight, float y2xRatio, bool updateX, bool updateY, bool checkClockRegion = false) { assert(updateX ^ updateY); if (pinOffsetsInUnit.size() <= 1) return; 
float w = 2.0 * generalWeight / (float)(pinOffsetsInUnit.size() - 1); int nPins = pinOffsetsInUnit.size(); // adopt netDegree Weight from RippeFPGA if (nPins < 10) w *= 1.00; else if (nPins < 20) w *= 1.2; else if (nPins < 50) w *= 1.6; else if (nPins < 100) w *= 1.8; else if (nPins < 200) w *= 2.1; else w *= 2.5; float tmp_rightX = getRightPinX(), tmp_bottomY = getBottomPinY(), tmp_leftX = getLeftPinX(), tmp_topY = getTopPinY(); int A_ClockRegionY, A_ClockRegionX; placementInfo->getDeviceInfo()->getClockRegionByLocation(tmp_rightX, tmp_bottomY, A_ClockRegionX, A_ClockRegionY); std::pair<int, int> A_ClockLocYX(A_ClockRegionY, A_ClockRegionX); int B_cellClockRegionY, B_cellClockRegionX; placementInfo->getDeviceInfo()->getClockRegionByLocation(tmp_leftX, tmp_topY, B_cellClockRegionX, B_cellClockRegionY); std::pair<int, int> B_ClockLocYX(B_cellClockRegionY, B_cellClockRegionX); float clockRegionW = 0; w *= designNet->getOverallEnhanceRatio(); if (updateX) { if (checkClockRegion && B_cellClockRegionX != A_ClockRegionX) { clockRegionW = 1 + std::abs(B_cellClockRegionX - A_ClockRegionX) * 32 / (float)(pinOffsetsInUnit.size() - 1) * 0.01; if (clockRegionW > 3) clockRegionW = 3; // w *= clockRegionW; } // add net between left node and right node addB2BNet(objectiveMatrixTripletList, objectiveMatrixDiag, objectiveVector, leftPuId, rightPuId, leftPUX, rightPUX, pinOffsetsInUnit[leftPinId_net].x, pinOffsetsInUnit[rightPinId_net].x, !unitsOfNetPins[leftPinId_net]->isFixed(), !unitsOfNetPins[rightPinId_net]->isFixed(), designNet->getPinPairEnhanceRatio(leftPinId_net, rightPinId_net) * w / std::max(minDist, rightPinX - leftPinX)); // add net between internal node and left/right node for (unsigned int pinId_net = 0; pinId_net < unitsOfNetPins.size(); pinId_net++) { auto tmpPU = unitsOfNetPins[pinId_net]; auto tmpPUId = tmpPU->getId(); auto tmpPinOffset = pinOffsetsInUnit[pinId_net]; float curX = tmpPU->X(); bool movable = !tmpPU->isFixed(); if (pinId_net != leftPinId_net && 
pinId_net != rightPinId_net) { addB2BNet(objectiveMatrixTripletList, objectiveMatrixDiag, objectiveVector, tmpPUId, leftPuId, curX, leftPUX, tmpPinOffset.x, pinOffsetsInUnit[leftPinId_net].x, movable, !unitsOfNetPins[leftPinId_net]->isFixed(), designNet->getPinPairEnhanceRatio(pinId_net, leftPinId_net) * w / std::max(minDist, (curX + tmpPinOffset.x) - leftPinX)); addB2BNet(objectiveMatrixTripletList, objectiveMatrixDiag, objectiveVector, tmpPUId, rightPuId, curX, rightPUX, tmpPinOffset.x, pinOffsetsInUnit[rightPinId_net].x, movable, !unitsOfNetPins[rightPinId_net]->isFixed(), designNet->getPinPairEnhanceRatio(pinId_net, rightPinId_net) * w / std::max(minDist, rightPinX - (curX + tmpPinOffset.x))); } } } if (updateY) { w *= y2xRatio; // add net between top node and bottom node addB2BNet(objectiveMatrixTripletList, objectiveMatrixDiag, objectiveVector, bottomPuId, topPuId, bottomPUY, topPUY, pinOffsetsInUnit[bottomPinId_net].y, pinOffsetsInUnit[topPinId_net].y, !unitsOfNetPins[bottomPinId_net]->isFixed(), !unitsOfNetPins[topPinId_net]->isFixed(), designNet->getPinPairEnhanceRatio(topPinId_net, bottomPinId_net) * w / std::max(minDist, topPinY - bottomPinY)); // add net between internal node and top/bottom node for (unsigned int pinId_net = 0; pinId_net < unitsOfNetPins.size(); pinId_net++) { auto tmpPU = unitsOfNetPins[pinId_net]; auto tmpPUId = tmpPU->getId(); auto tmpPinOffset = pinOffsetsInUnit[pinId_net]; float curY = tmpPU->Y(); bool movable = !tmpPU->isFixed(); if (pinId_net != topPinId_net && pinId_net != bottomPinId_net) { addB2BNet(objectiveMatrixTripletList, objectiveMatrixDiag, objectiveVector, tmpPUId, bottomPuId, curY, bottomPUY, tmpPinOffset.y, pinOffsetsInUnit[bottomPinId_net].y, movable, !unitsOfNetPins[bottomPinId_net]->isFixed(), designNet->getPinPairEnhanceRatio(pinId_net, bottomPinId_net) * w / std::max(minDist, (curY + tmpPinOffset.y) - bottomPinY)); addB2BNet(objectiveMatrixTripletList, objectiveMatrixDiag, objectiveVector, tmpPUId, topPuId, 
curY, topPUY, tmpPinOffset.y, pinOffsetsInUnit[topPinId_net].y, movable, !unitsOfNetPins[topPinId_net]->isFixed(), designNet->getPinPairEnhanceRatio(pinId_net, topPinId_net) * w / std::max(minDist, topPinY - (curY + tmpPinOffset.y))); } } } } inline void addPseudoNet_enhancePin2Pin(std::vector<Eigen::Triplet<float>> &objectiveMatrixTripletList, std::vector<float> &objectiveMatrixDiag, Eigen::VectorXd &objectiveVector, float generalWeight, float y2xRatio, bool updateX, bool updateY, int PUIdA, int PUIdB, int pinIdA_net, int pinIdB_net) { float w = generalWeight; if (updateX) { // add net between left node and right node addB2BNet(objectiveMatrixTripletList, objectiveMatrixDiag, objectiveVector, PUIdA, PUIdB, unitsOfNetPins[pinIdA_net]->X(), unitsOfNetPins[pinIdB_net]->X(), pinOffsetsInUnit[pinIdA_net].x, pinOffsetsInUnit[pinIdB_net].x, !unitsOfNetPins[pinIdA_net]->isFixed(), !unitsOfNetPins[pinIdB_net]->isFixed(), w); } if (updateY) { w *= y2xRatio; // add net between top node and bottom node addB2BNet(objectiveMatrixTripletList, objectiveMatrixDiag, objectiveVector, PUIdA, PUIdB, unitsOfNetPins[pinIdA_net]->Y(), unitsOfNetPins[pinIdB_net]->Y(), pinOffsetsInUnit[pinIdA_net].y, pinOffsetsInUnit[pinIdB_net].y, !unitsOfNetPins[pinIdA_net]->isFixed(), !unitsOfNetPins[pinIdB_net]->isFixed(), w); } } void drawNet(float generalWeight = 1.0); /** * @brief set weights of the terms in the quadratic problem * * min_x 0.5 * x'Px + q'x * s.t. 
     *        l <= Ax <= u
     *
     * @param objectiveMatrixTripletList The non-Diag elements in matrix Q, stored in the vector of Eigen Triplet
     * (i,j,val)
     * @param objectiveMatrixDiag The Diag elements in matrix Q, stored in a 1-D vector
     * @param objectiveVector The elements in the vector P
     * @param puId0 PlacementUnit 0's Id (might be invalid -1)
     * @param puId1 PlacementUnit 1's Id (might be invalid -1)
     * @param pos0 PlacementUnit 0's position on one of the dimensions
     * @param pos1 PlacementUnit 1's position on one of the dimensions
     * @param pinOffset0 The pin's offset in PlacementUnit 0
     * @param pinOffset1 The pin's offset in PlacementUnit 1
     * @param movable0 whether the object 0 is movable
     * @param movable1 whether the object 1 is movable
     * @param w the weight of the net
     */
    inline void addB2BNet(std::vector<Eigen::Triplet<float>> &objectiveMatrixTripletList,
                          std::vector<float> &objectiveMatrixDiag, Eigen::VectorXd &objectiveVector, int puId0,
                          int puId1, float pos0, float pos1, float pinOffset0, float pinOffset1, bool movable0,
                          bool movable1, float w)
    {
        // min_x 0.5 * x'Px + q'x
        // s.t.   l <= Ax <= u
        // A 2-pin connection within the same PlacementUnit contributes nothing to the objective.
        if (puId0 == puId1)
            return;
        if (movable0 && movable1)
        {
            // both endpoints are variables:
            // x0^2 + x1^2 - 2x0x1 + 2(x0c-x1c)x0 + 2(x1c-x0c)x1
            // objectiveMatrixTripletList.push_back(Eigen::Triplet<float>(puId0, puId0, w));
            // objectiveMatrixTripletList.push_back(Eigen::Triplet<float>(puId1, puId1, w));
            // diagonal terms are accumulated in a plain vector instead of triplets (cheaper to build)
            objectiveMatrixDiag[puId0] += w;
            objectiveMatrixDiag[puId1] += w;
            objectiveMatrixTripletList.push_back(Eigen::Triplet<float>(puId0, puId1, -w));
            objectiveMatrixTripletList.push_back(Eigen::Triplet<float>(puId1, puId0, -w));
            if (fabs(pinOffset0) > eps || fabs(pinOffset1) > eps)
            {
                // linear terms contributed by the pin offsets: 2(x0c-x1c)x0 + 2(x1c-x0c)x1
                objectiveVector[puId0] += w * (pinOffset0 - pinOffset1);
                objectiveVector[puId1] += w * (pinOffset1 - pinOffset0);
            }
        }
        else if (movable0)
        {
            // only endpoint 0 is a variable; endpoint 1 acts as a fixed anchor at pos1:
            // x0^2 - 2x0x1 + 2(x0c-x1c)x0
            // objectiveMatrixTripletList.push_back(Eigen::Triplet<float>(puId0, puId0, w));
            objectiveMatrixDiag[puId0] += w;
            objectiveVector[puId0] += -w * pos1;
            if (fabs(pinOffset0) > eps || fabs(pinOffset1) > eps)
            {
                // 2(x0c-x1c)x0
                objectiveVector[puId0] += w * (pinOffset0 - pinOffset1);
            }
        }
        else if (movable1)
        {
            // only endpoint 1 is a variable; endpoint 0 acts as a fixed anchor at pos0:
            // x1^2 - 2x0x1 + 2(x1c-x0c)x1
            // objectiveMatrixTripletList.push_back(Eigen::Triplet<float>(puId1, puId1, w));
            objectiveMatrixDiag[puId1] += w;
            objectiveVector[puId1] += -w * pos0;
            if (fabs(pinOffset0) > eps || fabs(pinOffset1) > eps)
            {
                // 2(x1c-x0c)x1
                objectiveVector[puId1] += w * (pinOffset1 - pinOffset0);
            }
        }
    }

    inline std::set<PlacementUnit *> &getPUSet() { return PUSet; }

    // Accessors for the bounding pins / bounding PlacementUnits of this net.
    // NOTE(review): leftPuId/rightPuId/topPuId/bottomPuId are declared `unsigned int` below, so these
    // ">= 0" assertions are tautologically true and do NOT actually detect an unset bound — confirm intent.
    inline float getLeftPinX()
    {
        assert(leftPuId >= 0);
        return leftPinX;
    }
    inline float getRightPinX()
    {
        assert(rightPuId >= 0);
        return rightPinX;
    }
    inline float getTopPinY()
    {
        assert(topPuId >= 0);
        return topPinY;
    }
    inline float getBottomPinY()
    {
        assert(bottomPuId >= 0);
        return bottomPinY;
    }
    inline int getLeftPUId()
    {
        assert(leftPuId >= 0);
        return leftPuId;
    }
    inline int getRightPUId()
    {
        assert(rightPuId >= 0);
        return rightPuId;
    }
    inline int getTopPUId()
    {
        assert(topPuId >= 0);
        return topPuId;
    }
    inline int getBottomPUId()
    {
        assert(bottomPuId >= 0);
        return bottomPuId;
    }

    // whether the underlying design net is a global clock net (delegates to the DesignNet)
    inline bool isGlobalClock()
    {
        assert(designNet);
        return designNet->checkIsGlobalClock();
    }

  private:
    DesignInfo::DesignNet *designNet = nullptr;
    std::vector<PlacementUnit *> unitsOfNetPins;      // the PlacementUnit of each pin of this net
    std::vector<PlacementUnit *> unitsOfDriverPins;   // presumably the PlacementUnits of driver pins — confirm
    std::vector<PlacementUnit *> unitsOfPinsBeDriven; // presumably the PlacementUnits of driven pins — confirm
    std::vector<pinOffset> pinOffsetsInUnit;          // offset of each pin within its PlacementUnit
    std::set<PlacementUnit *> PUSet;
    int id;
    PlacementInfo *placementInfo = nullptr;
    // anchor locations of the bounding PlacementUnits and locations of the bounding pins of the net
    float leftPUX, rightPUX, topPUY, bottomPUY;
    float leftPinX, rightPinX, topPinY, bottomPinY;
    // ids of the bounding PlacementUnits and the indices (within this net) of the corresponding pins
    unsigned int leftPuId, rightPuId, topPuId, bottomPuId;
    unsigned int leftPinId_net, rightPinId_net, topPinId_net, bottomPinId_net;
    float eps = 1e-5;  // numeric tolerance for pin-offset comparisons
    float minDist = 1; // lower bound of pin distance used to avoid near-zero denominators in B2B weights
};

/**
 * @brief a group of PlacementUnits
 *
 * record the rough resource demand and PlacementUnits inside it. Usually used for initial processing (initial
 * placement.)
 *
 */
class ClusterUnit
{
  public:
    ClusterUnit(int id) : id(id)
    {
        PUs.clear();
        totalWeight = 0;
        totalBRAMNum = 0;
        totalDSPNum = 0;
    }
    ~ClusterUnit(){};

    inline int getWeight() { return totalWeight; };
    inline int getBRAMNum() { return totalBRAMNum; };
    inline int getDSPNum() { return totalDSPNum; };

    // add a PlacementUnit into the cluster and accumulate its resource demand
    inline void addPlacementUnit(PlacementInfo::PlacementUnit *curPU)
    {
        PUs.push_back(curPU);
        totalWeight += curPU->getWeight();
        totalBRAMNum += curPU->getBRAMNum();
        totalDSPNum += curPU->getDSPNum();
    }
    inline std::vector<PlacementInfo::PlacementUnit *> &getUnits() { return PUs; }
    inline int getId() { return id; }

  private:
    std::vector<PlacementInfo::PlacementUnit *> PUs;
    int totalWeight, totalBRAMNum, totalDSPNum; // accumulated resource demand of the cluster
    int id;
};

/**
 * @brief The net between the objects of ClusterUnit class
 *
 */
class ClusterNet
{
  public:
    ClusterNet(int id) : id(id)
    {
        clusterUnits.clear();
    }
    ~ClusterNet(){};
    inline std::vector<ClusterUnit *> &getUnits() { return clusterUnits; }
    inline int getId() { return id; }
    inline void addClusterUnit(ClusterUnit *tmpCU) { clusterUnits.push_back(tmpCU); }

  private:
    std::vector<ClusterUnit *> clusterUnits;
    int id;
};

/**
* @brief Site bin for global placement for some specific Site types * * Currently it is not used in the implementation. * */ class PlacementSiteBinInfo { public: PlacementSiteBinInfo(float leftX, float rightX, float bottomY, float topY, int row, int column) : leftX(leftX), rightX(rightX), topY(topY), bottomY(bottomY), row(row), column(column) { correspondingSites.clear(); macros.clear(); } void addSiteIntoBin(DeviceInfo::DeviceSite *curSite); ~PlacementSiteBinInfo() { correspondingSites.clear(); macros.clear(); } inline bool inRange(float x, float y) { return (x <= rightX && x > leftX && y <= topY && y > bottomY); } inline bool inRangeY(float y) { return (y <= topY && y > bottomY); } inline void addMacroSite(PlacementMacro *curMacro, float occupationAdded) { if (curMacro) macros.push_back(curMacro); utilization += occupationAdded; } inline void reset() { macros.clear(); utilization = 0; } inline std::vector<PlacementMacro *> &getMacros() { return macros; } inline float getUtilizationRate() { if (capacity == 0) return utilization / 0.01; assert(capacity != 0); return (float)binShrinkRatio * utilization / capacity; } inline float getUtilization() { return (float)utilization; } inline float getCapacity() { return (float)capacity / binShrinkRatio; } inline bool isOverflow() { if (capacity == 0) { if (utilization == 0) return false; else return true; } assert(capacity != 0); return ((float)binShrinkRatio * utilization / capacity) > 1.00 + eps; } inline bool canAddMore(int BELAmo) { if (capacity == 0) return false; return ((float)binShrinkRatio * (utilization + BELAmo) / capacity) <= 1.00 + eps; } inline void setYX(int i, int j) { row = i; column = j; } inline int Y() { return row; } inline int X() { return column; } private: std::vector<DeviceInfo::DeviceSite *> correspondingSites; std::vector<PlacementMacro *> macros; int capacity = 0; float utilization = 0; float binShrinkRatio = 1.0; float leftX; float rightX; float topY; float bottomY; float eps = 1e-5; int row; int 
column; }; /** * @brief Construct a new Placement Info object based on the information of design and device * * @param designInfo * @param deviceInfo * @param JSONCfg user-defined placement configuration */ PlacementInfo(DesignInfo *designInfo, DeviceInfo *deviceInfo, std::map<std::string, std::string> &JSONCfg); ~PlacementInfo() { delete compatiblePlacementTable; for (auto typeGrid : SharedBELTypeBinGrid) for (auto curRow : typeGrid) for (auto curBin : curRow) delete curBin; for (auto curRow : siteGridForMacros) for (auto curBin : curRow) delete curBin; for (auto pn : placementNets) delete pn; } void printStat(bool verbose = false); /** * @brief describes the type mapping from design to device, where a cell can be placed (which BEL in which site) * * Since different cell types can be mapped to a group of resource BEL types, we handle the mapping in the following * way, with a intermediate Shared BEL Type: * * cell type A => A => BEL type 1 * * cell type B => Shared => BEL type 2 * * cell type C => BEL => BEL type 3 * * cell type D => Type => BEL type 4 * * @param cellType2fixedAmoFileName a file indicates how many slot will a cell of specific type cost * @param cellType2sharedCellTypeFileName a file indicates the mapping from cell types to shared BEL types * @param sharedCellType2BELtypeFileName a file indicates the mapping from shared BEL types to resource BEL types * @return CompatiblePlacementTable* */ CompatiblePlacementTable *loadCompatiblePlacementTable(std::string cellType2fixedAmoFileName, std::string cellType2sharedCellTypeFileName, std::string sharedCellType2BELtypeFileName) { return new CompatiblePlacementTable(cellType2fixedAmoFileName, cellType2sharedCellTypeFileName, sharedCellType2BELtypeFileName, designInfo, deviceInfo); } inline CompatiblePlacementTable *getCompatiblePlacementTable() { return compatiblePlacementTable; } void setBELTypeForCells(DesignInfo *designInfo) { compatiblePlacementTable->setBELTypeForCells(designInfo); } /** * @brief Get 
the Min X of sites to identify the boundary of the device * * @param sites * @return float */ float getMinXFromSites(std::vector<DeviceInfo::DeviceSite *> &sites); /** * @brief Get the Min Y of sites to identify the boundary of the device * * @param sites * @return float */ float getMinYFromSites(std::vector<DeviceInfo::DeviceSite *> &sites); /** * @brief Get the Max X of sites to identify the boundary of the device * * @param sites * @return float */ float getMaxXFromSites(std::vector<DeviceInfo::DeviceSite *> &sites); /** * @brief Get the Max Y of sites to identify the boundary of the device * * @param sites * @return float */ float getMaxYFromSites(std::vector<DeviceInfo::DeviceSite *> &sites); /** * @brief Create a grid of bins on the device * * @param binWidth the width of each bin * @param binHeight the height of each bin */ void createGridBins(float binWidth, float binHeight); void createSiteBinGrid(); /** * @brief update PlacementNet objects when there are some updates of PlacementUnit objects (e.g., some cells are * packed) * */ void reloadNets(); /** * @brief update the long path in the design and enhance their net weights * */ void updateLongPaths(); /** * @brief verify that each cells in the design can be mapped on the resource elements on the device. 
* */ void verifyDeviceForDesign(); inline std::vector<PlacementUnit *> &getPlacementUnits() { return placementUnits; } inline std::vector<PlacementMacro *> &getPlacementMacros() { return placementMacros; } inline std::vector<PlacementUnit *> &getFixedPlacementUnits() { return fixedPlacementUnits; } inline std::vector<PlacementNet *> &getPlacementNets() { return placementNets; } inline std::set<DesignInfo::DesignCell *> &getCellInMacros() { return cellInMacros; } inline std::map<int, PlacementUnit *> &getCellId2PlacementUnit() { return cellId2PlacementUnit; } inline std::vector<PlacementUnit *> &getCellId2PlacementUnitVec() { return cellId2PlacementUnitVec; } inline std::vector<PlacementUnpackedCell *> &getPlacementUnpackedCells() { return placementUnpackedCells; } inline int getNumCells() { return designInfo->getNumCells(); } /** * @brief Get the Global Max X (right boundary of the device) * * @return float */ inline float getGlobalMaxX() { return globalMaxX; } /** * @brief Get the Global Max Y (top boundary of the device) * * @return float */ inline float getGlobalMaxY() { return globalMaxY; } /** * @brief Get the Global Min X (left boundary of the device) * * @return float */ inline float getGlobalMinX() { return globalMinX; } /** * @brief Get the Global Min Y (bottom boundary of the device) * * @return float */ inline float getGlobalMinY() { return globalMinY; } /** * @brief get right boundary of the bin grid * * the coverage of bin grid is a bit larger than the device. */ inline float getGlobalBinMaxLocX() { return endX; } /** * @brief get top boundary of the bin grid * * the coverage of bin grid is a bit larger than the device. */ inline float getGlobalBinMaxLocY() { return endY; } /** * @brief get left boundary of the bin grid * * the coverage of bin grid is a bit larger than the device. */ inline float getGlobalBinMinLocX() { return startX; } /** * @brief get bottom boundary of the bin grid * * the coverage of bin grid is a bit larger than the device. 
*/ inline float getGlobalBinMinLocY() { return startY; } inline float getDeviceMaxEdgeLength() { return std::max(endX - startX, endY - startY); } inline std::vector<int> &getPotentialBELTypeIDs(DesignInfo::DesignCell *cell) { return compatiblePlacementTable->getPotentialBELTypeIDs(cell); } inline std::vector<int> &getPotentialBELTypeIDs(DesignInfo::DesignCellType cellType) { return compatiblePlacementTable->getPotentialBELTypeIDs(cellType); } inline int getSharedBELTypeId(std::string tmpStr) { return compatiblePlacementTable->getSharedBELTypeId(tmpStr); } /** * @brief Get the actual occupation of a specific cell * * @param cell * @return float * * it will be the multiplication of cellId2Occupation and cellId2InfationRatio. * cellId2Occupation might be adjusted by the packing feasibility. * cellId2InfationRatio might be adjusted by the routing congestion level. */ inline float getActualOccupation(DesignInfo::DesignCell *cell) { return compatiblePlacementTable->getActualOccupation(cell); } /** * @brief Get the inflate ratio of a cell * * @param cell * @return float */ inline float getInflateRatio(DesignInfo::DesignCell *cell) { return compatiblePlacementTable->getInflateRatio(cell); } inline std::vector<float> &getcellId2Occupation() { return compatiblePlacementTable->getcellId2Occupation(); } /** * @brief Get the theoratical occupation of a specific cell type * * @param cellType * @return float */ inline float getOccupation(DesignInfo::DesignCellType cellType) { return compatiblePlacementTable->getOccupation(cellType); } /** * @brief Get the Actual Occupation By Cell Id * * @param id * @return float */ inline float getActualOccupationByCellId(int id) { return compatiblePlacementTable->getActualOccupationByCellId(id); } /** * @brief find neibor LUTs/FFs from bin grid * * @param curCell target Cell * @param displacementUpperbound displacement threshold * @param minNumNeighbor currently not used * @return std::vector<DesignInfo::DesignCell *>* */ inline 
std::vector<DesignInfo::DesignCell *> * findNeiborLUTFFsFromBinGrid(DesignInfo::DesignCell *curCell, float displacementUpperbound, int minNumNeighbor = 10) { // please note that the input DesignCell is only used to find the corresponding binGrid for site search. bool findLUT = curCell->isLUT(); float targetX = cellId2location[curCell->getCellId()].X; float targetY = cellId2location[curCell->getCellId()].Y; std::vector<DesignInfo::DesignCell *> *res = new std::vector<DesignInfo::DesignCell *>(); res->clear(); int binIdX, binIdY; getGridXY(targetX, targetY, binIdX, binIdY); assert(binIdY >= 0); assert((unsigned int)binIdY < LUTFFBinGrid.size()); assert(binIdX >= 0); assert((unsigned int)binIdX < LUTFFBinGrid[binIdY].size()); assert(LUTFFBinGrid[binIdY][binIdX]->inRange(targetX, targetY)); std::queue<std::pair<int, int>> binXYqueue; std::set<std::pair<int, int>> reachedBinXYs; binXYqueue.emplace(binIdX, binIdY); reachedBinXYs.emplace(binIdX, binIdY); bool findItself = false; while (binXYqueue.size() > 0) { std::pair<int, int> curXY = binXYqueue.front(); binXYqueue.pop(); int curbinIdX = curXY.first, curbinIdY = curXY.second; PlacementInfo::PlacementBinInfo *curBin = LUTFFBinGrid[curbinIdY][curbinIdX]; float bin2TargetXYDistance = curBin->getManhattanDistanceTo(targetX, targetY); if (bin2TargetXYDistance > displacementUpperbound) continue; for (auto tmpCell : curBin->getCells()) { if ((tmpCell->isLUT() && findLUT) || (tmpCell->isFF() && !findLUT)) { float tmpX = cellId2location[tmpCell->getCellId()].X; float tmpY = cellId2location[tmpCell->getCellId()].Y; float tmpPUDis = fabs(targetX - tmpX) + y2xRatio * fabs(targetY - tmpY); if (tmpPUDis <= displacementUpperbound) { if (tmpCell == curCell) { findItself = true; } res->push_back(tmpCell); } } } assert(findItself); for (int nextY = curbinIdY - 1; nextY <= curbinIdY + 1; nextY++) { for (int nextX = curbinIdX - 1; nextX <= curbinIdX + 1; nextX++) { if (!(nextY >= 0)) continue; if (!((unsigned int)nextY < 
LUTFFBinGrid.size())) continue; if (!(nextX >= 0)) continue; if (!((unsigned int)nextX < LUTFFBinGrid[binIdY].size())) continue; PlacementInfo::PlacementBinInfo *nextBin = LUTFFBinGrid[nextY][nextX]; float nextBin2TargetXYDistance = nextBin->getManhattanDistanceTo(targetX, targetY); if (nextBin2TargetXYDistance > displacementUpperbound) continue; std::pair<int, int> nextXY(nextX, nextY); if (reachedBinXYs.find(nextXY) == reachedBinXYs.end()) { reachedBinXYs.insert(nextXY); binXYqueue.push(nextXY); } } } } assert(findItself); return res; } /** * @brief Get the Bin Grid object * * @param BELTypeId indicate the target BELtype ID, since we map the different resource to different grid for easier * processing * @return std::vector<std::vector<PlacementBinInfo *>>& */ inline std::vector<std::vector<PlacementBinInfo *>> &getBinGrid(unsigned int BELTypeId) { assert(BELTypeId < SharedBELTypeBinGrid.size()); return SharedBELTypeBinGrid[BELTypeId]; } /** * @brief Get the Bin Grid object for all types of BEL * * @return std::vector<std::vector<std::vector<PlacementBinInfo *>>>& */ inline std::vector<std::vector<std::vector<PlacementBinInfo *>>> &getBinGrid() { return SharedBELTypeBinGrid; } inline std::vector<std::vector<PlacementSiteBinInfo *>> &getSiteBinGrid() { return siteGridForMacros; } inline PlacementUnit *getPlacementUnitByCell(DesignInfo::DesignCell *curCell) { assert(curCell); assert((unsigned int)curCell->getCellId() < cellId2PlacementUnitVec.size()); return cellId2PlacementUnitVec[curCell->getCellId()]; } inline PlacementUnit *getPlacementUnitByCellId(int cellId) { assert(cellId >= 0); assert((unsigned int)cellId < cellId2PlacementUnitVec.size()); return cellId2PlacementUnitVec[cellId]; } /** * @brief directly set weight in the quadratic Matrix and vector according to given request. * * Usually B2B Net will be called inside PlacementNet, but we set this API to handle pseudo net settings. 
     *
     * @param objectiveMatrixTripletList The non-Diag elements in matrix Q, stored in the vector of Eigen Triplet
     * (i,j,val)
     * @param objectiveMatrixDiag The Diag elements in matrix Q, stored in a 1-D vector
     * @param objectiveVector The elements in the vector P
     * @param puId0 PlacementUnit 0's Id (might be invalid -1)
     * @param puId1 PlacementUnit 1's Id (might be invalid -1)
     * @param pos0 PlacementUnit 0's position on one of the dimensions
     * @param pos1 PlacementUnit 1's position on one of the dimensions
     * @param pinOffset0 The pin's offset in PlacementUnit 0
     * @param pinOffset1 The pin's offset in PlacementUnit 1
     * @param movable0 whether the object 0 is movable
     * @param movable1 whether the object 1 is movable
     * @param w the weight of the net
     *
     * NOTE(review): this function is a verbatim duplicate of PlacementNet::addB2BNet — keep the two in sync
     * (or factor out the shared implementation).
     */
    inline void addB2BNetInPlacementInfo(std::vector<Eigen::Triplet<float>> &objectiveMatrixTripletList,
                                         std::vector<float> &objectiveMatrixDiag, Eigen::VectorXd &objectiveVector,
                                         int puId0, int puId1, float pos0, float pos1, float pinOffset0,
                                         float pinOffset1, bool movable0, bool movable1, float w)
    {
        // min_x 0.5 * x'Px + q'x
        // s.t.   l <= Ax <= u
        // A 2-pin connection within the same PlacementUnit contributes nothing to the objective.
        if (puId0 == puId1)
            return;
        if (movable0 && movable1)
        {
            // both endpoints are variables:
            // x0^2 + x1^2 - 2x0x1 + 2(x0c-x1c)x0 + 2(x1c-x0c)x1
            // objectiveMatrixTripletList.push_back(Eigen::Triplet<float>(puId0, puId0, w));
            // objectiveMatrixTripletList.push_back(Eigen::Triplet<float>(puId1, puId1, w));
            // diagonal terms are accumulated in a plain vector instead of triplets (cheaper to build)
            objectiveMatrixDiag[puId0] += w;
            objectiveMatrixDiag[puId1] += w;
            objectiveMatrixTripletList.push_back(Eigen::Triplet<float>(puId0, puId1, -w));
            objectiveMatrixTripletList.push_back(Eigen::Triplet<float>(puId1, puId0, -w));
            if (fabs(pinOffset0) > eps || fabs(pinOffset1) > eps)
            {
                // linear terms contributed by the pin offsets: 2(x0c-x1c)x0 + 2(x1c-x0c)x1
                objectiveVector[puId0] += w * (pinOffset0 - pinOffset1);
                objectiveVector[puId1] += w * (pinOffset1 - pinOffset0);
            }
        }
        else if (movable0)
        {
            // only endpoint 0 is a variable; endpoint 1 acts as a fixed anchor at pos1:
            // x0^2 - 2x0x1 + 2(x0c-x1c)x0
            // objectiveMatrixTripletList.push_back(Eigen::Triplet<float>(puId0, puId0, w));
            objectiveMatrixDiag[puId0] += w;
            objectiveVector[puId0] += -w * pos1;
            if (fabs(pinOffset0) > eps || fabs(pinOffset1) > eps)
            {
                // 2(x0c-x1c)x0
                objectiveVector[puId0] += w * (pinOffset0 - pinOffset1);
            }
        }
        else if (movable1)
        {
            // only endpoint 1 is a variable; endpoint 0 acts as a fixed anchor at pos0:
            // x1^2 - 2x0x1 + 2(x1c-x0c)x1
            // objectiveMatrixTripletList.push_back(Eigen::Triplet<float>(puId1, puId1, w));
            objectiveMatrixDiag[puId1] += w;
            objectiveVector[puId1] += -w * pos0;
            if (fabs(pinOffset0) > eps || fabs(pinOffset1) > eps)
            {
                // 2(x1c-x0c)x1
                objectiveVector[puId1] += w * (pinOffset1 - pinOffset0);
            }
        }
    }

    /**
     * @brief directly set weight in the quadratic Matrix and vector according to given request.
     *
     * Usually B2B Net will be called inside PlacementNet, but we set this API to handle pseudo net settings.
     *
     * @param objectiveMatrixTripletList The non-Diag elements in matrix Q, stored in the vector of Eigen Triplet
     * (i,j,val)
     * @param objectiveMatrixDiag The Diag elements in matrix Q, stored in a 1-D vector
     * @param objectiveVector The elements in the vector P
     * @param tmpPU PlacementUnit which needs a pseudo net
     * @param targetLoc the location of the anchor which the pseudo net connected to
     * @param pseudoWeight pseudo net weight to constrain the movement of PlacementUnits from their locations in
     * last optimization iteration
     * @param y2xRatio a factor to tune the weights of the net spanning in Y-coordinate relative to the net spanning
     * in X-coordinate
     * @param updateX update the X-coordinate term in the quadratic problem
     * @param updateY update the Y-coordinate term in the quadratic problem
     */
    inline void addPseudoNetsInPlacementInfo(std::vector<Eigen::Triplet<float>> &objectiveMatrixTripletList,
                                             std::vector<float> &objectiveMatrixDiag,
                                             Eigen::VectorXd &objectiveVector, PlacementUnit *tmpPU, float targetLoc,
                                             float pseudoWeight, float y2xRatio, bool updateX, bool updateY)
    {
        // exactly one dimension may be updated per call
        assert(updateX ^ updateY);

        if (updateY)
        {
            // Y-direction nets are scaled relative to X-direction nets
            pseudoWeight *= y2xRatio;
        }

        bool movable = !tmpPU->isFixed();
        if (movable)
        {
            // pseudo net: connect tmpPU to a fixed anchor (puId1 = -1, movable1 = false) at targetLoc
            addB2BNetInPlacementInfo(objectiveMatrixTripletList, objectiveMatrixDiag, objectiveVector,
                                     tmpPU->getId(), -1, targetLoc, targetLoc, 0, 0, true, false, pseudoWeight);
        }
    }

    /**
     * @brief update the mapping from Cells to PlacementUnits, since sometimes, PlacementUnits might change
     *
     */
    void updateCells2PlacementUnits();

    /**
     * @brief map design cells to the bins in the bin grid.
* */ void updateElementBinGrid(); /** * @brief adjust the resource demand of LUTs/FFs according to routing congestion * refer to RippleFPGA's implementation * @param enfore enfore adjustment without considering other factors */ void adjustLUTFFUtilization_Routability(bool enfore); /** * @brief reset the inflate ratio of all the cells to be 1, for re-evaluation * */ void adjustLUTFFUtilization_Routability_Reset(); /** * @brief adjust the resource demand of LUTs/FFs according to packing feasibility * * @param neighborDisplacementUpperbound set displacement threshold to identify the neighbors of a cell * @param enfore enfore adjustment without considering other factors */ void adjustLUTFFUtilization_Packablity(float neighborDisplacementUpperbound, bool enfore); /** * @brief adjust the resource demand of LUTs/FFs according to packing feasibility and routing congestion * * @param neighborDisplacementUpperbound set displacement threshold to identify the neighbors of a cell * @param enfore enfore adjustment without considering other factors */ void adjustLUTFFUtilization(float neighborDisplacementUpperbound, bool enfore = false); /** * @brief adjust the utlization of clock-related elements to mitigate the overflow of clock utilization * */ void adjustLUTFFUtilization_Clocking(); /** * @brief clean the information in bin grid * */ void resetElementBinGrid(); void updateSiteBinGrid(); void resetSiteBinGrid(); inline DesignInfo *getDesignInfo() { return designInfo; } inline DeviceInfo *getDeviceInfo() { return deviceInfo; } inline PlacementTimingInfo *getTimingInfo() { return simplePlacementTimingInfo; } /** * @brief get the remapped BEL type of a specific BEL type * since some cell can be placed in sites of different sites. For cell spreading, we need to remap some BEL types to * a unified BEL types. 
Belows are some examples: * * SLICEM_CARRY8 => SLICEL_CARRY8 * * SLICEM_LUT => SLICEL_LUT * * SLICEM_FF => SLICEL_FF * @param curBELType * @return std::string */ inline std::string getBELType2FalseBELType(std::string curBELType) { return deviceInfo->getBELType2FalseBELType(curBELType); } /** * @brief Get the Grid row/column based on given location X,Y * * @param cellX input X * @param cellY input Y * @param binIdX output target column of grid * @param binIdY output target row of grid */ inline void getGridXY(float cellX, float cellY, int &binIdX, int &binIdY) { float coord_offsetX = cellX - startX; float coord_offsetY = cellY - startY; binIdX = static_cast<int>((coord_offsetX) / binWidth); binIdY = static_cast<int>((coord_offsetY) / binHeight); } /** * @brief move the PlacementUnit to ensure the cells in it are within the device area. * * @param curPU target PlacementUnit * @param fX output X of PlacementUnit that can ensure the cells in it are within the device area. * @param fY output Y of PlacementUnit that can ensure the cells in it are within the device area. 
*/ inline void legalizeXYInArea(PlacementUnit *curPU, float &fX, float &fY) { if (curPU->getType() == PlacementUnitType_UnpackedCell) { fX = std::max(globalMinX + eps, (std::min(fX, globalMaxX - eps))); fY = std::max(globalMinY + eps, (std::min(fY, globalMaxY - eps))); } else if (auto curMacro = dynamic_cast<PlacementMacro *>(curPU)) { if (fY + curMacro->getTopOffset() > globalMaxY - eps) { fY = (globalMaxY - 2 * eps) - curMacro->getTopOffset(); } else if (fY + curMacro->getBottomOffset() < globalMinY + eps) { fY = (globalMinY + 2 * eps) - curMacro->getBottomOffset(); } if (fX + curMacro->getRightOffset() > globalMaxX - eps) { fX = (globalMaxX - 2 * eps) - curMacro->getRightOffset(); } else if (fX + curMacro->getLeftOffset() < globalMinX + eps) { fX = (globalMinX + 2 * eps) - curMacro->getLeftOffset(); } } else { assert(false && "wrong placement unit type"); } } /** * @brief move the PlacementUnit to ensure the cells in it are within the device area. * * @param curPU target PlacementUnit */ inline void enforceLegalizeXYInArea(PlacementUnit *curPU) { float fX = curPU->X(); float fY = curPU->Y(); if (curPU->getType() == PlacementUnitType_UnpackedCell) { fX = std::max(globalMinX + eps, (std::min(fX, globalMaxX - eps))); fY = std::max(globalMinY + eps, (std::min(fY, globalMaxY - eps))); } else if (auto curMacro = dynamic_cast<PlacementMacro *>(curPU)) { if (fY + curMacro->getTopOffset() > globalMaxY - eps) { fY = (globalMaxY - 2 * eps) - curMacro->getTopOffset(); } else if (fY + curMacro->getBottomOffset() < globalMinY + eps) { fY = (globalMinY + 2 * eps) - curMacro->getBottomOffset(); } if (fX + curMacro->getRightOffset() > globalMaxX - eps) { fX = (globalMaxX - 2 * eps) - curMacro->getRightOffset(); } else if (fX + curMacro->getLeftOffset() < globalMinX + eps) { fX = (globalMinX + 2 * eps) - curMacro->getLeftOffset(); } } else { assert(false && "wrong placement unit type"); } if (fX != curPU->X() || fY != curPU->Y()) { 
            curPU->setAnchorLocationAndForgetTheOriginalOne(fX, fY);
        }
    }

    /**
     * @brief check whether the PlacementUnit is legalized in the device area when a cell in it is placed at target
     * location
     *
     * @param curCell target cell inside the PlacementUnit
     * @param targetX target location X for the cell
     * @param targetY target location Y for the cell
     * @return true the PlacementUnit is legalized in the device area when a cell in it is placed at target location
     * @return false the PlacementUnit cannot be legalized in the device area when a cell in it is placed at target
     * location
     */
    inline bool isLegalLocation(DesignInfo::DesignCell *curCell, float targetX, float targetY)
    {
        auto curPU = cellId2PlacementUnit[curCell->getCellId()];
        if (curPU->getType() == PlacementUnitType_UnpackedCell)
        {
            float fX = targetX;
            float fY = targetY;
            fX = std::max(globalMinX + eps, (std::min(fX, globalMaxX - eps)));
            fY = std::max(globalMinY + eps, (std::min(fY, globalMaxY - eps)));
            // Legal iff clamping into the device area did not (noticeably) move the location.
            return (std::fabs(fX - targetX) + std::fabs(fY - targetY)) < eps;
        }
        else if (auto curMacro = dynamic_cast<PlacementMacro *>(curPU))
        {
            // Translate the cell's target location into the macro's anchor location first.
            float offsetX = curMacro->getCellOffsetXInMacro(curCell);
            float offsetY = curMacro->getCellOffsetYInMacro(curCell);
            float fX = targetX - offsetX;
            float fY = targetY - offsetY;
            if (fY + curMacro->getTopOffset() > globalMaxY - eps)
            {
                fY = (globalMaxY - 2 * eps) - curMacro->getTopOffset();
            }
            else if (fY + curMacro->getBottomOffset() < globalMinY + eps)
            {
                fY = (globalMinY + 2 * eps) - curMacro->getBottomOffset();
            }
            if (fX + curMacro->getRightOffset() > globalMaxX - eps)
            {
                fX = (globalMaxX - 2 * eps) - curMacro->getRightOffset();
            }
            else if (fX + curMacro->getLeftOffset() < globalMinX + eps)
            {
                fX = (globalMinX + 2 * eps) - curMacro->getLeftOffset();
            }
            // Compare the re-derived cell location (anchor + offset) against the target.
            return (std::fabs(fX + offsetX - targetX) + std::fabs(fY + offsetY - targetY)) < eps;
        }
        else
        {
            assert(false && "should not reach here");
            return false;
        }
    }

    /**
     * @brief check whether the PlacementUnit is legalized in the device area when it is placed at target location
     *
     * @param curPU target PlacementUnit
     * @param targetX target location X of the PlacementUnit anchor
     * @param targetY target location Y of the PlacementUnit anchor
     * @return true the PlacementUnit is legalized in the device area when placed at the target location
     * @return false the PlacementUnit cannot be legalized in the device area when placed at the target
     * location
     */
    inline bool isLegalLocation(PlacementUnit *curPU, float targetX, float targetY)
    {
        if (curPU->getType() == PlacementUnitType_UnpackedCell)
        {
            float fX = targetX;
            float fY = targetY;
            fX = std::max(globalMinX + eps, (std::min(fX, globalMaxX - eps)));
            fY = std::max(globalMinY + eps, (std::min(fY, globalMaxY - eps)));
            return (std::fabs(fX - targetX) + std::fabs(fY - targetY)) < eps;
        }
        else if (auto curMacro = dynamic_cast<PlacementMacro *>(curPU))
        {
            float fX = targetX;
            float fY = targetY;
            if (fY + curMacro->getTopOffset() > globalMaxY - eps)
            {
                fY = (globalMaxY - 2 * eps) - curMacro->getTopOffset();
            }
            else if (fY + curMacro->getBottomOffset() < globalMinY + eps)
            {
                fY = (globalMinY + 2 * eps) - curMacro->getBottomOffset();
            }
            if (fX + curMacro->getRightOffset() > globalMaxX - eps)
            {
                fX = (globalMaxX - 2 * eps) - curMacro->getRightOffset();
            }
            else if (fX + curMacro->getLeftOffset() < globalMinX + eps)
            {
                fX = (globalMinX + 2 * eps) - curMacro->getLeftOffset();
            }
            return (std::fabs(fX - targetX) + std::fabs(fY - targetY)) < eps;
        }
        else
        {
            assert(false && "should not reach here");
            return false;
        }
    }

    // Map a cell's target location to the legalized anchor location (PUX, PUY) of the
    // PlacementUnit that contains the cell.
    inline void getPULocationByCellLocation(DesignInfo::DesignCell *curCell, float targetX, float targetY, float &PUX,
                                            float &PUY)
    {
        auto curPU = cellId2PlacementUnit[curCell->getCellId()];
        if (curPU->getType() == PlacementUnitType_UnpackedCell)
        {
            float fX = targetX;
            float fY = targetY;
            fX = std::max(globalMinX + eps, (std::min(fX, globalMaxX - eps)));
            fY = std::max(globalMinY + eps, (std::min(fY, globalMaxY - eps)));
            PUX = fX;
            PUY = fY;
        }
        else if (auto curMacro = dynamic_cast<PlacementMacro *>(curPU))
        {
            float offsetX = curMacro->getCellOffsetXInMacro(curCell);
            float offsetY = curMacro->getCellOffsetYInMacro(curCell);
            float fX = targetX - offsetX;
            float fY = targetY - offsetY;
            if (fY +
curMacro->getTopOffset() > globalMaxY - eps) { fY = (globalMaxY - 2 * eps) - curMacro->getTopOffset(); } else if (fY + curMacro->getBottomOffset() < globalMinY + eps) { fY = (globalMinY + 2 * eps) - curMacro->getBottomOffset(); } if (fX + curMacro->getRightOffset() > globalMaxX - eps) { fX = (globalMaxX - 2 * eps) - curMacro->getRightOffset(); } else if (fX + curMacro->getLeftOffset() < globalMinX + eps) { fX = (globalMinX + 2 * eps) - curMacro->getLeftOffset(); } PUX = fX; PUY = fY; } else { PUX = -1; PUY = -1; assert(false && "should not reach here."); } } typedef struct Location { float X = -10; float Y = -10; } Location; inline std::vector<Location> &getCellId2location() { return cellId2location; } inline std::vector<Location> &getPinId2location() { return pinId2location; } /** * @brief Get the width of a bin in grid * * @return float */ inline float getBinGridW() { return binWidth; } /** * @brief Get the height of a bin in grid * * @return float */ inline float getBinGridH() { return binHeight; } /** * @brief record the bin information for a cell (BELtype, column/row, resource demand) * */ typedef struct CellBinInfo { int sharedTypeId = -1; int X = -1; int Y = -1; float occupation = -1; } CellBinInfo; /** * @brief set the legalization of some PlacementUnit objects * * @param PU2X X of PlacementUnits * @param PU2Y Y of PlacementUnits */ inline void setPULegalXY(std::map<PlacementInfo::PlacementUnit *, float> &PU2X, std::map<PlacementInfo::PlacementUnit *, float> &PU2Y) { for (auto tmpPair : PU2X) // only update elements in PU2X and PU2Y { PULegalXY.first[tmpPair.first] = tmpPair.second; } for (auto tmpPair : PU2Y) { PULegalXY.second[tmpPair.first] = tmpPair.second; } } /** * @brief set the sites occupied by the PlacementUnit objects * * @param PU2Sites a mapping from PlaceuementUnit objects to device sites */ inline void setPULegalSite(std::map<PlacementInfo::PlacementUnit *, std::vector<DeviceInfo::DeviceSite *>> &PU2Sites) { for (auto tmpPair : PU2Sites) // 
only update elements in PU2X and PU2Y { PU2LegalSites[tmpPair.first] = tmpPair.second; } } /** * @brief get the sites occupied by the legalized PlacementUnit objects * * @return std::map<PlacementInfo::PlacementUnit *, std::vector<DeviceInfo::DeviceSite *>>& */ inline std::map<PlacementInfo::PlacementUnit *, std::vector<DeviceInfo::DeviceSite *>> &getPULegalSite() { return PU2LegalSites; } /** * @brief get the locations (pair of X,Y) of the legalized PlacementUnit objects * * @return std::pair<std::map<PlacementInfo::PlacementUnit *, float>, std::map<PlacementInfo::PlacementUnit *, * float>>& */ inline std::pair<std::map<PlacementInfo::PlacementUnit *, float>, std::map<PlacementInfo::PlacementUnit *, float>> & getPULegalXY() { return PULegalXY; } /** * @brief forget all the legalization information * */ void resetPULegalInformation() { PULegalXY.first = std::map<PlacementInfo::PlacementUnit *, float>(); PULegalXY.second = std::map<PlacementInfo::PlacementUnit *, float>(); PU2LegalSites.clear(); } /** * @brief remove the legalization information of a PlacementUnit object * * @param curPU */ inline void deleteLegalizationInfoFor(PlacementInfo::PlacementUnit *curPU) { if (PU2LegalSites.find(curPU) != PU2LegalSites.end()) PU2LegalSites.erase(curPU); if (PULegalXY.first.find(curPU) != PULegalXY.first.end()) PULegalXY.first.erase(curPU); if (PULegalXY.second.find(curPU) != PULegalXY.second.end()) PULegalXY.second.erase(curPU); } /** * @brief Set the cell bin Information of a design cell * * @param cellId the Id of the design cell * @param sharedTypeId which BEL type the design cell is * @param X the column in the grid of the bin which the cell is located in * @param Y the row in the grid of the bin which the cell is located in * @param occupation how much resource is cost by the design cell */ inline void setCellBinInfo(int cellId, int sharedTypeId, int X, int Y, float occupation) { assert((unsigned int)cellId < cellId2CellBinInfo.size()); 
        cellId2CellBinInfo[cellId].sharedTypeId = sharedTypeId;
        cellId2CellBinInfo[cellId].X = X;
        cellId2CellBinInfo[cellId].Y = Y;
        cellId2CellBinInfo[cellId].occupation = occupation;
    }

    /**
     * @brief update the bin information of a design cell when it is moved to a new location
     *
     * When a cell is moved to a new location, corresponding bins should be updated accordingly:
     * the cell's occupation is removed from its previous bin and added to the bin covering the
     * new location. No-op when the cell stays within the same bin.
     *
     * @param cellId the Id of the design cell
     * @param fX the X coordinate the cell is moved to
     * @param fY the Y coordinate the cell is moved to
     */
    inline void transferCellBinInfo(int cellId, float fX, int fY)
    {
        // NOTE(review): fY is declared int while fX is float, so a fractional Y coordinate
        // passed by a caller is truncated before the grid lookup below — confirm whether
        // this parameter should be float like fX.
        assert((unsigned int)cellId < cellId2CellBinInfo.size());
        int binIdX, binIdY;
        getGridXY(fX, fY, binIdX, binIdY);
        assert(binIdY >= 0);
        assert((unsigned int)binIdY < SharedBELTypeBinGrid[cellId2CellBinInfo[cellId].sharedTypeId].size());
        assert(binIdX >= 0);
        assert((unsigned int)binIdX < SharedBELTypeBinGrid[cellId2CellBinInfo[cellId].sharedTypeId][binIdY].size());
        // The cell is still covered by the same bin: nothing to transfer.
        if (cellId2CellBinInfo[cellId].X == binIdX && cellId2CellBinInfo[cellId].Y == binIdY)
            return;
        assert(cellId2CellBinInfo[cellId].occupation >= 0);
        // Move the cell's resource demand from the old bin to the new bin.
        SharedBELTypeBinGrid[cellId2CellBinInfo[cellId].sharedTypeId][cellId2CellBinInfo[cellId].Y]
                            [cellId2CellBinInfo[cellId].X]
                                ->removeCell(designInfo->getCells()[cellId], cellId2CellBinInfo[cellId].occupation);
        SharedBELTypeBinGrid[cellId2CellBinInfo[cellId].sharedTypeId][binIdY][binIdX]->addCell(
            designInfo->getCells()[cellId], cellId2CellBinInfo[cellId].occupation);
        cellId2CellBinInfo[cellId].X = binIdX;
        cellId2CellBinInfo[cellId].Y = binIdY;
    }

    /**
     * @brief Get the Displacement from a given location to a device site (y2xRatio is considered.)
* * @param fX given X * @param fY given Y * @param curSite target device site * @return float */ inline float getDisplacement(float fX, float fY, DeviceInfo::DeviceSite *curSite) { return std::fabs(fX - curSite->X()) + y2xRatio * std::fabs(fY - curSite->Y()); } /** * @brief find neibor device sites of a given cell from bin grid * * @param curCell target cell * @param targetX target location X * @param targetY target location Y * @param displacementThreshold the displacement threshold from the sites to the target location * @param siteNumThreshold if the number of sites exceed this threshold, stop the searching * @return std::vector<DeviceInfo::DeviceSite *>* */ inline std::vector<DeviceInfo::DeviceSite *> *findNeiborSiteFromBinGrid(DesignInfo::DesignCell *curCell, float targetX, float targetY, float displacementThreshold, int siteNumThreshold) { // please note that the input DesignCell is only used to find the corresponding binGrid for site search. std::vector<DeviceInfo::DeviceSite *> *res = new std::vector<DeviceInfo::DeviceSite *>(0); int binIdX, binIdY; getGridXY(targetX, targetY, binIdX, binIdY); while (res->size() == 0) { auto sharedTypeIds = getPotentialBELTypeIDs(curCell->getCellType()); for (auto sharedTypeId : sharedTypeIds) { assert((unsigned int)curCell->getCellId() < cellId2CellBinInfo.size()); assert(binIdY >= 0); assert((unsigned int)binIdY < SharedBELTypeBinGrid[sharedTypeId].size()); assert(binIdX >= 0); assert((unsigned int)binIdX < SharedBELTypeBinGrid[sharedTypeId][binIdY].size()); std::vector<std::vector<PlacementBinInfo *>> &curBinGrid = SharedBELTypeBinGrid[sharedTypeId]; std::queue<std::pair<int, int>> binXYqueue; std::set<std::pair<int, int>> reachedBinXYs; binXYqueue.emplace(binIdX, binIdY); reachedBinXYs.emplace(binIdX, binIdY); while (binXYqueue.size() > 0) { std::pair<int, int> curXY = binXYqueue.front(); binXYqueue.pop(); int curbinIdX = curXY.first, curbinIdY = curXY.second; PlacementBinInfo *curBin = curBinGrid[curbinIdY][curbinIdX]; 
float bin2TargetXYDistance = curBin->getManhattanDistanceTo(targetX, targetY); if (bin2TargetXYDistance > displacementThreshold) continue; int findSiteCnt = 0; for (auto curSite : curBin->getCorrespondingSites()) { if (!curSite->isOccupied() && !curSite->isMapped()) { if (getDisplacement(targetX, targetY, curSite) < displacementThreshold) { findSiteCnt++; res->push_back(curSite); } } } if (res->size() < (unsigned int)siteNumThreshold) { for (int nextY = curbinIdY - 1; nextY <= curbinIdY + 1; nextY++) { for (int nextX = curbinIdX - 1; nextX <= curbinIdX + 1; nextX++) { if (!(nextY >= 0)) continue; if (!((unsigned int)nextY < SharedBELTypeBinGrid[sharedTypeId].size())) continue; if (!(nextX >= 0)) continue; if (!((unsigned int)nextX < SharedBELTypeBinGrid[sharedTypeId][binIdY].size())) continue; PlacementBinInfo *nextBin = curBinGrid[nextY][nextX]; float nextBin2TargetXYDistance = nextBin->getManhattanDistanceTo(targetX, targetY); if (nextBin2TargetXYDistance > displacementThreshold) continue; std::pair<int, int> nextXY(nextX, nextY); if (reachedBinXYs.find(nextXY) == reachedBinXYs.end()) { reachedBinXYs.insert(nextXY); binXYqueue.push(nextXY); } } } } } } displacementThreshold *= 1.5; } return res; } inline std::vector<CellBinInfo> &getCellId2CellBinInfo() { return cellId2CellBinInfo; } inline std::vector<DesignInfo::DesignCell *> &getCells() { return designInfo->getCells(); } inline std::vector<std::vector<PlacementNet *>> &getPlacementUnitId2Nets() { return placementUnitId2Nets; } /** * @brief update the B2B net model for the placement and get the total HPWL of all the nets in the design * * @return double */ double updateB2BAndGetTotalHPWL() { double totalHPWL = 0.0; int numNet = placementNets.size(); #pragma omp parallel for for (int netId = 0; netId < numNet; netId++) { auto net = placementNets[netId]; net->updateNetBounds(true, true); } //#pragma omp parallel for reduction(+ : totalHPWL) for (int netId = 0; netId < numNet; netId++) { auto net = 
placementNets[netId]; totalHPWL += net->getHPWL(y2xRatio); } return totalHPWL; } /** * @brief get the total HPWL of all the nets in the design without updating the B2B net model for the placement * * @return double */ double getTotalHPWL() { double totalHPWL = 0.0; int numNet = placementNets.size(); //#pragma omp parallel for reduction(+ : totalHPWL) for (int netId = 0; netId < numNet; netId++) { auto net = placementNets[netId]; totalHPWL += net->getHPWL(y2xRatio); } return totalHPWL; } /** * @brief Set the progress ratio, indicating the progress of the placement convergence, * * the progress ratio is usually HPWL_lower / HPWL_upper * * @param p */ inline void setProgress(float p) { placementProressRatio = p; } /** * @brief Get the Progress ratio of the placement * * @return float */ inline float getProgress() { return placementProressRatio; } /** * @brief dump the congestion mesh grid for evaluation * * @param dumpFileName */ void dumpCongestion(std::string dumpFileName); /** * @brief dump the placement commands to place cells in Vivado (do not use this temporarily) * * (do not use this temporarily) * We move this functionality to the packer/placer. * There is a conterpart function in ParallelPack, which is relatively complete. * We will collect those information back to PlacementInfo in later implementation. 
* * @param dumpFile */ void dumpVivadoPlacementTclWithPULegalizationInfo(std::string dumpFile); /** * @brief dump the PlacementUnit objects and some placement parameters as a checkpoint * * @param dumpFile */ void dumpPlacementUnitInformation(std::string dumpFile); /** * @brief load the data of the PlacementUnit objects and some placement parameters from a checkpoint file * * @param locationFile */ void loadPlacementUnitInformation(std::string locationFile); /** * @brief Set the Pseudo Net Weight according to a given value * * This pseudo net weight will be used in the global placement iteration * * @param weight */ inline void setPseudoNetWeight(float weight) { oriPseudoNetWeight = weight; } /** * @brief Get the Pseudo Net Weight object * * usually it is used to set the configuration of placer or dump placement information * * @return float */ inline float getPseudoNetWeight() { assert(oriPseudoNetWeight > 0 && "should be set before get"); return oriPseudoNetWeight; } /** * @brief Get the Macro Pseudo Net Enhance Counter * * The legalization pseudo nets for macros are strengthened as this counter is increased, so we can force the macros * get closer and closer to their legal location. * * @return int */ inline int getMacroPseudoNetEnhanceCnt() { if (JSONCfg.find("DirectMacroLegalize") != JSONCfg.end()) { if (JSONCfg["DirectMacroLegalize"] == "true") { return macroPseudoNetEnhanceCnt; } } assert(macroPseudoNetEnhanceCnt > 0 && "should be set before get"); return macroPseudoNetEnhanceCnt; } /** * @brief Get the Macro Legalization Weight * * it is the legalization pseudo nets for macros. * * @return float */ inline float getMacroLegalizationWeight() { assert(macroLegalizationWeight > 0 && "should be set before get"); return macroLegalizationWeight; } /** * @brief Set the Macro Legalization Parameters * * We have specific pseudo net parameters for macro legalizations. We need to set them if loading a check point or * re-configuring the placement. 
     *
     * @param cnt the new value of the macro pseudo net enhancement counter
     * @param _macroLegalizationWeight the new weight of the macro legalization pseudo nets
     */
    inline void setMacroLegalizationParameters(int cnt, float _macroLegalizationWeight)
    {
        macroPseudoNetEnhanceCnt = cnt;
        macroLegalizationWeight = _macroLegalizationWeight;
    }

    /**
     * @brief reset the LUTFFDeterminedOccupation object
     *
     * LUTFFDeterminedOccupation is used to record the final resource demand of a LUT/FF after final packing
     *
     */
    void resetLUTFFDeterminedOccupation()
    {
        designInfo->resetLUTFFDeterminedOccupation();
    }

    /**
     * @brief get the Determined Occupation of a specific cell
     *
     * LUTFFDeterminedOccupation is used to record the final resource demand of a LUT/FF after final packing
     *
     * @param cellId target cell
     * @return int
     */
    inline int getDeterminedOccupation(int cellId)
    {
        return designInfo->getDeterminedOccupation(cellId);
    }

    /**
     * @brief Set the Determined Occupation of a specific cell
     *
     * LUTFFDeterminedOccupation is used to record the final resource demand of a LUT/FF after final packing
     *
     * @param cellId target cell
     * @param occupation resource demand of the cell after packing
     */
    inline void setDeterminedOccupation(int cellId, int occupation)
    {
        designInfo->setDeterminedOccupation(cellId, occupation);
    }

    /**
     * @brief Get the Pair Pin Num of two LUTs
     *
     * Two LUTs can share the input pins of a BEL in CLB. However, device architecture might have requirements on
     * their demands of the number of input pins. This function returns the number of distinct input nets required
     * when the two LUTs are paired (inputs driven by the same net are counted once), or 12 when pairing is
     * impossible (either LUT already needs 6 inputs or is a LUT6).
     *
     * @param LUTA LUT Cell A
     * @param LUTB LUT Cell B
     * @return unsigned int
     */
    inline unsigned int getPairPinNum(DesignInfo::DesignCell *LUTA, DesignInfo::DesignCell *LUTB)
    {
        if (LUTA->getInputPins().size() == 6 || LUTB->getInputPins().size() == 6 || LUTA->isLUT6() || LUTB->isLUT6())
            return 12;

        int pinNumA = 0;
        int totalPin = 0;
        int netIds[5]; // be aware that a LUT might have pins connected to the same net and they should be treated as
                       // different inputs.

        for (auto tmpPin : LUTA->getInputPins())
        {
            if (!tmpPin->isUnconnected())
            {
                netIds[pinNumA] = tmpPin->getNet()->getElementIdInType();
                pinNumA++;
            }
        }
        totalPin = pinNumA;
        for (auto tmpPin : LUTB->getInputPins())
        {
            if (!tmpPin->isUnconnected())
            {
                bool matched = false;
                // A net of LUTB that also feeds LUTA can share the input pin; mark the matched
                // slot as consumed (-1) so each shared net is only matched once.
                for (int i = 0; i < pinNumA; i++)
                {
                    if (netIds[i] >= 0 && netIds[i] == tmpPin->getNet()->getElementIdInType())
                    {
                        netIds[i] = -1;
                        matched = true;
                        break;
                    }
                }
                if (!matched)
                {
                    totalPin++;
                }
            }
        }
        return totalPin;
    }

    /**
     * @brief calculate the proportion of the PlacementUnit objects with high interconnection density
     *
     * A PlacementUnit connecting to at least 30 nets is counted as high-density; the result is
     * stored in PUWithManyNetsRatio.
     *
     */
    inline void calculateNetNumDistributionOfPUs()
    {
        int manyNetCnt = 0;
        for (auto tmpPU : placementUnits)
        {
            if (tmpPU->getNetsSetPtr()->size() >= 30)
            {
                manyNetCnt++;
            }
        }
        PUWithManyNetsRatio = (float)manyNetCnt / (float)placementUnits.size();
    }

    /**
     * @brief get the proportion of the PlacementUnit objects with high interconnection density
     *
     * @return float
     */
    inline float getPUWithManyNetsRatio()
    {
        assert(PUWithManyNetsRatio >= 0);
        return PUWithManyNetsRatio;
    }

    /**
     * @brief record the minimum HPWL during placement procedure
     *
     * @param val the minimum HPWL value to record
     */
    inline void setMinHPWL(float val)
    {
        minHPWL = val;
    }

    // Get the recorded minimum HPWL during the placement procedure.
    inline float getMinHPWL()
    {
        return minHPWL;
    }

    /**
     * @brief check the utilization of the clock regions on the device
     *
     * @param dump dump the clock utilization
     */
    void checkClockUtilization(bool dump);

    /**
     * @brief check whether the given PlacementUnit can be mapped to the site considering the half-column clock
     * legalization rules
     *
     * @param curPU a given PU
     * @param curSite the target site
     * @return true if the the given PlacementUnit can be mapped to the site considering the half-column clock
     * legalization rules
     * @return false if the the given PlacementUnit CANNOT be mapped to the site considering the half-column clock
     * legalization rules
     */
    bool
    checkClockColumnLegalization(PlacementInfo::PlacementUnit *curPU, DeviceInfo::DeviceSite *curSite)
    {
        auto clockColumn = curSite->getClockHalfColumn();
        // Work on a copy of the half-column's clock set: simulate inserting the PU's clocks
        // without mutating the recorded state.
        auto curSetOfClocks = clockCol2ClockNets[clockColumn];
        auto curPUClocks = curPU->getClockNets();
        for (auto clockNet : curPUClocks)
            curSetOfClocks.insert(clockNet);
        if (curSetOfClocks.size() <= clockColumn->getClockNumLimit())
            return true;
        else
            return false;
    }

    /**
     * @brief print out the clock nets that would occupy the half-column if the given PlacementUnit
     * were mapped to the given site (debug aid for clock legalization failures)
     *
     * @param curPU a given PU
     * @param curSite the target site
     */
    void printOutClockColumnLegalization(PlacementInfo::PlacementUnit *curPU, DeviceInfo::DeviceSite *curSite)
    {
        auto clockColumn = curSite->getClockHalfColumn();
        // Copies again: printing must not change the recorded clock sets.
        auto curSetOfClocks = clockCol2ClockNets[clockColumn];
        auto curPUClocks = curPU->getClockNets();
        for (auto clockNet : curPUClocks)
            curSetOfClocks.insert(clockNet);
        int i = 0;
        for (auto clockNet : curSetOfClocks)
        {
            std::cout << "clock#" << i << " name: [" << clockNet->getName() << "]\n";
            i++;
        }
    }

    /**
     * @brief map the given PlacementUnit to the site for later checking of the half-column clock
     * legalization rules
     *
     * @param curPU a given PU
     * @param curSite the target site
     *
     */
    void addPUIntoClockColumn(PlacementInfo::PlacementUnit *curPU, DeviceInfo::DeviceSite *curSite)
    {
        auto clockColumn = curSite->getClockHalfColumn();
        // References here (unlike the check above): this call DOES mutate the recorded
        // clock set of the half-column.
        auto &curSetOfClocks = clockCol2ClockNets[clockColumn];
        auto &curPUClocks = curPU->getClockNets();
        for (auto clockNet : curPUClocks)
            curSetOfClocks.insert(clockNet);
        assert(curSetOfClocks.size() <= clockColumn->getClockNumLimit());
    }

    /**
     * @brief Get the Long Paths in the net list for later optimization
     *
     * @return std::vector<std::vector<PlacementUnit *>>&
     */
    inline std::vector<std::vector<PlacementUnit *>> &getLongPaths()
    {
        return longPaths;
    }

    /**
     * @brief make the PlacementUnits in the long path closer to each other
     *
     */
    void optimizeLongPaths();

    /**
     * @brief call timing info to build simple timing graph and cache its path-length thresholds
     *
     */
    void buildSimpleTimingGraph()
    {
        simplePlacementTimingInfo->buildSimpleTimingGraph();
        longPathThresholdLevel = simplePlacementTimingInfo->getLongPathThresholdLevel();
mediumPathThresholdLevel = simplePlacementTimingInfo->getMediumPathThresholdLevel(); } /** * @brief get the PlacementUnit Mapping to clock region centers for timing optimzation * * @return std::map<PlacementUnit *, std::pair<float, float>>& */ std::map<PlacementUnit *, std::pair<float, float>> &getPU2ClockRegionCenters() { return PU2ClockRegionCenters; } /** * @brief get the PlacementUnit Mapping to clock region column for timing optimzation * * @return std::map<PlacementUnit *, int>& */ std::map<PlacementUnit *, int> &getPU2ClockRegionColumn() { return PU2ClockRegionColumn; } inline int getLongPathThresholdLevel() { return longPathThresholdLevel; } inline int getMediumPathThresholdLevel() { return mediumPathThresholdLevel; } inline std::map<DeviceInfo::ClockColumn *, std::set<DesignInfo::DesignNet *>> &getClockCol2ClockNets() { return clockCol2ClockNets; } inline std::vector<std::vector<PlacementBinInfo *>> &getGlobalBinGrid() { return globalBinGrid; } private: CompatiblePlacementTable *compatiblePlacementTable = nullptr; std::vector<PlacementUnit *> placementUnits; std::vector<PlacementUnpackedCell *> placementUnpackedCells; std::vector<PlacementMacro *> placementMacros; std::vector<PlacementUnit *> fixedPlacementUnits; std::set<DesignInfo::DesignCell *> cellInMacros; std::map<int, PlacementUnit *> cellId2PlacementUnit; std::vector<PlacementUnit *> cellId2PlacementUnitVec; std::vector<CellBinInfo> cellId2CellBinInfo; std::vector<Location> cellId2location; std::vector<Location> pinId2location; DesignInfo *designInfo; DeviceInfo *deviceInfo; PlacementTimingInfo *simplePlacementTimingInfo = nullptr; /** * @brief a mapping from PlaceuementUnit objects to legalized locations * */ std::pair<std::map<PlacementInfo::PlacementUnit *, float>, std::map<PlacementInfo::PlacementUnit *, float>> PULegalXY; /** * @brief a mapping from PlaceuementUnit objects to device sites * */ std::map<PlacementInfo::PlacementUnit *, std::vector<DeviceInfo::DeviceSite *>> PU2LegalSites; /** * 
@brief left boundary of the device */ float globalMinX; /** * @brief bottom boundary of the device */ float globalMinY; /** * @brief right boundary of the device */ float globalMaxX; /** * @brief top boundary of the device */ float globalMaxY; /** * @brief left boundary of the bin grid * * the coverage of bin grid is a bit larger than the device. */ float startX; /** * @brief bottom boundary of the bin grid * * the coverage of bin grid is a bit larger than the device. */ float startY; /** * @brief right boundary of the bin grid * * the coverage of bin grid is a bit larger than the device. */ float endX; /** * @brief bottom boundary of the bin grid * * the coverage of bin grid is a bit larger than the device. */ float endY; float eps = 1e-5; std::vector<std::vector<std::vector<PlacementBinInfo *>>> SharedBELTypeBinGrid; /** * @brief Bin Grid for LUTs and FFs, mainly for searching neighbor elements during packing * */ std::vector<std::vector<PlacementBinInfo *>> LUTFFBinGrid; /** * @brief Bin Grid includes all types of sites, mainly for congestion evalution * */ std::vector<std::vector<PlacementBinInfo *>> globalBinGrid; std::vector<std::vector<PlacementSiteBinInfo *>> siteGridForMacros; float binWidth; float binHeight; std::vector<PlacementNet *> placementNets; std::vector<std::vector<PlacementNet *>> placementUnitId2Nets; std::vector<PlacementNet *> clockNets; std::vector<std::vector<int>> clockRegionUtilization; std::set<PlacementUnit *> PUSetContainingFF; std::vector<PlacementUnit *> PUsContainingFF; std::vector<std::vector<PlacementUnit *>> longPaths; std::map<PlacementUnit *, std::pair<float, float>> PU2ClockRegionCenters; std::map<PlacementUnit *, int> PU2ClockRegionColumn; std::map<DeviceInfo::ClockColumn *, std::set<DesignInfo::DesignNet *>> clockCol2ClockNets; /** * @brief the retangular clock region coverage of a clock net * */ typedef struct _ClockNetCoverage { PlacementNet *clockNet = nullptr; /** * @brief the left column in the grid of clock regions * 
*/ int leftRegionX; /** * @brief the right column in the grid of clock regions * */ int rightRegionX; /** * @brief the top row in the grid of clock regions * */ int topRegionY; /** * @brief the bottom row in the grid of clock regions * */ int bottomRegionY; } ClockNetCoverage; std::vector<ClockNetCoverage> clockNetCoverages; /** * @brief the progress ratio, indicating the progress of the placement convergence. * * the progress ratio is usually HPWL_lower / HPWL_upper */ float placementProressRatio = 0.01; std::map<std::string, std::string> &JSONCfg; std::string cellType2fixedAmoFileName; std::string cellType2sharedCellTypeFileName; std::string sharedCellType2BELtypeFileName; /** * @brief a factor to tune the weights of the net spanning in Y-coordinate relative to the net spanning * in X-coordinate */ float y2xRatio = 1.0; /** * @brief the long path threshold for timing optimization * */ int longPathThresholdLevel = 10; /** * @brief the medium path threshold for timing optimization * */ int mediumPathThresholdLevel = 5; int dumpPlacementUnitLocationCnt = 0; float oriPseudoNetWeight = -1; int macroPseudoNetEnhanceCnt = -1; float macroLegalizationWeight = -1; float lastProgressWhenLUTFFUtilAdjust = -1.0; float PUWithManyNetsRatio = -1; float minHPWL = 1e8; bool LUTFFUtilizationAdjusted = false; }; std::ostream &operator<<(std::ostream &os, PlacementInfo::PlacementMacro *curMacro); std::ostream &operator<<(std::ostream &os, PlacementInfo::PlacementUnpackedCell *curUnpackedCell); std::ostream &operator<<(std::ostream &os, PlacementInfo::PlacementUnit *curPU); #endif
louvain_imm.h
//===------------------------------------------------------------*- C++ -*-===// // // Ripples: A C++ Library for Influence Maximization // Marco Minutoli <marco.minutoli@pnnl.gov> // Pacific Northwest National Laboratory // //===----------------------------------------------------------------------===// // // Copyright (c) 2019, Battelle Memorial Institute // // Battelle Memorial Institute (hereinafter Battelle) hereby grants permission // to any person or entity lawfully obtaining a copy of this software and // associated documentation files (hereinafter “the Software”) to redistribute // and use the Software in source and binary forms, with or without // modification. Such person or entity may use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and may permit // others to do so, subject to the following conditions: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimers. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Other than as used herein, neither the name Battelle Memorial Institute or // Battelle may be used in any form whatsoever without the express written // consent of Battelle. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. 
IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // //===----------------------------------------------------------------------===// #ifndef RIPPLES_LOUVAIN_IMM_H #define RIPPLES_LOUVAIN_IMM_H #include <queue> #include <string> #include <type_traits> #include <vector> #include "ripples/find_most_influential.h" #include "ripples/generate_rrr_sets.h" #include "ripples/imm.h" #include "ripples/imm_execution_record.h" #include "spdlog/fmt/ostr.h" #include "spdlog/sinks/stdout_color_sinks.h" #include "spdlog/spdlog.h" namespace ripples { struct LouvainIMMConfiguration : public IMMConfiguration { std::string communityList; void addCmdOptions(CLI::App &app) { IMMConfiguration::addCmdOptions(app); app.add_option("--community-map", communityList, "The filename of the community map.") ->required() ->group("Algorithm Options"); } }; struct LouvainIMMExecutionRecord : public IMMExecutionRecord {}; namespace { template <typename vertex_type> struct Compare { bool operator()(std::pair<vertex_type, size_t> &a, std::pair<vertex_type, size_t> &b) const { return a.second < b.second; } }; } // namespace template <typename GraphTy, typename RRRset, typename execution_tag> auto FindMostInfluentialSet(const std::vector<GraphTy> &communities, size_t k, std::vector<std::vector<RRRset>> &RRRcollection, sequential_tag &&ex_tag) { spdlog::get("console")->info("SeedSelect start"); using vertex_type = typename GraphTy::vertex_type; Compare<vertex_type> cmp; using priorityQueue = std::priority_queue<std::pair<vertex_type, size_t>, 
std::vector<std::pair<vertex_type, size_t>>, decltype(cmp)>; // Count occurrencies for all communities std::vector<std::vector<uint32_t>> coverageVectors(communities.size()); std::vector<priorityQueue> queues(communities.size()); std::vector<typename std::vector<RRRset>::iterator> ends(communities.size()); double total_delta = 0; #pragma omp parallel for reduction(+ : total_delta) for (size_t i = 0; i < communities.size(); ++i) { coverageVectors[i] = std::vector<uint32_t>(communities[i].num_nodes(), 0); CountOccurrencies(RRRcollection[i].begin(), RRRcollection[i].end(), coverageVectors[i].begin(), coverageVectors[i].end(), std::forward<sequential_tag>(ex_tag)); std::vector<std::pair<vertex_type, size_t>> queue_storage( communities[i].num_nodes()); InitHeapStorage(coverageVectors[i].begin(), coverageVectors[i].end(), queue_storage.begin(), queue_storage.end(), std::forward<sequential_tag>(ex_tag)); queues[i] = std::move(priorityQueue(cmp, std::move(queue_storage))); ends[i] = RRRcollection[i].end(); total_delta += RRRcollection[i].size(); } spdlog::get("console")->flush(); // Init on heap per community using vertex_contribution_pair = std::pair<vertex_type, double>; std::vector<vertex_contribution_pair> global_heap( k + 1, vertex_contribution_pair{-1, -1.0}); std::vector<uint64_t> active_communities(communities.size(), 1); auto heap_cmp = [](const vertex_contribution_pair &a, const vertex_contribution_pair &b) -> bool { return a.second > b.second; }; std::make_heap(global_heap.begin(), global_heap.end(), heap_cmp); // std::mutex global_heap_mutex; // for each communities do in parallel size_t iteration = 0; while (!std::all_of(active_communities.begin(), active_communities.end(), [](const uint64_t &v) -> bool { return v == 0; })) { for (size_t i = 0; i < communities.size(); ++i) { if (active_communities[i] == 0) continue; if (queues[i].empty()) { active_communities[i] = 0; continue; } auto element = queues[i].top(); queues[i].pop(); while (element.second > 
coverageVectors[i][element.first]) { element.second = coverageVectors[i][element.first]; queues[i].push(element); element = queues[i].top(); queues[i].pop(); } auto cmp = [=](const RRRset &a) -> auto { return !std::binary_search(a.begin(), a.end(), element.first); }; auto itr = partition(RRRcollection[i].begin(), ends[i], cmp, std::forward<sequential_tag>(ex_tag)); if (std::distance(itr, ends[i]) < std::distance(RRRcollection[i].begin(), itr)) { UpdateCounters(itr, ends[i], coverageVectors[i], std::forward<sequential_tag>(ex_tag)); } else { if (std::is_same<execution_tag, omp_parallel_tag>::value) { #pragma omp parallel for simd for (size_t j = 0; j < coverageVectors[i].size(); ++j) coverageVectors[i][j] = 0; } else { std::fill(coverageVectors[i].begin(), coverageVectors[i].end(), 0); } CountOccurrencies(RRRcollection[i].begin(), itr, coverageVectors[i].begin(), coverageVectors[i].end(), std::forward<sequential_tag>(ex_tag)); } ends[i] = itr; double contribution = RRRcollection[i].size() ? 
(double) element.second / total_delta // typecast to double to stop division from returning zero : 0; vertex_contribution_pair vcp{communities[i].convertID(element.first), contribution}; // Handle the global index insertion // std::lock_guard<std::mutex> _(global_heap_mutex); std::pop_heap(global_heap.begin(), global_heap.end(), heap_cmp); global_heap.back() = vcp; std::push_heap(global_heap.begin(), global_heap.end(), heap_cmp); if (global_heap.front() == vcp) active_communities[i] = 0; } } std::pop_heap(global_heap.begin(), global_heap.end(), heap_cmp); global_heap.pop_back(); double coverage = 0; std::vector<typename GraphTy::vertex_type> seeds; seeds.reserve(k); std::sort_heap(global_heap.begin(), global_heap.end(), heap_cmp); for (auto e : global_heap) { seeds.push_back(e.first); coverage += e.second; } return seeds; } template <typename GraphTy, typename RRRset, typename execution_tag> auto FindMostInfluentialSet(const std::vector<GraphTy> &communities, size_t k, std::vector<std::vector<RRRset>> &RRRcollection, execution_tag &&ex_tag) { spdlog::get("console")->info("SeedSelect start"); using vertex_type = typename GraphTy::vertex_type; Compare<vertex_type> cmp; using priorityQueue = std::priority_queue<std::pair<vertex_type, size_t>, std::vector<std::pair<vertex_type, size_t>>, decltype(cmp)>; // Count occurrencies for all communities std::vector<std::vector<uint32_t>> coverageVectors(communities.size()); std::vector<priorityQueue> queues(communities.size()); std::vector<typename std::vector<RRRset>::iterator> ends(communities.size()); double total_delta = 0; #pragma omp parallel for schedule(dynamic) reduction(+ : total_delta) for (size_t i = 0; i < communities.size(); ++i) { coverageVectors[i] = std::vector<uint32_t>(communities[i].num_nodes(), 0); CountOccurrencies(RRRcollection[i].begin(), RRRcollection[i].end(), coverageVectors[i].begin(), coverageVectors[i].end(), std::forward<sequential_tag>(sequential_tag{})); std::vector<std::pair<vertex_type, 
size_t>> queue_storage( communities[i].num_nodes()); InitHeapStorage(coverageVectors[i].begin(), coverageVectors[i].end(), queue_storage.begin(), queue_storage.end(), std::forward<sequential_tag>(sequential_tag{})); queues[i] = std::move(priorityQueue(cmp, std::move(queue_storage))); ends[i] = RRRcollection[i].end(); total_delta += RRRcollection[i].size(); } spdlog::get("console")->flush(); // Init on heap per community using vertex_contribution_pair = std::pair<vertex_type, double>; std::vector<vertex_contribution_pair> global_heap( k + 1, vertex_contribution_pair{-1, -1.0}); std::vector<uint64_t> active_communities(communities.size(), 1); auto heap_cmp = [](const vertex_contribution_pair &a, const vertex_contribution_pair &b) -> bool { return a.second > b.second; }; std::make_heap(global_heap.begin(), global_heap.end(), heap_cmp); std::mutex global_heap_mutex; // for each communities do in parallel // int totalRRCount = 0; // for (size_t i = 0; i < communities.size(); ++i) { // totalRRCount += RRRcollection[i].size(); // } size_t iteration = 0; while (!std::all_of(active_communities.begin(), active_communities.end(), [](const uint64_t &v) -> bool { return v == 0; })) { // int active = 0; // for (size_t i = 0; i < active_communities.size(); ++i) { // if (active_communities[i] == 1) active++; // } // std::cout << "Active Communities:" << active << std::endl; #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < communities.size(); ++i) { if (active_communities[i] == 0) continue; if (queues[i].empty()) { active_communities[i] = 0; continue; } auto element = queues[i].top(); queues[i].pop(); while (element.second > coverageVectors[i][element.first]) { element.second = coverageVectors[i][element.first]; queues[i].push(element); element = queues[i].top(); queues[i].pop(); } auto cmp = [=](const RRRset &a) -> auto { return !std::binary_search(a.begin(), a.end(), element.first); }; auto itr = partition(RRRcollection[i].begin(), ends[i], cmp, 
std::forward<sequential_tag>(sequential_tag{})); if (std::distance(itr, ends[i]) < std::distance(RRRcollection[i].begin(), itr)) { UpdateCounters(itr, ends[i], coverageVectors[i], std::forward<sequential_tag>(sequential_tag{})); } else { // if (std::is_same<execution_tag, omp_parallel_tag>::value) { // #pragma omp parallel for simd // for (size_t j = 0; j < coverageVectors[i].size(); ++j) // coverageVectors[i][j] = 0; // } else { std::fill(coverageVectors[i].begin(), coverageVectors[i].end(), 0); // } CountOccurrencies(RRRcollection[i].begin(), itr, coverageVectors[i].begin(), coverageVectors[i].end(), std::forward<sequential_tag>(sequential_tag{})); } ends[i] = itr; double contribution = RRRcollection[i].size() ? (double) element.second / total_delta // typecast to double to stop division from returning zero : 0; vertex_contribution_pair vcp{communities[i].convertID(element.first), contribution}; // Handle the global index insertion std::lock_guard<std::mutex> _(global_heap_mutex); std::pop_heap(global_heap.begin(), global_heap.end(), heap_cmp); global_heap.back() = vcp; std::push_heap(global_heap.begin(), global_heap.end(), heap_cmp); if (global_heap.front() == vcp) active_communities[i] = 0; } } std::pop_heap(global_heap.begin(), global_heap.end(), heap_cmp); global_heap.pop_back(); double coverage = 0; std::vector<typename GraphTy::vertex_type> seeds; seeds.reserve(k); std::sort_heap(global_heap.begin(), global_heap.end(), heap_cmp); for (auto e : global_heap) { seeds.push_back(e.first); coverage += e.second; } return seeds; } template <typename GraphTy, typename ConfTy, typename GeneratorTy, typename RecordTy, typename diff_model_tag> auto LouvainIMM(const std::vector<GraphTy> &communities, ConfTy &CFG, double l, GeneratorTy &gen, std::vector<RecordTy> &records, diff_model_tag &&model_tag, sequential_tag &&ex_tag) { using vertex_type = typename GraphTy::vertex_type; size_t k = CFG.k; double epsilon = CFG.epsilon; using RRRsetCollection = 
std::vector<RRRset<GraphTy>>; std::vector<RRRsetCollection> R(communities.size()); // For each community do ThetaEstimation and Sampling for (size_t i = 0; i < communities.size(); ++i) { double l_1 = l * (1 + 1 / std::log2(communities[i].num_nodes())); R[i] = Sampling(communities[i], CFG, l_1, gen, records[i], std::forward<diff_model_tag>(model_tag), std::forward<sequential_tag>(ex_tag)); } // Global seed selection using the heap auto S = FindMostInfluentialSet(communities, k, R, std::forward<sequential_tag>(ex_tag)); return std::make_pair(S, records); } //! Influence Maximization using Community Structure. //! //! The algorithm uses the Louvain method for community detection and then //! IMM to select seeds frome the communities. //! //! \tparam GraphTy The type of the input graph. //! \tparam PRNG The type of the parallel random number generator. //! \tparam diff_model_tag Type-Tag to selecte the diffusion model. //! \tparam execution_tag Type-Tag to select the execution policy. //! //! \param communities The input graphs. The graphs are transoposed. //! \param k The size of the seed set. //! \param epsilon The parameter controlling the approximation guarantee. //! \param l Parameter usually set to 1. //! \param gen The parallel random number generator. //! \param model_tag The diffusion model tag. //! \param ex_tag The execution policy tag. 
template <typename GraphTy, typename ConfTy, typename GeneratorTy, typename diff_model_tag> auto LouvainIMM(const std::vector<GraphTy> &communities, ConfTy &CFG, double l, std::vector<GeneratorTy> &gen, diff_model_tag &&model_tag, omp_parallel_tag &&ex_tag) { using vertex_type = typename GraphTy::vertex_type; size_t k = CFG.k; double epsilon = CFG.epsilon; using RRRsetCollection = std::vector<RRRset<GraphTy>>; std::vector<RRRsetCollection> R(communities.size()); CFG.k = std::ceil((double)CFG.k / (double)communities.size()); // For each community do ThetaEstimation and Sampling for (size_t i = 0; i < communities.size(); ++i) { double l_1 = l * (1 + 1 / std::log2(communities[i].num_nodes())); std::cout << i << std::endl; R[i] = Sampling(communities[i], CFG, l_1, gen[i], gen[i].execution_record(), std::forward<diff_model_tag>(model_tag), std::forward<omp_parallel_tag>(ex_tag)); } // Global seed selection using the heap auto S = FindMostInfluentialSet(communities, k, R, std::forward<omp_parallel_tag>(ex_tag)); std::vector<IMMExecutionRecord> records(communities.size()); for (auto & generator : gen) { records.push_back(generator.execution_record()); } return std::make_pair(S, records); } } // namespace ripples #endif /* RIPPLES_LOUVAIN_IMM_H */
BRKGA.h
/* * BRKGA.h * * This class encapsulates a Biased Random-key Genetic Algorithm (for minimization problems) with K * independent Populations stored in two vectors of Population, current and previous. It supports * multi-threading via OpenMP, and implements the following key methods: * * - BRKGA() constructor: initializes the populations with parameters described below. * - evolve() operator: evolve each Population following the BRKGA methodology. This method * supports OpenMP to evolve up to K independent Populations in parallel. * Please note that double Decoder::decode(...) MUST be thread-safe. * * Required hyperparameters: * - n: number of genes in each chromosome * - p: number of elements in each population * - pe: pct of elite items into each population * - pm: pct of mutants introduced at each generation into the population * - rhoe: probability that an offspring inherits the allele of its elite parent * * Optional parameters: * - K: number of independent Populations * - MAX_THREADS: number of threads to perform parallel decoding -- WARNING: Decoder::decode() MUST * be thread-safe! * * Required templates are: * RNG: random number generator that implements the methods below. * - RNG(unsigned long seed) to initialize a new RNG with 'seed' * - double rand() to return a double precision random deviate in range [0,1) * - unsigned long randInt() to return a >=32-bit unsigned random deviate in range [0,2^32-1) * - unsigned long randInt(N) to return a unsigned random deviate in range [0, N] with N < 2^32 * * Decoder: problem-specific decoder that implements any of the decode methods outlined below. When * compiling and linking BRKGA with -fopenmp (i.e., with multithreading support via * OpenMP), the method must be thread-safe. 
* - double decode(const vector< double >& chromosome) const, if you don't want to change * chromosomes inside the framework, or * - double decode(vector< double >& chromosome) const, if you'd like to update a chromosome * * Created on : Jun 22, 2010 by rtoso * Last update: Sep 28, 2010 by rtoso * Authors: Rodrigo Franco Toso <rtoso@cs.rutgers.edu> */ #ifndef BRKGA_H #define BRKGA_H #include <omp.h> #include <algorithm> #include <exception> #include <stdexcept> #include "Population.h" template< class Decoder, class RNG > class BRKGA { public: /* * Default constructor * Required hyperparameters: * - n: number of genes in each chromosome * - p: number of elements in each population * - pe: pct of elite items into each population * - pm: pct of mutants introduced at each generation into the population * - rhoe: probability that an offspring inherits the allele of its elite parent * * Optional parameters: * - K: number of independent Populations * - MAX_THREADS: number of threads to perform parallel decoding * WARNING: Decoder::decode() MUST be thread-safe; safe if implemented as * + double Decoder::decode(std::vector< double >& chromosome) const */ BRKGA(unsigned n, unsigned p, double pe, double pm, double rhoe, const Decoder& refDecoder, RNG& refRNG, unsigned K = 1, unsigned MAX_THREADS = 1); /** * Destructor */ ~BRKGA(); /** * Resets all populations with brand new keys */ void reset(); /** * Evolve the current populations following the guidelines of BRKGAs * @param generations number of generations (must be even and nonzero) * @param J interval to exchange elite chromosomes (must be even; 0 ==> no synchronization) * @param M number of elite chromosomes to select from each population in order to exchange */ void evolve(unsigned generations = 1); /** * Exchange elite-solutions between the populations * @param M number of elite chromosomes to select from each population */ void exchangeElite(unsigned M); /** * Set individuals to initial population (only one population 
in case of multiple ones). * @param chromosomes a set of individuals described as double vectors * between 0 and 1. */ void setInitialPopulation(const std::vector< std::vector< double > >& chromosomes, int ini_population_size, int population); /** * Returns the current population */ const Population& getPopulation(unsigned k = 0) const; /** * Returns the chromosome with best fitness so far among all populations */ const std::vector< double >& getBestChromosome() const; /** * Returns the best fitness found so far among all populations */ double getBestFitness() const; // Return copies to the internal parameters: unsigned getN() const; unsigned getP() const; unsigned getPe() const; unsigned getPm() const; unsigned getPo() const; double getRhoe() const; unsigned getK() const; unsigned getMAX_THREADS() const; private: // Hyperparameters: const unsigned n; // number of genes in the chromosome const unsigned p; // number of elements in the population const unsigned pe; // number of elite items in the population const unsigned pm; // number of mutants introduced at each generation into the population const double rhoe; // probability that an offspring inherits the allele of its elite parent // Templates: RNG& refRNG; // reference to the random number generator const Decoder& refDecoder; // reference to the problem-dependent Decoder // Parallel populations parameters: const unsigned K; // number of independent parallel populations const unsigned MAX_THREADS; // number of threads for parallel decoding // Data: std::vector< Population* > previous; // previous populations std::vector< Population* > current; // current populations // Local operations: void initialize(const unsigned i); // initialize current population 'i' with random keys void evolution(Population& curr, Population& next); bool isRepeated(const std::vector< double >& chrA, const std::vector< double >& chrB) const; }; template< class Decoder, class RNG > BRKGA< Decoder, RNG >::BRKGA(unsigned _n, unsigned _p, 
double _pe, double _pm, double _rhoe, const Decoder& decoder, RNG& rng, unsigned _K, unsigned MAX) : n(_n), p(_p), pe(unsigned(_pe * p)), pm(unsigned(_pm * p)), rhoe(_rhoe), refRNG(rng), refDecoder(decoder), K(_K), MAX_THREADS(MAX), previous(K, 0), current(K, 0) { // Error check: using std::range_error; if(n == 0) { throw range_error("Chromosome size equals zero."); } if(p == 0) { throw range_error("Population size equals zero."); } if(pe == 0) { throw range_error("Elite-set size equals zero."); } if(pe > p) { throw range_error("Elite-set size greater than population size (pe > p)."); } if(pm > p) { throw range_error("Mutant-set size (pm) greater than population size (p)."); } if(pe + pm > p) { throw range_error("elite + mutant sets greater than population size (p)."); } if(K == 0) { throw range_error("Number of parallel populations cannot be zero."); } // Initialize and decode each chromosome of the current population, then copy to previous: for(unsigned i = 0; i < K; ++i) { // Allocate: current[i] = new Population(n, p); // Initialize: initialize(i); // Then just copy to previous: previous[i] = new Population(*current[i]); } } template< class Decoder, class RNG > BRKGA< Decoder, RNG >::~BRKGA() { for(unsigned i = 0; i < K; ++i) { delete current[i]; delete previous[i]; } } template< class Decoder, class RNG > const Population& BRKGA< Decoder, RNG >::getPopulation(unsigned k) const { return (*current[k]); } template< class Decoder, class RNG > double BRKGA< Decoder, RNG >::getBestFitness() const { double best = current[0]->fitness[0].first; for(unsigned i = 1; i < K; ++i) { if(current[i]->fitness[0].first < best) { best = current[i]->fitness[0].first; } } return best; } template< class Decoder, class RNG > const std::vector< double >& BRKGA< Decoder, RNG >::getBestChromosome() const { unsigned bestK = 0; for(unsigned i = 1; i < K; ++i) { if( current[i]->getBestFitness() < current[bestK]->getBestFitness() ) { bestK = i; } } return current[bestK]->getChromosome(0); 
// The top one :-) } template< class Decoder, class RNG > void BRKGA< Decoder, RNG >::reset() { for(unsigned i = 0; i < K; ++i) { initialize(i); } } template< class Decoder, class RNG > void BRKGA< Decoder, RNG >::evolve(unsigned generations) { if(generations == 0) { throw std::range_error("Cannot evolve for 0 generations."); } for(unsigned i = 0; i < generations; ++i) { for(unsigned j = 0; j < K; ++j) { evolution(*current[j], *previous[j]); // First evolve the population (curr, next) std::swap(current[j], previous[j]); // Update (prev = curr; curr = prev == next) } } } template< class Decoder, class RNG > void BRKGA< Decoder, RNG >::exchangeElite(unsigned M) { if(M == 0 || M >= p) { throw std::range_error("M cannot be zero or >= p."); } for(unsigned i = 0; i < K; ++i) { // Population i will receive some elite members from each Population j below: unsigned dest = p - 1; // Last chromosome of i (will be updated below) for(unsigned j = 0; j < K; ++j) { if(j == i) { continue; } // Copy the M best of Population j into Population i: for(unsigned m = 0; m < M; ++m) { // Copy the m-th best of Population j into the 'dest'-th position of Population i: const std::vector< double >& bestOfJ = current[j]->getChromosome(m); std::copy(bestOfJ.begin(), bestOfJ.end(), current[i]->getChromosome(dest).begin()); current[i]->fitness[dest].first = current[j]->fitness[m].first; --dest; } } } for(int j = 0; j < int(K); ++j) { current[j]->sortFitness(); } } template< class Decoder, class RNG > void BRKGA< Decoder, RNG >::setInitialPopulation(const std::vector< std::vector< double > >& chromosomes, int ini_population_size, int population) { //current[0] = new Population(n, chromosomes.size()); unsigned i = 0; for(std::vector< std::vector< double > >::const_iterator it_chrom = chromosomes.begin(); it_chrom != chromosomes.end() && i < ini_population_size; ++it_chrom, ++i) { if(it_chrom->size() != n) { throw std::runtime_error("Error on setting initial population: number of genes isn't 
equal!"); } std::copy(it_chrom->begin(), it_chrom->end(), current[population]->population[i].begin()); //std::cout << "VOU CALCULAR CUSTO" << std::endl; current[population]->setFitness(i, refDecoder.decode((*current[population])(i)) ); } current[population]->sortFitness(); } template< class Decoder, class RNG > inline void BRKGA< Decoder, RNG >::initialize(const unsigned i) { for(unsigned j = 0; j < p; ++j) { for(unsigned k = 0; k < n; ++k) { (*current[i])(j, k) = refRNG.rand(); } } // Decode: #ifdef _OPENMP #pragma omp parallel for num_threads(MAX_THREADS) #endif for(int j = 0; j < int(p); ++j) { current[i]->setFitness(j, refDecoder.decode((*current[i])(j)) ); } // Sort: current[i]->sortFitness(); } template< class Decoder, class RNG > inline void BRKGA< Decoder, RNG >::evolution(Population& curr, Population& next) { // We now will set every chromosome of 'current', iterating with 'i': unsigned i = 0; // Iterate chromosome by chromosome unsigned j = 0; // Iterate allele by allele // 2. The 'pe' best chromosomes are maintained, so we just copy these into 'current': while(i < pe) { for(j = 0 ; j < n; ++j) { next(i,j) = curr(curr.fitness[i].second, j); } next.fitness[i].first = curr.fitness[i].first; next.fitness[i].second = i; ++i; } // 3. We'll mate 'p - pe - pm' pairs; initially, i = pe, so we need to iterate until i < p - pm: while(i < p - pm) { // Select an elite parent: const unsigned eliteParent = (refRNG.randInt(pe - 1)); // Select a non-elite parent: const unsigned noneliteParent = pe + (refRNG.randInt(p - pe - 1)); // Mate: for(j = 0; j < n; ++j) { const unsigned sourceParent = ((refRNG.rand() < rhoe) ? eliteParent : noneliteParent); next(i, j) = curr(curr.fitness[sourceParent].second, j); //next(i, j) = (refRNG.rand() < rhoe) ? 
curr(curr.fitness[eliteParent].second, j) : // curr(curr.fitness[noneliteParent].second, j); } ++i; } // We'll introduce 'pm' mutants: while(i < p) { for(j = 0; j < n; ++j) { next(i, j) = refRNG.rand(); } ++i; } // Time to compute fitness, in parallel: #ifdef _OPENMP #pragma omp parallel for num_threads(MAX_THREADS) #endif for(int i = int(pe); i < int(p); ++i) { next.setFitness( i, refDecoder.decode(next.population[i]) ); } // Now we must sort 'current' by fitness, since things might have changed: next.sortFitness(); } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getN() const { return n; } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getP() const { return p; } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getPe() const { return pe; } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getPm() const { return pm; } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getPo() const { return p - pe - pm; } template< class Decoder, class RNG > double BRKGA<Decoder, RNG>::getRhoe() const { return rhoe; } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getK() const { return K; } template< class Decoder, class RNG > unsigned BRKGA<Decoder, RNG>::getMAX_THREADS() const { return MAX_THREADS; } #endif
knucleotide-8.c
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// Contributed by Jeremy Zerfas

// This controls the initial size used for the hash tables. This needs to be a
// power of two because a mask is also calculated from this by using
// INITIAL_HASH_TABLE_SIZE-1.
#define INITIAL_HASH_TABLE_SIZE 64

// This controls the maximum length for each set of nucleotide sequence
// frequencies and each nucleotide sequence count output by this program.
#define MAXIMUM_OUTPUT_LENGTH 4096

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;

//******************************************
//*** Start of hash table implementation ***
//******************************************

// In order to prevent too many collisions from occurring the hash table is
// grown when it is filled to a certain fraction (12/16 == 3/4) of its
// capacity. Setting the fraction too low causes the hash table to be made
// larger than it needs to be which reduces the effectiveness of caches and
// setting it too high will cause a large amount of collisions.
//
// FIX: this used to be "#define HASH_TABLE_LOAD_LIMIT 12/16", a bare integer
// expression that evaluates to 0 on its own and only produced the intended
// value through unparenthesized expansion inside "size*12/16". It is now a
// properly parenthesized function-like macro with identical arithmetic
// (multiply first, then integer-divide).
#define HASH_TABLE_ELEMENT_LIMIT(size) ((size)*12/16)

// One open-addressing slot: a negative key marks an empty slot, otherwise key
// and value hold the unmodified key and its occurrence count.
typedef struct element{
	#define EMPTY_VALUE_KEY -1
	int64_t	key;
	int32_t	value;
} element;

// Open-addressing hash table with linear probing. size is always a power of
// two so that key_Mask (size-1) can wrap probe indexes cheaply.
typedef struct hash_table{
	intnative_t	size;          // The current capacity of the hash table. Never
	                           // will actually be reached since the hash table
	                           // will be grown first when it reaches
	                           // element_Limit.
	int64_t	key_Mask;          // ANDed with hashed keys so that indexes do not
	                           // exceed the size of the hash table.
	intnative_t	element_Limit; // Controls the maximum amount of elements that
	                           // are allowed in the hash table before it will
	                           // be grown.
	intnative_t	element_Count; // The current amount of elements in the table.
	element *	elements;
} hash_table;

// Create a hash table with space allocated for requested_Size elements.
// requested_Size must be a power of two since the mask for keys is defined as
// requested_Size-1. Ownership of the returned table passes to the caller
// (release with destroy_Hash_Table()).
static hash_table * create_Hash_Table(intnative_t requested_Size){
	hash_table * created_Hash_Table=malloc(sizeof(hash_table));

	// Initialize the properties for the created_Hash_Table.
	created_Hash_Table->size=requested_Size;
	created_Hash_Table->key_Mask=requested_Size-1;
	created_Hash_Table->element_Limit=HASH_TABLE_ELEMENT_LIMIT(requested_Size);
	created_Hash_Table->element_Count=0;
	created_Hash_Table->elements=malloc(requested_Size*sizeof(element));

	// Initialize all elements in the created_Hash_Table to have initial keys
	// set to EMPTY_VALUE_KEY and values set to 0.
	for(intnative_t i=0; i<requested_Size; i++)
		created_Hash_Table->elements[i]=(element){EMPTY_VALUE_KEY, 0};

	return created_Hash_Table;
}

// Destroy hash table pointed to by hash_Table_To_Destroy and all of its
// elements.
static void destroy_Hash_Table(hash_table * hash_Table_To_Destroy){
	free(hash_Table_To_Destroy->elements);
	free(hash_Table_To_Destroy);
}

// Hash function used to hash keys (fully parenthesized so that any expression
// may be passed as the argument).
#define hash_Key(key) (((key) ^ ((key)>>7)))

// Grow hash_Table_To_Grow by quadrupling it in size. A new elements array is
// created, the existing elements are inserted into the new elements array, the
// old elements array is deleted, and the properties for hash_Table_To_Grow are
// updated.
static void grow_Hash_Table(hash_table * const hash_Table_To_Grow){
	const intnative_t old_Hash_Table_Size=hash_Table_To_Grow->size;
	const intnative_t new_Hash_Table_Size=old_Hash_Table_Size*4;

	// Keep a reference to old_Hash_Table_Elements and allocate space for
	// new_Hash_Table_Elements.
	element * const old_Hash_Table_Elements=hash_Table_To_Grow->elements;
	element * const new_Hash_Table_Elements=malloc(new_Hash_Table_Size*
	  sizeof(element));

	// Update the properties for the hash_Table_To_Grow.
	hash_Table_To_Grow->size=new_Hash_Table_Size;
	hash_Table_To_Grow->key_Mask=new_Hash_Table_Size-1;
	hash_Table_To_Grow->element_Limit=
	  HASH_TABLE_ELEMENT_LIMIT(new_Hash_Table_Size);
	hash_Table_To_Grow->elements=new_Hash_Table_Elements;

	// Initialize all elements in new_Hash_Table_Elements to have initial keys
	// set to EMPTY_VALUE_KEY and values set to 0.
	for(intnative_t i=0; i<new_Hash_Table_Size; i++)
		new_Hash_Table_Elements[i]=(element){EMPTY_VALUE_KEY, 0};

	// Copy all old_Hash_Table_Elements to new_Hash_Table_Elements. This code is
	// simpler and faster than using the find_Or_Add_Element_For_Key() function
	// since we don't need to worry about updating element_Count and checking to
	// see if we have reached element_Limit.
	for(intnative_t i=0; i<old_Hash_Table_Size; i++){
		if(old_Hash_Table_Elements[i].key>=0){
			int64_t elements_Index=hash_Key(old_Hash_Table_Elements[i].key) &
			  hash_Table_To_Grow->key_Mask;

			// Find the first free spot in new_Hash_Table_Elements and copy the
			// old element to it.
			while(new_Hash_Table_Elements[elements_Index].key>=0){
				elements_Index++;
				elements_Index&=hash_Table_To_Grow->key_Mask;
			}
			new_Hash_Table_Elements[elements_Index]=old_Hash_Table_Elements[i];
		}
	};

	free(old_Hash_Table_Elements);
}

// See if key is already in hash_Table and if so then return the element for it,
// otherwise add the key to hash_table (and grow it if necessary) and return the
// element for it. key must be non-negative (negative keys mark empty slots).
static inline element * find_Or_Add_Element_For_Key(
  hash_table * const hash_Table, const int64_t key){
	int64_t elements_Index=hash_Key(key) & hash_Table->key_Mask;

	// Search hash_Table for key using linear probing.
	element * const elements=hash_Table->elements;
	while(elements[elements_Index].key!=key){

		// If we reach a key with a negative value then that means that key is
		// not in hash_Table so we will go ahead and add it.
		if(elements[elements_Index].key<0){

			// If we're at the hash table's load limit then grow the hash table
			// and call this function a second time to add and return an item.
			if(hash_Table->element_Count>=hash_Table->element_Limit){
				grow_Hash_Table(hash_Table);
				return find_Or_Add_Element_For_Key(hash_Table, key);
			}

			// Set the key for this element to key, increment element_Count, and
			// break out of the loop so that this element will be returned.
			elements[elements_Index].key=key;
			hash_Table->element_Count++;
			break;
		}

		// Still haven't found key or a free spot so continue to the next index.
		elements_Index++;
		elements_Index&=hash_Table->key_Mask;
	}

	return &(elements[elements_Index]);
}

//******************************************
//***  End of hash table implementation  ***
//******************************************

// Function to use when sorting elements with qsort() later. Elements with
// larger values will come first and in cases of identical values then elements
// with smaller keys will come first.
static int element_Compare(const void * uncasted_Left_Element,
  const void * uncasted_Right_Element){
	const element * left_Element=uncasted_Left_Element,
	  * right_Element=uncasted_Right_Element;

	// Sort based on element values.
	if(left_Element->value < right_Element->value) return 1;
	if(left_Element->value > right_Element->value) return -1;

	// If we got here then both items have the same value so then sort based on
	// key.
	if(left_Element->key > right_Element->key)
		return 1;
	else
		return -1;
}

// Macro to convert a nucleotide character to a code. Note that upper and lower
// case ASCII letters only differ in the fifth bit from the right and we only
// need the three least significant bits to differentiate the letters 'A', 'C',
// 'G', and 'T'.
Spaces in this array/string will never be used as long as // characters other than 'A', 'C', 'G', and 'T' aren't used. #define code_For_Nucleotide(nucleotide) (" \0 \1\3 \2"[nucleotide & 0x7]) // And one more macro to convert the codes back to nucleotide characters. #define nucleotide_For_Code(code) ("ACGT"[code & 0x3]) // Generate frequences for all nucleotide sequences in sequences that are of // length sequence_Length and then save it to output. static void generate_Frequencies_For_Sequences(char * sequences, intnative_t sequences_Length, intnative_t sequence_Length, char * output){ hash_table * hash_Table=create_Hash_Table(INITIAL_HASH_TABLE_SIZE); // Add all the sequences of sequence_Length to hash_Table. int64_t code=0; for(intnative_t i=0; i<sequences_Length; i++){ const int64_t mask=((int64_t)1<<2*sequence_Length)-1; code=(code<<2 & mask) | sequences[i]; if(i>=sequence_Length-1) find_Or_Add_Element_For_Key(hash_Table, code)->value++; } // Create an array of elements from hash_Table. intnative_t elements_Array_Size=hash_Table->element_Count; element * elements_Array=malloc(elements_Array_Size*sizeof(element)); for(intnative_t i=0, j=0; i<hash_Table->size; i++){ if(hash_Table->elements[i].key>=0){ elements_Array[j].key=hash_Table->elements[i].key; elements_Array[j].value=hash_Table->elements[i].value; j++; } } // Sort elements_Array. qsort(elements_Array, elements_Array_Size, sizeof(element), &element_Compare); // Calculate the total count of all elements. intnative_t total_Count=0; for(intnative_t i=0; i<elements_Array_Size; i++) total_Count+=elements_Array[i].value; // Print the frequencies for each element. for(intnative_t output_Position=0, i=0; i<elements_Array_Size; i++){ // Decode key back into a nucleotide sequence. 
char nucleotide_Sequence[sequence_Length+1]; for(intnative_t j=sequence_Length-1; j>-1; j--){ nucleotide_Sequence[j]=nucleotide_For_Code(elements_Array[i].key); elements_Array[i].key>>=2; } nucleotide_Sequence[sequence_Length]='\0'; // Output the frequency for nucleotide_Sequence to output. output_Position+=snprintf(output+output_Position, MAXIMUM_OUTPUT_LENGTH-output_Position, "%s %.3f\n", nucleotide_Sequence, 100.0f*elements_Array[i].value/total_Count); } free(elements_Array); destroy_Hash_Table(hash_Table); } // Generate a count for the number of time nucleotide_Sequence appears in // sequences and then save it to output. static void generate_Count_For_Sequence(char * sequences, const intnative_t sequences_Length, const char * nucleotide_Sequence, char * output){ const intnative_t nucleotide_Sequence_Length=strlen(nucleotide_Sequence); hash_table * hash_Table=create_Hash_Table(INITIAL_HASH_TABLE_SIZE); // Add all the sequences of nucleotide_Sequence_Length to hash_Table. int64_t key=0; for(intnative_t i=0; i<sequences_Length; i++){ const int64_t mask=((int64_t)1<<2*nucleotide_Sequence_Length)-1; key=(key<<2 & mask) | sequences[i]; if(i>=nucleotide_Sequence_Length) find_Or_Add_Element_For_Key(hash_Table, key)->value++; } // Generate key for the sequence. key=0; for(intnative_t i=0; i<nucleotide_Sequence_Length; i++) key=(key<<2) | code_For_Nucleotide(nucleotide_Sequence[i]); // Output the count for nucleotide_Sequence to output. intnative_t count=find_Or_Add_Element_For_Key(hash_Table, key)->value; snprintf(output, MAXIMUM_OUTPUT_LENGTH, "%jd\t%s", (intmax_t)count, nucleotide_Sequence); destroy_Hash_Table(hash_Table); } int main(){ char buffer[4096]; // Find the start of the third nucleotide sequence. while(fgets(buffer, sizeof(buffer), stdin) && memcmp(">THREE", buffer, sizeof(">THREE")-1)); // Start with 1 MB of storage for reading in the nucleotide sequence and // grow exponentially. 
intnative_t nucleotide_Sequence_Capacity=1048576; intnative_t nucleotide_Sequence_Size=0; char * nucleotide_Sequence=malloc(nucleotide_Sequence_Capacity); // Start reading and encoding the third nucleotide sequence. while(fgets(buffer, sizeof(buffer), stdin) && buffer[0]!='>'){ for(intnative_t i=0; buffer[i]!='\0'; i++){ if(buffer[i]!='\n') nucleotide_Sequence[nucleotide_Sequence_Size++]= code_For_Nucleotide(buffer[i]); } // Make sure we still have enough memory allocated for any potential // nucleotides in the next line. if(nucleotide_Sequence_Capacity-nucleotide_Sequence_Size < sizeof(buffer)){ nucleotide_Sequence_Capacity*=2; nucleotide_Sequence=realloc(nucleotide_Sequence, nucleotide_Sequence_Capacity); } } // Free up any leftover memory. nucleotide_Sequence=realloc(nucleotide_Sequence, nucleotide_Sequence_Size); char output_Buffer[7][MAXIMUM_OUTPUT_LENGTH]; // Do the following functions in parallel. #pragma omp parallel sections { #pragma omp section { generate_Frequencies_For_Sequences(nucleotide_Sequence, nucleotide_Sequence_Size, 1, output_Buffer[0]); } #pragma omp section { generate_Frequencies_For_Sequences(nucleotide_Sequence, nucleotide_Sequence_Size, 2, output_Buffer[1]); } #pragma omp section { generate_Count_For_Sequence(nucleotide_Sequence, nucleotide_Sequence_Size, "GGT", output_Buffer[2]); } #pragma omp section { generate_Count_For_Sequence(nucleotide_Sequence, nucleotide_Sequence_Size, "GGTA", output_Buffer[3]); } #pragma omp section { generate_Count_For_Sequence(nucleotide_Sequence, nucleotide_Sequence_Size, "GGTATT", output_Buffer[4]); } #pragma omp section { generate_Count_For_Sequence(nucleotide_Sequence, nucleotide_Sequence_Size, "GGTATTTTAATT", output_Buffer[5]); } #pragma omp section { generate_Count_For_Sequence(nucleotide_Sequence, nucleotide_Sequence_Size, "GGTATTTTAATTTATAGT", output_Buffer[6]); } } for(intnative_t i=0; i<7; printf("%s\n", output_Buffer[i++])); free(nucleotide_Sequence); return 0; }
prime1.c
#include <stdio.h> #define LEFT 30000000 #define RIGHT 30000200 int main() { int i, j; int mark; int count; count=0; #pragma omp parallel for private(j, mark) for (i=LEFT;i<=RIGHT;++i) { mark=1; for (j=2;j<i/2;++j) { if (i%j==0) { mark=0; break; } } if (mark==1) { #pragma omp critical count++; } } printf("%d\n", count); return 0; }
6.race3.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s // Taken from ompVerify, Sec 4.3 #include <omp.h> #define N 20 int main() { int i, j, A[N], a[N][N], B[N][N]; #pragma omp parallel for private(j) for (i = 0; i < N; i++) for (j = 0; j < i; j++) { A[j] = a[i][j]; B[i][j] = A[j]; } } // CHECK: Data Race detected // END
palindrome_trie.c
#include <ctype.h>
#include <omp.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_LEN 32           // maximum word length, including the terminator
#define MAX_WORDS (1 << 15)  // capacity of the fixed dictionary array
#define ALPHABET 128         // trie fan-out: one child slot per ASCII code

// Trie node: one child pointer per ASCII character plus an end-of-word flag.
// (Tag renamed from _TRIE_NODE: identifiers beginning with an underscore and
// an uppercase letter are reserved for the implementation.)
typedef struct trie_node_struct {
  struct trie_node_struct* child[ALPHABET];
  bool end;
} trie_node;

trie_node* trie;                      // root of the shared trie
char dictionary[MAX_WORDS][MAX_LEN];  // words read from the input file

// Insert `string` into the global trie.
// Called concurrently from an OpenMP parallel-for: node creation is
// serialized by the critical section, and the read of child[index] after the
// critical section always observes a non-NULL pointer (flush on exit).
// Bytes outside 0..127 are skipped so a signed `char` can never produce a
// negative / out-of-bounds child index (the original indexed child[] with a
// raw, possibly-signed char -- undefined behavior for non-ASCII input).
void add_trie(const char* string) {
  trie_node* cur = trie;
  for (int i = 0; string[i] != '\0'; i++) {
    const int index = (unsigned char) string[i];
    if (index >= ALPHABET) {
      continue;  // ignore non-ASCII bytes
    }
#pragma omp critical
    {
      if (cur->child[index] == NULL) {
        // NOTE(review): calloc result unchecked; on OOM this crashes on the
        // next dereference.  Kept as-is to match the program's style.
        cur->child[index] = (trie_node*) calloc(1, sizeof(trie_node));
      }
    }
    cur = cur->child[index];
  }
  // Several threads may mark the same node; make the flag update a
  // well-defined atomic write instead of a racy plain store.
#pragma omp atomic write
  cur->end = true;
}

// Return true iff the reverse of `query` is a word stored in the trie.
// Runs after the build loop's implicit barrier, so it performs only reads.
bool query_reverse_trie(const char* query) {
  char reversed[MAX_LEN] = {'\0'};
  for (int i = strlen(query) - 1, j = 0; i >= 0; i--, j++) {
    reversed[j] = query[i];
  }
  trie_node* cur = trie;
  for (int i = 0; reversed[i] != '\0'; i++) {
    const int index = (unsigned char) reversed[i];
    if (index >= ALPHABET) {
      continue;  // mirror add_trie(): non-ASCII bytes were never inserted
    }
    if (cur->child[index] == NULL) {
      return false;
    }
    cur = cur->child[index];
  }
  return cur->end;
}

// Read a word list, then report every word whose reverse is also a word.
// Usage: THREAD_NUM INPUT_FILE OUTPUT_FILE.
int main(int argc, char* argv[]) {
  if (argc != 4) {
    fprintf(stderr, "Usage: %s THREAD_NUM INPUT_FILE OUTPUT_FILE\n", argv[0]);
    return 1;
  }
  const int THREAD_NUM = atoi(argv[1]);
  const char* INPUT_PATH = argv[2];
  const char* OUTPUT_PATH = argv[3];
  FILE *input, *output;
  int words_cnt = 0;
  double start, end;
  if ((input = fopen(INPUT_PATH, "r")) == NULL) {
    fprintf(stderr, "Error while opening file %s\n", INPUT_PATH);
    return 1;
  }
  if ((output = fopen(OUTPUT_PATH, "w")) == NULL) {
    fprintf(stderr, "Error while opening file %s\n", OUTPUT_PATH);
    return 1;
  }
  start = omp_get_wtime();
  // Read up to MAX_WORDS words, one per line, and strip any trailing "\r\n"
  // or "\n".  The original only recognized "\r\n", so Unix-format files kept
  // a '\n' inside every word, and a line containing just "\n" made it read
  // dictionary[n][-1] -- out of bounds.  It also never bounded words_cnt
  // against the dictionary's capacity.
  while (words_cnt < MAX_WORDS &&
         fgets(dictionary[words_cnt], MAX_LEN, input) != NULL) {
    size_t len = strlen(dictionary[words_cnt]);
    while (len > 0 && (dictionary[words_cnt][len - 1] == '\n' ||
                       dictionary[words_cnt][len - 1] == '\r')) {
      dictionary[words_cnt][--len] = '\0';
    }
    if (len > 0) {
      words_cnt++;
    }
  }
  trie = (trie_node*) calloc(1, sizeof(trie_node));
  omp_set_num_threads(THREAD_NUM);
#pragma omp parallel for
  for (int i = 0; i < words_cnt; i++) {
    add_trie(dictionary[i]);
  }
  // Matching words are written from inside the parallel loop, so the ORDER
  // of output lines depends on thread scheduling (each fprintf call itself
  // is locked by stdio, so individual lines are not interleaved on POSIX).
#pragma omp parallel for
  for (int i = 0; i < words_cnt; i++) {
    if (query_reverse_trie(dictionary[i])) {
      fprintf(output, "%s\n", dictionary[i]);
    }
  }
  end = omp_get_wtime();
  printf("Configuration: %d threads\tTime: %f\n", THREAD_NUM, end - start);
  fclose(input);
  fclose(output);
  return 0;
}
GB_unop__log2_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): since this file is generated, only comments were touched
// here; any real change belongs in the generator template.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__log2_fp32_fp32)
// op(A') function: GB (_unop_tran__log2_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = log2f (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = log2f (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = log2f (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG2 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__log2_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // A is not in bitmap form: apply the op to all anz entries.
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = log2f (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = log2f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__log2_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is shared by all generated unary ops and is
    // textually included here; it uses the GB_* macros defined above.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
target_teams_distribute_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized

// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized

// NOTE: This file is a clang '-verify' Sema regression test.  Every comment
// of the form "// <prefix>-<kind>@<line> {{...}}" below is a load-bearing
// diagnostic expectation matched against the compiler's output; do not edit,
// move, or reflow them (or the code lines they point at) without updating
// the expectations accordingly.

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd foo

void test_no_clause() {
  int i;
#pragma omp target teams distribute simd
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp target teams distribute simd' must be a for loop}}
#pragma omp target teams distribute simd
  ++i;
}

void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp target teams distribute simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

void test_invalid_clause() {
  int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
#pragma omp target teams distribute simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}

void test_non_identifiers() {
  int i, x;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
#pragma omp target teams distribute simd;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
#pragma omp target teams distribute simd private(x);
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
#pragma omp target teams distribute simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

void test_collapse() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute simd collapse
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd collapse(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd collapse()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
#pragma omp target teams distribute simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target teams distribute simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target teams distribute simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+4 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp target teams distribute simd collapse(2) firstprivate(i) // expected-note {{defined as firstprivate}}
  for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp target teams distribute simd' directive may not be firstprivate, predetermined as lastprivate}}
    for (int j = 0; j < 16; ++j)
#pragma omp parallel for reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}

void test_private() {
  int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd private(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd private(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd private(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd private()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd private(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute simd private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp target teams distribute simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

void test_lastprivate() {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp target teams distribute simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_firstprivate() {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
// expected-error@+1 {{lastprivate variable cannot be firstprivate}} expected-note@+1 {{defined as lastprivate}}
#pragma omp target teams distribute simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{lastprivate variable cannot be firstprivate}} expected-note@+1 2 {{defined as lastprivate}}
#pragma omp target teams distribute simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 3 {{lastprivate variable cannot be firstprivate}} expected-note@+1 3 {{defined as lastprivate}}
#pragma omp target teams distribute simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target teams distribute simd simdlen(64) safelen(8)
  for (i = 0; i < 16; ++i)
    ;
}

void test_loop_messages() {
  float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}

void test_nontemporal() {
  int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd nontemporal(
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd nontemporal(,
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd nontemporal(, )
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd nontemporal()
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd nontemporal(int)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp target teams distribute simd nontemporal(0)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp target teams distribute simd nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp target teams distribute simd nontemporal(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp target teams distribute simd nontemporal(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd nontemporal(x :)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp target teams distribute simd nontemporal(x :, )
  for (i = 0; i < 16; ++i)
    ;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp target teams distribute simd nontemporal(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd private(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd nontemporal(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp target teams distribute simd nontemporal(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd nontemporal(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd lastprivate(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} expected-error {{expected '(' after 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp target teams distribute simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp target teams distribute simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp target teams distribute simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp target teams distribute simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}}
  for (int i = 0; i < 10; ++i)
    ;
}
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
no_wait_2.c
/*
 * OpenMP `nowait` demonstration: three dependent work-sharing loops.
 *
 * Original (CPU-time-based) observation for 250000000 numbers:
 *   Parallel: 6s, Sequential: 4 — an artifact of timing with clock(),
 *   which sums CPU time across all threads; fixed below with wall time.
 */
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <omp.h>

/* c[i] = (a[i]+b[i])/2; z[i] = sqrt(c[i]); y[i] = z[i-1] + a[i].
 *
 * loop1 -> loop2 keeps `nowait`: both loops have identical bounds and a
 * static schedule, so OpenMP guarantees iteration i is executed by the
 * same thread in both loops — reading c[i] in loop2 is race-free.
 *
 * BUG FIX: loop2 previously also had `nowait`, but loop3 has a different
 * iteration count (n-1 vs n) and reads z[i-1], which can belong to
 * another thread's chunk; the static-schedule same-thread guarantee does
 * not apply, making it a data race.  The `nowait` was removed from loop2
 * so its implicit barrier orders all z writes before the y reads.
 */
static void no_wait_example_2(const unsigned int n, float *a, float *b,
                              float *c, float *y, float *z)
{
    unsigned int i = 0;
    #pragma omp parallel
    {
        #pragma omp for schedule(static) nowait
        for (i = 0; i < n; i++)
            c[i] = (a[i] + b[i])/2.0f;

        #pragma omp for schedule(static)
        for (i = 0; i < n; i++)
            z[i] = sqrtf(c[i]);

        /* `nowait` here is harmless: the parallel region's closing
         * barrier still synchronizes everything. */
        #pragma omp for schedule(static) nowait
        for (i = 1; i < n; i++)
            y[i] = z[i-1] + a[i];
    }
}

/* Sequential reference implementation of the same three loops. */
static void without_no_wait_example_2(const unsigned int n, float *a, float *b,
                                      float *c, float *y, float *z)
{
    unsigned int i = 0;
    for (i = 0; i < n; i++)
        c[i] = (a[i] + b[i])/2.0f;

    for (i = 0; i < n; i++)
        z[i] = sqrtf(c[i]);

    for (i = 1; i < n; i++)
        y[i] = z[i-1] + a[i];
}

/* Allocate `limit` floats filled with 1.0, 2.0, 3.0, ...
 * Returns NULL on allocation failure.
 * NOTE(review): beyond 2^24 the float counter can no longer represent
 * consecutive integers exactly, so later elements repeat — irrelevant
 * for this timing demo, but not a faithful 1..limit sequence. */
static float *initializer(unsigned int limit)
{
    float *array = malloc(sizeof(float) * limit);
    float counter = 1.0;
    if (NULL != array) {
        for (unsigned int i = 0; i < limit; i++) {
            array[i] = counter;
            counter += 1;
        }
    }
    return array;
}

int main(int argc, char **argv)
{
    const unsigned int n = 250000000;
    float start_parallel = 0, end_parallel = 0,
          start_sequential = 0, end_sequential = 0;
    float *a = NULL, *b = NULL, *c = NULL, *y = NULL, *z = NULL;

    a = initializer(n);
    b = initializer(n);
    c = initializer(n);
    y = initializer(n);
    z = initializer(n);
    /* BUG FIX: the five ~1 GB allocations were previously dereferenced
     * without any check; initializer() can return NULL. */
    if (!a || !b || !c || !y || !z) {
        fprintf(stderr, "allocation failed\n");
        free(a); free(b); free(c); free(y); free(z);
        return EXIT_FAILURE;
    }

    /* BUG FIX: the original measured clock()/CLOCKS_PER_SEC — integer
     * division (whole seconds only) of a CPU-time counter that sums over
     * all threads, which systematically penalizes the parallel version.
     * omp_get_wtime() gives monotonic wall-clock seconds. */
    start_parallel = (float) omp_get_wtime();
    no_wait_example_2(n, a, b, c, y, z);
    end_parallel = (float) omp_get_wtime();

    free(a); free(b); free(c); free(y); free(z);

    a = initializer(n);
    b = initializer(n);
    c = initializer(n);
    y = initializer(n);
    z = initializer(n);
    if (!a || !b || !c || !y || !z) {
        fprintf(stderr, "allocation failed\n");
        free(a); free(b); free(c); free(y); free(z);
        return EXIT_FAILURE;
    }

    start_sequential = (float) omp_get_wtime();
    without_no_wait_example_2(n, a, b, c, y, z);
    end_sequential = (float) omp_get_wtime();

    free(a); free(b); free(c); free(y); free(z);

    /* BUG FIX: %u for the unsigned count (was %d). */
    printf("\nTime to solve it for %u numbers:\n\tParallel: %.0fs\n\tSequential: %.0f\n",
           n, end_parallel - start_parallel, end_sequential - start_sequential);
    return 0;
}
GB_unop__identity_fp32_int64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp32_int64)
// op(A') function:  GB (_unop_tran__identity_fp32_int64)

// C type:   float
// A type:   int64_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

// entry type of the input matrix A
#define GB_ATYPE \
    int64_t

// entry type of the output array Cx
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals input after the cast)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (float) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fp32_int64)
(
    float *Cx,                  // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap slots) in A
    int nthreads                // # of OpenMP threads for the parallel loops
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index declared outside the loops: a signed 64-bit counter,
    // shared by both the dense and the bitmap branches below
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every slot 0..anz-1 holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (only slots with Ab [p] != 0 hold live entries)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fp32_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is a shared template, specialized by the
    // GB_* macros defined above (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...)
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
compiler_cgen.c
/* Generated by Nim Compiler v0.15.0 */ /* (c) 2016 Andreas Rumpf */ /* The generated code is subject to the original license. */ #define NIM_INTBITS 64 #include "nimbase.h" #include <string.h> typedef struct Tcgen529027 Tcgen529027; typedef struct TNimType TNimType; typedef struct TNimNode TNimNode; typedef struct Ropeobj178006 Ropeobj178006; typedef struct NimStringDesc NimStringDesc; typedef struct TGenericSeq TGenericSeq; typedef struct Cell47304 Cell47304; typedef struct Cellseq47320 Cellseq47320; typedef struct Gcheap49818 Gcheap49818; typedef struct Gcstack49816 Gcstack49816; typedef struct Memregion29486 Memregion29486; typedef struct Smallchunk29440 Smallchunk29440; typedef struct Llchunk29480 Llchunk29480; typedef struct Bigchunk29442 Bigchunk29442; typedef struct Intset29414 Intset29414; typedef struct Trunk29410 Trunk29410; typedef struct Avlnode29484 Avlnode29484; typedef struct Gcstat49814 Gcstat49814; typedef struct Cellset47316 Cellset47316; typedef struct Pagedesc47312 Pagedesc47312; typedef struct Ttypeseq292836 Ttypeseq292836; typedef struct Ttype292840 Ttype292840; typedef struct Intset268030 Intset268030; typedef struct Trunk268026 Trunk268026; typedef struct Trunkseq268028 Trunkseq268028; typedef struct Tpasscontext341002 Tpasscontext341002; typedef struct Tsym292834 Tsym292834; typedef struct Tidobj199004 Tidobj199004; typedef struct TNimObject TNimObject; typedef struct TY292929 TY292929; typedef struct Tstrtable292806 Tstrtable292806; typedef struct Tsymseq292804 Tsymseq292804; typedef struct Tident199010 Tident199010; typedef struct Tlineinfo191336 Tlineinfo191336; typedef struct Tnode292802 Tnode292802; typedef struct Tloc292816 Tloc292816; typedef struct Tlib292820 Tlib292820; typedef struct TY529153 TY529153; typedef struct TY203018 TY203018; typedef struct Tidtable292850 Tidtable292850; typedef struct Tidpairseq292848 Tidpairseq292848; typedef struct Tlinkedlist147013 Tlinkedlist147013; typedef struct Tlistentry147007 Tlistentry147007; 
typedef struct Tcproc529021 Tcproc529021; typedef struct Tnodetable292862 Tnodetable292862; typedef struct Tnodepairseq292860 Tnodepairseq292860; typedef struct Debuginfo203009 Debuginfo203009; typedef struct TY203021 TY203021; typedef struct TY203023 TY203023; typedef struct Tnodeseq292796 Tnodeseq292796; typedef struct TY191350 TY191350; typedef struct TY529095 TY529095; typedef struct Trodreader332021 Trodreader332021; typedef struct TY292960 TY292960; typedef struct TY203017 TY203017; typedef struct Enumdesc203007 Enumdesc203007; typedef struct Tinfocc273008 Tinfocc273008; typedef struct Tblock529019 Tblock529019; typedef struct Ttraversalclosure537019 Ttraversalclosure537019; typedef struct TY135002 TY135002; typedef struct Tbitset339004 Tbitset339004; typedef struct TY191612 TY191612; typedef struct Tfileinfo191334 Tfileinfo191334; typedef struct Tinfoos176035 Tinfoos176035; typedef struct Tinfocpu176476 Tinfocpu176476; typedef struct Tstrentry147009 Tstrentry147009; typedef struct TY128506 TY128506; typedef struct Basechunk29438 Basechunk29438; typedef struct Freecell29430 Freecell29430; typedef struct Tinstantiation292824 Tinstantiation292824; typedef struct Tidpair292846 Tidpair292846; typedef struct Tnodepair292858 Tnodepair292858; typedef struct Filenamemapping203005 Filenamemapping203005; typedef struct TY332033 TY332033; typedef struct Tindex332019 Tindex332019; typedef struct Tiitable299142 Tiitable299142; typedef struct Tiipairseq299140 Tiipairseq299140; typedef struct Table332054 Table332054; typedef struct Keyvaluepairseq332057 Keyvaluepairseq332057; typedef struct Memfile330202 Memfile330202; typedef struct TY292961 TY292961; typedef struct Tiipair299138 Tiipair299138; typedef struct Keyvaluepair332060 Keyvaluepair332060; typedef NU8 Tnimkind3403; typedef NU8 Tnimtypeflag3409Set; typedef N_NIMCALL_PTR(void, TY3489) (void* p0, NI op0); typedef N_NIMCALL_PTR(void*, TY3494) (void* p0); struct TNimType { NI size; Tnimkind3403 kind; Tnimtypeflag3409Set 
flags; TNimType* base; TNimNode* node; void* finalizer; TY3489 marker; TY3494 deepcopy; }; typedef NU8 Tnimnodekind3405; struct TNimNode { Tnimnodekind3405 kind; NI offset; TNimType* typ; NCSTRING name; NI len; TNimNode** sons; }; typedef N_NIMCALL_PTR(void, Globalmarkerproc55802) (void); struct TGenericSeq { NI len; NI reserved; }; struct NimStringDesc { TGenericSeq Sup; NIM_CHAR data[SEQ_DECL_SIZE]; }; struct Cell47304 { NI refcount; TNimType* typ; }; struct Cellseq47320 { NI len; NI cap; Cell47304** d; }; typedef Smallchunk29440* TY29501[512]; typedef Trunk29410* Trunkbuckets29412[256]; struct Intset29414 { Trunkbuckets29412 data; }; struct Memregion29486 { NI minlargeobj; NI maxlargeobj; TY29501 freesmallchunks; Llchunk29480* llmem; NI currmem; NI maxmem; NI freemem; NI lastsize; Bigchunk29442* freechunkslist; Intset29414 chunkstarts; Avlnode29484* root; Avlnode29484* deleted; Avlnode29484* last; Avlnode29484* freeavlnodes; NIM_BOOL locked; }; struct Gcstat49814 { NI stackscans; NI cyclecollections; NI maxthreshold; NI maxstacksize; NI maxstackcells; NI cycletablesize; NI64 maxpause; }; struct Cellset47316 { NI counter; NI max; Pagedesc47312* head; Pagedesc47312** data; }; struct Gcheap49818 { Gcstack49816* stack; void* stackbottom; NI cyclethreshold; Cellseq47320 zct; Cellseq47320 decstack; Cellseq47320 tempstack; NI recgclock; Memregion29486 region; Gcstat49814 stat; Cellset47316 marked; Cellseq47320 additionalroots; }; struct Intset268030 { NI counter; NI max; Trunk268026* head; Trunkseq268028* data; }; struct TNimObject { TNimType* m_type; }; struct Tidobj199004 { TNimObject Sup; NI id; }; typedef NU8 Tsymkind292435; struct Tstrtable292806 { NI counter; Tsymseq292804* data; }; typedef NU16 Tmagic292524; struct Tlineinfo191336 { NI16 line; NI16 col; NI32 fileindex; }; typedef NU32 Tsymflag292184Set; typedef NU32 Toption169009Set; typedef NU8 Tlockind292808; typedef NU8 Tstorageloc292812; typedef NU16 Tlocflag292810Set; struct Tloc292816 { Tlockind292808 k; 
Tstorageloc292812 s; Tlocflag292810Set flags; Ttype292840* t; Ropeobj178006* r; }; struct Tsym292834 { Tidobj199004 Sup; Tsymkind292435 kind; union{ struct {Ttypeseq292836* typeinstcache; } S1; struct {TY292929* procinstcache; Tsym292834* gcunsafetyreason; } S2; struct {TY292929* usedgenerics; Tstrtable292806 tab; } S3; struct {Tsym292834* guard; NI bitsize; } S4; } kindU; Tmagic292524 magic; Ttype292840* typ; Tident199010* name; Tlineinfo191336 info; Tsym292834* owner; Tsymflag292184Set flags; Tnode292802* ast; Toption169009Set options; NI position; NI offset; Tloc292816 loc; Tlib292820* annex; Tnode292802* constraint; }; struct TY203018 { NimStringDesc* Field0; NI Field1; }; struct Tpasscontext341002 { TNimObject Sup; NIM_BOOL fromcache; }; typedef Ropeobj178006* Tcfilesections529009[18]; typedef NU8 Codegenflag529025Set; struct Tidtable292850 { NI counter; Tidpairseq292848* data; }; struct Tlinkedlist147013 { Tlistentry147007* head; Tlistentry147007* tail; NI counter; }; struct Tnodetable292862 { NI counter; Tnodepairseq292860* data; }; typedef Ropeobj178006* TY529136[10]; struct Tcgen529027 { Tpasscontext341002 Sup; Tcfilesections529009 s; Codegenflag529025Set flags; Tsym292834* module; NimStringDesc* filename; NimStringDesc* cfilename; Ropeobj178006* tmpbase; Tidtable292850 typecache; Tidtable292850 forwtypecache; Intset268030 declaredthings; Intset268030 declaredprotos; Tlinkedlist147013 headerfiles; Intset268030 typeinfomarker; Tcproc529021* initproc; Tcproc529021* postinitproc; Tcproc529021* preinitproc; Ttypeseq292836* typestack; Tnodetable292862 datacache; Tsymseq292804* forwardedprocs; NI typenodes; NI nimtypes; Ropeobj178006* typenodesname; Ropeobj178006* nimtypesname; NI labels; TY529136 extensionloaders; Ropeobj178006* injectstmt; }; struct Debuginfo203009 { NI version; TY203021* files; TY203023* enums; NIM_BOOL conflicts; }; struct Tident199010 { Tidobj199004 Sup; NimStringDesc* s; Tident199010* next; NI h; }; struct Tcproc529021 { Tsym292834* prc; 
NIM_BOOL beforeretneeded; NIM_BOOL threadvaraccessed; Tlineinfo191336 lastlineinfo; Tnodeseq292796* nestedtrystmts; NI inexceptblock; TY191350* finallysafepoints; NI labels; TY529095* blocks; NI breakidx; Toption169009Set options; NI maxframelen; Tcgen529027* module; NI withinloop; NI splitdecls; NI gcframeid; Ropeobj178006* gcframetype; }; typedef NU8 Tsymflag292184; typedef NU8 Codegenflag529025; typedef NU8 Toption169009; typedef NU64 Tglobaloption169013Set; typedef NU8 Tglobaloption169013; typedef NU8 Tcommands169076; typedef NU16 Tnodeflag292427Set; typedef NU8 Tnodekind292020; struct Tnode292802 { Ttype292840* typ; Tlineinfo191336 info; Tnodeflag292427Set flags; Tnodekind292020 kind; union{ struct {NI64 intval; } S1; struct {NF floatval; } S2; struct {NimStringDesc* strval; } S3; struct {Tsym292834* sym; } S4; struct {Tident199010* ident; } S5; struct {Tnodeseq292796* sons; } S6; } kindU; NimStringDesc* comment; }; typedef Ropeobj178006* TY533289[1]; typedef NU8 Tlocflag292810; struct Tlistentry147007 { TNimObject Sup; Tlistentry147007* prev; Tlistentry147007* next; }; typedef NU8 Tlibkind292818; struct Tlib292820 { Tlistentry147007 Sup; Tlibkind292818 kind; NIM_BOOL generated; NIM_BOOL isoverriden; Ropeobj178006* name; Tnode292802* path; }; typedef NU8 Tcfilesection529005; typedef NU8 Ttypekind292244; typedef NU8 Tcallingconvention292002; typedef NU32 Ttypeflag292431Set; struct Ttype292840 { Tidobj199004 Sup; Ttypekind292244 kind; Tcallingconvention292002 callconv; Ttypeflag292431Set flags; Ttypeseq292836* sons; Tnode292802* n; Tsym292834* owner; Tsym292834* sym; Tsym292834* destructor; Tsym292834* deepcopy; Tsym292834* assignment; TY292960* methods; NI64 size; NI16 align; NI16 locklevel; Tloc292816 loc; }; typedef Ropeobj178006* TY532811[2]; typedef NU8 Tctypekind529007; typedef NU64 Ttypekind292244Set; typedef NU8 Ttypeflag292431; typedef NimStringDesc* TY533943[14]; typedef NU8 Tprefereddesc320011; typedef Ropeobj178006* TY178507[1]; struct Enumdesc203007 
{ NI size; NU32 owner; NI id; NimStringDesc* name; TY203017* values; }; typedef Ropeobj178006* TY535235[4]; typedef NimStringDesc* TY292016[10]; typedef Ropeobj178006* TY535238[3]; struct Ropeobj178006 { TNimObject Sup; Ropeobj178006* left; Ropeobj178006* right; NI length; NimStringDesc* data; }; typedef NU8 Tinfoccprop273004Set; struct Tinfocc273008 { NimStringDesc* Field0; NimStringDesc* Field1; NimStringDesc* Field2; NimStringDesc* Field3; NimStringDesc* Field4; NimStringDesc* Field5; NimStringDesc* Field6; NimStringDesc* Field7; NimStringDesc* Field8; NimStringDesc* Field9; NimStringDesc* Field10; NimStringDesc* Field11; NimStringDesc* Field12; NimStringDesc* Field13; NimStringDesc* Field14; NimStringDesc* Field15; NimStringDesc* Field16; NimStringDesc* Field17; NimStringDesc* Field18; NimStringDesc* Field19; Tinfoccprop273004Set Field20; }; typedef Tinfocc273008 TY273427[13]; typedef NU8 Tsystemcc273002; typedef NU8 Tnodeflag292427; typedef NU8 Tcprocsection529011; typedef Ropeobj178006* Tcprocsections529013[3]; struct Tblock529019 { NI id; Ropeobj178006* label; Tcprocsections529013 sections; NIM_BOOL isloop; NI16 nestedtrystmts; NI16 nestedexceptstmts; NI16 framelen; }; typedef NU8 Tgcmode169080; typedef NU8 Ttypeinforeason537016; struct Ttraversalclosure537019 { Tcproc529021* p; NimStringDesc* visitorfrmt; }; typedef NU8 Ttypefieldresult320145; typedef NU8 Tinfoccprop273004; typedef Ropeobj178006* TY536847[6]; typedef Ropeobj178006* TY536401[7]; typedef Ropeobj178006* TY536475[5]; typedef NU16 Tmsgkind191002; typedef NU8 Tassignmentflag538302Set; typedef NU8 Tassignmentflag538302; typedef NimStringDesc* TY552655[19]; typedef NimStringDesc* TY551642[3]; typedef NimStringDesc* TY556765[4]; typedef NimStringDesc* TY551828[42]; typedef NimStringDesc* TY551281[7]; typedef NU8 Trenderflag311004Set; typedef NimStringDesc* TY557052[2]; typedef NU8 Tclosuretypekind535681; typedef NimStringDesc* TY556428[6]; typedef NU8 Tanalysisresult473003; typedef NU8 
char136Set[32]; typedef NU8 Tdistinctcompare324427; typedef NU8 Ttypecmpflag324429Set; typedef NU16 Tspecialword275003; typedef NU8 Tsystemos176004; struct Tfileinfo191334 { NimStringDesc* fullpath; NimStringDesc* projpath; NimStringDesc* shortname; Ropeobj178006* quotedname; Ropeobj178006* quotedfullname; TY191350* lines; NimStringDesc* dirtyfile; }; typedef NU8 Tinfoosprop176031Set; struct Tinfoos176035 { NimStringDesc* Field0; NimStringDesc* Field1; NimStringDesc* Field2; NimStringDesc* Field3; NimStringDesc* Field4; NimStringDesc* Field5; NimStringDesc* Field6; NimStringDesc* Field7; NimStringDesc* Field8; NimStringDesc* Field9; NimStringDesc* Field10; NimStringDesc* Field11; Tinfoosprop176031Set Field12; }; typedef Tinfoos176035 TY176082[24]; typedef NU8 Tendian176474; struct Tinfocpu176476 { NimStringDesc* Field0; NI Field1; Tendian176474 Field2; NI Field3; NI Field4; }; typedef Tinfocpu176476 TY176510[19]; typedef NU8 Tsystemcpu176452; struct Tstrentry147009 { Tlistentry147007 Sup; NimStringDesc* data; }; struct TY128506 { NimStringDesc* Field0; NimStringDesc* Field1; NimStringDesc* Field2; }; struct Gcstack49816 { Gcstack49816* prev; Gcstack49816* next; void* starts; void* pos; NI maxstacksize; }; struct Basechunk29438 { NI prevsize; NI size; NIM_BOOL used; }; struct Smallchunk29440 { Basechunk29438 Sup; Smallchunk29440* next; Smallchunk29440* prev; Freecell29430* freelist; NI free; NI acc; NF data; }; struct Llchunk29480 { NI size; NI acc; Llchunk29480* next; }; struct Bigchunk29442 { Basechunk29438 Sup; Bigchunk29442* next; Bigchunk29442* prev; NI align; NF data; }; typedef NI TY29419[8]; struct Trunk29410 { Trunk29410* next; NI key; TY29419 bits; }; typedef Avlnode29484* TY29491[2]; struct Avlnode29484 { TY29491 link; NI key; NI upperbound; NI level; }; struct Pagedesc47312 { Pagedesc47312* next; NI key; TY29419 bits; }; struct Trunk268026 { Trunk268026* next; NI key; TY29419 bits; }; struct Tidpair292846 { Tidobj199004* key; TNimObject* val; }; struct 
Tnodepair292858 { NI h; Tnode292802* key; NI val; }; struct Filenamemapping203005 { NimStringDesc* package; NimStringDesc* file; NU32 mangled; }; typedef NU8 Treasonforrecompile332002; struct Tiitable299142 { NI counter; Tiipairseq299140* data; }; struct Tindex332019 { NI lastidxkey; NI lastidxval; Tiitable299142 tab; NimStringDesc* r; NI offset; }; struct Table332054 { Keyvaluepairseq332057* data; NI counter; }; struct Memfile330202 { void* mem; NI size; int handle; }; struct Trodreader332021 { TNimObject Sup; NI pos; NCSTRING s; Toption169009Set options; Treasonforrecompile332002 reason; TY332033* moddeps; TY332033* files; NI dataidx; NI convertersidx; NI initidx; NI interfidx; NI compilerprocsidx; NI methodsidx; NimStringDesc* filename; Tindex332019 index; Tindex332019 imports; NI readerindex; NI line; NI moduleid; Table332054 syms; Memfile330202 memfile; Tsymseq292804* methods; NimStringDesc* origfile; NIM_BOOL inviewmode; }; struct TY292961 { NI Field0; Tsym292834* Field1; }; struct Freecell29430 { Freecell29430* next; NI zerofield; }; struct Tinstantiation292824 { Tsym292834* sym; Ttypeseq292836* concretetypes; NI compilesid; }; struct Tiipair299138 { NI key; NI val; }; struct Keyvaluepair332060 { NI Field0; NI Field1; Tsym292834* Field2; }; struct Ttypeseq292836 { TGenericSeq Sup; Ttype292840* data[SEQ_DECL_SIZE]; }; struct TY529153 { TGenericSeq Sup; Tcgen529027* data[SEQ_DECL_SIZE]; }; struct Tsymseq292804 { TGenericSeq Sup; Tsym292834* data[SEQ_DECL_SIZE]; }; struct TY203017 { TGenericSeq Sup; TY203018 data[SEQ_DECL_SIZE]; }; struct TY135002 { TGenericSeq Sup; NimStringDesc* data[SEQ_DECL_SIZE]; }; struct Tbitset339004 { TGenericSeq Sup; NI8 data[SEQ_DECL_SIZE]; }; struct TY529095 { TGenericSeq Sup; Tblock529019 data[SEQ_DECL_SIZE]; }; struct TY191350 { TGenericSeq Sup; Ropeobj178006* data[SEQ_DECL_SIZE]; }; struct Tnodeseq292796 { TGenericSeq Sup; Tnode292802* data[SEQ_DECL_SIZE]; }; struct TY191612 { TGenericSeq Sup; Tfileinfo191334 data[SEQ_DECL_SIZE]; 
}; struct Trunkseq268028 { TGenericSeq Sup; Trunk268026* data[SEQ_DECL_SIZE]; }; struct TY292929 { TGenericSeq Sup; Tinstantiation292824* data[SEQ_DECL_SIZE]; }; struct Tidpairseq292848 { TGenericSeq Sup; Tidpair292846 data[SEQ_DECL_SIZE]; }; struct Tnodepairseq292860 { TGenericSeq Sup; Tnodepair292858 data[SEQ_DECL_SIZE]; }; struct TY203021 { TGenericSeq Sup; Filenamemapping203005 data[SEQ_DECL_SIZE]; }; struct TY203023 { TGenericSeq Sup; Enumdesc203007 data[SEQ_DECL_SIZE]; }; struct TY292960 { TGenericSeq Sup; TY292961 data[SEQ_DECL_SIZE]; }; struct TY332033 { TGenericSeq Sup; NI32 data[SEQ_DECL_SIZE]; }; struct Tiipairseq299140 { TGenericSeq Sup; Tiipair299138 data[SEQ_DECL_SIZE]; }; struct Keyvaluepairseq332057 { TGenericSeq Sup; Keyvaluepair332060 data[SEQ_DECL_SIZE]; }; N_NIMCALL(void, nimGCvisit)(void* d0, NI op0); N_NIMCALL(void, T839829468_2)(void); N_NIMCALL(void, nimRegisterGlobalMarker)(Globalmarkerproc55802 markerproc0); N_NIMCALL(void, T839829468_3)(void); N_NIMCALL(Ropeobj178006*, rope_178277_2381377266)(NimStringDesc* s0); static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0); static N_INLINE(Cell47304*, usrtocell_51440_1689653243)(void* usr0); static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47304* c0); N_NOINLINE(void, addzct_51417_1689653243)(Cellseq47320* s0, Cell47304* c0); N_NIMCALL(void, T839829468_5)(void); N_NIMCALL(void, T839829468_6)(void); static N_INLINE(void, nimGCunrefNoCycle)(void* p0); N_NIMCALL(void*, newSeqRC1)(TNimType* typ0, NI len0); N_NIMCALL(void, T839829468_7)(void); N_NIMCALL(void, initintset_268885_2627731572)(Intset268030* Result); N_NOINLINE(void, chckNil)(void* p0); N_NIMCALL(void, genericReset)(void* dest0, TNimType* mt0); N_NIMCALL(void, T839829468_8)(void); N_NIMCALL(Tcgen529027*, newmodule_563044_839829468)(Tsym292834* module0); N_NIMCALL(Tcgen529027*, getcgenmodule_532226_839829468)(Tsym292834* s0); N_NIMCALL(void, internalerror_196113_155036129)(NimStringDesc* errmsg0); N_NIMCALL(NimStringDesc*, 
HEX24_196185_1689653243)(TY203018 x0); N_NIMCALL(Tcgen529027*, rawnewmodule_563038_839829468)(Tsym292834* module0); N_NIMCALL(Tcgen529027*, rawnewmodule_562663_839829468)(Tsym292834* module0, NimStringDesc* filename0); N_NIMCALL(void*, newObj)(TNimType* typ0, NI size0); static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0); static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0); N_NIMCALL(NimStringDesc*, HEX24_8401_1689653243)(NU64 x0); N_NIMCALL(NU32, hashowner_532977_839829468)(Tsym292834* s0); N_NIMCALL(NU32, register_203121_1926258066)(Debuginfo203009* self0, NimStringDesc* package0, NimStringDesc* file0); N_NIMCALL(NimStringDesc*, rawNewString)(NI space0); N_NIMCALL(void, initlinkedlist_147031_3771138726)(Tlinkedlist147013* list0); N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src0); N_NIMCALL(void, initidtable_296019_850551059)(Tidtable292850* x0); N_NIMCALL(Tcproc529021*, newproc_529206_3723162438)(Tsym292834* prc0, Tcgen529027* module0); static N_INLINE(void, asgnRef)(void** dest0, void* src0); static N_INLINE(void, incref_53419_1689653243)(Cell47304* c0); static N_INLINE(void, decref_53001_1689653243)(Cell47304* c0); N_NIMCALL(Toption169009Set, initprocoptions_562635_839829468)(Tcgen529027* m0); N_NIMCALL(Tcproc529021*, newpreinitproc_562625_839829468)(Tcgen529027* m0); N_NIMCALL(Tcproc529021*, newpostinitproc_562630_839829468)(Tcgen529027* m0); N_NIMCALL(void, initnodetable_296085_850551059)(Tnodetable292862* x0); N_NIMCALL(Ropeobj178006*, gettempname_533598_839829468)(Tcgen529027* m0); N_NIMCALL(Ropeobj178006*, HEX26_178418_2381377266)(Ropeobj178006* a0, Ropeobj178006* b0); N_NIMCALL(Ropeobj178006*, rope_178401_2381377266)(NI64 i0); N_NIMCALL(NimStringDesc*, tofullpath_192261_155036129)(NI32 fileidx0); N_NIMCALL(TGenericSeq*, setLengthSeq)(TGenericSeq* seq0, NI elemsize0, NI newlen0); N_NIMCALL(NimStringDesc*, tofilename_192257_155036129)(NI32 fileidx0); N_NIMCALL(NimStringDesc*, 
noschangeFileExt)(NimStringDesc* filename0, NimStringDesc* ext0); N_NIMCALL(NimStringDesc*, completecfilepath_273854_2528170400)(NimStringDesc* cfile0, NIM_BOOL createsubdir0); N_NIMCALL(void, readmergeinfo_530613_2760143328)(NimStringDesc* cfilename0, Tcgen529027* m0); N_NIMCALL(NimStringDesc*, getcfile_563201_839829468)(Tcgen529027* m0); N_NIMCALL(NimStringDesc*, copyString)(NimStringDesc* src0); N_NIMCALL(NimStringDesc*, withpackagename_170073_2607990831)(NimStringDesc* path0); static N_INLINE(NIM_BOOL, skipcodegen_341085_2355241294)(Tnode292802* n0); N_NIMCALL(void, genstmts_539244_839829468)(Tcproc529021* p0, Tnode292802* t0); N_NIMCALL(void, expr_539248_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(void, fillprocloc_539201_839829468)(Tsym292834* sym0); N_NIMCALL(void, fillloc_532282_839829468)(Tloc292816* a0, Tlockind292808 k0, Ttype292840* typ0, Ropeobj178006* r0, Tstorageloc292812 s0); N_NIMCALL(void, unsureAsgnRef)(void** dest0, void* src0); N_NIMCALL(Ropeobj178006*, manglename_533205_839829468)(Tsym292834* s0); N_NIMCALL(NIM_BOOL, iskeyword_532960_839829468)(Tident199010* w0); N_NIMCALL(NimStringDesc*, mangle_528847_2036603609)(NimStringDesc* name0); N_NIMCALL(void, add_178487_2381377266)(Ropeobj178006** a0, NimStringDesc* b0); N_NIMCALL(void, add_178482_2381377266)(Ropeobj178006** a0, Ropeobj178006* b0); N_NIMCALL(Ropeobj178006*, HEX25_178905_2381377266)(NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0); N_NIMCALL(void, genprocprototype_539254_839829468)(Tcgen529027* m0, Tsym292834* sym0); N_NIMCALL(void, useheader_532369_839829468)(Tcgen529027* m0, Tsym292834* sym0); N_NIMCALL(NIM_BOOL, includestr_147249_3771138726)(Tlinkedlist147013* list0, NimStringDesc* data0); N_NIMCALL(NimStringDesc*, getstr_297230_850551059)(Tnode292802* a0); N_NIMCALL(Tsym292834*, getmodule_299123_2984716966)(Tsym292834* s0); N_NIMCALL(NIM_BOOL, containsorincl_268862_2627731572)(Intset268030* s0, NI key0); N_NIMCALL(Ropeobj178006*, 
ropecg_532407_839829468)(Tcgen529027* m0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0); N_NIMCALL(NimStringDesc*, nimIntToStr)(NI x0); static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0); N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI start_79210_1689653243, NI last0); N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI first0, NI last0); N_NIMCALL(Ropeobj178006*, cgsym_532403_839829468)(Tcgen529027* m0, NimStringDesc* name0); N_NIMCALL(Tsym292834*, getcompilerproc_338748_3937434831)(NimStringDesc* name0); N_NIMCALL(void, genproc_532951_839829468)(Tcgen529027* m0, Tsym292834* prc0); N_NIMCALL(NIM_BOOL, isactivated_561431_839829468)(Tsym292834* prc0); N_NIMCALL(void, addforwardedproc_532203_839829468)(Tcgen529027* m0, Tsym292834* prc0); N_NIMCALL(TGenericSeq*, incrSeqV2)(TGenericSeq* seq0, NI elemsize0); N_NIMCALL(void, genprocnoforward_560906_839829468)(Tcgen529027* m0, Tsym292834* prc0); N_NIMCALL(void, genprocaux_560284_839829468)(Tcgen529027* m0, Tsym292834* prc0); N_NIMCALL(Ropeobj178006*, genprocheader_535867_839829468)(Tcgen529027* m0, Tsym292834* prc0); N_NIMCALL(void, genclinedir_532813_839829468)(Ropeobj178006** r0, Tlineinfo191336 info0); N_NIMCALL(void, genclinedir_532725_839829468)(Ropeobj178006** r0, NimStringDesc* filename0, NI line0); N_NIMCALL(void, addf_179205_2381377266)(Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0); N_NIMCALL(NimStringDesc*, makesinglelinecstring_528835_2036603609)(NimStringDesc* s0); N_NIMCALL(NI, safelinenm_532721_839829468)(Tlineinfo191336 info0); static N_INLINE(NI, tolinenumber_192415_155036129)(Tlineinfo191336 info0); N_NIMCALL(void, genprocparams_534115_839829468)(Tcgen529027* m0, Ttype292840* t0, Ropeobj178006** rettype0, Ropeobj178006** params0, Intset268030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0); N_NIMCALL(NIM_BOOL, isinvalidreturntype_533550_839829468)(Ttype292840* rettype0); N_NIMCALL(Tctypekind529007, 
maptype_533394_839829468)(Ttype292840* typ0); N_NIMCALL(Tctypekind529007, mapsettype_533389_839829468)(Ttype292840* typ0); N_NIMCALL(NI64, getsize_320135_3876443242)(Ttype292840* typ0); N_NIMCALL(Ttype292840*, lastson_295377_850551059)(Ttype292840* n0); N_NIMCALL(NI64, firstord_320001_3876443242)(Ttype292840* t0); N_NIMCALL(Ttype292840*, skiptypes_296099_850551059)(Ttype292840* t0, Ttypekind292244Set kinds0); N_NIMCALL(NIM_BOOL, isimportedcpptype_533478_839829468)(Ttype292840* t0); N_NIMCALL(NIM_BOOL, needscomplexassignment_533511_839829468)(Ttype292840* typ0); N_NIMCALL(NIM_BOOL, containsgarbagecollectedref_320117_3876443242)(Ttype292840* typ0); static N_INLINE(NIM_BOOL, isobjlackingtypefield_533515_839829468)(Ttype292840* typ0); N_NIMCALL(NIM_BOOL, ispureobject_320138_3876443242)(Ttype292840* typ0); N_NIMCALL(Ropeobj178006*, gettypedescaux_533505_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0); N_NIMCALL(Ttype292840*, getuniquetype_528640_2036603609)(Ttype292840* key0); N_NIMCALL(Ropeobj178006*, gettypepre_533972_839829468)(Tcgen529027* m0, Ttype292840* typ0); N_NIMCALL(Ropeobj178006*, getsimpletypedesc_533936_839829468)(Tcgen529027* m0, Ttype292840* typ0); N_NIMCALL(Ropeobj178006*, typenameorliteral_533898_839829468)(Ttype292840* t0, NimStringDesc* literal0); N_NIMCALL(Ropeobj178006*, gettypename_533313_839829468)(Ttype292840* typ0); N_NIMCALL(Ropeobj178006*, typename_533292_839829468)(Ttype292840* typ0); N_NIMCALL(NimStringDesc*, reprEnum)(NI e0, TNimType* typ0); N_NIMCALL(Ropeobj178006*, cachegettype_533593_839829468)(Tidtable292850 tab0, Ttype292840* key0); N_NIMCALL(TNimObject*, idtableget_299086_2984716966)(Tidtable292850 t0, Tidobj199004* key0); N_NIMCALL(NimStringDesc*, typetostring_320017_3876443242)(Ttype292840* typ0, Tprefereddesc320011 prefer0); N_NIMCALL(Ttype292840*, elemtype_320394_3876443242)(Ttype292840* t0); N_NIMCALL(Ropeobj178006*, HEX26_178447_2381377266)(Ropeobj178006* a0, NimStringDesc* b0); N_NIMCALL(Ropeobj178006*, 
gettypeforward_534039_839829468)(Tcgen529027* m0, Ttype292840* typ0); N_NIMCALL(NIM_BOOL, isimportedtype_533451_839829468)(Ttype292840* t0); N_NIMCALL(NimStringDesc*, getforwardstructformat_534015_839829468)(Tcgen529027* m0); N_NIMCALL(Ropeobj178006*, structorunion_534001_839829468)(Ttype292840* t0); N_NIMCALL(void, idtableput_299094_2984716966)(Tidtable292850* t0, Tidobj199004* key0, TNimObject* val0); N_NIMCALL(void, pushtype_533958_839829468)(Tcgen529027* m0, Ttype292840* typ0); N_NIMCALL(Ropeobj178006*, gettypedescweak_534079_839829468)(Tcgen529027* m0, Ttype292840* t0, Intset268030* check0); N_NIMCALL(void, internalerror_196100_155036129)(Tlineinfo191336 info0, NimStringDesc* errmsg0); N_NIMCALL(NIM_BOOL, hasenum_203230_1926258066)(Debuginfo203009* self0, NimStringDesc* ename0, NI id0, NU32 owner0); N_NIMCALL(void*, newSeq)(TNimType* typ0, NI len0); static N_INLINE(NI, len_293081_850551059)(Tnode292802* n0); N_NIMCALL(void, registerenum_203419_1926258066)(Debuginfo203009* self0, Enumdesc203007* ed0); N_NIMCALL(void, genericSeqAssign)(void* dest0, void* src_86404_1689653243, TNimType* mt0); N_NIMCALL(void, appcg_532632_839829468)(Tcgen529027* m0, Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0); N_NIMCALL(NI64, lengthord_320007_3876443242)(Ttype292840* t0); N_NIMCALL(NIM_BOOL, scancppgenericslot_534827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0); N_NIMCALL(Ttype292840*, resolvestarsincpptype_534891_839829468)(Ttype292840* typ0, NI idx0, NI stars0); N_NIMCALL(NI, len_295339_850551059)(Ttype292840* n0); N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI start0); N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI first0); N_NIMCALL(Ropeobj178006*, getrecorddesc_534643_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0); N_NIMCALL(Ropeobj178006*, getrecordfields_534636_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0); 
N_NIMCALL(Ropeobj178006*, genrecordfieldsaux_534421_839829468)(Tcgen529027* m0, Tnode292802* n0, Ropeobj178006* accessexpr0, Ttype292840* rectype0, Intset268030* check0); N_NIMCALL(NI, sonslen_295351_850551059)(Tnode292802* n0); N_NIMCALL(Tnode292802*, lastson_295364_850551059)(Tnode292802* n0); N_NIMCALL(Ropeobj178006*, HEX26_178452_2381377266)(NimStringDesc* a0, Ropeobj178006* b0); N_NIMCALL(Ropeobj178006*, manglerecfieldname_534361_839829468)(Tsym292834* field0, Ttype292840* rectype0); N_NIMCALL(NimStringDesc*, manglefield_532973_839829468)(Tident199010* name0); N_NIMCALL(NIM_CHAR, nsuToUpperAsciiChar)(NIM_CHAR c0); N_NIMCALL(Ropeobj178006*, gettupledesc_534777_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0); N_NIMCALL(NI, sonslen_295327_850551059)(Ttype292840* n0); N_NIMCALL(void, excl_268841_2627731572)(Intset268030* s0, NI key0); static N_INLINE(NIM_BOOL, iscompiletimeonly_328706_3876443242)(Ttype292840* t0); N_NIMCALL(Tstorageloc292812, paramstorageloc_534098_839829468)(Tsym292834* param0); N_NIMCALL(NIM_BOOL, ccgintroducedptr_533611_839829468)(Tsym292834* s0); N_NIMCALL(Tctypekind529007, mapreturntype_533447_839829468)(Ttype292840* typ0); N_NIMCALL(Tnode292802*, easyresultasgn_560191_839829468)(Tnode292802* n0); static N_INLINE(Tnode292802*, HEX5BHEX5D_293238_850551059)(Tnode292802* n0, NI i0); N_NIMCALL(Tnode292802*, getbody_335226_1724185294)(Tsym292834* s0); N_NIMCALL(Ropeobj178006*, localvardecl_538532_839829468)(Tcproc529021* p0, Tsym292834* s0); N_NIMCALL(Ropeobj178006*, gettypedesc_535673_839829468)(Tcgen529027* m0, Ttype292840* typ0); N_NIMCALL(void, initlocexprsingleuse_539289_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0); N_NIMCALL(void, initloc_532273_839829468)(Tloc292816* result0, Tlockind292808 k0, Ttype292840* typ0, Tstorageloc292812 s0); N_NIMCALL(void, linefmt_532714_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0); 
static N_INLINE(Ropeobj178006**, s_529179_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0); N_NIMCALL(Ropeobj178006*, indentline_532656_839829468)(Tcproc529021* p0, Ropeobj178006* r0); N_NIMCALL(void, prepend_178893_2381377266)(Ropeobj178006** a0, Ropeobj178006* b0); N_NIMCALL(Ropeobj178006*, rdloc_538188_839829468)(Tloc292816* a0); N_NIMCALL(void, assignlocalvar_538614_839829468)(Tcproc529021* p0, Tsym292834* s0); N_NIMCALL(void, line_532690_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, Ropeobj178006* r0); N_NIMCALL(void, localdebuginfo_538449_839829468)(Tcproc529021* p0, Tsym292834* s0); N_NIMCALL(void, linef_532700_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0); N_NIMCALL(Ropeobj178006*, makecstring_191638_155036129)(NimStringDesc* s0); N_NIMCALL(NimStringDesc*, nsuNormalize)(NimStringDesc* s0); N_NIMCALL(Ropeobj178006*, gentypeinfo_535941_839829468)(Tcgen529027* m0, Ttype292840* t_535944_839829468); N_NIMCALL(Tcgen529027*, bmod_529201_3723162438)(Tsym292834* module0); N_NIMCALL(void, gentypeinfoauxbase_535960_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0, Ropeobj178006* base0); N_NIMCALL(NIM_BOOL, canformacycle_320123_3876443242)(Ttype292840* typ0); N_NIMCALL(void, gentupleinfo_536551_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0); N_NIMCALL(Ropeobj178006*, getnimnode_535945_839829468)(Tcgen529027* m0); N_NIMCALL(Ttype292840*, fakeclosuretype_537010_839829468)(Tsym292834* owner0); N_NIMCALL(Ttype292840*, newtype_295107_850551059)(Ttypekind292244 kind0, Tsym292834* owner0); N_NIMCALL(void, rawaddson_296394_850551059)(Ttype292840* father0, Ttype292840* son0); N_NIMCALL(void, gentypeinfoaux_536027_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0); N_NIMCALL(Ropeobj178006*, gentraverseproc_537632_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttypeinforeason537016 reason0); 
N_NIMCALL(void, gentraverseprocseq_537399_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ0); N_NIMCALL(void, gettemp_537032_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* result0, NIM_BOOL needsinit0); N_NIMCALL(void, constructloc_538388_839829468)(Tcproc529021* p0, Tloc292816* loc0, NIM_BOOL istemp0); static N_INLINE(NIM_BOOL, iscomplexvaluetype_538317_839829468)(Ttype292840* t0); N_NIMCALL(void, usestringh_532345_839829468)(Tcgen529027* m0); N_NIMCALL(Ropeobj178006*, addrloc_538204_839829468)(Tloc292816* a0); N_NIMCALL(void, genobjectinit_538242_839829468)(Tcproc529021* p0, Tcprocsection529011 section0, Ttype292840* t0, Tloc292816* a0, NIM_BOOL takeaddr0); N_NIMCALL(Ttypefieldresult320145, analyseobjectwithtypefield_320149_3876443242)(Ttype292840* t0); N_NIMCALL(Ttype292840*, getsystype_338150_3937434831)(Ttypekind292244 kind0); N_NIMCALL(void, gentraverseproc_537022_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ_537027_839829468); static N_INLINE(Ropeobj178006*, parentobj_537257_839829468)(Ropeobj178006* accessor0, Tcgen529027* m0); N_NIMCALL(void, gentraverseproc_537039_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Tnode292802* n0); N_NIMCALL(void, gencaserange_537028_839829468)(Tcproc529021* p0, Tnode292802* branch0); N_NIMCALL(Ropeobj178006*, genliteral_539273_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(Ropeobj178006*, genliteral_549476_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* ty0); N_NIMCALL(Ropeobj178006*, intliteral_539270_839829468)(NI64 i0); N_NIMCALL(Ropeobj178006*, int64literal_549430_839829468)(NI64 i0); N_NIMCALL(Ropeobj178006*, uint64literal_549442_839829468)(NU64 i0); N_NIMCALL(NI, nodetabletestorset_342682_1142335848)(Tnodetable292862* t0, Tnode292802* key0, NI val0); N_NIMCALL(Ropeobj178006*, getstrlit_549468_839829468)(Tcgen529027* m0, NimStringDesc* s0); N_NIMCALL(NimStringDesc*, 
tostrmaxprecision_298007_3471544153)(NF f0); N_NIMCALL(Tnode292802*, copynode_296528_850551059)(Tnode292802* src0); N_NIMCALL(void, linecg_532707_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0); N_NIMCALL(void, genarrayinfo_537005_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0); N_NIMCALL(void, gensetinfo_536867_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0); N_NIMCALL(void, genenuminfo_536599_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0); N_NIMCALL(void, genobjectinfo_536508_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0); N_NIMCALL(void, genobjectfields_536104_839829468)(Tcgen529027* m0, Ttype292840* typ0, Tnode292802* n0, Ropeobj178006* expr0); N_NIMCALL(Ropeobj178006*, discriminatortablename_536057_839829468)(Tcgen529027* m0, Ttype292840* objtype_536060_839829468, Tsym292834* d0); N_NIMCALL(Tsym292834*, lookupinrecord_299119_2984716966)(Tnode292802* n0, Tident199010* field0); N_NIMCALL(NI64, getordvalue_320129_3876443242)(Tnode292802* n0); N_NIMCALL(void, gendeepcopyproc_538066_839829468)(Tcgen529027* m0, Tsym292834* s0, Ropeobj178006* result0); N_NIMCALL(void, initlocalvar_538398_839829468)(Tcproc529021* p0, Tsym292834* v0, NIM_BOOL immediateasgn0); N_NIMCALL(void, fillresult_533865_839829468)(Tsym292834* param0); N_NIMCALL(void, assignparam_538994_839829468)(Tcproc529021* p0, Tsym292834* s0); N_NIMCALL(void, closuresetup_560158_839829468)(Tcproc529021* p0, Tsym292834* prc0); N_NIMCALL(Ropeobj178006*, initgcframe_538435_839829468)(Tcproc529021* p0); N_NIMCALL(Ropeobj178006*, initframe_560140_839829468)(Tcproc529021* p0, Ropeobj178006* procname0, Ropeobj178006* filename0); N_NIMCALL(Ropeobj178006*, quotedfilename_196818_155036129)(Tlineinfo191336 i0); N_NIMCALL(void, appcg_532648_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI 
args0Len0); N_NIMCALL(Ropeobj178006*, deinitgcframe_538441_839829468)(Tcproc529021* p0); N_NIMCALL(Ropeobj178006*, deinitframe_560150_839829468)(Tcproc529021* p0); N_NIMCALL(Tcgen529027*, findpendingmodule_532241_839829468)(Tcgen529027* m0, Tsym292834* s0); N_NIMCALL(void, symindynamiclib_559929_839829468)(Tcgen529027* m0, Tsym292834* sym0); N_NIMCALL(NIM_BOOL, isgetprocaddr_559443_839829468)(Tlib292820* lib0); N_NIMCALL(void, loaddynamiclib_559481_839829468)(Tcgen529027* m0, Tlib292820* lib0); N_NIMCALL(void, libcandidates_170605_2607990831)(NimStringDesc* s0, TY135002** dest0); N_NIMCALL(void, rawmessage_194612_155036129)(Tmsgkind191002 msg0, NimStringDesc* arg0); N_NIMCALL(void, initlocexpr_539283_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0); N_NIMCALL(Ropeobj178006*, mangledynlibproc_538816_839829468)(Tsym292834* sym0); N_NIMCALL(NimStringDesc*, HEX24_178856_2381377266)(Ropeobj178006* r0); N_NIMCALL(void, symindynamiclibpartial_560071_839829468)(Tcgen529027* m0, Tsym292834* sym0); N_NIMCALL(void, genvarprototype_539236_839829468)(Tcgen529027* m0, Tsym292834* sym0); N_NIMCALL(void, genvarprototypeaux_544254_839829468)(Tcgen529027* m0, Tsym292834* sym0); N_NIMCALL(void, declarethreadvar_538676_839829468)(Tcgen529027* m0, Tsym292834* s0, NIM_BOOL isextern0); static N_INLINE(NIM_BOOL, emulatedthreadvars_532949_839829468)(void); static N_INLINE(NIM_BOOL, crossescppboundary_560754_839829468)(Tcgen529027* m0, Tsym292834* sym0); N_NIMCALL(void, putlocintodest_539258_839829468)(Tcproc529021* p0, Tloc292816* d0, Tloc292816* s0); N_NIMCALL(void, genassignment_539264_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0); N_NIMCALL(void, genrefassign_538311_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0); static N_INLINE(NIM_BOOL, usesnativegc_169177_2607990831)(void); N_NIMCALL(void, optasgnloc_549789_839829468)(Tloc292816* a0, Ttype292840* t0, 
Ropeobj178006* field0, Tloc292816* Result); N_NIMCALL(void, genoptasgntuple_550001_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0); N_NIMCALL(void, gengenericasgn_550167_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0); N_NIMCALL(NI, asgncomplexity_549751_839829468)(Tnode292802* n0); N_NIMCALL(void, genoptasgnobject_550084_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0, Tnode292802* t0); N_NIMCALL(void, genericAssign)(void* dest0, void* src0, TNimType* mt0); N_NIMCALL(void, localerror_196085_155036129)(Tlineinfo191336 info0, NimStringDesc* arg0); N_NIMCALL(NIM_BOOL, issimpleconst_532311_839829468)(Ttype292840* typ0); N_NIMCALL(void, putintodest_550468_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0, Tstorageloc292812 s0); N_NIMCALL(void, gencomplexconst_558249_839829468)(Tcproc529021* p0, Tsym292834* sym0, Tloc292816* d0); N_NIMCALL(void, requestconstimpl_539240_839829468)(Tcproc529021* p0, Tsym292834* sym0); N_NIMCALL(Ropeobj178006*, genconstexpr_554849_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(void, tobitset_340001_452470228)(Tnode292802* s0, Tbitset339004** b0); N_NIMCALL(Ropeobj178006*, genrawsetdata_549629_839829468)(Tbitset339004* cs0, NI size0); N_NIMCALL(NimStringDesc*, nsuToHex)(NI64 x0, NI len0); N_NIMCALL(NI64, bitsettoword_549578_839829468)(Tbitset339004* s0, NI size0); N_NIMCALL(Ropeobj178006*, genconstseq_559371_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* t0); N_NIMCALL(void, appcg_532640_839829468)(Tcgen529027* m0, Tcfilesection529005 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0); N_NIMCALL(Ropeobj178006*, genconstsimplelist_559299_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(Ropeobj178006*, gennamedconstexpr_559284_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(void, 
accessthreadlocalvar_532945_839829468)(Tcproc529021* p0, Tsym292834* s0); static N_INLINE(Ropeobj178006**, procsec_529194_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0); static N_INLINE(NIM_BOOL, isemptytype_297441_850551059)(Ttype292840* t0); N_NIMCALL(void, putdataintodest_550436_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0); N_NIMCALL(void, genlinedir_532823_839829468)(Tcproc529021* p0, Tnode292802* t0); N_NIMCALL(Ropeobj178006*, sourceline_192065_155036129)(Tlineinfo191336 i0); N_NIMCALL(NIM_BOOL, freshlineinfo_532818_839829468)(Tcproc529021* p0, Tlineinfo191336 info0); N_NIMCALL(void, genmagicexpr_557033_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0); N_NIMCALL(void, genandor_554311_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0); N_NIMCALL(Ropeobj178006*, getlabel_539217_839829468)(Tcproc529021* p0); N_NIMCALL(void, fixlabel_539230_839829468)(Tcproc529021* p0, Ropeobj178006* labl0); N_NIMCALL(void, unaryarith_552646_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0); N_NIMCALL(void, unaryarithoverflow_551633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0); N_NIMCALL(void, binaryfloatarith_556729_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0); N_NIMCALL(void, binaryarith_551819_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0); N_NIMCALL(void, geneqproc_552214_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, binaryarithoverflow_551262_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0); N_NIMCALL(Ropeobj178006*, binaryarithoverflowraw_551235_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* a0, Tloc292816* b0, NimStringDesc* frmt0); N_NIMCALL(Ropeobj178006*, rdcharloc_538227_839829468)(Tloc292816* a0); N_NIMCALL(NI64, lastord_320004_3876443242)(Ttype292840* t0); 
N_NIMCALL(void, genrepr_555339_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(Ropeobj178006*, lenfield_539305_839829468)(Tcproc529021* p0); N_NIMCALL(void, gcusage_554439_839829468)(Tnode292802* n0); N_NIMCALL(void, message_196095_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0); N_NIMCALL(NimStringDesc*, rendertree_311044_382274130)(Tnode292802* n0, Trenderflag311004Set renderflags0); N_NIMCALL(void, gengettypeinfo_555383_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, genswap_555638_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, unaryexpr_551209_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0); N_NIMCALL(void, binarystmt_550501_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genstrconcat_554452_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, genstrappend_554554_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, genseqelemappend_554683_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, genstrequals_556667_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, binaryexpr_550549_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genisnil_552620_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, gendollar_555391_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genof_555331_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(void, genof_555201_839829468)(Tcproc529021* p0, Tnode292802* x0, Ttype292840* typ0, Tloc292816* d0); N_NIMCALL(void, globalerror_196071_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0); N_NIMCALL(Ropeobj178006*, genofhelper_555140_839829468)(Tcproc529021* p0, 
Ttype292840* dest0, Ropeobj178006* a0); N_NIMCALL(void, gennew_554782_839829468)(Tcproc529021* p0, Tnode292802* e0); N_NIMCALL(void, rawgennew_554741_839829468)(Tcproc529021* p0, Tloc292816* a0, Ropeobj178006* sizeexpr_554745_839829468); N_NIMCALL(void, gennewfinalize_555111_839829468)(Tcproc529021* p0, Tnode292802* e0); N_NIMCALL(void, gennewseq_554824_839829468)(Tcproc529021* p0, Tnode292802* e0); N_NIMCALL(void, gennewseqaux_554795_839829468)(Tcproc529021* p0, Tloc292816* dest0, Ropeobj178006* length0); N_NIMCALL(void, gennewseqofcap_554836_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, gensomecast_556481_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(Ropeobj178006*, getclosuretype_535685_839829468)(Tcgen529027* m0, Ttype292840* t0, Tclosuretypekind535681 kind0); N_NIMCALL(void, genord_556475_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, unaryexprchar_551222_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genarraylen_555415_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0); N_NIMCALL(void, unarystmt_550527_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0); N_NIMCALL(void, gensetlengthstr_555632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, gensetlengthseq_555500_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, gensetop_556419_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0); N_NIMCALL(void, binarystmtinexcl_555858_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0); N_NIMCALL(Ropeobj178006*, rdsetelemloc_555662_839829468)(Tloc292816* a0, Ttype292840* settype0); N_NIMCALL(void, binaryexprchar_550809_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0); N_NIMCALL(void, 
geninop_556009_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(NIM_BOOL, fewcmps_555803_839829468)(Tnode292802* s0); N_NIMCALL(void, geninexpraux_553496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0); N_NIMCALL(void, binaryexprin_555837_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0, NimStringDesc* frmt0); N_NIMCALL(void, gencall_543632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, genclosurecall_540452_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0); N_NIMCALL(Ropeobj178006*, genarg_539787_839829468)(Tcproc529021* p0, Tnode292802* n_539790_839829468, Tsym292834* param0, Tnode292802* call0); static N_INLINE(Ropeobj178006*, genargstringtocstring_539776_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(Ropeobj178006*, openarrayloc_539665_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(Tnode292802*, skipconv_328882_3876443242)(Tnode292802* n0); N_NIMCALL(Tmagic292524, getmagic_318502_2616423590)(Tnode292802* op0); N_NIMCALL(Ropeobj178006*, genargnoparam_539938_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(Ropeobj178006*, getrawproctype_540459_839829468)(Tcproc529021* p0, Ttype292840* t0); N_NIMCALL(NIM_BOOL, leftappearsonrightside_539329_839829468)(Tnode292802* le0, Tnode292802* ri0); N_NIMCALL(Tanalysisresult473003, ispartof_473340_788060399)(Tnode292802* a0, Tnode292802* b0); static N_INLINE(NIM_BOOL, hasnoinit_539383_839829468)(Tnode292802* call0); N_NIMCALL(void, resetloc_538350_839829468)(Tcproc529021* p0, Tloc292816* loc0); N_NIMCALL(Ropeobj178006*, addcomma_540464_839829468)(Ropeobj178006* r0); N_NIMCALL(void, geninfixcall_541929_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0); N_NIMCALL(NIM_BOOL, contains_110056_4286263276)(NimStringDesc* s0, char136Set chars0); N_NIMCALL(Ropeobj178006*, 
genpatterncall_541699_839829468)(Tcproc529021* p0, Tnode292802* ri_541702_839829468, NimStringDesc* pat0, Ttype292840* typ_541704_839829468); N_NIMCALL(Ropeobj178006*, genotherarg_539277_839829468)(Tcproc529021* p0, Tnode292802* ri0, NI i0, Ttype292840* typ0); N_NIMCALL(Ropeobj178006*, genthisarg_541475_839829468)(Tcproc529021* p0, Tnode292802* ri_541478_839829468, NI i0, Ttype292840* typ0); N_NIMCALL(Tnode292802*, skipaddrderef_541433_839829468)(Tnode292802* node0); N_NIMCALL(void, fixupcall_539410_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0, Ropeobj178006* callee0, Ropeobj178006* params0); N_NIMCALL(void, gennamedparamcall_542616_839829468)(Tcproc529021* p0, Tnode292802* ri0, Tloc292816* d0); N_NIMCALL(NIM_BOOL, contains_110046_4286263276)(NimStringDesc* s0, NIM_CHAR c0); N_NIMCALL(void, genprefixcall_539960_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0); static N_INLINE(void, poststmtactions_532942_839829468)(Tcproc529021* p0); N_NIMCALL(void, genreset_554731_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(void, genecho_554369_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(NimStringDesc*, nsuRepeatStr)(NimStringDesc* s0, NI n0); N_NIMCALL(void, genarrtoseq_555046_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0); N_NIMCALL(void, genseqconstr_555004_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0); N_NIMCALL(void, localerror_196080_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0); N_NIMCALL(Tnode292802*, wrapprocforspawn_435501_2218250499)(Tsym292834* owner0, Tnode292802* spawnexpr0, Ttype292840* rettype0, Tnode292802* barrier0, Tnode292802* dest0); N_NIMCALL(Tnode292802*, liftparallel_478822_1773027539)(Tsym292834* owner0, Tnode292802* n0); N_NIMCALL(void, gendeepcopy_550374_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0); N_NIMCALL(NIM_BOOL, isdeepconstexpr_318566_2616423590)(Tnode292802* n0); 
N_NIMCALL(Ropeobj178006*, gensetnode_549664_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(void, gensetconstr_557496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x0); N_NIMCALL(void, exprcomplexconst_558684_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(void, genarrayconstr_558207_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(NIM_BOOL, handleconstexpr_554853_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(void, gentupleconstr_557618_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(void, genobjconstr_554903_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(Tsym292834*, lookupfieldagain_553154_839829468)(Tcproc529021* p0, Ttype292840* ty_553157_839829468, Tsym292834* field0, Ropeobj178006** r0); N_NIMCALL(void, genfieldcheck_553504_839829468)(Tcproc529021* p0, Tnode292802* e0, Ropeobj178006* obj0, Tsym292834* field0, Ttype292840* origty0); N_NIMCALL(Tnode292802*, newstrnode_293677_850551059)(Tnodekind292020 kind0, NimStringDesc* strval0); N_NIMCALL(void, gencast_556538_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, genconv_556633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(NIM_BOOL, comparetypes_326214_3876443242)(Ttype292840* x0, Ttype292840* y0, Tdistinctcompare324427 cmp0, Ttypecmpflag324429Set flags0); N_NIMCALL(void, genaddr_553051_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); static N_INLINE(NIM_BOOL, iscppref_552807_839829468)(Tcproc529021* p0, Ttype292840* typ0); N_NIMCALL(void, genbracketexpr_554277_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(void, genarrayelem_554093_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0); N_NIMCALL(NIM_BOOL, isconstexpr_318510_2616423590)(Tnode292802* n0); N_NIMCALL(void, 
genopenarrayelem_554169_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0); N_NIMCALL(void, genseqelem_554205_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0); N_NIMCALL(void, gencstringelem_554144_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0); N_NIMCALL(void, gentupleelem_553124_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, genderef_543921_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NIM_BOOL enforcederef0); N_NIMCALL(void, genrecordfield_553448_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(Ttype292840*, genrecordfieldaux_553096_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tloc292816* a0); N_NIMCALL(void, gencheckedrecordfield_554046_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0); N_NIMCALL(void, genblock_546083_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(NI, startblock_543978_839829468)(Tcproc529021* p0, NimStringDesc* start0, Ropeobj178006** args0, NI args0Len0); N_NIMCALL(void, endblock_544060_839829468)(Tcproc529021* p0); N_NIMCALL(void, endblock_544035_839829468)(Tcproc529021* p0, Ropeobj178006* blockend0); N_NIMCALL(Ropeobj178006*, blockbody_544025_839829468)(Tblock529019* b0); N_NIMCALL(void, genstmtlistexpr_558402_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(void, genif_544982_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(void, downconv_558581_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(NI, inheritancediff_326252_3876443242)(Ttype292840* a0, Ttype292840* b0); N_NIMCALL(void, upconv_558431_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(void, genrangechck_556591_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* magic0); N_NIMCALL(void, convstrtocstr_556643_839829468)(Tcproc529021* p0, Tnode292802* 
n0, Tloc292816* d0); N_NIMCALL(void, convcstrtostr_556655_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(void, genclosure_557836_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); static N_INLINE(NIM_BOOL, isconstclosure_557810_839829468)(Tnode292802* n0); static N_INLINE(NIM_BOOL, isroutine_297324_850551059)(Tsym292834* s0); N_NIMCALL(void, genwhilestmt_545985_839829468)(Tcproc529021* p0, Tnode292802* t0); static N_INLINE(Ropeobj178006*, assignlabel_544020_839829468)(Tblock529019* b0); N_NIMCALL(NIM_BOOL, stmtscontainpragma_528083_2036603609)(Tnode292802* n0, Tspecialword275003 w0); N_NIMCALL(void, gencomputedgoto_545744_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(void, genvarstmt_544854_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(void, gensinglevar_544276_839829468)(Tcproc529021* p0, Tnode292802* a0); N_NIMCALL(void, gengotovar_544258_839829468)(Tcproc529021* p0, Tnode292802* value0); N_NIMCALL(void, assignglobalvar_538819_839829468)(Tcproc529021* p0, Tsym292834* s0); N_NIMCALL(void, varindynamiclib_538812_839829468)(Tcgen529027* m0, Tsym292834* sym0); N_NIMCALL(void, registergcroot_543762_839829468)(Tcproc529021* p0, Tsym292834* v0); N_NIMCALL(Ropeobj178006*, gentraverseprocforglobal_538032_839829468)(Tcgen529027* m0, Tsym292834* s0); static N_INLINE(NIM_BOOL, isassignedimmediately_543781_839829468)(Tnode292802* n0); N_NIMCALL(NIM_BOOL, containshiddenpointer_320120_3876443242)(Ttype292840* typ0); static N_INLINE(void, loadinto_543928_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* a0); N_NIMCALL(void, genasgncall_543695_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0); N_NIMCALL(void, genclosurevar_544832_839829468)(Tcproc529021* p0, Tnode292802* a0); N_NIMCALL(void, genvartuple_543794_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(Tnode292802*, lowertupleunpacking_433037_2218250499)(Tnode292802* n0, Tsym292834* owner0); 
N_NIMCALL(void, genconststmt_544909_839829468)(Tcproc529021* p0, Tnode292802* t0); N_NIMCALL(NIM_BOOL, containscompiletimeonly_328721_3876443242)(Ttype292840* t0); static N_INLINE(NIM_BOOL, emitlazily_532248_839829468)(Tsym292834* s0); N_NIMCALL(void, gencase_547827_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0); N_NIMCALL(void, genstringcase_547417_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0); N_NIMCALL(NI, nextpoweroftwo_101629_1009420244)(NI x0); N_NIMCALL(void, gencasestringbranch_547100_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, Ropeobj178006* labl0, Ropeobj178006** branches0, NI branches0Len0); N_NIMCALL(NI64, hashstring_528100_2036603609)(NimStringDesc* s0); N_NIMCALL(Ropeobj178006*, gencasesecondpass_546965_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NI labid0, NI until0); N_NIMCALL(void, exprblock_544103_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(void, gencasegeneric_547087_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0); N_NIMCALL(Ropeobj178006*, genifforcaseuntil_547021_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc292816* a0); N_NIMCALL(void, gencasegenericbranch_546910_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj178006* labl0); N_NIMCALL(void, gengotoforcase_545673_839829468)(Tcproc529021* p0, Tnode292802* casestmt0); N_NIMCALL(void, genordinalcase_547725_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0); N_NIMCALL(NI, ifswitchsplitpoint_547616_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(NIM_BOOL, branchhastoobigrange_547576_839829468)(Tnode292802* b0); N_NIMCALL(void, genreturnstmt_545617_839829468)(Tcproc529021* p0, Tnode292802* t0); N_NIMCALL(void, blockleaveactions_545442_839829468)(Tcproc529021* 
p0, NI howmanytrys0, NI howmanyexcepts0); static N_INLINE(Tnode292802*, pop_318246_1689653243)(Tnodeseq292796** s0); N_NIMCALL(void, genbreakstmt_546444_839829468)(Tcproc529021* p0, Tnode292802* t0); N_NIMCALL(void, genasgn_549239_839829468)(Tcproc529021* p0, Tnode292802* e0, NIM_BOOL fastasgn0); N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_549080_839829468)(Tcproc529021* p0, Tnode292802* asgn0); N_NIMCALL(void, asgnfielddiscriminant_549209_839829468)(Tcproc529021* p0, Tnode292802* e0); N_NIMCALL(void, gendiscriminantcheck_549144_839829468)(Tcproc529021* p0, Tloc292816* a0, Tloc292816* tmp0, Ttype292840* objtype0, Tsym292834* field0); N_NIMCALL(Ropeobj178006*, discriminatortabledecl_536094_839829468)(Tcgen529027* m0, Ttype292840* objtype0, Tsym292834* d0); N_NIMCALL(void, genasmstmt_548659_839829468)(Tcproc529021* p0, Tnode292802* t0); N_NIMCALL(Ropeobj178006*, genasmoremitstmt_548529_839829468)(Tcproc529021* p0, Tnode292802* t0, NIM_BOOL isasmstmt0); N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest0, NI addlen0); N_NIMCALL(void, gentrycpp_547866_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0); static N_INLINE(void, gensimpleblock_544095_839829468)(Tcproc529021* p0, Tnode292802* stmts0); N_NIMCALL(void, gentry_548114_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0); N_NIMCALL(NIM_BOOL, isdefined_200011_1967573533)(NimStringDesc* symbol0); N_NIMCALL(void, line_532695_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* r0); static N_INLINE(Ropeobj178006*, pop_178530_1689653243)(TY191350** s0); N_NIMCALL(void, genraisestmt_546828_839829468)(Tcproc529021* p0, Tnode292802* t0); N_NIMCALL(NimStringDesc*, getraisefrmt_546824_839829468)(Tcproc529021* p0); N_NIMCALL(void, gentypesection_538184_839829468)(Tcgen529027* m0, Tnode292802* n0); N_NIMCALL(void, genpragma_549039_839829468)(Tcproc529021* p_549041_839829468, Tnode292802* n0); N_NIMCALL(Tspecialword275003, whichpragma_318911_2616423590)(Tnode292802* n0); 
N_NIMCALL(void, genemit_548839_839829468)(Tcproc529021* p0, Tnode292802* t0); N_NIMCALL(Tcfilesection529005, determinesection_548819_839829468)(Tnode292802* n0); N_NIMCALL(NIM_BOOL, nsuStartsWith)(NimStringDesc* s0, NimStringDesc* prefix0); N_NIMCALL(void, genbreakpoint_548862_839829468)(Tcproc529021* p0, Tnode292802* t0); N_NIMCALL(void, genwatchpoint_549016_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(Tsym292834*, skipgenericowner_297280_850551059)(Tsym292834* s0); N_NIMCALL(void, genparforstmt_546208_839829468)(Tcproc529021* p0, Tnode292802* t0); N_NIMCALL(void, genstate_544117_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(void, gengotostate_544144_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(void, genbreakstate_544229_839829468)(Tcproc529021* p0, Tnode292802* n0); N_NIMCALL(void, registermoduletomain_562243_839829468)(Tsym292834* m0); N_NIMCALL(Ropeobj178006*, getinitname_562235_839829468)(Tsym292834* m0); N_NIMCALL(Ropeobj178006*, getsomeinitname_561904_839829468)(Tsym292834* m0, NimStringDesc* suffix0); N_NIMCALL(Ropeobj178006*, getdatinitname_562239_839829468)(Tsym292834* m0); N_NIMCALL(Tnode292802*, generatemethoddispatchers_432151_3853300031)(void); N_NIMCALL(void, genmainproc_561729_839829468)(Tcgen529027* m0); N_NIMCALL(Ropeobj178006*, genfilenames_561688_839829468)(Tcgen529027* m0); N_NIMCALL(void, finishmodule_563420_839829468)(Tcgen529027* m0); N_NIMCALL(void, updatecachedmodule_563813_839829468)(Tcgen529027* m0); N_NIMCALL(NIM_BOOL, mergerequired_530832_2760143328)(Tcgen529027* m0); N_NIMCALL(void, mergefiles_531241_2760143328)(NimStringDesc* cfilename0, Tcgen529027* m0); N_NIMCALL(void, geninitcode_562286_839829468)(Tcgen529027* m0); N_NIMCALL(Ropeobj178006*, gensectionstart_530081_2760143328)(Tcprocsection529011 ps0); N_NIMCALL(Ropeobj178006*, gensectionend_530116_2760143328)(Tcprocsection529011 ps0); N_NIMCALL(Ropeobj178006*, gensectionstart_530015_2760143328)(Tcfilesection529005 fs0); N_NIMCALL(Ropeobj178006*, 
gensectionend_530050_2760143328)(Tcfilesection529005 fs0); N_NIMCALL(void, finishtypedescriptions_535842_839829468)(Tcgen529027* m0); N_NIMCALL(Ropeobj178006*, genmodule_562491_839829468)(Tcgen529027* m0, NimStringDesc* cfile0); N_NIMCALL(Ropeobj178006*, getfileheader_561683_839829468)(NimStringDesc* cfile0); N_NIMCALL(Ropeobj178006*, getcopyright_561665_839829468)(NimStringDesc* cfile0); N_NIMCALL(NimStringDesc*, getcompilecfilecmd_274284_2528170400)(NimStringDesc* cfilename0, NIM_BOOL isexternal0); static N_INLINE(void, addinttypes_561659_839829468)(Ropeobj178006** result0); N_NIMCALL(Ropeobj178006*, genmergeinfo_530203_2760143328)(Tcgen529027* m0); N_NIMCALL(void, generatethreadlocalstorage_538717_839829468)(Tcgen529027* m0); N_NIMCALL(void, generateheaders_560104_839829468)(Tcgen529027* m0); N_NIMCALL(NimStringDesc*, nsuReplaceChar)(NimStringDesc* s0, NIM_CHAR sub0, NIM_CHAR by0); N_NIMCALL(void, writerope_178836_2381377266)(Ropeobj178006* head0, NimStringDesc* filename0, NIM_BOOL usewarning0); N_NIMCALL(void, addfiletocompile_273863_2528170400)(NimStringDesc* filename0); N_NIMCALL(void, addfiletolink_273872_2528170400)(NimStringDesc* filename0); N_NIMCALL(void, writemodule_563637_839829468)(Tcgen529027* m0, NIM_BOOL pending0); N_NIMCALL(void, generatethreadvarssize_538771_839829468)(Tcgen529027* m0); N_NIMCALL(NIM_BOOL, shouldrecompile_563621_839829468)(Ropeobj178006* code0, NimStringDesc* cfile0); N_NIMCALL(NimStringDesc*, toobjfile_273859_2528170400)(NimStringDesc* filename0); N_NIMCALL(NIM_BOOL, writeropeifnotequal_179511_2381377266)(Ropeobj178006* r0, NimStringDesc* filename0); N_NIMCALL(NIM_BOOL, nosexistsFile)(NimStringDesc* filename0); N_NIMCALL(NIM_BOOL, nosfileNewer)(NimStringDesc* a0, NimStringDesc* b0); N_NIMCALL(void, writemapping_274789_2528170400)(Ropeobj178006* gsymbolmapping0); N_NIMCALL(void, writeheader_563149_839829468)(Tcgen529027* m0); N_NIMCALL(void, nossplitFile)(NimStringDesc* path0, TY128506* Result); N_NIMCALL(void, 
resetmodule_562763_839829468)(Tcgen529027* m0); N_NIMCALL(void, nullify_562833_839829468)(Ropeobj178006** arr0); N_NIMCALL(void, nullify_562858_839829468)(Ropeobj178006** arr0); STRING_LITERAL(T839829468_4, "\011", 1); STRING_LITERAL(T839829468_10, "compiler/cgen.nim", 17); NIM_CONST TY203018 T839829468_9 = {((NimStringDesc*) &T839829468_10), ((NI) 1158)} ; STRING_LITERAL(T839829468_11, "T", 1); STRING_LITERAL(T839829468_12, "_", 1); STRING_LITERAL(T839829468_13, "added pending module twice: ", 28); STRING_LITERAL(T839829468_14, ".h", 2); STRING_LITERAL(T839829468_15, ".cpp", 4); STRING_LITERAL(T839829468_16, ".m", 2); STRING_LITERAL(T839829468_17, ".c", 2); STRING_LITERAL(T839829468_18, "0", 1); STRING_LITERAL(T839829468_19, "$", 1); STRING_LITERAL(T839829468_20, "ropes: invalid format string $", 30); STRING_LITERAL(T839829468_21, "$N#line $2 $1$N", 15); STRING_LITERAL(T839829468_22, "N_LIB_IMPORT ", 13); STRING_LITERAL(T839829468_23, "N_LIB_EXPORT ", 13); STRING_LITERAL(T839829468_24, "static ", 7); STRING_LITERAL(T839829468_25, "mapType", 7); STRING_LITERAL(T839829468_26, "void", 4); STRING_LITERAL(T839829468_27, "getTypeDescAux: t == nil", 24); STRING_LITERAL(T839829468_28, "TY", 2); STRING_LITERAL(T839829468_29, "getTypeName: ", 13); STRING_LITERAL(T839829468_30, "void*", 5); STRING_LITERAL(T839829468_31, "NimStringDesc", 13); STRING_LITERAL(T839829468_32, "NimStringDesc*", 14); STRING_LITERAL(T839829468_33, "NCSTRING", 8); STRING_LITERAL(T839829468_34, "NIM_BOOL", 8); STRING_LITERAL(T839829468_35, "NIM_CHAR", 8); STRING_LITERAL(T839829468_36, "NI", 2); STRING_LITERAL(T839829468_37, "NI8", 3); STRING_LITERAL(T839829468_38, "NI16", 4); STRING_LITERAL(T839829468_39, "NI32", 4); STRING_LITERAL(T839829468_40, "NI64", 4); STRING_LITERAL(T839829468_41, "NF", 2); STRING_LITERAL(T839829468_42, "NF32", 4); STRING_LITERAL(T839829468_43, "NF64", 4); STRING_LITERAL(T839829468_44, "NF128", 5); STRING_LITERAL(T839829468_45, "NU", 2); STRING_LITERAL(T839829468_46, "NU8", 3); 
STRING_LITERAL(T839829468_47, "NU16", 4); STRING_LITERAL(T839829468_48, "NU32", 4); STRING_LITERAL(T839829468_49, "NU64", 4); NIM_CONST TY533943 Numericaltypetostr_533941_839829468 = {((NimStringDesc*) &T839829468_36), ((NimStringDesc*) &T839829468_37), ((NimStringDesc*) &T839829468_38), ((NimStringDesc*) &T839829468_39), ((NimStringDesc*) &T839829468_40), ((NimStringDesc*) &T839829468_41), ((NimStringDesc*) &T839829468_42), ((NimStringDesc*) &T839829468_43), ((NimStringDesc*) &T839829468_44), ((NimStringDesc*) &T839829468_45), ((NimStringDesc*) &T839829468_46), ((NimStringDesc*) &T839829468_47), ((NimStringDesc*) &T839829468_48), ((NimStringDesc*) &T839829468_49)} ; STRING_LITERAL(T839829468_50, "tyStatic for getSimpleTypeDesc", 30); STRING_LITERAL(T839829468_51, "cannot generate C type for: ", 28); STRING_LITERAL(T839829468_52, "&", 1); STRING_LITERAL(T839829468_53, "*", 1); STRING_LITERAL(T839829468_54, "$1 $2;$n", 8); STRING_LITERAL(T839829468_55, "typedef $1 $2 $2;$n", 19); STRING_LITERAL(T839829468_56, "union", 5); STRING_LITERAL(T839829468_57, "struct", 6); STRING_LITERAL(T839829468_58, "getTypeForward(", 15); STRING_LITERAL(T839829468_59, "typedef NI32 $1;$n", 18); STRING_LITERAL(T839829468_60, "typedef NU8 $1;$n", 17); STRING_LITERAL(T839829468_61, "typedef NU16 $1;$n", 18); STRING_LITERAL(T839829468_62, "typedef NI64 $1;$n", 18); STRING_LITERAL(T839829468_63, "getTypeDescAux: enum", 20); STRING_LITERAL(T839829468_64, "typedef $1_PTR($2, $3) $4;$n", 28); STRING_LITERAL(T839829468_65, "N_NIMCALL", 9); STRING_LITERAL(T839829468_66, "N_STDCALL", 9); STRING_LITERAL(T839829468_67, "N_CDECL", 7); STRING_LITERAL(T839829468_68, "N_SAFECALL", 10); STRING_LITERAL(T839829468_69, "N_SYSCALL", 9); STRING_LITERAL(T839829468_70, "N_INLINE", 8); STRING_LITERAL(T839829468_71, "N_NOINLINE", 10); STRING_LITERAL(T839829468_72, "N_FASTCALL", 10); STRING_LITERAL(T839829468_73, "N_CLOSURE", 9); STRING_LITERAL(T839829468_74, "N_NOCONV", 8); NIM_CONST TY292016 
Callingconvtostr_533587_839829468 = {((NimStringDesc*) &T839829468_65), ((NimStringDesc*) &T839829468_66), ((NimStringDesc*) &T839829468_67), ((NimStringDesc*) &T839829468_68), ((NimStringDesc*) &T839829468_69), ((NimStringDesc*) &T839829468_70), ((NimStringDesc*) &T839829468_71), ((NimStringDesc*) &T839829468_72), ((NimStringDesc*) &T839829468_73), ((NimStringDesc*) &T839829468_74)} ; STRING_LITERAL(T839829468_75, "typedef struct {$nN_NIMCALL_PTR($2, ClPrc) $3;$nvoid* ClEnv;$n}" " $1;$n", 69); STRING_LITERAL(T839829468_76, "struct $2 : #TGenericSeq {$n", 28); STRING_LITERAL(T839829468_77, "struct $2 {$n #TGenericSeq Sup;$n", 34); STRING_LITERAL(T839829468_78, " $1 data[SEQ_DECL_SIZE];$n};$n", 31); STRING_LITERAL(T839829468_79, "TGenericSeq", 11); STRING_LITERAL(T839829468_80, "typedef $1 $2[$3];$n", 20); STRING_LITERAL(T839829468_81, "invalid apostrophe type parameter index", 39); STRING_LITERAL(T839829468_82, "<", 1); STRING_LITERAL(T839829468_83, " COMMA ", 7); STRING_LITERAL(T839829468_84, "> ", 2); extern NIM_CONST TY273427 Cc_273413_2528170400; STRING_LITERAL(T839829468_85, " {$n", 4); STRING_LITERAL(T839829468_86, " {$n#TNimType* m_type;$n", 24); STRING_LITERAL(T839829468_87, " : public $1 {$n", 16); STRING_LITERAL(T839829468_88, " {$n $1 Sup;$n", 15); STRING_LITERAL(T839829468_89, "genRecordFieldsAux", 18); STRING_LITERAL(T839829468_90, "$1.$2", 5); STRING_LITERAL(T839829468_91, "S", 1); STRING_LITERAL(T839829468_92, "struct {", 8); STRING_LITERAL(T839829468_93, "} $1;$n", 7); STRING_LITERAL(T839829468_94, "genRecordFieldsAux(record case branch)", 38); STRING_LITERAL(T839829468_95, "union{$n$1} $2;$n", 17); STRING_LITERAL(T839829468_96, "mangleRecFieldName", 18); STRING_LITERAL(T839829468_97, "$1 $2[SEQ_DECL_SIZE];$n", 23); STRING_LITERAL(T839829468_98, "$1 $2:$3;$n", 11); STRING_LITERAL(T839829468_99, "genRecordFieldsAux()", 20); STRING_LITERAL(T839829468_100, "char dummy;$n", 13); STRING_LITERAL(T839829468_101, "};", 2); STRING_LITERAL(T839829468_102, "$1 
$2 {$n", 9); STRING_LITERAL(T839829468_103, "$1 Field$2;$n", 13); STRING_LITERAL(T839829468_104, "char dummy;", 11); STRING_LITERAL(T839829468_105, "Set", 3); STRING_LITERAL(T839829468_106, "typedef NU$2 $1;$n", 18); STRING_LITERAL(T839829468_107, "typedef NU8 $1[$2];$n", 21); STRING_LITERAL(T839829468_108, "getTypeDescAux(", 15); STRING_LITERAL(T839829468_109, "genProcParams", 13); STRING_LITERAL(T839829468_110, ", ", 2); STRING_LITERAL(T839829468_111, " ", 1); STRING_LITERAL(T839829468_112, ", NI $1Len$2", 12); STRING_LITERAL(T839829468_113, " Result", 7); STRING_LITERAL(T839829468_114, "void* ClEnv", 11); STRING_LITERAL(T839829468_115, "...", 3); STRING_LITERAL(T839829468_116, "void)", 5); STRING_LITERAL(T839829468_117, ")", 1); STRING_LITERAL(T839829468_118, "(", 1); STRING_LITERAL(T839829468_119, "$1($2, $3)$4", 12); STRING_LITERAL(T839829468_120, "proc has no result symbol", 25); STRING_LITERAL(T839829468_121, " register", 9); STRING_LITERAL(T839829468_122, " volatile", 9); STRING_LITERAL(T839829468_123, "$1 = $2;$n", 10); STRING_LITERAL(T839829468_124, "(*$1)", 5); STRING_LITERAL(T839829468_125, ";", 1); STRING_LITERAL(T839829468_126, "FR.s[$1].address = (void*)$3; FR.s[$1].typ = $4; FR.s[$1].name " "= $2;$n", 70); STRING_LITERAL(T839829468_127, "NTI$1", 5); STRING_LITERAL(T839829468_128, "(&", 2); STRING_LITERAL(T839829468_129, "TNimType", 8); STRING_LITERAL(T839829468_130, "TNimNode", 8); STRING_LITERAL(T839829468_131, "extern TNimType $1; /* $2 */$n", 30); STRING_LITERAL(T839829468_132, "0", 1); STRING_LITERAL(T839829468_133, "void*", 5); STRING_LITERAL(T839829468_134, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53); STRING_LITERAL(T839829468_135, "$1.flags = $2;$n", 16); STRING_LITERAL(T839829468_136, "TNimType $1; /* $2 */$n", 23); STRING_LITERAL(T839829468_137, "genTypeInfo(", 12); STRING_LITERAL(T839829468_138, "$1[$2]", 6); STRING_LITERAL(T839829468_139, "static TNimNode* $1[$2];$n", 26); STRING_LITERAL(T839829468_140, "$1[$2] = &$3;$n", 
15); STRING_LITERAL(T839829468_141, "$1.kind = 1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$" "n$1.name = \"Field$3\";$n", 86); STRING_LITERAL(T839829468_142, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45); STRING_LITERAL(T839829468_143, "$1.len = $2; $1.kind = 2;$n", 27); STRING_LITERAL(T839829468_144, "$1.node = &$2;$n", 16); STRING_LITERAL(T839829468_145, "#nimGCvisit((void*)$1, op);$n", 29); STRING_LITERAL(T839829468_146, "N_NIMCALL(void, $1)(void* p, NI op)", 35); STRING_LITERAL(T839829468_147, "$1 a;$n", 7); STRING_LITERAL(T839829468_148, "a = ($1)p;$n", 12); STRING_LITERAL(T839829468_149, "LOC", 3); STRING_LITERAL(T839829468_150, "$1 = ($2)0;$n", 13); STRING_LITERAL(T839829468_151, "<string.h>", 10); STRING_LITERAL(T839829468_152, "memset((void*)$1, 0, sizeof($2));$n", 35); STRING_LITERAL(T839829468_153, ".Sup", 4); STRING_LITERAL(T839829468_154, "$1.m_type = $2;$n", 17); STRING_LITERAL(T839829468_155, "#objectInit($1, $2);$n", 22); STRING_LITERAL(T839829468_156, "for ($1 = 0; $1 < $2->$3; $1++) {$n", 35); STRING_LITERAL(T839829468_157, "len", 3); STRING_LITERAL(T839829468_158, "Sup.len", 7); STRING_LITERAL(T839829468_159, "for ($1 = 0; $1 < $2; $1++) {$n", 31); STRING_LITERAL(T839829468_160, "}$n", 3); STRING_LITERAL(T839829468_161, "$1.Sup", 6); STRING_LITERAL(T839829468_162, "genTraverseProc", 15); STRING_LITERAL(T839829468_163, "switch ($1.$2) {$n", 18); STRING_LITERAL(T839829468_164, "case $1 ... 
$2:$n", 17); STRING_LITERAL(T839829468_165, "genLiteral: ty is nil", 21); STRING_LITERAL(T839829468_166, "(-2147483647 -1)", 16); STRING_LITERAL(T839829468_167, "IL64($1)", 8); STRING_LITERAL(T839829468_168, "(IL64(-9223372036854775807) - IL64(1))", 38); STRING_LITERAL(T839829468_169, "NIM_TRUE", 8); STRING_LITERAL(T839829468_170, "NIM_FALSE", 9); STRING_LITERAL(T839829468_171, "ULL", 3); STRING_LITERAL(T839829468_172, "(($1) $2)", 9); STRING_LITERAL(T839829468_173, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45); STRING_LITERAL(T839829468_174, "NIM_NIL", 7); STRING_LITERAL(T839829468_175, "((#NimStringDesc*) NIM_NIL)", 27); STRING_LITERAL(T839829468_176, "((#NimStringDesc*) &$1)", 23); STRING_LITERAL(T839829468_177, "STRING_LITERAL($1, $2, $3);$n", 29); STRING_LITERAL(T839829468_178, "((#NimStringDesc*) &$1$2)", 25); STRING_LITERAL(T839829468_179, "genLiteral(", 11); STRING_LITERAL(T839829468_180, "case $1:$n", 10); STRING_LITERAL(T839829468_181, "default:$n", 10); STRING_LITERAL(T839829468_182, "break;$n", 8); STRING_LITERAL(T839829468_183, "} $n", 4); STRING_LITERAL(T839829468_184, "genTraverseProc()", 17); STRING_LITERAL(T839829468_185, "$1.Field$2", 10); STRING_LITERAL(T839829468_186, "$1.ClEnv", 8); STRING_LITERAL(T839829468_187, "$1->data[$2]", 12); STRING_LITERAL(T839829468_188, "a", 1); STRING_LITERAL(T839829468_189, "(*a)", 4); STRING_LITERAL(T839829468_190, "$1 {$n$2$3$4}$n", 15); STRING_LITERAL(T839829468_191, "$1;$n", 5); STRING_LITERAL(T839829468_192, "$1.marker = $2;$n", 17); STRING_LITERAL(T839829468_193, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43); STRING_LITERAL(T839829468_194, "$1.offset = $2;$n", 17); STRING_LITERAL(T839829468_195, "NI $1;$n", 8); STRING_LITERAL(T839829468_196, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41); STRING_LITERAL(T839829468_197, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o" "ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127); STRING_LITERAL(T839829468_198, "$1.len 
= $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61); STRING_LITERAL(T839829468_199, "$1.flags = 1<<2;$n", 18); STRING_LITERAL(T839829468_200, "anonymous obj with discriminator", 32); STRING_LITERAL(T839829468_201, "NimDT_$1_$2", 11); STRING_LITERAL(T839829468_202, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107); STRING_LITERAL(T839829468_203, "TNimNode* $1[$2];$n", 19); STRING_LITERAL(T839829468_204, "genObjectFields; nkOfBranch broken", 34); STRING_LITERAL(T839829468_205, "genObjectFields(nkRecCase)", 26); STRING_LITERAL(T839829468_206, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n", 74); STRING_LITERAL(T839829468_207, "genObjectFields", 15); STRING_LITERAL(T839829468_208, "$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49); STRING_LITERAL(T839829468_209, "\011return $1;$n", 13); STRING_LITERAL(T839829468_210, "Result", 6); STRING_LITERAL(T839829468_211, "closure generation failed", 25); STRING_LITERAL(T839829468_212, "$1 = ($2) ClEnv;$n", 18); STRING_LITERAL(T839829468_213, "__declspec(noreturn) ", 21); STRING_LITERAL(T839829468_214, "__declspec(naked) ", 18); STRING_LITERAL(T839829468_215, "$N$1 {$n$2$3$4}$N$N", 19); STRING_LITERAL(T839829468_216, "$N$1 {$N", 8); STRING_LITERAL(T839829468_217, "struct {$1} GCFRAME;$n", 22); STRING_LITERAL(T839829468_218, "nimFrame", 8); STRING_LITERAL(T839829468_219, "VarSlot", 7); STRING_LITERAL(T839829468_220, "\011nimfrs($1, $2, $3, $4)$N", 25); STRING_LITERAL(T839829468_221, "\011nimfr($1, $2)$N", 16); STRING_LITERAL(T839829468_222, "\011#nimProfile();$n", 17); STRING_LITERAL(T839829468_223, "{", 1); STRING_LITERAL(T839829468_224, "\011}BeforeRet: ;$n", 16); STRING_LITERAL(T839829468_225, "if (((NU)&GCFRAME) < 4096) #nimGCFrame(&GCFRAME);$n", 51); STRING_LITERAL(T839829468_226, "\011#popFrame();$n", 15); STRING_LITERAL(T839829468_227, "}$N", 3); STRING_LITERAL(T839829468_228, "static void* $1;$n", 18); 
STRING_LITERAL(T839829468_229, "||", 2); STRING_LITERAL(T839829468_230, "($1 = #nimLoadLibrary((#NimStringDesc*) &$2))$n", 47); STRING_LITERAL(T839829468_231, "if (!($1)) #nimLoadLibraryError((#NimStringDesc*) &$2);$n", 57); STRING_LITERAL(T839829468_232, "if (!($1 = #nimLoadLibrary($2))) #nimLoadLibraryError($2);$n", 60); STRING_LITERAL(T839829468_233, "loadDynamicLib", 14); STRING_LITERAL(T839829468_234, "Dl_$1", 5); STRING_LITERAL(T839829468_235, "\011$1 = ($2) ($3$4));$n", 21); NIM_CONST TY203018 T839829468_236 = {((NimStringDesc*) &T839829468_10), ((NI) 535)} ; STRING_LITERAL(T839829468_237, "wrong index: ", 13); STRING_LITERAL(T839829468_238, "\011$1 = ($2) #nimGetProcAddr($3, $4);$n", 37); STRING_LITERAL(T839829468_239, "$2 $1;$n", 8); STRING_LITERAL(T839829468_240, "extern ", 7); STRING_LITERAL(T839829468_241, "NIM_THREADVAR ", 14); STRING_LITERAL(T839829468_242, " $1;$n", 6); STRING_LITERAL(T839829468_243, "cgsym: ", 7); STRING_LITERAL(T839829468_244, ": ", 2); STRING_LITERAL(T839829468_245, "extern $1 $2;$n", 15); STRING_LITERAL(T839829468_246, "extern \"C\" ", 11); STRING_LITERAL(T839829468_247, " __attribute__((naked))", 23); STRING_LITERAL(T839829468_248, " __attribute__((noreturn))", 26); STRING_LITERAL(T839829468_249, "#asgnRef((void**) $1, $2);$n", 28); STRING_LITERAL(T839829468_250, "#asgnRefNoCycle((void**) $1, $2);$n", 35); STRING_LITERAL(T839829468_251, "#unsureAsgnRef((void**) $1, $2);$n", 34); STRING_LITERAL(T839829468_252, "#genericSeqAssign($1, $2, $3);$n", 32); STRING_LITERAL(T839829468_253, "$1 = #copyString($2);$n", 23); STRING_LITERAL(T839829468_254, "$3 = $1; $1 = #copyStringRC1($2);$n", 35); STRING_LITERAL(T839829468_255, "if ($1) #nimGCunrefNoCycle($1);$n", 33); STRING_LITERAL(T839829468_256, "#unsureAsgnRef((void**) $1, #copyString($2));$n", 47); STRING_LITERAL(T839829468_257, ".", 1); STRING_LITERAL(T839829468_258, "ClEnv", 5); STRING_LITERAL(T839829468_259, "$1.ClPrc = $2.ClPrc;$n", 22); STRING_LITERAL(T839829468_260, "Field$1", 
7); STRING_LITERAL(T839829468_261, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($3));$n", 53); STRING_LITERAL(T839829468_262, "#genericShallowAssign((void*)$1, (void*)$2, $3);$n", 50); STRING_LITERAL(T839829468_263, "#genericAssign((void*)$1, (void*)$2, $3);$n", 43); STRING_LITERAL(T839829468_265, "compiler/ccgexprs.nim", 21); NIM_CONST TY203018 T839829468_264 = {((NimStringDesc*) &T839829468_265), ((NI) 320)} ; STRING_LITERAL(T839829468_266, "#genericAssignOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 60); STRING_LITERAL(T839829468_267, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($1[0])*$1Len0);$n", 63); STRING_LITERAL(T839829468_268, "memcpy((void*)$1, (NIM_CONST void*)$2, $3);$n", 45); STRING_LITERAL(T839829468_269, "genAssignment: ", 15); STRING_LITERAL(T839829468_270, "request to generate code for .compileTime proc: ", 48); STRING_LITERAL(T839829468_271, "expr: proc not init ", 20); STRING_LITERAL(T839829468_272, "NIM_CONST $1 $2 = $3;$n", 23); STRING_LITERAL(T839829468_273, "{$n", 3); STRING_LITERAL(T839829468_274, "0x$1,$n", 7); STRING_LITERAL(T839829468_275, "0x$1, ", 6); STRING_LITERAL(T839829468_276, "0x$1}$n", 7); STRING_LITERAL(T839829468_277, "{{$1, $1}", 9); STRING_LITERAL(T839829468_278, ", {", 3); STRING_LITERAL(T839829468_279, ",$n", 3); STRING_LITERAL(T839829468_280, "}", 1); STRING_LITERAL(T839829468_281, "NIM_CONST struct {$n #TGenericSeq Sup;$n $1 data[$2];$n} $3 =" " $4;$n", 69); STRING_LITERAL(T839829468_282, "(($1)&$2)", 9); STRING_LITERAL(T839829468_283, "$1,$n", 5); STRING_LITERAL(T839829468_284, "extern NIM_CONST $1 $2;$n", 25); STRING_LITERAL(T839829468_285, "expr: var not init ", 19); STRING_LITERAL(T839829468_286, "\011NimThreadVars* NimTV;$n", 24); STRING_LITERAL(T839829468_287, "\011NimTV = (NimThreadVars*) #GetThreadLocalVars();$n", 50); STRING_LITERAL(T839829468_288, "NimTV->", 7); STRING_LITERAL(T839829468_289, "expr: temp not init ", 20); STRING_LITERAL(T839829468_290, "expr: param not init ", 21); 
STRING_LITERAL(T839829468_291, "expr(", 5); STRING_LITERAL(T839829468_292, "); unknown symbol", 17); STRING_LITERAL(T839829468_293, "//", 2); STRING_LITERAL(T839829468_294, "#endb($1, $2);$n", 16); STRING_LITERAL(T839829468_295, "nimln($1, $2);$n", 16); STRING_LITERAL(T839829468_296, "LA", 2); STRING_LITERAL(T839829468_297, "if ($1) goto $2;$n", 18); STRING_LITERAL(T839829468_298, "if (!($1)) goto $2;$n", 21); STRING_LITERAL(T839829468_299, "$1: ;$n", 7); STRING_LITERAL(T839829468_300, "!($1)", 5); STRING_LITERAL(T839829468_301, "$1", 2); STRING_LITERAL(T839829468_302, "($3)((NU$2) ~($1))", 18); STRING_LITERAL(T839829468_303, "-($1)", 5); STRING_LITERAL(T839829468_304, "($1 > 0? ($1) : -($1))", 22); STRING_LITERAL(T839829468_305, "(($3)(NU)(NU8)($1))", 19); STRING_LITERAL(T839829468_306, "(($3)(NU64)(NU8)($1))", 21); STRING_LITERAL(T839829468_307, "(($3)(NU)(NU16)($1))", 20); STRING_LITERAL(T839829468_308, "(($3)(NU64)(NU16)($1))", 22); STRING_LITERAL(T839829468_309, "(($3)(NU64)(NU32)($1))", 22); STRING_LITERAL(T839829468_310, "(($3)(NU64)(NU)($1))", 20); STRING_LITERAL(T839829468_311, "(($3)(NU8)(NU)($1))", 19); STRING_LITERAL(T839829468_312, "(($3)(NU16)(NU)($1))", 20); STRING_LITERAL(T839829468_313, "(($3)(NU32)(NU64)($1))", 22); STRING_LITERAL(T839829468_314, "((double) ($1))", 15); STRING_LITERAL(T839829468_315, "float64ToInt32($1)", 18); STRING_LITERAL(T839829468_316, "float64ToInt64($1)", 18); NIM_CONST TY552655 unarithtab_552653_839829468 = {((NimStringDesc*) &T839829468_300), ((NimStringDesc*) &T839829468_301), ((NimStringDesc*) &T839829468_302), ((NimStringDesc*) &T839829468_301), ((NimStringDesc*) &T839829468_303), ((NimStringDesc*) &T839829468_304), ((NimStringDesc*) &T839829468_305), ((NimStringDesc*) &T839829468_306), ((NimStringDesc*) &T839829468_307), ((NimStringDesc*) &T839829468_308), ((NimStringDesc*) &T839829468_309), ((NimStringDesc*) &T839829468_310), ((NimStringDesc*) &T839829468_311), ((NimStringDesc*) &T839829468_312), ((NimStringDesc*) 
&T839829468_313), ((NimStringDesc*) &T839829468_314), ((NimStringDesc*) &T839829468_314), ((NimStringDesc*) &T839829468_315), ((NimStringDesc*) &T839829468_316)} ; STRING_LITERAL(T839829468_317, "if ($1 == $2) #raiseOverflow();$n", 33); STRING_LITERAL(T839829468_318, "((NI$2)-($1))", 13); NIM_CONST TY551642 opr_551640_839829468 = {((NimStringDesc*) &T839829468_318), ((NimStringDesc*) &T839829468_303), ((NimStringDesc*) &T839829468_304)} ; STRING_LITERAL(T839829468_319, "(($4)($2) $1 ($4)($3))", 22); STRING_LITERAL(T839829468_320, "+", 1); STRING_LITERAL(T839829468_321, "-", 1); STRING_LITERAL(T839829468_322, "/", 1); NIM_CONST TY556765 opr_556763_839829468 = {((NimStringDesc*) &T839829468_320), ((NimStringDesc*) &T839829468_321), ((NimStringDesc*) &T839829468_53), ((NimStringDesc*) &T839829468_322)} ; STRING_LITERAL(T839829468_323, "#nanCheck($1);$n", 16); STRING_LITERAL(T839829468_324, "#infCheck($1);$n", 16); STRING_LITERAL(T839829468_325, "(($4)($1) + ($4)($2))", 21); STRING_LITERAL(T839829468_326, "(($4)($1) - ($4)($2))", 21); STRING_LITERAL(T839829468_327, "(($4)($1) * ($4)($2))", 21); STRING_LITERAL(T839829468_328, "(($4)($1) / ($4)($2))", 21); STRING_LITERAL(T839829468_329, "($4)((NU$3)($1) >> (NU$3)($2))", 30); STRING_LITERAL(T839829468_330, "($4)((NU$3)($1) << (NU$3)($2))", 30); STRING_LITERAL(T839829468_331, "($4)($1 & $2)", 13); STRING_LITERAL(T839829468_332, "($4)($1 | $2)", 13); STRING_LITERAL(T839829468_333, "($4)($1 ^ $2)", 13); STRING_LITERAL(T839829468_334, "(($1 <= $2) ? $1 : $2)", 22); STRING_LITERAL(T839829468_335, "(($1 >= $2) ? 
$1 : $2)", 22); STRING_LITERAL(T839829468_336, "($4)((NU$3)($1) + (NU$3)($2))", 29); STRING_LITERAL(T839829468_337, "($4)((NU$3)($1) - (NU$3)($2))", 29); STRING_LITERAL(T839829468_338, "($4)((NU$3)($1) * (NU$3)($2))", 29); STRING_LITERAL(T839829468_339, "($4)((NU$3)($1) / (NU$3)($2))", 29); STRING_LITERAL(T839829468_340, "($4)((NU$3)($1) % (NU$3)($2))", 29); STRING_LITERAL(T839829468_341, "($1 == $2)", 10); STRING_LITERAL(T839829468_342, "($1 <= $2)", 10); STRING_LITERAL(T839829468_343, "($1 < $2)", 9); STRING_LITERAL(T839829468_344, "((NU$3)($1) <= (NU$3)($2))", 26); STRING_LITERAL(T839829468_345, "((NU$3)($1) < (NU$3)($2))", 25); STRING_LITERAL(T839829468_346, "((NU64)($1) <= (NU64)($2))", 26); STRING_LITERAL(T839829468_347, "((NU64)($1) < (NU64)($2))", 25); STRING_LITERAL(T839829468_348, "((NU8)($1) == (NU8)($2))", 24); STRING_LITERAL(T839829468_349, "((NU8)($1) <= (NU8)($2))", 24); STRING_LITERAL(T839829468_350, "((NU8)($1) < (NU8)($2))", 23); STRING_LITERAL(T839829468_351, "($1 != $2)", 10); NIM_CONST TY551828 binarithtab_551826_839829468 = {((NimStringDesc*) &T839829468_325), ((NimStringDesc*) &T839829468_326), ((NimStringDesc*) &T839829468_327), ((NimStringDesc*) &T839829468_328), ((NimStringDesc*) &T839829468_329), ((NimStringDesc*) &T839829468_330), ((NimStringDesc*) &T839829468_331), ((NimStringDesc*) &T839829468_332), ((NimStringDesc*) &T839829468_333), ((NimStringDesc*) &T839829468_334), ((NimStringDesc*) &T839829468_335), ((NimStringDesc*) &T839829468_334), ((NimStringDesc*) &T839829468_335), ((NimStringDesc*) &T839829468_336), ((NimStringDesc*) &T839829468_337), ((NimStringDesc*) &T839829468_338), ((NimStringDesc*) &T839829468_339), ((NimStringDesc*) &T839829468_340), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_344), ((NimStringDesc*) &T839829468_345), 
((NimStringDesc*) &T839829468_346), ((NimStringDesc*) &T839829468_347), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_348), ((NimStringDesc*) &T839829468_349), ((NimStringDesc*) &T839829468_350), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_351)} ; STRING_LITERAL(T839829468_352, "($1.ClPrc == $2.ClPrc && $1.ClEnv == $2.ClEnv)", 46); STRING_LITERAL(T839829468_353, "($#)($# + $#)", 13); STRING_LITERAL(T839829468_354, "($#)($# - $#)", 13); STRING_LITERAL(T839829468_355, "($#)($# * $#)", 13); STRING_LITERAL(T839829468_356, "($#)($# / $#)", 13); STRING_LITERAL(T839829468_357, "($#)($# % $#)", 13); NIM_CONST TY551281 opr_551279_839829468 = {((NimStringDesc*) &T839829468_353), ((NimStringDesc*) &T839829468_354), ((NimStringDesc*) &T839829468_355), ((NimStringDesc*) &T839829468_356), ((NimStringDesc*) &T839829468_357), ((NimStringDesc*) &T839829468_353), ((NimStringDesc*) &T839829468_354)} ; STRING_LITERAL(T839829468_358, "((NU8)($1))", 11); STRING_LITERAL(T839829468_359, "if ($1 < $2 || $1 > $3) #raiseOverflow();$n", 43); STRING_LITERAL(T839829468_360, "$# = #addInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_361, "$# = #subInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_362, "$# = #mulInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_363, "$# = #divInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_364, "$# = #modInt64($#, $#);$n", 25); NIM_CONST TY551281 prc64_551274_839829468 = {((NimStringDesc*) &T839829468_360), ((NimStringDesc*) &T839829468_361), ((NimStringDesc*) &T839829468_362), ((NimStringDesc*) &T839829468_363), ((NimStringDesc*) &T839829468_364), ((NimStringDesc*) &T839829468_360), ((NimStringDesc*) &T839829468_361)} ; 
STRING_LITERAL(T839829468_365, "$# = #addInt($#, $#);$n", 23); STRING_LITERAL(T839829468_366, "$# = #subInt($#, $#);$n", 23); STRING_LITERAL(T839829468_367, "$# = #mulInt($#, $#);$n", 23); STRING_LITERAL(T839829468_368, "$# = #divInt($#, $#);$n", 23); STRING_LITERAL(T839829468_369, "$# = #modInt($#, $#);$n", 23); NIM_CONST TY551281 prc_551269_839829468 = {((NimStringDesc*) &T839829468_365), ((NimStringDesc*) &T839829468_366), ((NimStringDesc*) &T839829468_367), ((NimStringDesc*) &T839829468_368), ((NimStringDesc*) &T839829468_369), ((NimStringDesc*) &T839829468_365), ((NimStringDesc*) &T839829468_366)} ; STRING_LITERAL(T839829468_370, "($#)($#)", 8); STRING_LITERAL(T839829468_371, "#reprInt((NI64)$1)", 18); STRING_LITERAL(T839829468_372, "#reprFloat($1)", 14); STRING_LITERAL(T839829468_373, "#reprBool($1)", 13); STRING_LITERAL(T839829468_374, "#reprChar($1)", 13); STRING_LITERAL(T839829468_375, "#reprEnum((NI)$1, $2)", 21); STRING_LITERAL(T839829468_376, "#reprStr($1)", 12); STRING_LITERAL(T839829468_377, "#reprSet($1, $2)", 16); STRING_LITERAL(T839829468_378, "$1, $1Len0", 10); STRING_LITERAL(T839829468_379, "$1->data, $1->$2", 16); STRING_LITERAL(T839829468_380, "$1, $2", 6); STRING_LITERAL(T839829468_381, "genRepr()", 9); STRING_LITERAL(T839829468_382, "#reprOpenArray($1, $2)", 22); STRING_LITERAL(T839829468_383, "#reprAny($1, $2)", 16); STRING_LITERAL(T839829468_384, "\'repr\' doesn\'t support \'void\' type", 34); STRING_LITERAL(T839829468_385, "($1 - 1)", 8); STRING_LITERAL(T839829468_386, "#subInt($1, 1)", 14); STRING_LITERAL(T839829468_387, "binaryStmt", 10); STRING_LITERAL(T839829468_388, "$1 += $2;$n", 11); STRING_LITERAL(T839829468_389, "$1 -= $2;$n", 11); NIM_CONST TY557052 opr_557050_839829468 = {((NimStringDesc*) &T839829468_388), ((NimStringDesc*) &T839829468_389)} ; NIM_CONST TY557052 fun64_557055_839829468 = {((NimStringDesc*) &T839829468_360), ((NimStringDesc*) &T839829468_361)} ; NIM_CONST TY557052 fun_557060_839829468 = {((NimStringDesc*) 
&T839829468_365), ((NimStringDesc*) &T839829468_366)} ; STRING_LITERAL(T839829468_390, "#appendChar($1, $2);$n", 22); STRING_LITERAL(T839829468_391, "$1->$2 + ", 9); STRING_LITERAL(T839829468_392, "#appendString($1, $2);$n", 24); STRING_LITERAL(T839829468_393, "$1 = #rawNewString($2$3);$n", 27); STRING_LITERAL(T839829468_394, "$1 = #addChar($1, $2);$n", 24); STRING_LITERAL(T839829468_395, "$1 = #resizeString($1, $2$3);$n", 31); STRING_LITERAL(T839829468_396, "$1 = ($2) #incrSeqV2(&($1)->Sup, sizeof($3));$n", 47); STRING_LITERAL(T839829468_397, "$1 = ($2) #incrSeqV2($1, sizeof($3));$n", 39); STRING_LITERAL(T839829468_398, "$1->data[$1->$2]", 16); STRING_LITERAL(T839829468_399, "++$1->$2;$n", 11); STRING_LITERAL(T839829468_400, "(($1) && ($1)->$2 == 0)", 23); STRING_LITERAL(T839829468_401, "#eqStrings($1, $2)", 18); STRING_LITERAL(T839829468_402, "(#cmpStrings($1, $2) <= 0)", 26); STRING_LITERAL(T839829468_403, "(#cmpStrings($1, $2) < 0)", 25); STRING_LITERAL(T839829468_404, "$1.ClPrc == 0", 13); STRING_LITERAL(T839829468_405, "$1 == 0", 7); STRING_LITERAL(T839829468_406, "#nimIntToStr($1)", 16); STRING_LITERAL(T839829468_407, "#nimInt64ToStr($1)", 18); STRING_LITERAL(T839829468_408, "#nimBoolToStr($1)", 17); STRING_LITERAL(T839829468_409, "#nimCharToStr($1)", 17); STRING_LITERAL(T839829468_410, "#nimFloatToStr($1)", 18); STRING_LITERAL(T839829468_411, "#cstrToNimstr($1)", 17); STRING_LITERAL(T839829468_412, "no \'of\' operator available for pure objects", 43); STRING_LITERAL(T839829468_413, "(($1) && ($2))", 14); STRING_LITERAL(T839829468_414, "$1.m_type == $2", 15); STRING_LITERAL(T839829468_415, "Nim_OfCheck_CACHE", 17); STRING_LITERAL(T839829468_416, "static TNimType* $#[2];$n", 25); STRING_LITERAL(T839829468_417, "#isObjWithCache($#.m_type, $#, $#)", 34); STRING_LITERAL(T839829468_418, "($1)", 4); STRING_LITERAL(T839829468_419, "sizeof($1)", 10); STRING_LITERAL(T839829468_420, "if ($1) #nimGCunref($1);$n", 26); STRING_LITERAL(T839829468_421, "($1) #newObjRC1($2, 
$3)", 23); STRING_LITERAL(T839829468_422, "($1) #newObj($2, $3)", 20); STRING_LITERAL(T839829468_423, "$1->finalizer = (void*)$2;$n", 28); STRING_LITERAL(T839829468_424, "($1) #newObj($2, sizeof($3))", 28); STRING_LITERAL(T839829468_425, "($1) #newSeqRC1($2, $3)", 23); STRING_LITERAL(T839829468_426, "($1) #newSeq($2, $3)", 20); STRING_LITERAL(T839829468_427, "($1)#nimNewSeqOfCap($2, $3)", 27); STRING_LITERAL(T839829468_428, "((NI)sizeof($1))", 16); STRING_LITERAL(T839829468_429, "(*($1*) ($2))", 13); STRING_LITERAL(T839829468_430, "(($1) ($2))", 11); STRING_LITERAL(T839829468_431, "($1Len0-1)", 10); STRING_LITERAL(T839829468_432, "$1Len0", 6); STRING_LITERAL(T839829468_433, "($1 ? (strlen($1)-1) : -1)", 26); STRING_LITERAL(T839829468_434, "($1 ? strlen($1) : 0)", 21); STRING_LITERAL(T839829468_435, "($1 ? ($1->Sup.len-1) : -1)", 27); STRING_LITERAL(T839829468_436, "($1 ? $1->Sup.len : 0)", 22); STRING_LITERAL(T839829468_437, "($1 ? ($1->len-1) : -1)", 23); STRING_LITERAL(T839829468_438, "($1 ? 
$1->len : 0)", 18); STRING_LITERAL(T839829468_439, "genArrayLen()", 13); STRING_LITERAL(T839829468_440, "($1->Sup.len)", 13); STRING_LITERAL(T839829468_441, "$1->len", 7); STRING_LITERAL(T839829468_442, "unaryStmt", 9); STRING_LITERAL(T839829468_443, "#nimGCref($1);$n", 16); STRING_LITERAL(T839829468_444, "#nimGCunref($1);$n", 18); STRING_LITERAL(T839829468_445, "$1 = #setLengthStr($1, $2);$n", 29); STRING_LITERAL(T839829468_446, "$1 = ($3) #setLengthSeq(&($1)->Sup, sizeof($4), $2);$n", 54); STRING_LITERAL(T839829468_447, "$1 = ($3) #setLengthSeq($1, sizeof($4), $2);$n", 46); STRING_LITERAL(T839829468_448, "($1- $2)", 8); STRING_LITERAL(T839829468_449, "$1 |= ((", 8); STRING_LITERAL(T839829468_450, ")1)<<(($2)%(sizeof(", 19); STRING_LITERAL(T839829468_451, ")*8));$n", 8); STRING_LITERAL(T839829468_452, "$1 &= ~(((", 10); STRING_LITERAL(T839829468_453, ")1) << (($2) % (sizeof(", 23); STRING_LITERAL(T839829468_454, ")*8)));$n", 9); STRING_LITERAL(T839829468_455, "#countBits32($1)", 16); STRING_LITERAL(T839829468_456, "#countBits64($1)", 16); STRING_LITERAL(T839829468_457, "(($1 & ~ $2 ==0)&&($1 != $2))", 29); STRING_LITERAL(T839829468_458, "(($1 & ~ $2)==0)", 16); STRING_LITERAL(T839829468_459, "($1 & $2)", 9); STRING_LITERAL(T839829468_460, "($1 | $2)", 9); STRING_LITERAL(T839829468_461, "($1 & ~ $2)", 11); STRING_LITERAL(T839829468_462, "($1 ^ $2)", 9); STRING_LITERAL(T839829468_463, "fewCmps", 7); STRING_LITERAL(T839829468_464, "$1 >= $2 && $1 <= $3", 20); STRING_LITERAL(T839829468_465, "$1 == $2", 8); STRING_LITERAL(T839829468_466, " || ", 4); STRING_LITERAL(T839829468_467, "(($1 &(1U<<((NU)($2)&7U)))!=0)", 30); STRING_LITERAL(T839829468_468, "(($1 &(1U<<((NU)($2)&15U)))!=0)", 31); STRING_LITERAL(T839829468_469, "(($1 &(1U<<((NU)($2)&31U)))!=0)", 31); STRING_LITERAL(T839829468_470, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36); STRING_LITERAL(T839829468_471, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43); STRING_LITERAL(T839829468_472, "genSetOp()", 10); 
STRING_LITERAL(T839829468_473, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34); STRING_LITERAL(T839829468_474, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36); STRING_LITERAL(T839829468_475, "#cardSet($1, ", 13); STRING_LITERAL(T839829468_476, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == " "0);$n if (!$3) break;}$n", 88); STRING_LITERAL(T839829468_477, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == " "0);$n if (!$3) break;}$nif ($3) $3 = (memcmp($4, $5, $2) != 0);" "$n", 129); STRING_LITERAL(T839829468_478, "|", 1); STRING_LITERAL(T839829468_479, "& ~", 3); STRING_LITERAL(T839829468_480, "^", 1); NIM_CONST TY556428 lookupopr_556426_839829468 = {((NimStringDesc*) &T839829468_476), ((NimStringDesc*) &T839829468_477), ((NimStringDesc*) &T839829468_52), ((NimStringDesc*) &T839829468_478), ((NimStringDesc*) &T839829468_479), ((NimStringDesc*) &T839829468_480)} ; STRING_LITERAL(T839829468_481, "(memcmp($1, $2, ", 16); STRING_LITERAL(T839829468_482, ")==0)", 5); STRING_LITERAL(T839829468_483, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60); STRING_LITERAL(T839829468_484, "genSetOp", 8); STRING_LITERAL(T839829468_485, "$1->data", 8); STRING_LITERAL(T839829468_486, "($1)+($2), ($3)-($2)+1", 22); STRING_LITERAL(T839829468_487, "(*$1)->data+($2), ($3)-($2)+1", 29); STRING_LITERAL(T839829468_488, "$1->data+($2), ($3)-($2)+1", 26); STRING_LITERAL(T839829468_489, "openArrayLoc: ", 14); STRING_LITERAL(T839829468_490, "", 0); STRING_LITERAL(T839829468_491, "(*$1)->data, (*$1)->$2", 22); STRING_LITERAL(T839829468_492, "$1.ClPrc($3$1.ClEnv)", 20); STRING_LITERAL(T839829468_493, "$1.ClEnv? 
$1.ClPrc($3$1.ClEnv):(($4)($1.ClPrc))($2)", 51); STRING_LITERAL(T839829468_494, "$1 = 0;$n", 9); STRING_LITERAL(T839829468_495, "#chckNil((void*)$1);$n", 22); STRING_LITERAL(T839829468_496, "#genericReset((void*)$1, $2);$n", 31); STRING_LITERAL(T839829468_497, ";$n", 3); STRING_LITERAL(T839829468_499, "compiler/ccgcalls.nim", 21); NIM_CONST TY203018 T839829468_498 = {((NimStringDesc*) &T839829468_499), ((NI) 423)} ; static NIM_CONST char136Set T839829468_500 = { 0x00, 0x00, 0x00, 0x00, 0x88, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} ; STRING_LITERAL(T839829468_501, "wrong argument count", 20); STRING_LITERAL(T839829468_502, "call expression expected for C++ pattern", 40); NIM_CONST TY203018 T839829468_503 = {((NimStringDesc*) &T839829468_499), ((NI) 328)} ; STRING_LITERAL(T839829468_504, "->", 2); STRING_LITERAL(T839829468_505, ");$n", 4); STRING_LITERAL(T839829468_506, "[", 1); NIM_CONST TY203018 T839829468_507 = {((NimStringDesc*) &T839829468_499), ((NI) 472)} ; STRING_LITERAL(T839829468_508, "varargs for objective C method?", 31); STRING_LITERAL(T839829468_509, "Result: ", 8); STRING_LITERAL(T839829468_510, "];$n", 4); STRING_LITERAL(T839829468_511, "]", 1); NIM_CONST TY203018 T839829468_512 = {((NimStringDesc*) &T839829468_265), ((NI) 925)} ; STRING_LITERAL(T839829468_513, "<stdio.h>", 9); STRING_LITERAL(T839829468_514, ", \"nil\"", 7); STRING_LITERAL(T839829468_515, ", $1? 
($1)->data:\"nil\"", 22); STRING_LITERAL(T839829468_516, "printf($1$2);$n", 15); STRING_LITERAL(T839829468_517, "%s", 2); STRING_LITERAL(T839829468_518, "fflush(stdout);$n", 17); STRING_LITERAL(T839829468_519, "#genericDeepCopy((void*)$1, (void*)$2, $3);$n", 45); STRING_LITERAL(T839829468_520, "#genericSeqDeepCopy($1, $2, $3);$n", 34); STRING_LITERAL(T839829468_521, "#genericDeepCopyOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 62); STRING_LITERAL(T839829468_522, "genDeepCopy: ", 13); STRING_LITERAL(T839829468_523, "genMagicExpr: ", 14); STRING_LITERAL(T839829468_524, "static NIM_CONST $1 $2 = $3;$n", 30); STRING_LITERAL(T839829468_525, "memset($1, 0, sizeof($1));$n", 28); STRING_LITERAL(T839829468_526, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1" ")&7U));$n", 72); STRING_LITERAL(T839829468_527, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40); STRING_LITERAL(T839829468_528, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=((", 39); STRING_LITERAL(T839829468_529, ")(1)<<(($1)%(sizeof(", 20); STRING_LITERAL(T839829468_530, "$1 |=((", 7); STRING_LITERAL(T839829468_531, ")(1)<<(($2)%(sizeof(", 20); STRING_LITERAL(T839829468_532, "genCheckedRecordField", 21); STRING_LITERAL(T839829468_533, "genObjConstr", 12); STRING_LITERAL(T839829468_534, "if ($1) #raiseFieldError(((#NimStringDesc*) &$2));$n", 52); STRING_LITERAL(T839829468_535, "if (!($1)) #raiseFieldError(((#NimStringDesc*) &$2));$n", 55); STRING_LITERAL(T839829468_536, "LOC$1.source", 12); STRING_LITERAL(T839829468_537, "union { $1 source; $2 dest; } LOC$3;$n", 38); STRING_LITERAL(T839829468_538, "LOC$#.dest", 10); STRING_LITERAL(T839829468_539, "if ((NU)($1) > (NU)($2)) #raiseIndexError();$n", 46); STRING_LITERAL(T839829468_540, "if ($1 < $2 || $1 > $3) #raiseIndexError();$n", 45); STRING_LITERAL(T839829468_541, "$1[($2)- $3]", 12); STRING_LITERAL(T839829468_542, "if ((NU)($1) >= (NU)($2Len0)) #raiseIndexError();$n", 51); STRING_LITERAL(T839829468_543, "if ((NU)($1) > (NU)($2->$3)) 
#raiseIndexError();$n", 50); STRING_LITERAL(T839829468_544, "if ((NU)($1) >= (NU)($2->$3)) #raiseIndexError();$n", 51); STRING_LITERAL(T839829468_545, "genTupleElem", 12); STRING_LITERAL(T839829468_546, ".Field$1", 8); STRING_LITERAL(T839829468_547, "expr(nkBracketExpr, ", 20); STRING_LITERAL(T839829468_548, "genDeref ", 9); STRING_LITERAL(T839829468_549, "genRecordFieldAux", 17); STRING_LITERAL(T839829468_550, "genRecordField 3", 16); STRING_LITERAL(T839829468_551, ".$1", 3); STRING_LITERAL(T839829468_552, "} $1: ;$n", 9); STRING_LITERAL(T839829468_553, "FR.len-=$1;$n", 13); STRING_LITERAL(T839829468_554, "FR.len+=$1;$n", 13); STRING_LITERAL(T839829468_555, "if (!$1) goto $2;$n", 19); STRING_LITERAL(T839829468_556, "goto $1;$n", 10); STRING_LITERAL(T839829468_557, "genIf()", 7); STRING_LITERAL(T839829468_558, "->Sup", 5); STRING_LITERAL(T839829468_559, "$1 = &$2;$n", 11); STRING_LITERAL(T839829468_560, "if ($1) #chckObj($2.m_type, $3);$n", 34); STRING_LITERAL(T839829468_561, "#chckObj($1.m_type, $2);$n", 26); STRING_LITERAL(T839829468_562, "(($1)#$5($2, $3, $4))", 21); STRING_LITERAL(T839829468_563, "chckRangeF", 10); STRING_LITERAL(T839829468_564, "chckRange64", 11); STRING_LITERAL(T839829468_565, "chckRange", 9); STRING_LITERAL(T839829468_566, "CNSTCLOSURE", 11); STRING_LITERAL(T839829468_567, "closure to closure created", 26); STRING_LITERAL(T839829468_568, "$1.ClPrc = $2; $1.ClEnv = $3;$n", 31); STRING_LITERAL(T839829468_569, "while (1) {$n", 13); STRING_LITERAL(T839829468_570, "case statement must be exhaustive for computed goto", 51); STRING_LITERAL(T839829468_571, "case statement has too many cases for computed goto", 51); STRING_LITERAL(T839829468_572, "case statement has to start at 0 for computed goto", 50); STRING_LITERAL(T839829468_573, "no case statement found for computed goto", 41); STRING_LITERAL(T839829468_574, "TMP$1", 5); STRING_LITERAL(T839829468_575, "static void* $#[$#] = {", 23); STRING_LITERAL(T839829468_576, "&&TMP$#, ", 9); 
STRING_LITERAL(T839829468_577, "&&TMP$#};$n", 11); STRING_LITERAL(T839829468_578, "goto *$#[$#];$n", 15); STRING_LITERAL(T839829468_579, "range notation not available for computed goto", 46); STRING_LITERAL(T839829468_580, "TMP$#:$n", 8); STRING_LITERAL(T839829468_581, "#nimProfile();$n", 16); STRING_LITERAL(T839829468_582, "\'goto\' target must be a literal value", 37); STRING_LITERAL(T839829468_583, "goto NIMSTATE_$#;$n", 19); STRING_LITERAL(T839829468_584, "$1 = ($2*) #nimGetProcAddr($3, $4);$n", 37); STRING_LITERAL(T839829468_585, "$2* $1;$n", 9); STRING_LITERAL(T839829468_586, "#dbgRegisterGlobal($1, &$2, $3);$n", 34); STRING_LITERAL(T839829468_587, "#nimGCvisit((void*)$1, 0);$n", 28); STRING_LITERAL(T839829468_588, "N_NIMCALL(void, $1)(void)", 25); STRING_LITERAL(T839829468_589, "#nimRegisterGlobalMarker($1);$n", 31); STRING_LITERAL(T839829468_590, "$#($#);$n", 9); STRING_LITERAL(T839829468_591, "$# = $#;$n", 10); STRING_LITERAL(T839829468_592, "genVarTuple", 11); STRING_LITERAL(T839829468_593, "genConstStmt", 12); STRING_LITERAL(T839829468_594, "for statement not eliminated", 28); STRING_LITERAL(T839829468_595, "if (#eqStrings($1, $2)) goto $3;$n", 34); STRING_LITERAL(T839829468_596, "switch (#hashString($1) & $2) {$n", 33); STRING_LITERAL(T839829468_597, "case $1: $n$2break;$n", 21); STRING_LITERAL(T839829468_598, "goto LA$1;$n", 12); STRING_LITERAL(T839829468_599, "LA$1: ;$n", 9); STRING_LITERAL(T839829468_600, "if ($1 >= $2 && $1 <= $3) goto $4;$n", 36); STRING_LITERAL(T839829468_601, "if ($1 == $2) goto $3;$n", 24); STRING_LITERAL(T839829468_602, "NIMSTATE_$#:$n", 14); STRING_LITERAL(T839829468_603, "switch ($1) {$n", 15); STRING_LITERAL(T839829468_604, "default: __assume(0);$n", 23); STRING_LITERAL(T839829468_605, "#popSafePoint();$n", 18); STRING_LITERAL(T839829468_606, "#popCurrentException();$n", 25); STRING_LITERAL(T839829468_607, "if ($1.status != 0) #popCurrentException();$n", 45); STRING_LITERAL(T839829468_608, "goto BeforeRet;$n", 17); 
STRING_LITERAL(T839829468_609, "no loop to break", 16); STRING_LITERAL(T839829468_610, "extern $1", 9); STRING_LITERAL(T839829468_611, "#FieldDiscriminantCheck((NI)(NU)($1), (NI)(NU)($2), $3, $4);$n", 62); STRING_LITERAL(T839829468_612, "genAsmOrEmitStmt()", 18); STRING_LITERAL(T839829468_613, "\"", 1); STRING_LITERAL(T839829468_614, "\\n\"\012", 4); STRING_LITERAL(T839829468_615, "Exception", 9); STRING_LITERAL(T839829468_616, "E_Base", 6); STRING_LITERAL(T839829468_617, "try {$n", 7); STRING_LITERAL(T839829468_618, "} catch (NimException& $1) {$n", 30); STRING_LITERAL(T839829468_619, "#setFrame((TFrame*)&FR);$n", 26); STRING_LITERAL(T839829468_620, "else ", 5); STRING_LITERAL(T839829468_621, "#isObj($1.exp->m_type, $2)", 26); STRING_LITERAL(T839829468_622, "if ($1) ", 8); STRING_LITERAL(T839829468_623, "throw;$n", 8); STRING_LITERAL(T839829468_624, "<setjmp.h>", 10); STRING_LITERAL(T839829468_625, "#TSafePoint $1;$n", 17); STRING_LITERAL(T839829468_626, "#pushSafePoint(&$1);$n", 22); STRING_LITERAL(T839829468_627, "nimStdSetjmp", 12); STRING_LITERAL(T839829468_628, "$1.status = setjmp($1.context);$n", 33); STRING_LITERAL(T839829468_629, "nimSigSetjmp", 12); STRING_LITERAL(T839829468_630, "$1.status = sigsetjmp($1.context, 0);$n", 39); STRING_LITERAL(T839829468_631, "nimRawSetjmp", 12); STRING_LITERAL(T839829468_632, "$1.status = _setjmp($1.context);$n", 34); STRING_LITERAL(T839829468_633, "if ($1.status == 0) {$n", 23); STRING_LITERAL(T839829468_634, "else {$n", 8); STRING_LITERAL(T839829468_635, "else", 4); STRING_LITERAL(T839829468_636, "$1.status = 0;$n", 16); STRING_LITERAL(T839829468_637, "#isObj(#getCurrentException()->Sup.m_type, $1)", 46); STRING_LITERAL(T839829468_638, "#isObj(#getCurrentException()->m_type, $1)", 42); STRING_LITERAL(T839829468_639, "if ($1) {$n", 11); STRING_LITERAL(T839829468_640, "if ($1.status != 0) #reraiseException();$n", 42); STRING_LITERAL(T839829468_641, "#raiseException((#Exception*)$1, $2);$n", 39); 
STRING_LITERAL(T839829468_642, "#reraiseException();$n", 22); STRING_LITERAL(T839829468_643, "/*TYPESECTION*/", 15); STRING_LITERAL(T839829468_644, "/*VARSECTION*/", 14); STRING_LITERAL(T839829468_645, "/*INCLUDESECTION*/", 18); STRING_LITERAL(T839829468_646, "bp", 2); STRING_LITERAL(T839829468_647, "#dbgRegisterBreakpoint($1, (NCSTRING)$2, (NCSTRING)$3);$n", 57); STRING_LITERAL(T839829468_648, "#dbgRegisterWatchpoint($1, (NCSTRING)$2, $3);$n", 47); STRING_LITERAL(T839829468_649, "#pragma omp parallel for $4$nfor ($1 = $2; $1 <= $3; ++$1)", 58); STRING_LITERAL(T839829468_651, "compiler/ccgstmts.nim", 21); NIM_CONST TY203018 T839829468_650 = {((NimStringDesc*) &T839829468_651), ((NI) 145)} ; STRING_LITERAL(T839829468_652, "STATE$1: ;$n", 12); STRING_LITERAL(T839829468_653, "case -1: goto BeforeRet;$n", 26); STRING_LITERAL(T839829468_654, "case $1: goto STATE$1;$n", 24); STRING_LITERAL(T839829468_655, "if (((NI*) $1)[0] < 0) break;$n", 31); STRING_LITERAL(T839829468_656, "if ((((NI*) $1.ClEnv)[0]) < 0) break;$n", 39); STRING_LITERAL(T839829468_657, "); unknown node kind", 20); NIM_CONST TY203018 T839829468_658 = {((NimStringDesc*) &T839829468_651), ((NI) 1122)} ; STRING_LITERAL(T839829468_659, "Init000", 7); STRING_LITERAL(T839829468_660, "DatInit000", 10); STRING_LITERAL(T839829468_661, "NIM_EXTERNC N_NOINLINE(void, $1)(void);$N", 41); STRING_LITERAL(T839829468_662, "\011$1();$N", 8); STRING_LITERAL(T839829468_663, "N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDECL(void, NimMa" "in)(void) {$N\011void (*volatile inner)();$N\011PreMain();$N\011inner = N" "imMainInner;$N$2\011(*inner)();$N}$N$N", 162); STRING_LITERAL(T839829468_664, "N_STDCALL(int, WinMain)(HINSTANCE hCurInstance, $N " " HINSTANCE hPrevInstance, $N LP" "STR lpCmdLine, int nCmdShow) {$N\011NimMain();$N\011return nim_program" "_result;$N}$N$N", 206); STRING_LITERAL(T839829468_665, "N_LIB_EXPORT N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDEC" "L(void, NimMain)(void) {$N\011void (*volatile 
inner)();$N\011PreMain()" ";$N\011inner = NimMainInner;$N$2\011(*inner)();$N}$N$N", 175); STRING_LITERAL(T839829468_666, "BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fwdreason, $N " " LPVOID lpvReserved) {$N\011if(fwdreason == DLL_PROC" "ESS_ATTACH) {$N\011NimMain();$N}$N\011return 1;$N}$N$N", 175); STRING_LITERAL(T839829468_667, "<windows.h>", 11); STRING_LITERAL(T839829468_668, "void NIM_POSIX_INIT NimMainInit(void) {$N\011NimMain();$N}$N$N", 59); STRING_LITERAL(T839829468_669, "int cmdCount;$Nchar** cmdLine;$Nchar** gEnv;$NN_CDECL(void, Nim" "MainInner)(void) {$N$1}$N$NN_CDECL(void, NimMain)(void) {$N\011void" " (*volatile inner)();$N\011PreMain();$N\011inner = NimMainInner;$N$2\011(" "*inner)();$N}$N$N", 208); STRING_LITERAL(T839829468_670, "int main(void) {$N\011NimMain();$N\011return 0;$N}$N$N", 48); STRING_LITERAL(T839829468_671, "int main(int argc, char** args, char** env) {$N\011cmdLine = args;" "$N\011cmdCount = argc;$N\011gEnv = env;$N\011NimMain();$N\011return nim_prog" "ram_result;$N}$N$N", 145); STRING_LITERAL(T839829468_672, "dbgRegisterBreakpoint", 21); STRING_LITERAL(T839829468_673, "dbgRegisterFilename", 19); STRING_LITERAL(T839829468_674, "dbgRegisterFilename($1);$N", 26); STRING_LITERAL(T839829468_675, "\011#initStackBottomWith((void *)&inner);$N", 40); STRING_LITERAL(T839829468_676, "void PreMainInner() {$N\011systemInit000();$N$1$2$3}$N$Nvoid PreMa" "in() {$N\011void (*volatile inner)();$N\011systemDatInit000();$N\011inner" " = PreMainInner;$N$4$5\011(*inner)();$N}$N$N", 168); STRING_LITERAL(T839829468_677, "\011#initThreadVarsEmulation();$N", 30); STRING_LITERAL(T839829468_678, "still forwarded: ", 17); STRING_LITERAL(T839829468_679, "NIM_EXTERNC N_NOINLINE(void, $1)(void) {$N", 42); STRING_LITERAL(T839829468_680, "static #TNimNode $1[$2];$n", 26); STRING_LITERAL(T839829468_681, "static #TNimType $1[$2];$n", 26); STRING_LITERAL(T839829468_682, "\011TFrame FR; FR.len = 0;$N", 25); STRING_LITERAL(T839829468_683, "}$N$N", 5); 
STRING_LITERAL(T839829468_684, "N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 46); STRING_LITERAL(T839829468_685, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N", 131); STRING_LITERAL(T839829468_686, "0.15.0", 6); STRING_LITERAL(T839829468_687, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n" " $5 */$N", 201); extern NIM_CONST TY176082 Os_176068_4151366050; extern NIM_CONST TY176510 Cpu_176496_4151366050; STRING_LITERAL(T839829468_688, "#define NIM_INTBITS $1", 22); STRING_LITERAL(T839829468_689, "typedef struct {$1} NimThreadVars;$n", 36); STRING_LITERAL(T839829468_690, "#include \"nimbase.h\"", 20); STRING_LITERAL(T839829468_691, "#include \"$1\"$N", 15); STRING_LITERAL(T839829468_692, "#include $1$N", 13); STRING_LITERAL(T839829468_693, "extern \"C\"", 10); STRING_LITERAL(T839829468_694, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61); STRING_LITERAL(T839829468_695, "__$1__", 6); STRING_LITERAL(T839829468_696, "#ifndef $1$n#define $1$n", 24); STRING_LITERAL(T839829468_697, "N_CDECL(void, NimMain)(void);$n", 31); STRING_LITERAL(T839829468_698, "#endif /* $1 */$n", 17); Tcgen529027* generatedheader_532201_839829468; extern TNimType NTI529015; /* BModule */ Ropeobj178006* indent_532655_839829468; extern TNimType NTI178004; /* Rope */ extern Gcheap49818 gch_49858_1689653243; Ropeobj178006* nimtv_538656_839829468; Ttypeseq292836* nimtvdeps_538674_839829468; extern TNimType NTI292836; /* TTypeSeq */ Intset268030 nimtvdeclared_538675_839829468; extern TNimType NTI268030; /* IntSet */ NI breakpointid_548860_839829468; Ropeobj178006* gbreakpoints_548861_839829468; extern TY529153* gmodules_529170_3723162438; extern TNimType NTI529027; /* TCGen */ extern Debuginfo203009 gdebuginfo_203470_1926258066; extern 
Toption169009Set goptions_169128_2607990831; extern TNimType NTI292804; /* TSymSeq */ extern Tglobaloption169013Set gglobaloptions_169130_2607990831; extern NimStringDesc* headerfile_169138_2607990831; extern NimStringDesc* gprojectfull_169211_2607990831; extern Tcommands169076 gcmd_169132_2607990831; extern NI gerrorcounter_192069_155036129; extern Ropeobj178006* rnl_178903_2381377266; extern NI gforwardedprocscounter_529171_3723162438; extern TNimType NTI292244; /* TTypeKind */ extern TNimType NTI203017; /* seq[(string, int)] */ extern Tsystemcc273002 ccompiler_273431_2528170400; extern NimStringDesc* tnl_176644_4151366050; extern NI floatsize_176642_4151366050; extern Tgcmode169080 gselectedgc_169133_2607990831; extern TNimType NTI292020; /* TNodeKind */ extern TNimType NTI135002; /* seq[string] */ extern TNimType NTI292435; /* TSymKind */ extern TNimType NTI292816; /* TLoc */ extern NI intsize_176641_4151366050; extern TNimType NTI292524; /* TMagic */ extern TNimType NTI191350; /* seq[Rope] */ extern TNimType NTI292796; /* TNodeSeq */ extern Ropeobj178006* mainmodprocs_529148_3723162438; extern Ropeobj178006* maindatinit_529151_3723162438; extern Ropeobj178006* mainmodinit_529149_3723162438; extern Ropeobj178006* othermodsinit_529150_3723162438; extern Tsystemos176004 targetos_176629_4151366050; extern TY191612* fileinfos_191629_155036129; extern Tsystemcpu176452 targetcpu_176627_4151366050; extern Ropeobj178006* gmapping_529152_3723162438; N_NIMCALL(void, T839829468_2)(void) { nimGCvisit((void*)generatedheader_532201_839829468, 0); } N_NIMCALL(void, T839829468_3)(void) { nimGCvisit((void*)indent_532655_839829468, 0); } static N_INLINE(Cell47304*, usrtocell_51440_1689653243)(void* usr0) { Cell47304* result0; result0 = (Cell47304*)0; result0 = ((Cell47304*) ((NI)((NU64)(((NI) (usr0))) - (NU64)(((NI)sizeof(Cell47304)))))); return result0; } static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47304* c0) { addzct_51417_1689653243((&gch_49858_1689653243.zct), c0); 
} static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0) { { Cell47304* c0; if (!!((src0 == NIM_NIL))) goto LA3; c0 = usrtocell_51440_1689653243(src0); (*c0).refcount += ((NI) 8); } LA3: ; { Cell47304* c0; if (!!(((*dest0) == NIM_NIL))) goto LA7; c0 = usrtocell_51440_1689653243((*dest0)); { (*c0).refcount -= ((NI) 8); if (!((NU64)((*c0).refcount) < (NU64)(((NI) 8)))) goto LA11; rtladdzct_52601_1689653243(c0); } LA11: ; } LA7: ; (*dest0) = src0; } N_NIMCALL(void, T839829468_5)(void) { nimGCvisit((void*)nimtv_538656_839829468, 0); } N_NIMCALL(void, T839829468_6)(void) { nimGCvisit((void*)nimtvdeps_538674_839829468, 0); } static N_INLINE(void, nimGCunrefNoCycle)(void* p0) { Cell47304* c0; c0 = usrtocell_51440_1689653243(p0); { (*c0).refcount -= ((NI) 8); if (!((NU64)((*c0).refcount) < (NU64)(((NI) 8)))) goto LA3; rtladdzct_52601_1689653243(c0); } LA3: ; } N_NIMCALL(void, T839829468_7)(void) { nimGCvisit((void*)nimtvdeclared_538675_839829468.head, 0); nimGCvisit((void*)nimtvdeclared_538675_839829468.data, 0); } N_NIMCALL(void, T839829468_8)(void) { nimGCvisit((void*)gbreakpoints_548861_839829468, 0); } N_NIMCALL(Tcgen529027*, getcgenmodule_532226_839829468)(Tsym292834* s0) { Tcgen529027* result0; result0 = (Tcgen529027*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (((NI) 0) <= (*s0).position); if (!(LOC3)) goto LA4; LOC3 = ((*s0).position < (gmodules_529170_3723162438 ? 
gmodules_529170_3723162438->Sup.len : 0)); LA4: ; if (!LOC3) goto LA5; result0 = gmodules_529170_3723162438->data[(*s0).position]; } goto LA1; LA5: ; { result0 = NIM_NIL; } LA1: ; return result0; } static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0) { void* LOC1; LOC1 = (void*)0; LOC1 = memcpy(dest0, source0, ((size_t) (size0))); } static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0) { copymem_7485_1689653243(((void*) ((&(*dest0).data[((*dest0).Sup.len)- 0]))), ((void*) ((*src0).data)), ((NI) ((NI)((*src0).Sup.len + ((NI) 1))))); (*dest0).Sup.len += (*src0).Sup.len; } N_NIMCALL(NU32, hashowner_532977_839829468)(Tsym292834* s0) { NU32 result0; Tsym292834* m0; Tsym292834* p0; result0 = (NU32)0; m0 = s0; { while (1) { if (!!(((*m0).kind == ((Tsymkind292435) 6)))) goto LA2; m0 = (*m0).owner; } LA2: ; } p0 = (*m0).owner; result0 = register_203121_1926258066((&gdebuginfo_203470_1926258066), (*(*p0).name).s, (*(*m0).name).s); return result0; } static N_INLINE(void, incref_53419_1689653243)(Cell47304* c0) { (*c0).refcount = (NI)((NU64)((*c0).refcount) + (NU64)(((NI) 8))); } static N_INLINE(void, decref_53001_1689653243)(Cell47304* c0) { { (*c0).refcount -= ((NI) 8); if (!((NU64)((*c0).refcount) < (NU64)(((NI) 8)))) goto LA3; rtladdzct_52601_1689653243(c0); } LA3: ; } static N_INLINE(void, asgnRef)(void** dest0, void* src0) { { Cell47304* LOC5; if (!!((src0 == NIM_NIL))) goto LA3; LOC5 = (Cell47304*)0; LOC5 = usrtocell_51440_1689653243(src0); incref_53419_1689653243(LOC5); } LA3: ; { Cell47304* LOC10; if (!!(((*dest0) == NIM_NIL))) goto LA8; LOC10 = (Cell47304*)0; LOC10 = usrtocell_51440_1689653243((*dest0)); decref_53001_1689653243(LOC10); } LA8: ; (*dest0) = src0; } N_NIMCALL(Toption169009Set, initprocoptions_562635_839829468)(Tcgen529027* m0) { Toption169009Set result0; memset((void*)(&result0), 0, sizeof(result0)); { if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA3; result0 = 
(goptions_169128_2607990831 & ~ 32768); } goto LA1; LA3: ; { result0 = goptions_169128_2607990831; } LA1: ; return result0; } N_NIMCALL(Tcproc529021*, newpreinitproc_562625_839829468)(Tcgen529027* m0) { Tcproc529021* result0; result0 = (Tcproc529021*)0; result0 = newproc_529206_3723162438(NIM_NIL, m0); (*result0).labels = ((NI) 100000); return result0; } N_NIMCALL(Tcproc529021*, newpostinitproc_562630_839829468)(Tcgen529027* m0) { Tcproc529021* result0; result0 = (Tcproc529021*)0; result0 = newproc_529206_3723162438(NIM_NIL, m0); (*result0).labels = ((NI) 200000); return result0; } N_NIMCALL(Ropeobj178006*, gettempname_533598_839829468)(Tcgen529027* m0) { Ropeobj178006* result0; Ropeobj178006* LOC1; result0 = (Ropeobj178006*)0; LOC1 = (Ropeobj178006*)0; LOC1 = rope_178401_2381377266(((NI64) ((*m0).labels))); result0 = HEX26_178418_2381377266((*m0).tmpbase, LOC1); (*m0).labels += ((NI) 1); return result0; } N_NIMCALL(Tcgen529027*, rawnewmodule_562663_839829468)(Tsym292834* module0, NimStringDesc* filename0) { Tcgen529027* result0; NimStringDesc* LOC1; NU32 LOC2; NimStringDesc* LOC3; NimStringDesc* LOC4; NimStringDesc* LOC5; result0 = (Tcgen529027*)0; result0 = (Tcgen529027*) newObj((&NTI529015), sizeof(Tcgen529027)); (*result0).Sup.Sup.m_type = (&NTI529027); LOC1 = (NimStringDesc*)0; LOC2 = (NU32)0; LOC2 = hashowner_532977_839829468(module0); LOC3 = (NimStringDesc*)0; LOC3 = HEX24_8401_1689653243(((NU64) (LOC2))); LOC1 = rawNewString(LOC3->Sup.len + 2); appendString(LOC1, ((NimStringDesc*) &T839829468_11)); appendString(LOC1, LOC3); appendString(LOC1, ((NimStringDesc*) &T839829468_12)); asgnRefNoCycle((void**) (&(*result0).tmpbase), rope_178277_2381377266(LOC1)); initlinkedlist_147031_3771138726((&(*result0).headerfiles)); initintset_268885_2627731572((&(*result0).declaredthings)); initintset_268885_2627731572((&(*result0).declaredprotos)); LOC4 = (NimStringDesc*)0; LOC4 = (*result0).cfilename; (*result0).cfilename = copyStringRC1(filename0); if (LOC4) 
nimGCunrefNoCycle(LOC4); LOC5 = (NimStringDesc*)0; LOC5 = (*result0).filename; (*result0).filename = copyStringRC1(filename0); if (LOC5) nimGCunrefNoCycle(LOC5); initidtable_296019_850551059((&(*result0).typecache)); initidtable_296019_850551059((&(*result0).forwtypecache)); asgnRefNoCycle((void**) (&(*result0).module), module0); initintset_268885_2627731572((&(*result0).typeinfomarker)); asgnRef((void**) (&(*result0).initproc), newproc_529206_3723162438(NIM_NIL, result0)); (*(*result0).initproc).options = initprocoptions_562635_839829468(result0); asgnRef((void**) (&(*result0).preinitproc), newpreinitproc_562625_839829468(result0)); asgnRef((void**) (&(*result0).postinitproc), newpostinitproc_562630_839829468(result0)); initnodetable_296085_850551059((&(*result0).datacache)); if ((*result0).typestack) nimGCunrefNoCycle((*result0).typestack); (*result0).typestack = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0); if ((*result0).forwardedprocs) nimGCunrefNoCycle((*result0).forwardedprocs); (*result0).forwardedprocs = (Tsymseq292804*) newSeqRC1((&NTI292804), 0); asgnRefNoCycle((void**) (&(*result0).typenodesname), gettempname_533598_839829468(result0)); asgnRefNoCycle((void**) (&(*result0).nimtypesname), gettempname_533598_839829468(result0)); { if (!(((*module0).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA8; (*result0).flags |= ((NU8)1)<<((((Codegenflag529025) 0))%(sizeof(NU8)*8)); (*(*result0).preinitproc).options &= ~(((NU32)1) << ((((Toption169009) 15)) % (sizeof(NU32)*8))); (*(*result0).postinitproc).options &= ~(((NU32)1) << ((((Toption169009) 15)) % (sizeof(NU32)*8))); } LA8: ; return result0; } N_NIMCALL(Tcgen529027*, rawnewmodule_563038_839829468)(Tsym292834* module0) { Tcgen529027* result0; NimStringDesc* LOC1; result0 = (Tcgen529027*)0; LOC1 = (NimStringDesc*)0; LOC1 = tofullpath_192261_155036129(((NI32) ((*module0).position))); result0 = rawnewmodule_562663_839829468(module0, LOC1); return result0; } N_NIMCALL(Tcgen529027*, 
newmodule_563044_839829468)(Tsym292834* module0) { Tcgen529027* result0; result0 = (Tcgen529027*)0; { Tcgen529027* LOC3; NimStringDesc* LOC6; LOC3 = (Tcgen529027*)0; LOC3 = getcgenmodule_532226_839829468(module0); if (!!((LOC3 == NIM_NIL))) goto LA4; LOC6 = (NimStringDesc*)0; LOC6 = HEX24_196185_1689653243(T839829468_9); internalerror_196113_155036129(LOC6); } LA4: ; result0 = rawnewmodule_563038_839829468(module0); { if (!((gmodules_529170_3723162438 ? gmodules_529170_3723162438->Sup.len : 0) <= (*module0).position)) goto LA9; gmodules_529170_3723162438 = (TY529153*) setLengthSeq(&(gmodules_529170_3723162438)->Sup, sizeof(Tcgen529027*), ((NI) ((NI)((*module0).position + ((NI) 1))))); } LA9: ; asgnRef((void**) (&gmodules_529170_3723162438->data[(*module0).position]), result0); { if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0)) goto LA13; { NimStringDesc* LOC19; NimStringDesc* LOC20; if (!(((*module0).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0)) goto LA17; LOC19 = (NimStringDesc*)0; LOC20 = (NimStringDesc*)0; LOC20 = tofilename_192257_155036129(((NI32) ((*module0).position))); LOC19 = rawNewString(LOC20->Sup.len + 28); appendString(LOC19, ((NimStringDesc*) &T839829468_13)); appendString(LOC19, LOC20); internalerror_196113_155036129(LOC19); } LA17: ; } LA13: ; return result0; } N_NIMCALL(Tpasscontext341002*, myopen_563112_839829468)(Tsym292834* module0) { Tpasscontext341002* result0; Tcgen529027* LOC1; result0 = (Tpasscontext341002*)0; LOC1 = (Tcgen529027*)0; LOC1 = newmodule_563044_839829468(module0); result0 = &LOC1->Sup; { NIM_BOOL LOC4; NimStringDesc* f0; NimStringDesc* LOC13; NimStringDesc* LOC14; LOC4 = (NIM_BOOL)0; LOC4 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 27))&63U)))!=0); if (!(LOC4)) goto LA5; LOC4 = (generatedheader_532201_839829468 == NIM_NIL); LA5: ; if (!LOC4) goto LA6; { if (!(((NI) 0) < (headerfile_169138_2607990831 ? 
headerfile_169138_2607990831->Sup.len : 0))) goto LA10; f0 = headerfile_169138_2607990831; } goto LA8; LA10: ; { f0 = gprojectfull_169211_2607990831; } LA8: ; LOC13 = (NimStringDesc*)0; LOC13 = completecfilepath_273854_2528170400(f0, NIM_TRUE); LOC14 = (NimStringDesc*)0; LOC14 = noschangeFileExt(LOC13, ((NimStringDesc*) &T839829468_14)); asgnRef((void**) (&generatedheader_532201_839829468), rawnewmodule_562663_839829468(module0, LOC14)); (*generatedheader_532201_839829468).flags |= ((NU8)1)<<((((Codegenflag529025) 3))%(sizeof(NU8)*8)); } LA6: ; return result0; } N_NIMCALL(NimStringDesc*, getcfile_563201_839829468)(Tcgen529027* m0) { NimStringDesc* result0; NimStringDesc* ext0; NimStringDesc* LOC13; NimStringDesc* LOC14; result0 = (NimStringDesc*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; ext0 = copyString(((NimStringDesc*) &T839829468_15)); } goto LA1; LA5: ; { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = (gcmd_169132_2607990831 == ((Tcommands169076) 3)); if (LOC8) goto LA9; LOC8 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0); LA9: ; if (!LOC8) goto LA10; ext0 = copyString(((NimStringDesc*) &T839829468_16)); } goto LA1; LA10: ; { ext0 = copyString(((NimStringDesc*) &T839829468_17)); } LA1: ; LOC13 = (NimStringDesc*)0; LOC13 = withpackagename_170073_2607990831((*m0).cfilename); LOC14 = (NimStringDesc*)0; LOC14 = completecfilepath_273854_2528170400(LOC13, NIM_TRUE); result0 = noschangeFileExt(LOC14, ext0); return result0; } N_NIMCALL(Tpasscontext341002*, myopencached_563246_839829468)(Tsym292834* module0, Trodreader332021* rd0) { Tpasscontext341002* result0; Tcgen529027* m0; NimStringDesc* LOC1; result0 = (Tpasscontext341002*)0; m0 = newmodule_563044_839829468(module0); LOC1 = (NimStringDesc*)0; LOC1 = getcfile_563201_839829468(m0); 
readmergeinfo_530613_2760143328(LOC1, m0); result0 = &m0->Sup; return result0; } static N_INLINE(NIM_BOOL, skipcodegen_341085_2355241294)(Tnode292802* n0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = (((NI) 0) < gerrorcounter_192069_155036129); return result0; } N_NIMCALL(void, fillloc_532282_839829468)(Tloc292816* a0, Tlockind292808 k0, Ttype292840* typ0, Ropeobj178006* r0, Tstorageloc292812 s0) { { if (!((*a0).k == ((Tlockind292808) 0))) goto LA3; (*a0).k = k0; unsureAsgnRef((void**) (&(*a0).t), typ0); (*a0).s = s0; { if (!((*a0).r == NIM_NIL)) goto LA7; unsureAsgnRef((void**) (&(*a0).r), r0); } LA7: ; } LA3: ; } N_NIMCALL(NIM_BOOL, iskeyword_532960_839829468)(Tident199010* w0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; switch ((*w0).Sup.id) { case ((NI) 200) ... ((NI) 262): case ((NI) 4) ... ((NI) 70): case ((NI) 138): { result0 = NIM_TRUE; goto BeforeRet; } break; default: { result0 = NIM_FALSE; goto BeforeRet; } break; } }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj178006*, manglename_533205_839829468)(Tsym292834* s0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = (*s0).loc.r; { NIM_BOOL keeporigname0; NIM_BOOL LOC5; NIM_BOOL LOC6; NIM_BOOL LOC9; NimStringDesc* LOC10; if (!(result0 == NIM_NIL)) goto LA3; LOC5 = (NIM_BOOL)0; LOC6 = (NIM_BOOL)0; LOC6 = ((2824 &(1U<<((NU)((*s0).kind)&31U)))!=0); if (!(LOC6)) goto LA7; LOC6 = ((IL64(2149580812) & (*s0).flags) == 0); LA7: ; LOC5 = LOC6; if (!(LOC5)) goto LA8; LOC9 = (NIM_BOOL)0; LOC9 = iskeyword_532960_839829468((*s0).name); LOC5 = !(LOC9); LA8: ; keeporigname0 = LOC5; LOC10 = (NimStringDesc*)0; LOC10 = mangle_528847_2036603609((*(*s0).name).s); result0 = rope_178277_2381377266(LOC10); { if (!keeporigname0) goto LA13; add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_18)); } goto LA11; LA13: ; { TY533289 LOC16; Ropeobj178006* LOC17; Ropeobj178006* LOC18; TY533289 LOC19; Ropeobj178006* LOC20; NU32 LOC21; Ropeobj178006* LOC22; memset((void*)LOC16, 0, sizeof(LOC16)); LOC17 = 
(Ropeobj178006*)0; LOC17 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_12), LOC16, 0); add_178482_2381377266(&result0, LOC17); LOC18 = (Ropeobj178006*)0; LOC18 = rope_178401_2381377266(((NI64) ((*s0).Sup.id))); add_178482_2381377266(&result0, LOC18); memset((void*)LOC19, 0, sizeof(LOC19)); LOC20 = (Ropeobj178006*)0; LOC20 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_12), LOC19, 0); add_178482_2381377266(&result0, LOC20); LOC21 = (NU32)0; LOC21 = hashowner_532977_839829468(s0); LOC22 = (Ropeobj178006*)0; LOC22 = rope_178401_2381377266(((NI64) (LOC21))); add_178482_2381377266(&result0, LOC22); } LA11: ; asgnRefNoCycle((void**) (&(*s0).loc.r), result0); } LA3: ; return result0; } N_NIMCALL(void, fillprocloc_539201_839829468)(Tsym292834* sym0) { { Ropeobj178006* LOC5; if (!((*sym0).loc.k == ((Tlockind292808) 0))) goto LA3; LOC5 = (Ropeobj178006*)0; LOC5 = manglename_533205_839829468(sym0); fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 7), (*sym0).typ, LOC5, ((Tstorageloc292812) 2)); } LA3: ; } N_NIMCALL(void, useheader_532369_839829468)(Tcgen529027* m0, Tsym292834* sym0) { { NimStringDesc* LOC5; NIM_BOOL LOC6; if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 6))&15U)))!=0)) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = getstr_297230_850551059((*(*sym0).annex).path); LOC6 = (NIM_BOOL)0; LOC6 = includestr_147249_3771138726((&(*m0).headerfiles), LOC5); } LA3: ; } static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0) { (*dest0).data[((*dest0).Sup.len)- 0] = c0; (*dest0).data[((NI)((*dest0).Sup.len + ((NI) 1)))- 0] = 0; (*dest0).Sup.len += ((NI) 1); } N_NIMCALL(NIM_BOOL, isactivated_561431_839829468)(Tsym292834* prc0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = !(((*prc0).typ == NIM_NIL)); return result0; } N_NIMCALL(void, addforwardedproc_532203_839829468)(Tcgen529027* m0, Tsym292834* prc0) { (*m0).forwardedprocs = (Tsymseq292804*) incrSeqV2(&((*m0).forwardedprocs)->Sup, sizeof(Tsym292834*)); 
asgnRefNoCycle((void**) (&(*m0).forwardedprocs->data[(*m0).forwardedprocs->Sup.len]), prc0); ++(*m0).forwardedprocs->Sup.len; gforwardedprocscounter_529171_3723162438 += ((NI) 1); } N_NIMCALL(void, genclinedir_532725_839829468)(Ropeobj178006** r0, NimStringDesc* filename0, NI line0) { { TY532811 LOC5; NimStringDesc* LOC6; if (!((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 10))&31U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC6 = (NimStringDesc*)0; LOC6 = makesinglelinecstring_528835_2036603609(filename0); LOC5[0] = rope_178277_2381377266(LOC6); LOC5[1] = rope_178401_2381377266(((NI64) (line0))); addf_179205_2381377266(r0, ((NimStringDesc*) &T839829468_21), LOC5, 2); } LA3: ; } static N_INLINE(NI, tolinenumber_192415_155036129)(Tlineinfo191336 info0) { NI result0; result0 = (NI)0; result0 = ((NI) (info0.line)); return result0; } N_NIMCALL(NI, safelinenm_532721_839829468)(Tlineinfo191336 info0) { NI result0; result0 = (NI)0; result0 = tolinenumber_192415_155036129(info0); { if (!(result0 < ((NI) 0))) goto LA3; result0 = ((NI) 0); } LA3: ; return result0; } N_NIMCALL(void, genclinedir_532813_839829468)(Ropeobj178006** r0, Tlineinfo191336 info0) { NimStringDesc* LOC1; NI LOC2; LOC1 = (NimStringDesc*)0; LOC1 = tofullpath_192261_155036129(info0.fileindex); LOC2 = (NI)0; LOC2 = safelinenm_532721_839829468(info0); genclinedir_532725_839829468(r0, LOC1, LOC2); } N_NIMCALL(Tctypekind529007, mapsettype_533389_839829468)(Ttype292840* typ0) { Tctypekind529007 result0; NI64 LOC1; result0 = (Tctypekind529007)0; LOC1 = (NI64)0; LOC1 = getsize_320135_3876443242(typ0); switch (((NI) (LOC1))) { case ((NI) 1): { result0 = ((Tctypekind529007) 4); } break; case ((NI) 2): { result0 = ((Tctypekind529007) 5); } break; case ((NI) 4): { result0 = ((Tctypekind529007) 6); } break; case ((NI) 8): { result0 = ((Tctypekind529007) 7); } break; default: { result0 = ((Tctypekind529007) 17); } break; } return result0; } N_NIMCALL(Tctypekind529007, 
maptype_533394_839829468)(Ttype292840* typ0) { Tctypekind529007 result0; result0 = (Tctypekind529007)0; switch ((*typ0).kind) { case ((Ttypekind292244) 0): case ((Ttypekind292244) 7): { result0 = ((Tctypekind529007) 0); } break; case ((Ttypekind292244) 1): { result0 = ((Tctypekind529007) 2); } break; case ((Ttypekind292244) 2): { result0 = ((Tctypekind529007) 1); } break; case ((Ttypekind292244) 19): { result0 = mapsettype_533389_839829468(typ0); } break; case ((Ttypekind292244) 27): case ((Ttypekind292244) 4): case ((Ttypekind292244) 16): case ((Ttypekind292244) 48): { result0 = ((Tctypekind529007) 17); } break; case ((Ttypekind292244) 17): case ((Ttypekind292244) 18): { result0 = ((Tctypekind529007) 19); } break; case ((Ttypekind292244) 10): case ((Ttypekind292244) 11): case ((Ttypekind292244) 12): case ((Ttypekind292244) 13): case ((Ttypekind292244) 15): case ((Ttypekind292244) 46): case ((Ttypekind292244) 47): case ((Ttypekind292244) 49): case ((Ttypekind292244) 8): { Ttype292840* LOC8; LOC8 = (Ttype292840*)0; LOC8 = lastson_295377_850551059(typ0); result0 = maptype_533394_839829468(LOC8); } break; case ((Ttypekind292244) 14): { { NI64 LOC12; LOC12 = (NI64)0; LOC12 = firstord_320001_3876443242(typ0); if (!(LOC12 < IL64(0))) goto LA13; result0 = ((Tctypekind529007) 6); } goto LA10; LA13: ; { NI64 LOC16; LOC16 = (NI64)0; LOC16 = getsize_320135_3876443242(typ0); switch (((NI) (LOC16))) { case ((NI) 1): { result0 = ((Tctypekind529007) 13); } break; case ((NI) 2): { result0 = ((Tctypekind529007) 14); } break; case ((NI) 4): { result0 = ((Tctypekind529007) 6); } break; case ((NI) 8): { result0 = ((Tctypekind529007) 7); } break; default: { internalerror_196113_155036129(((NimStringDesc*) &T839829468_25)); } break; } } LA10: ; } break; case ((Ttypekind292244) 20): { result0 = maptype_533394_839829468((*typ0).sons->data[((NI) 0)]); } break; case ((Ttypekind292244) 21): case ((Ttypekind292244) 23): case ((Ttypekind292244) 22): { Ttype292840* base0; Ttype292840* LOC24; 
LOC24 = (Ttype292840*)0; LOC24 = lastson_295377_850551059(typ0); base0 = skiptypes_296099_850551059(LOC24, IL64(211106232576256)); switch ((*base0).kind) { case ((Ttypekind292244) 27): case ((Ttypekind292244) 4): case ((Ttypekind292244) 16): case ((Ttypekind292244) 48): { result0 = ((Tctypekind529007) 18); } break; default: { result0 = ((Tctypekind529007) 20); } break; } } break; case ((Ttypekind292244) 26): { result0 = ((Tctypekind529007) 20); } break; case ((Ttypekind292244) 24): { result0 = ((Tctypekind529007) 22); } break; case ((Ttypekind292244) 25): { { if (!!(((*typ0).callconv == ((Tcallingconvention292002) 8)))) goto LA32; result0 = ((Tctypekind529007) 23); } goto LA30; LA32: ; { result0 = ((Tctypekind529007) 19); } LA30: ; } break; case ((Ttypekind292244) 28): { result0 = ((Tctypekind529007) 21); } break; case ((Ttypekind292244) 29): { result0 = ((Tctypekind529007) 24); } break; case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44): { result0 = ((Tctypekind529007) ((NI)(((NI) ((NI)(((NI) ((*typ0).kind)) - ((NI) 31)))) + ((NI) 3)))); } break; case ((Ttypekind292244) 59): { { Ttype292840* LOC43; if (!!(((*typ0).n == NIM_NIL))) goto LA41; LOC43 = (Ttype292840*)0; LOC43 = lastson_295377_850551059(typ0); result0 = maptype_533394_839829468(LOC43); } goto LA39; LA41: ; { internalerror_196113_155036129(((NimStringDesc*) &T839829468_25)); } LA39: ; } break; default: { internalerror_196113_155036129(((NimStringDesc*) &T839829468_25)); } break; } return result0; } N_NIMCALL(NIM_BOOL, isimportedcpptype_533478_839829468)(Ttype292840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = !(((*t0).sym == NIM_NIL)); if (!(LOC1)) goto LA2; LOC1 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(NIM_BOOL, needscomplexassignment_533511_839829468)(Ttype292840* typ0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = containsgarbagecollectedref_320117_3876443242(typ0); 
return result0; } static N_INLINE(NIM_BOOL, isobjlackingtypefield_533515_839829468)(Ttype292840* typ0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC3; NIM_BOOL LOC4; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = ((*typ0).kind == ((Ttypekind292244) 17)); if (!(LOC1)) goto LA2; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0); if (!(LOC4)) goto LA5; LOC4 = ((*typ0).sons->data[((NI) 0)] == NIM_NIL); LA5: ; LOC3 = LOC4; if (LOC3) goto LA6; LOC3 = ispureobject_320138_3876443242(typ0); LA6: ; LOC1 = LOC3; LA2: ; result0 = LOC1; return result0; } N_NIMCALL(NIM_BOOL, isinvalidreturntype_533550_839829468)(Ttype292840* rettype0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { if (!(rettype0 == NIM_NIL)) goto LA3; result0 = NIM_TRUE; } goto LA1; LA3: ; { Tctypekind529007 LOC6; LOC6 = (Tctypekind529007)0; LOC6 = maptype_533394_839829468(rettype0); switch (LOC6) { case ((Tctypekind529007) 17): { Ttype292840* LOC8; LOC8 = (Ttype292840*)0; LOC8 = skiptypes_296099_850551059(rettype0, IL64(211106232576256)); result0 = !(((14680064 &((NU64)1<<((NU)((*LOC8).kind)&63U)))!=0)); } break; case ((Tctypekind529007) 19): { Ttype292840* t0; NIM_BOOL LOC16; NIM_BOOL LOC18; NIM_BOOL LOC20; t0 = skiptypes_296099_850551059(rettype0, IL64(211106232576256)); { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = isimportedcpptype_533478_839829468(rettype0); if (LOC12) goto LA13; LOC12 = isimportedcpptype_533478_839829468(t0); LA13: ; if (!LOC12) goto LA14; result0 = NIM_FALSE; goto BeforeRet; } LA14: ; LOC16 = (NIM_BOOL)0; LOC16 = needscomplexassignment_533511_839829468(t0); if (LOC16) goto LA17; LOC18 = (NIM_BOOL)0; LOC18 = ((*t0).kind == ((Ttypekind292244) 17)); if (!(LOC18)) goto LA19; LOC20 = (NIM_BOOL)0; LOC20 = isobjlackingtypefield_533515_839829468(t0); LOC18 = !(LOC20); LA19: ; LOC16 = LOC18; LA17: ; result0 = LOC16; } break; default: { result0 = NIM_FALSE; } break; } } LA1: ; }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj178006*, 
typename_533292_839829468)(Ttype292840* typ0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { NimStringDesc* LOC5; if (!!(((*typ0).sym == NIM_NIL))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = mangle_528847_2036603609((*(*(*typ0).sym).name).s); result0 = rope_178277_2381377266(LOC5); } goto LA1; LA3: ; { TY533289 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_28), LOC7, 0); } LA1: ; return result0; } N_NIMCALL(Ropeobj178006*, gettypename_533313_839829468)(Ttype292840* typ0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = !(((*typ0).sym == NIM_NIL)); if (!(LOC3)) goto LA4; LOC3 = !(((96 & (*(*typ0).sym).flags) == 0)); LA4: ; if (!LOC3) goto LA5; result0 = (*(*typ0).sym).loc.r; } goto LA1; LA5: ; { { Ropeobj178006* LOC12; Ropeobj178006* LOC13; if (!((*typ0).loc.r == NIM_NIL)) goto LA10; LOC12 = (Ropeobj178006*)0; LOC12 = typename_533292_839829468(typ0); LOC13 = (Ropeobj178006*)0; LOC13 = rope_178401_2381377266(((NI64) ((*typ0).Sup.id))); asgnRefNoCycle((void**) (&(*typ0).loc.r), HEX26_178418_2381377266(LOC12, LOC13)); } LA10: ; result0 = (*typ0).loc.r; } LA1: ; { NimStringDesc* LOC18; if (!(result0 == NIM_NIL)) goto LA16; LOC18 = (NimStringDesc*)0; LOC18 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 13); appendString(LOC18, ((NimStringDesc*) &T839829468_29)); appendString(LOC18, reprEnum((NI)(*typ0).kind, (&NTI292244))); internalerror_196113_155036129(LOC18); } LA16: ; return result0; } N_NIMCALL(Ropeobj178006*, typenameorliteral_533898_839829468)(Ttype292840* t0, NimStringDesc* literal0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = !(((*t0).sym == NIM_NIL)); if (!(LOC4)) goto LA5; LOC4 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0); LA5: ; LOC3 = LOC4; if (!(LOC3)) goto LA6; LOC3 = ((*(*t0).sym).magic == ((Tmagic292524) 
0)); LA6: ; if (!LOC3) goto LA7; result0 = gettypename_533313_839829468(t0); } goto LA1; LA7: ; { result0 = rope_178277_2381377266(literal0); } LA1: ; return result0; } N_NIMCALL(Ropeobj178006*, getsimpletypedesc_533936_839829468)(Tcgen529027* m0, Ttype292840* typ0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; switch ((*typ0).kind) { case ((Ttypekind292244) 26): { result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_30)); } break; case ((Ttypekind292244) 28): { Ropeobj178006* LOC3; LOC3 = (Ropeobj178006*)0; LOC3 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_31)); result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_32)); } break; case ((Ttypekind292244) 29): { result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_33)); } break; case ((Ttypekind292244) 1): { result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_34)); } break; case ((Ttypekind292244) 2): { result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_35)); } break; case ((Ttypekind292244) 5): { result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_18)); } break; case ((Ttypekind292244) 31) ... 
((Ttypekind292244) 44): { result0 = typenameorliteral_533898_839829468(typ0, Numericaltypetostr_533941_839829468[((*typ0).kind)- 31]); } break; case ((Ttypekind292244) 13): case ((Ttypekind292244) 20): case ((Ttypekind292244) 15): { result0 = getsimpletypedesc_533936_839829468(m0, (*typ0).sons->data[((NI) 0)]); } break; case ((Ttypekind292244) 59): { { Ttype292840* LOC15; if (!!(((*typ0).n == NIM_NIL))) goto LA13; LOC15 = (Ttype292840*)0; LOC15 = lastson_295377_850551059(typ0); result0 = getsimpletypedesc_533936_839829468(m0, LOC15); } goto LA11; LA13: ; { internalerror_196113_155036129(((NimStringDesc*) &T839829468_50)); } LA11: ; } break; case ((Ttypekind292244) 11): { Ttype292840* LOC18; LOC18 = (Ttype292840*)0; LOC18 = lastson_295377_850551059(typ0); result0 = getsimpletypedesc_533936_839829468(m0, LOC18); } break; default: { result0 = NIM_NIL; } break; } return result0; } N_NIMCALL(Ropeobj178006*, cachegettype_533593_839829468)(Tidtable292850 tab0, Ttype292840* key0) { Ropeobj178006* result0; Tidobj199004* LOC1; TNimObject* LOC2; result0 = (Ropeobj178006*)0; LOC1 = (Tidobj199004*)0; LOC1 = &key0->Sup; LOC2 = (TNimObject*)0; LOC2 = idtableget_299086_2984716966(tab0, LOC1); result0 = ((Ropeobj178006*) (LOC2)); return result0; } N_NIMCALL(Ropeobj178006*, gettypepre_533972_839829468)(Tcgen529027* m0, Ttype292840* typ0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { if (!(typ0 == NIM_NIL)) goto LA3; result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_26)); } goto LA1; LA3: ; { result0 = getsimpletypedesc_533936_839829468(m0, typ0); { if (!(result0 == NIM_NIL)) goto LA8; result0 = cachegettype_533593_839829468((*m0).typecache, typ0); } LA8: ; } LA1: ; return result0; } N_NIMCALL(NIM_BOOL, isimportedtype_533451_839829468)(Ttype292840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = !(((*t0).sym == NIM_NIL)); if (!(LOC1)) goto LA2; LOC1 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0); 
LA2: ; result0 = LOC1; return result0; } N_NIMCALL(NimStringDesc*, getforwardstructformat_534015_839829468)(Tcgen529027* m0) { NimStringDesc* result0; result0 = (NimStringDesc*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; result0 = copyString(((NimStringDesc*) &T839829468_54)); } goto LA1; LA5: ; { result0 = copyString(((NimStringDesc*) &T839829468_55)); } LA1: ; return result0; } N_NIMCALL(Ropeobj178006*, structorunion_534001_839829468)(Ttype292840* t0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag292431) 1))&31U)))!=0)) goto LA3; result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_56)); } goto LA1; LA3: ; { result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_57)); } LA1: ; return result0; } N_NIMCALL(Ropeobj178006*, gettypeforward_534039_839829468)(Tcgen529027* m0, Ttype292840* typ0) { Ropeobj178006* result0; { result0 = (Ropeobj178006*)0; result0 = cachegettype_533593_839829468((*m0).forwtypecache, typ0); { if (!!((result0 == NIM_NIL))) goto LA3; goto BeforeRet; } LA3: ; result0 = gettypepre_533972_839829468(m0, typ0); { if (!!((result0 == NIM_NIL))) goto LA7; goto BeforeRet; } LA7: ; switch ((*typ0).kind) { case ((Ttypekind292244) 24): case ((Ttypekind292244) 18): case ((Ttypekind292244) 17): { Tidobj199004* LOC17; TNimObject* LOC18; result0 = gettypename_533313_839829468(typ0); { NIM_BOOL LOC12; NimStringDesc* LOC15; TY532811 LOC16; LOC12 = (NIM_BOOL)0; LOC12 = isimportedtype_533451_839829468(typ0); if (!!(LOC12)) goto LA13; LOC15 = (NimStringDesc*)0; LOC15 = getforwardstructformat_534015_839829468(m0); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = structorunion_534001_839829468(typ0); LOC16[1] = result0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC15, LOC16, 2); } LA13: ; LOC17 = 
(Tidobj199004*)0; LOC17 = &typ0->Sup; LOC18 = (TNimObject*)0; LOC18 = &result0->Sup; idtableput_299094_2984716966((&(*m0).forwtypecache), LOC17, LOC18); } break; default: { NimStringDesc* LOC20; LOC20 = (NimStringDesc*)0; LOC20 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 16); appendString(LOC20, ((NimStringDesc*) &T839829468_58)); appendString(LOC20, reprEnum((NI)(*typ0).kind, (&NTI292244))); appendChar(LOC20, 41); internalerror_196113_155036129(LOC20); } break; } }BeforeRet: ; return result0; } N_NIMCALL(void, pushtype_533958_839829468)(Tcgen529027* m0, Ttype292840* typ0) { (*m0).typestack = (Ttypeseq292836*) incrSeqV2(&((*m0).typestack)->Sup, sizeof(Ttype292840*)); asgnRefNoCycle((void**) (&(*m0).typestack->data[(*m0).typestack->Sup.len]), typ0); ++(*m0).typestack->Sup.len; } N_NIMCALL(Ropeobj178006*, gettypedescweak_534079_839829468)(Tcgen529027* m0, Ttype292840* t0, Intset268030* check0) { Ropeobj178006* result0; Ttype292840* etb0; result0 = (Ropeobj178006*)0; etb0 = skiptypes_296099_850551059(t0, IL64(211106232576256)); switch ((*etb0).kind) { case ((Ttypekind292244) 17): case ((Ttypekind292244) 18): { { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = isimportedcpptype_533478_839829468(etb0); if (!(LOC4)) goto LA5; LOC4 = ((*t0).kind == ((Ttypekind292244) 11)); LA5: ; if (!LOC4) goto LA6; result0 = gettypedescaux_533505_839829468(m0, t0, check0); } goto LA2; LA6: ; { Ttype292840* x0; x0 = getuniquetype_528640_2036603609(etb0); result0 = gettypeforward_534039_839829468(m0, x0); pushtype_533958_839829468(m0, x0); } LA2: ; } break; case ((Ttypekind292244) 24): { Ttype292840* x0; Ropeobj178006* LOC10; x0 = getuniquetype_528640_2036603609(etb0); LOC10 = (Ropeobj178006*)0; LOC10 = gettypeforward_534039_839829468(m0, x0); result0 = HEX26_178447_2381377266(LOC10, ((NimStringDesc*) &T839829468_53)); pushtype_533958_839829468(m0, x0); } break; default: { result0 = gettypedescaux_533505_839829468(m0, t0, check0); } break; } return result0; } static 
N_INLINE(NI, len_293081_850551059)(Tnode292802* n0) { NI result0; result0 = (NI)0; { if (!(*n0).kindU.S6.sons == 0) goto LA3; result0 = ((NI) 0); } goto LA1; LA3: ; { result0 = ((*n0).kindU.S6.sons ? (*n0).kindU.S6.sons->Sup.len : 0); } LA1: ; return result0; } N_NIMCALL(void, appcg_532632_839829468)(Tcgen529027* m0, Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) { Ropeobj178006* LOC1; LOC1 = (Ropeobj178006*)0; LOC1 = ropecg_532407_839829468(m0, frmt0, args0, args0Len0); add_178482_2381377266(c0, LOC1); } N_NIMCALL(NIM_BOOL, scancppgenericslot_534827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0) { NIM_BOOL result0; NI begin0; { result0 = (NIM_BOOL)0; (*cursor0) += ((NI) 1); begin0 = (*cursor0); { while (1) { if (!((NU8)(pat0->data[(*cursor0)]) == (NU8)(42))) goto LA2; (*cursor0) += ((NI) 1); } LA2: ; } { if (!(((NU8)(pat0->data[(*cursor0)])) >= ((NU8)(48)) && ((NU8)(pat0->data[(*cursor0)])) <= ((NU8)(57)))) goto LA5; (*outidx0) = ((NI) ((NI)(((NI) (((NU8)(pat0->data[(*cursor0)])))) - ((NI) 48)))); (*outstars0) = (NI)((*cursor0) - begin0); (*cursor0) += ((NI) 1); result0 = NIM_TRUE; goto BeforeRet; } goto LA3; LA5: ; { result0 = NIM_FALSE; goto BeforeRet; } LA3: ; }BeforeRet: ; return result0; } N_NIMCALL(Ttype292840*, resolvestarsincpptype_534891_839829468)(Ttype292840* typ0, NI idx0, NI stars0) { Ttype292840* result0; result0 = (Ttype292840*)0; { NI LOC3; LOC3 = (NI)0; LOC3 = len_295339_850551059(typ0); if (!(LOC3 <= idx0)) goto LA4; internalerror_196113_155036129(((NimStringDesc*) &T839829468_81)); } LA4: ; result0 = (*typ0).sons->data[idx0]; { NI i_534906_839829468; NI res_534931_839829468; i_534906_839829468 = (NI)0; res_534931_839829468 = ((NI) 1); { while (1) { if (!(res_534931_839829468 <= stars0)) goto LA8; i_534906_839829468 = res_534931_839829468; { NIM_BOOL LOC11; NI LOC13; LOC11 = (NIM_BOOL)0; LOC11 = !((result0 == NIM_NIL)); if (!(LOC11)) goto LA12; LOC13 = (NI)0; LOC13 = 
len_295339_850551059(result0); LOC11 = (((NI) 0) < LOC13); LA12: ; if (!LOC11) goto LA14; { if (!((*result0).kind == ((Ttypekind292244) 11))) goto LA18; result0 = (*result0).sons->data[((NI) 1)]; } goto LA16; LA18: ; { result0 = elemtype_320394_3876443242(result0); } LA16: ; } LA14: ; res_534931_839829468 += ((NI) 1); } LA8: ; } } return result0; } N_NIMCALL(NimStringDesc*, manglefield_532973_839829468)(Tident199010* name0) { NimStringDesc* result0; result0 = (NimStringDesc*)0; result0 = mangle_528847_2036603609((*name0).s); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = iskeyword_532960_839829468(name0); if (!LOC3) goto LA4; result0->data[((NI) 0)] = nsuToUpperAsciiChar(result0->data[((NI) 0)]); } LA4: ; return result0; } N_NIMCALL(Ropeobj178006*, manglerecfieldname_534361_839829468)(Tsym292834* field0, Ttype292840* rectype0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = !(((*rectype0).sym == NIM_NIL)); if (!(LOC3)) goto LA4; LOC3 = !(((96 & (*(*rectype0).sym).flags) == 0)); LA4: ; if (!LOC3) goto LA5; result0 = (*field0).loc.r; } goto LA1; LA5: ; { NimStringDesc* LOC8; LOC8 = (NimStringDesc*)0; LOC8 = manglefield_532973_839829468((*field0).name); result0 = rope_178277_2381377266(LOC8); } LA1: ; { if (!(result0 == NIM_NIL)) goto LA11; internalerror_196100_155036129((*field0).info, ((NimStringDesc*) &T839829468_96)); } LA11: ; return result0; } N_NIMCALL(Ropeobj178006*, genrecordfieldsaux_534421_839829468)(Tcgen529027* m0, Tnode292802* n0, Ropeobj178006* accessexpr0, Ttype292840* rectype0, Intset268030* check0) { Ropeobj178006* result0; Ropeobj178006* ae0; Ropeobj178006* uname0; Ropeobj178006* sname0; Ropeobj178006* a0; Tnode292802* k0; Tsym292834* field0; { result0 = (Ropeobj178006*)0; ae0 = (Ropeobj178006*)0; uname0 = (Ropeobj178006*)0; sname0 = (Ropeobj178006*)0; a0 = (Ropeobj178006*)0; k0 = (Tnode292802*)0; field0 = (Tsym292834*)0; result0 = NIM_NIL; switch ((*n0).kind) { case ((Tnodekind292020) 138): { { NI 
i_534447_839829468; NI HEX3Atmp_534620_839829468; NI LOC3; NI res_534623_839829468; i_534447_839829468 = (NI)0; HEX3Atmp_534620_839829468 = (NI)0; LOC3 = (NI)0; LOC3 = sonslen_295351_850551059(n0); HEX3Atmp_534620_839829468 = (NI)(LOC3 - ((NI) 1)); res_534623_839829468 = ((NI) 0); { while (1) { Ropeobj178006* LOC6; if (!(res_534623_839829468 <= HEX3Atmp_534620_839829468)) goto LA5; i_534447_839829468 = res_534623_839829468; LOC6 = (Ropeobj178006*)0; LOC6 = genrecordfieldsaux_534421_839829468(m0, (*n0).kindU.S6.sons->data[i_534447_839829468], accessexpr0, rectype0, check0); add_178482_2381377266(&result0, LOC6); res_534623_839829468 += ((NI) 1); } LA5: ; } } } break; case ((Tnodekind292020) 139): { Ropeobj178006* LOC12; NimStringDesc* LOC13; NimStringDesc* LOC14; Ropeobj178006* unionbody0; { if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)))) goto LA10; internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_89)); } LA10: ; LOC12 = (Ropeobj178006*)0; LOC12 = genrecordfieldsaux_534421_839829468(m0, (*n0).kindU.S6.sons->data[((NI) 0)], accessexpr0, rectype0, check0); add_178482_2381377266(&result0, LOC12); LOC13 = (NimStringDesc*)0; LOC14 = (NimStringDesc*)0; LOC14 = mangle_528847_2036603609((*(*(*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s); LOC13 = rawNewString(LOC14->Sup.len + 1); appendString(LOC13, LOC14); appendChar(LOC13, 85); uname0 = rope_178277_2381377266(LOC13); { TY532811 LOC19; if (!!((accessexpr0 == NIM_NIL))) goto LA17; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = accessexpr0; LOC19[1] = uname0; ae0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC19, 2); } goto LA15; LA17: ; { ae0 = uname0; } LA15: ; unionbody0 = NIM_NIL; { NI i_534491_839829468; NI HEX3Atmp_534629_839829468; NI LOC22; NI res_534632_839829468; i_534491_839829468 = (NI)0; HEX3Atmp_534629_839829468 = (NI)0; LOC22 = (NI)0; LOC22 = sonslen_295351_850551059(n0); HEX3Atmp_534629_839829468 = (NI)(LOC22 - ((NI) 
1)); res_534632_839829468 = ((NI) 1); { while (1) { if (!(res_534632_839829468 <= HEX3Atmp_534629_839829468)) goto LA24; i_534491_839829468 = res_534632_839829468; switch ((*(*n0).kindU.S6.sons->data[i_534491_839829468]).kind) { case ((Tnodekind292020) 85): case ((Tnodekind292020) 88): { k0 = lastson_295364_850551059((*n0).kindU.S6.sons->data[i_534491_839829468]); { Ropeobj178006* LOC30; TY532811 LOC31; Ropeobj178006* LOC32; if (!!(((*k0).kind == ((Tnodekind292020) 3)))) goto LA28; LOC30 = (Ropeobj178006*)0; LOC30 = rope_178401_2381377266(((NI64) (i_534491_839829468))); sname0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_91), LOC30); memset((void*)LOC31, 0, sizeof(LOC31)); LOC31[0] = ae0; LOC31[1] = sname0; LOC32 = (Ropeobj178006*)0; LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC31, 2); a0 = genrecordfieldsaux_534421_839829468(m0, k0, LOC32, rectype0, check0); { TY178507 LOC37; if (!!((a0 == NIM_NIL))) goto LA35; add_178487_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_92)); add_178482_2381377266(&unionbody0, a0); memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = sname0; addf_179205_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_93), LOC37, 1); } LA35: ; } goto LA26; LA28: ; { Ropeobj178006* LOC39; LOC39 = (Ropeobj178006*)0; LOC39 = genrecordfieldsaux_534421_839829468(m0, k0, ae0, rectype0, check0); add_178482_2381377266(&unionbody0, LOC39); } LA26: ; } break; default: { internalerror_196113_155036129(((NimStringDesc*) &T839829468_94)); } break; } res_534632_839829468 += ((NI) 1); } LA24: ; } } { TY532811 LOC45; if (!!((unionbody0 == NIM_NIL))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC45[0] = unionbody0; LOC45[1] = uname0; addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_95), LOC45, 2); } LA43: ; } break; case ((Tnodekind292020) 3): { field0 = (*n0).kindU.S4.sym; { if (!((*(*field0).typ).kind == ((Ttypekind292244) 62))) goto LA49; goto BeforeRet; } LA49: ; sname0 = 
manglerecfieldname_534361_839829468(field0, rectype0); { TY532811 LOC55; if (!!((accessexpr0 == NIM_NIL))) goto LA53; memset((void*)LOC55, 0, sizeof(LOC55)); LOC55[0] = accessexpr0; LOC55[1] = sname0; ae0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC55, 2); } goto LA51; LA53: ; { ae0 = sname0; } LA51: ; fillloc_532282_839829468((&(*field0).loc), ((Tlockind292808) 5), (*field0).typ, ae0, ((Tstorageloc292812) 0)); { NIM_BOOL LOC59; Ttype292840* fieldtype0; LOC59 = (NIM_BOOL)0; LOC59 = isimportedcpptype_533478_839829468(rectype0); if (!!(LOC59)) goto LA60; fieldtype0 = skiptypes_296099_850551059((*field0).loc.t, IL64(211106232576256)); { NIM_BOOL LOC64; TY532811 LOC68; Ttype292840* LOC69; LOC64 = (NIM_BOOL)0; LOC64 = ((*fieldtype0).kind == ((Ttypekind292244) 16)); if (!(LOC64)) goto LA65; LOC64 = (((*fieldtype0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0); LA65: ; if (!LOC64) goto LA66; memset((void*)LOC68, 0, sizeof(LOC68)); LOC69 = (Ttype292840*)0; LOC69 = elemtype_320394_3876443242(fieldtype0); LOC68[0] = gettypedescaux_533505_839829468(m0, LOC69, check0); LOC68[1] = sname0; addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_97), LOC68, 2); } goto LA62; LA66: ; { TY532811 LOC73; if (!((*fieldtype0).kind == ((Ttypekind292244) 24))) goto LA71; memset((void*)LOC73, 0, sizeof(LOC73)); LOC73[0] = gettypedescweak_534079_839829468(m0, (*field0).loc.t, check0); LOC73[1] = sname0; addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC73, 2); } goto LA62; LA71: ; { TY535238 LOC77; NimStringDesc* LOC78; if (!!(((*field0).kindU.S4.bitsize == ((NI) 0)))) goto LA75; memset((void*)LOC77, 0, sizeof(LOC77)); LOC77[0] = gettypedescaux_533505_839829468(m0, (*field0).loc.t, check0); LOC77[1] = sname0; LOC78 = (NimStringDesc*)0; LOC78 = nimIntToStr((*field0).kindU.S4.bitsize); LOC77[2] = rope_178277_2381377266(LOC78); addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_98), LOC77, 3); } goto LA62; LA75: ; { TY532811 
LOC80; memset((void*)LOC80, 0, sizeof(LOC80)); LOC80[0] = gettypedescaux_533505_839829468(m0, (*field0).loc.t, check0); LOC80[1] = sname0; addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC80, 2); } LA62: ; } LA60: ; } break; default: { internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_99)); } break; } }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj178006*, getrecordfields_534636_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = genrecordfieldsaux_534421_839829468(m0, (*typ0).n, NIM_NIL, typ0, check0); return result0; } N_NIMCALL(Ropeobj178006*, getrecorddesc_534643_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0) { Ropeobj178006* result0; NIM_BOOL hasfield0; Ropeobj178006* attribute0; TY535238 LOC6; Ropeobj178006* desc0; NimStringDesc* LOC46; result0 = (Ropeobj178006*)0; hasfield0 = NIM_FALSE; { if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 21))&31U)))!=0)) goto LA3; attribute0 = rope_178277_2381377266(Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field19); } goto LA1; LA3: ; { attribute0 = NIM_NIL; } LA1: ; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = structorunion_534001_839829468(typ0); LOC6[1] = name0; LOC6[2] = attribute0; result0 = ropecg_532407_839829468(m0, Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field18, LOC6, 3); { if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA9; { if (!((*typ0).sons->data[((NI) 0)] == NIM_NIL)) goto LA13; { NIM_BOOL LOC17; NIM_BOOL LOC18; TY533289 LOC23; LOC17 = (NIM_BOOL)0; LOC18 = (NIM_BOOL)0; LOC18 = !(((*typ0).sym == NIM_NIL)); if (!(LOC18)) goto LA19; LOC18 = (((*(*typ0).sym).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0); LA19: ; LOC17 = LOC18; if (LOC17) goto LA20; LOC17 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0); LA20: ; if (!LOC17) goto LA21; memset((void*)LOC23, 0, sizeof(LOC23)); 
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_85), LOC23, 0); } goto LA15; LA21: ; { TY532811 LOC25; memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = name0; LOC25[1] = attribute0; appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_86), LOC25, 2); hasfield0 = NIM_TRUE; } LA15: ; } goto LA11; LA13: ; { NIM_BOOL LOC27; TY178507 LOC31; Ttype292840* LOC32; LOC27 = (NIM_BOOL)0; LOC27 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC27) goto LA28; LOC27 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA28: ; if (!LOC27) goto LA29; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ttype292840*)0; LOC32 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360)); LOC31[0] = gettypedescaux_533505_839829468(m0, LOC32, check0); appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_87), LOC31, 1); hasfield0 = NIM_TRUE; } goto LA11; LA29: ; { TY178507 LOC34; Ttype292840* LOC35; memset((void*)LOC34, 0, sizeof(LOC34)); LOC35 = (Ttype292840*)0; LOC35 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360)); LOC34[0] = gettypedescaux_533505_839829468(m0, LOC35, check0); appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_88), LOC34, 1); hasfield0 = NIM_TRUE; } LA11: ; } goto LA7; LA9: ; { TY178507 LOC37; memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = name0; addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_85), LOC37, 1); } LA7: ; desc0 = getrecordfields_534636_839829468(m0, typ0, check0); { NIM_BOOL LOC40; TY533289 LOC44; LOC40 = (NIM_BOOL)0; LOC40 = (desc0 == NIM_NIL); if (!(LOC40)) goto LA41; LOC40 = !(hasfield0); LA41: ; if (!LOC40) goto LA42; memset((void*)LOC44, 0, sizeof(LOC44)); addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_100), LOC44, 0); } goto LA38; LA42: ; { add_178482_2381377266(&result0, desc0); } LA38: ; LOC46 = (NimStringDesc*)0; LOC46 = rawNewString(tnl_176644_4151366050->Sup.len + 2); 
appendString(LOC46, ((NimStringDesc*) &T839829468_101)); appendString(LOC46, tnl_176644_4151366050); add_178487_2381377266(&result0, LOC46); return result0; } N_NIMCALL(Ropeobj178006*, gettupledesc_534777_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0) { Ropeobj178006* result0; TY532811 LOC1; Ropeobj178006* desc0; NimStringDesc* LOC13; result0 = (Ropeobj178006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = structorunion_534001_839829468(typ0); LOC1[1] = name0; result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_102), LOC1, 2); desc0 = NIM_NIL; { NI i_534799_839829468; NI HEX3Atmp_534820_839829468; NI LOC3; NI res_534823_839829468; i_534799_839829468 = (NI)0; HEX3Atmp_534820_839829468 = (NI)0; LOC3 = (NI)0; LOC3 = sonslen_295327_850551059(typ0); HEX3Atmp_534820_839829468 = (NI)(LOC3 - ((NI) 1)); res_534823_839829468 = ((NI) 0); { while (1) { TY532811 LOC6; if (!(res_534823_839829468 <= HEX3Atmp_534820_839829468)) goto LA5; i_534799_839829468 = res_534823_839829468; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = gettypedescaux_533505_839829468(m0, (*typ0).sons->data[i_534799_839829468], check0); LOC6[1] = rope_178401_2381377266(((NI64) (i_534799_839829468))); addf_179205_2381377266(&desc0, ((NimStringDesc*) &T839829468_103), LOC6, 2); res_534823_839829468 += ((NI) 1); } LA5: ; } } { NimStringDesc* LOC11; if (!(desc0 == NIM_NIL)) goto LA9; LOC11 = (NimStringDesc*)0; LOC11 = rawNewString(tnl_176644_4151366050->Sup.len + 11); appendString(LOC11, ((NimStringDesc*) &T839829468_104)); appendString(LOC11, tnl_176644_4151366050); add_178487_2381377266(&result0, LOC11); } goto LA7; LA9: ; { add_178482_2381377266(&result0, desc0); } LA7: ; LOC13 = (NimStringDesc*)0; LOC13 = rawNewString(tnl_176644_4151366050->Sup.len + 2); appendString(LOC13, ((NimStringDesc*) &T839829468_101)); appendString(LOC13, tnl_176644_4151366050); add_178487_2381377266(&result0, LOC13); return result0; } N_NIMCALL(Ropeobj178006*, 
gettypedescaux_533505_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0) { Ropeobj178006* result0; Ttype292840* t_534942_839829468; { result0 = (Ropeobj178006*)0; t_534942_839829468 = getuniquetype_528640_2036603609(typ0); { if (!(t_534942_839829468 == NIM_NIL)) goto LA3; internalerror_196113_155036129(((NimStringDesc*) &T839829468_27)); } LA3: ; { if (!!(((*t_534942_839829468).sym == NIM_NIL))) goto LA7; useheader_532369_839829468(m0, (*t_534942_839829468).sym); } LA7: ; result0 = gettypepre_533972_839829468(m0, t_534942_839829468); { if (!!((result0 == NIM_NIL))) goto LA11; goto BeforeRet; } LA11: ; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = containsorincl_268862_2627731572(check0, (*t_534942_839829468).Sup.id); if (!LOC15) goto LA16; { NIM_BOOL LOC20; NimStringDesc* LOC24; NimStringDesc* LOC25; LOC20 = (NIM_BOOL)0; LOC20 = isimportedcpptype_533478_839829468(typ0); if (LOC20) goto LA21; LOC20 = isimportedcpptype_533478_839829468(t_534942_839829468); LA21: ; if (!!(LOC20)) goto LA22; LOC24 = (NimStringDesc*)0; LOC25 = (NimStringDesc*)0; LOC25 = typetostring_320017_3876443242(typ0, ((Tprefereddesc320011) 0)); LOC24 = rawNewString(LOC25->Sup.len + 28); appendString(LOC24, ((NimStringDesc*) &T839829468_51)); appendString(LOC24, LOC25); internalerror_196113_155036129(LOC24); } LA22: ; } LA16: ; switch ((*t_534942_839829468).kind) { case ((Ttypekind292244) 22): case ((Ttypekind292244) 21): case ((Ttypekind292244) 23): { NimStringDesc* star0; Ttype292840* et0; Ttype292840* LOC38; Ttype292840* etb0; { NIM_BOOL LOC29; NIM_BOOL LOC30; NIM_BOOL LOC33; LOC29 = (NIM_BOOL)0; LOC30 = (NIM_BOOL)0; LOC30 = ((*t_534942_839829468).kind == ((Ttypekind292244) 23)); if (!(LOC30)) goto LA31; LOC30 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0)); LA31: ; LOC29 = LOC30; if (!(LOC29)) goto LA32; LOC33 = (NIM_BOOL)0; LOC33 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC33) goto LA34; LOC33 = (((*(*m0).module).flags 
&(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA34: ; LOC29 = LOC33; LA32: ; if (!LOC29) goto LA35; star0 = copyString(((NimStringDesc*) &T839829468_52)); } goto LA27; LA35: ; { star0 = copyString(((NimStringDesc*) &T839829468_53)); } LA27: ; LOC38 = (Ttype292840*)0; LOC38 = skiptypes_296099_850551059(typ0, IL64(211106232576256)); et0 = lastson_295377_850551059(LOC38); etb0 = skiptypes_296099_850551059(et0, IL64(211106232576256)); { if (!((IL64(281475110993936) &((NU64)1<<((NU)((*etb0).kind)&63U)))!=0)) goto LA41; et0 = elemtype_320394_3876443242(etb0); etb0 = skiptypes_296099_850551059(et0, IL64(211106232576256)); star0->data[((NI) 0)] = 42; } LA41: ; switch ((*etb0).kind) { case ((Ttypekind292244) 17): case ((Ttypekind292244) 18): { { NIM_BOOL LOC46; Ropeobj178006* LOC50; LOC46 = (NIM_BOOL)0; LOC46 = isimportedcpptype_533478_839829468(etb0); if (!(LOC46)) goto LA47; LOC46 = ((*et0).kind == ((Ttypekind292244) 11)); LA47: ; if (!LOC46) goto LA48; LOC50 = (Ropeobj178006*)0; LOC50 = gettypedescaux_533505_839829468(m0, et0, check0); result0 = HEX26_178447_2381377266(LOC50, star0); } goto LA44; LA48: ; { Ttype292840* x0; Ropeobj178006* name0; Tidobj199004* LOC52; TNimObject* LOC53; x0 = getuniquetype_528640_2036603609(etb0); name0 = gettypeforward_534039_839829468(m0, x0); result0 = HEX26_178447_2381377266(name0, star0); LOC52 = (Tidobj199004*)0; LOC52 = &t_534942_839829468->Sup; LOC53 = (TNimObject*)0; LOC53 = &result0->Sup; idtableput_299094_2984716966((&(*m0).typecache), LOC52, LOC53); pushtype_533958_839829468(m0, x0); } LA44: ; } break; case ((Ttypekind292244) 24): { Ttype292840* x0; Ropeobj178006* name0; Ropeobj178006* LOC55; Tidobj199004* LOC56; TNimObject* LOC57; x0 = getuniquetype_528640_2036603609(etb0); name0 = gettypeforward_534039_839829468(m0, x0); LOC55 = (Ropeobj178006*)0; LOC55 = HEX26_178447_2381377266(name0, ((NimStringDesc*) &T839829468_53)); result0 = HEX26_178447_2381377266(LOC55, star0); LOC56 = (Tidobj199004*)0; LOC56 = 
&t_534942_839829468->Sup; LOC57 = (TNimObject*)0; LOC57 = &result0->Sup; idtableput_299094_2984716966((&(*m0).typecache), LOC56, LOC57); pushtype_533958_839829468(m0, x0); } break; default: { Ropeobj178006* LOC59; Tidobj199004* LOC60; TNimObject* LOC61; LOC59 = (Ropeobj178006*)0; LOC59 = gettypedescaux_533505_839829468(m0, et0, check0); result0 = HEX26_178447_2381377266(LOC59, star0); LOC60 = (Tidobj199004*)0; LOC60 = &t_534942_839829468->Sup; LOC61 = (TNimObject*)0; LOC61 = &result0->Sup; idtableput_299094_2984716966((&(*m0).typecache), LOC60, LOC61); } break; } } break; case ((Ttypekind292244) 27): case ((Ttypekind292244) 48): { Ropeobj178006* LOC63; Tidobj199004* LOC64; TNimObject* LOC65; LOC63 = (Ropeobj178006*)0; LOC63 = gettypedescweak_534079_839829468(m0, (*t_534942_839829468).sons->data[((NI) 0)], check0); result0 = HEX26_178447_2381377266(LOC63, ((NimStringDesc*) &T839829468_53)); LOC64 = (Tidobj199004*)0; LOC64 = &t_534942_839829468->Sup; LOC65 = (TNimObject*)0; LOC65 = &result0->Sup; idtableput_299094_2984716966((&(*m0).typecache), LOC64, LOC65); } break; case ((Ttypekind292244) 20): case ((Ttypekind292244) 14): { Ttype292840* t0; { if (!((*t_534942_839829468).kind == ((Ttypekind292244) 20))) goto LA69; t0 = lastson_295377_850551059(t_534942_839829468); } goto LA67; LA69: ; { t0 = t_534942_839829468; } LA67: ; result0 = cachegettype_533593_839829468((*m0).typecache, t0); { if (!(result0 == NIM_NIL)) goto LA74; result0 = gettypename_533313_839829468(t0); { NIM_BOOL LOC78; NIM_BOOL LOC80; Tidobj199004* LOC84; TNimObject* LOC85; NI size0; NU32 owner0; LOC78 = (NIM_BOOL)0; LOC78 = isimportedcpptype_533478_839829468(t0); if (LOC78) goto LA79; LOC80 = (NIM_BOOL)0; LOC80 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0); if (!(LOC80)) goto LA81; LOC80 = ((*(*t0).sym).magic == ((Tmagic292524) 0)); LA81: ; LOC78 = LOC80; LA79: ; if (!!(LOC78)) goto LA82; LOC84 = (Tidobj199004*)0; LOC84 = &t0->Sup; LOC85 = (TNimObject*)0; LOC85 = &result0->Sup; 
idtableput_299094_2984716966((&(*m0).typecache), LOC84, LOC85); size0 = (NI)0; { NI64 LOC88; TY178507 LOC91; LOC88 = (NI64)0; LOC88 = firstord_320001_3876443242(t0); if (!(LOC88 < IL64(0))) goto LA89; memset((void*)LOC91, 0, sizeof(LOC91)); LOC91[0] = result0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC91, 1); size0 = ((NI) 4); } goto LA86; LA89: ; { NI64 LOC93; LOC93 = (NI64)0; LOC93 = getsize_320135_3876443242(t0); size0 = ((NI) (LOC93)); switch (size0) { case ((NI) 1): { TY178507 LOC95; memset((void*)LOC95, 0, sizeof(LOC95)); LOC95[0] = result0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_60), LOC95, 1); } break; case ((NI) 2): { TY178507 LOC97; memset((void*)LOC97, 0, sizeof(LOC97)); LOC97[0] = result0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_61), LOC97, 1); } break; case ((NI) 4): { TY178507 LOC99; memset((void*)LOC99, 0, sizeof(LOC99)); LOC99[0] = result0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC99, 1); } break; case ((NI) 8): { TY178507 LOC101; memset((void*)LOC101, 0, sizeof(LOC101)); LOC101[0] = result0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_62), LOC101, 1); } break; default: { internalerror_196100_155036129((*(*t0).sym).info, ((NimStringDesc*) &T839829468_63)); } break; } } LA86: ; owner0 = hashowner_532977_839829468((*t0).sym); { NIM_BOOL LOC105; TY203017* vals0; Enumdesc203007 LOC114; LOC105 = (NIM_BOOL)0; LOC105 = hasenum_203230_1926258066((&gdebuginfo_203470_1926258066), (*(*(*t0).sym).name).s, ((NI) ((*(*t0).sym).info.line)), owner0); if (!!(LOC105)) goto LA106; vals0 = (TY203017*) newSeq((&NTI203017), 0); { NI i_535144_839829468; NI HEX3Atmp_535649_839829468; NI LOC109; NI res_535652_839829468; i_535144_839829468 = (NI)0; HEX3Atmp_535649_839829468 = (NI)0; LOC109 = 
(NI)0; LOC109 = len_293081_850551059((*t0).n); HEX3Atmp_535649_839829468 = (NI)(LOC109 - ((NI) 1)); res_535652_839829468 = ((NI) 0); { while (1) { Tsym292834* field0; TY203018 LOC112; NimStringDesc* LOC113; if (!(res_535652_839829468 <= HEX3Atmp_535649_839829468)) goto LA111; i_535144_839829468 = res_535652_839829468; field0 = (*(*(*t0).n).kindU.S6.sons->data[i_535144_839829468]).kindU.S4.sym; memset((void*)(&LOC112), 0, sizeof(LOC112)); LOC112.Field0 = copyString((*(*field0).name).s); LOC112.Field1 = (*field0).position; vals0 = (TY203017*) incrSeqV2(&(vals0)->Sup, sizeof(TY203018)); LOC113 = (NimStringDesc*)0; LOC113 = vals0->data[vals0->Sup.len].Field0; vals0->data[vals0->Sup.len].Field0 = copyStringRC1(LOC112.Field0); if (LOC113) nimGCunrefNoCycle(LOC113); vals0->data[vals0->Sup.len].Field1 = LOC112.Field1; ++vals0->Sup.len; res_535652_839829468 += ((NI) 1); } LA111: ; } } memset((void*)(&LOC114), 0, sizeof(LOC114)); memset((void*)(&LOC114), 0, sizeof(LOC114)); LOC114.size = size0; LOC114.owner = owner0; LOC114.id = (*(*t0).sym).Sup.id; LOC114.name = copyString((*(*(*t0).sym).name).s); genericSeqAssign((&LOC114.values), vals0, (&NTI203017)); registerenum_203419_1926258066((&gdebuginfo_203470_1926258066), (&LOC114)); } LA106: ; } LA82: ; } LA74: ; } break; case ((Ttypekind292244) 25): { Tidobj199004* LOC116; TNimObject* LOC117; Ropeobj178006* rettype0; Ropeobj178006* desc0; result0 = gettypename_533313_839829468(t_534942_839829468); LOC116 = (Tidobj199004*)0; LOC116 = &t_534942_839829468->Sup; LOC117 = (TNimObject*)0; LOC117 = &result0->Sup; idtableput_299094_2984716966((&(*m0).typecache), LOC116, LOC117); rettype0 = (Ropeobj178006*)0; desc0 = (Ropeobj178006*)0; genprocparams_534115_839829468(m0, t_534942_839829468, &rettype0, &desc0, check0, NIM_TRUE, NIM_TRUE); { NIM_BOOL LOC120; LOC120 = (NIM_BOOL)0; LOC120 = isimportedtype_533451_839829468(t_534942_839829468); if (!!(LOC120)) goto LA121; { TY535235 LOC127; if (!!(((*t_534942_839829468).callconv == 
((Tcallingconvention292002) 8)))) goto LA125; memset((void*)LOC127, 0, sizeof(LOC127)); LOC127[0] = rope_178277_2381377266(Callingconvtostr_533587_839829468[((*t_534942_839829468).callconv)- 0]); LOC127[1] = rettype0; LOC127[2] = result0; LOC127[3] = desc0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC127, 4); } goto LA123; LA125: ; { TY535238 LOC129; memset((void*)LOC129, 0, sizeof(LOC129)); LOC129[0] = result0; LOC129[1] = rettype0; LOC129[2] = desc0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC129, 3); } LA123: ; } LA121: ; } break; case ((Ttypekind292244) 24): { Tidobj199004* LOC144; Ropeobj178006* LOC145; TNimObject* LOC146; result0 = cachegettype_533593_839829468((*m0).forwtypecache, t_534942_839829468); { Tidobj199004* LOC142; TNimObject* LOC143; if (!(result0 == NIM_NIL)) goto LA133; result0 = gettypename_533313_839829468(t_534942_839829468); { NIM_BOOL LOC137; NimStringDesc* LOC140; TY532811 LOC141; LOC137 = (NIM_BOOL)0; LOC137 = isimportedtype_533451_839829468(t_534942_839829468); if (!!(LOC137)) goto LA138; LOC140 = (NimStringDesc*)0; LOC140 = getforwardstructformat_534015_839829468(m0); memset((void*)LOC141, 0, sizeof(LOC141)); LOC141[0] = structorunion_534001_839829468(t_534942_839829468); LOC141[1] = result0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC140, LOC141, 2); } LA138: ; LOC142 = (Tidobj199004*)0; LOC142 = &t_534942_839829468->Sup; LOC143 = (TNimObject*)0; LOC143 = &result0->Sup; idtableput_299094_2984716966((&(*m0).forwtypecache), LOC142, LOC143); } LA133: ; LOC144 = (Tidobj199004*)0; LOC144 = &t_534942_839829468->Sup; LOC145 = (Ropeobj178006*)0; LOC145 = HEX26_178447_2381377266(result0, ((NimStringDesc*) &T839829468_53)); LOC146 = (TNimObject*)0; LOC146 = &LOC145->Sup; idtableput_299094_2984716966((&(*m0).typecache), LOC144, LOC146); { NIM_BOOL LOC149; LOC149 = (NIM_BOOL)0; LOC149 = 
isimportedtype_533451_839829468(t_534942_839829468); if (!!(LOC149)) goto LA150; { Ttype292840* LOC154; NimStringDesc* LOC157; NimStringDesc* LOC158; TY532811 LOC166; LOC154 = (Ttype292840*)0; LOC154 = skiptypes_296099_850551059((*t_534942_839829468).sons->data[((NI) 0)], IL64(211106232576256)); if (!!(((*LOC154).kind == ((Ttypekind292244) 3)))) goto LA155; LOC157 = (NimStringDesc*)0; LOC158 = (NimStringDesc*)0; { NIM_BOOL LOC161; LOC161 = (NIM_BOOL)0; LOC161 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC161) goto LA162; LOC161 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA162: ; if (!LOC161) goto LA163; LOC158 = copyString(((NimStringDesc*) &T839829468_76)); } goto LA159; LA163: ; { LOC158 = copyString(((NimStringDesc*) &T839829468_77)); } LA159: ; LOC157 = rawNewString(LOC158->Sup.len + 31); appendString(LOC157, LOC158); appendString(LOC157, ((NimStringDesc*) &T839829468_78)); memset((void*)LOC166, 0, sizeof(LOC166)); LOC166[0] = gettypedescaux_533505_839829468(m0, (*t_534942_839829468).sons->data[((NI) 0)], check0); LOC166[1] = result0; appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 4))- 0], LOC157, LOC166, 2); } goto LA152; LA155: ; { result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_79)); } LA152: ; } LA150: ; add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_53)); } break; case ((Ttypekind292244) 4): case ((Ttypekind292244) 16): { NI64 n0; Tidobj199004* LOC173; TNimObject* LOC174; n0 = lengthord_320007_3876443242(t_534942_839829468); { if (!(n0 <= IL64(0))) goto LA171; n0 = IL64(1); } LA171: ; result0 = gettypename_533313_839829468(t_534942_839829468); LOC173 = (Tidobj199004*)0; LOC173 = &t_534942_839829468->Sup; LOC174 = (TNimObject*)0; LOC174 = &result0->Sup; idtableput_299094_2984716966((&(*m0).typecache), LOC173, LOC174); { NIM_BOOL LOC177; Ropeobj178006* foo0; TY535238 LOC180; LOC177 = (NIM_BOOL)0; LOC177 = isimportedtype_533451_839829468(t_534942_839829468); if (!!(LOC177)) 
goto LA178; foo0 = gettypedescaux_533505_839829468(m0, (*t_534942_839829468).sons->data[((NI) 1)], check0); memset((void*)LOC180, 0, sizeof(LOC180)); LOC180[0] = foo0; LOC180[1] = result0; LOC180[2] = rope_178401_2381377266(n0); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_80), LOC180, 3); } LA178: ; } break; case ((Ttypekind292244) 17): case ((Ttypekind292244) 18): { { NIM_BOOL LOC184; Ropeobj178006* cppname0; NI i0; NI chunkstart0; Ropeobj178006* LOC226; LOC184 = (NIM_BOOL)0; LOC184 = isimportedcpptype_533478_839829468(t_534942_839829468); if (!(LOC184)) goto LA185; LOC184 = ((*typ0).kind == ((Ttypekind292244) 11)); LA185: ; if (!LOC184) goto LA186; cppname0 = gettypename_533313_839829468(t_534942_839829468); i0 = ((NI) 0); chunkstart0 = ((NI) 0); { while (1) { if (!(i0 < ((*cppname0).data ? (*cppname0).data->Sup.len : 0))) goto LA189; { NI chunkend0; NI idx0; NI stars0; if (!((NU8)((*cppname0).data->data[i0]) == (NU8)(39))) goto LA192; chunkend0 = (i0 - 1); idx0 = (NI)0; stars0 = (NI)0; { NIM_BOOL LOC196; NimStringDesc* LOC199; Ttype292840* typeinslot0; LOC196 = (NIM_BOOL)0; LOC196 = scancppgenericslot_534827_839829468((*cppname0).data, (&i0), (&idx0), (&stars0)); if (!LOC196) goto LA197; LOC199 = (NimStringDesc*)0; LOC199 = copyStrLast((*cppname0).data, chunkstart0, chunkend0); add_178487_2381377266(&result0, LOC199); chunkstart0 = i0; typeinslot0 = resolvestarsincpptype_534891_839829468(typ0, (NI)(idx0 + ((NI) 1)), stars0); { NIM_BOOL LOC202; TY533289 LOC206; Ropeobj178006* LOC207; LOC202 = (NIM_BOOL)0; LOC202 = (typeinslot0 == NIM_NIL); if (LOC202) goto LA203; LOC202 = ((*typeinslot0).kind == ((Ttypekind292244) 62)); LA203: ; if (!LOC202) goto LA204; memset((void*)LOC206, 0, sizeof(LOC206)); LOC207 = (Ropeobj178006*)0; LOC207 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC206, 0); add_178482_2381377266(&result0, LOC207); } goto LA200; LA204: ; { Ropeobj178006* LOC209; LOC209 = 
(Ropeobj178006*)0; LOC209 = gettypedescaux_533505_839829468(m0, typeinslot0, check0); add_178482_2381377266(&result0, LOC209); } LA200: ; } LA197: ; } goto LA190; LA192: ; { i0 += ((NI) 1); } LA190: ; } LA189: ; } { NimStringDesc* LOC215; if (!!((chunkstart0 == ((NI) 0)))) goto LA213; LOC215 = (NimStringDesc*)0; LOC215 = copyStr((*cppname0).data, chunkstart0); add_178487_2381377266(&result0, LOC215); } goto LA211; LA213: ; { result0 = HEX26_178447_2381377266(cppname0, ((NimStringDesc*) &T839829468_82)); { NI i_535516_839829468; NI HEX3Atmp_535665_839829468; NI LOC218; NI res_535668_839829468; i_535516_839829468 = (NI)0; HEX3Atmp_535665_839829468 = (NI)0; LOC218 = (NI)0; LOC218 = len_295339_850551059(typ0); HEX3Atmp_535665_839829468 = (NI)(LOC218 - ((NI) 2)); res_535668_839829468 = ((NI) 1); { while (1) { Ropeobj178006* LOC225; if (!(res_535668_839829468 <= HEX3Atmp_535665_839829468)) goto LA220; i_535516_839829468 = res_535668_839829468; { if (!(((NI) 1) < i_535516_839829468)) goto LA223; add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_83)); } LA223: ; LOC225 = (Ropeobj178006*)0; LOC225 = gettypedescaux_533505_839829468(m0, (*typ0).sons->data[i_535516_839829468], check0); add_178482_2381377266(&result0, LOC225); res_535668_839829468 += ((NI) 1); } LA220: ; } } add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_84)); } LA211: ; LOC226 = (Ropeobj178006*)0; LOC226 = getrecorddesc_534643_839829468(m0, t_534942_839829468, result0, check0); } goto LA182; LA186: ; { Tidobj199004* LOC241; TNimObject* LOC242; Ropeobj178006* recdesc0; result0 = cachegettype_533593_839829468((*m0).forwtypecache, t_534942_839829468); { Tidobj199004* LOC239; TNimObject* LOC240; if (!(result0 == NIM_NIL)) goto LA230; result0 = gettypename_533313_839829468(t_534942_839829468); { NIM_BOOL LOC234; NimStringDesc* LOC237; TY532811 LOC238; LOC234 = (NIM_BOOL)0; LOC234 = isimportedtype_533451_839829468(t_534942_839829468); if (!!(LOC234)) goto LA235; LOC237 = (NimStringDesc*)0; 
LOC237 = getforwardstructformat_534015_839829468(m0); memset((void*)LOC238, 0, sizeof(LOC238)); LOC238[0] = structorunion_534001_839829468(t_534942_839829468); LOC238[1] = result0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC237, LOC238, 2); } LA235: ; LOC239 = (Tidobj199004*)0; LOC239 = &t_534942_839829468->Sup; LOC240 = (TNimObject*)0; LOC240 = &result0->Sup; idtableput_299094_2984716966((&(*m0).forwtypecache), LOC239, LOC240); } LA230: ; LOC241 = (Tidobj199004*)0; LOC241 = &t_534942_839829468->Sup; LOC242 = (TNimObject*)0; LOC242 = &result0->Sup; idtableput_299094_2984716966((&(*m0).typecache), LOC241, LOC242); { if (!!(((*t_534942_839829468).kind == ((Ttypekind292244) 18)))) goto LA245; recdesc0 = getrecorddesc_534643_839829468(m0, t_534942_839829468, result0, check0); } goto LA243; LA245: ; { recdesc0 = gettupledesc_534777_839829468(m0, t_534942_839829468, result0, check0); } LA243: ; { NIM_BOOL LOC250; LOC250 = (NIM_BOOL)0; LOC250 = isimportedtype_533451_839829468(t_534942_839829468); if (!!(LOC250)) goto LA251; add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], recdesc0); } LA251: ; } LA182: ; } break; case ((Ttypekind292244) 19): { Ttype292840* LOC254; Ropeobj178006* LOC255; Tidobj199004* LOC256; TNimObject* LOC257; LOC254 = (Ttype292840*)0; LOC254 = lastson_295377_850551059(t_534942_839829468); LOC255 = (Ropeobj178006*)0; LOC255 = gettypename_533313_839829468(LOC254); result0 = HEX26_178447_2381377266(LOC255, ((NimStringDesc*) &T839829468_105)); LOC256 = (Tidobj199004*)0; LOC256 = &t_534942_839829468->Sup; LOC257 = (TNimObject*)0; LOC257 = &result0->Sup; idtableput_299094_2984716966((&(*m0).typecache), LOC256, LOC257); { NIM_BOOL LOC260; NI s0; NI64 LOC263; LOC260 = (NIM_BOOL)0; LOC260 = isimportedtype_533451_839829468(t_534942_839829468); if (!!(LOC260)) goto LA261; LOC263 = (NI64)0; LOC263 = getsize_320135_3876443242(t_534942_839829468); s0 = ((NI) (LOC263)); switch (s0) { case ((NI) 1): case ((NI) 2): case ((NI) 4): 
case ((NI) 8): { TY532811 LOC265; memset((void*)LOC265, 0, sizeof(LOC265)); LOC265[0] = result0; LOC265[1] = rope_178401_2381377266(((NI64) ((NI)(s0 * ((NI) 8))))); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_106), LOC265, 2); } break; default: { TY532811 LOC267; NI64 LOC268; memset((void*)LOC267, 0, sizeof(LOC267)); LOC267[0] = result0; LOC268 = (NI64)0; LOC268 = getsize_320135_3876443242(t_534942_839829468); LOC267[1] = rope_178401_2381377266(LOC268); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_107), LOC267, 2); } break; } } LA261: ; } break; case ((Ttypekind292244) 11): case ((Ttypekind292244) 13): case ((Ttypekind292244) 15): case ((Ttypekind292244) 46): case ((Ttypekind292244) 47): case ((Ttypekind292244) 49): case ((Ttypekind292244) 8): { Ttype292840* LOC270; LOC270 = (Ttype292840*)0; LOC270 = lastson_295377_850551059(t_534942_839829468); result0 = gettypedescaux_533505_839829468(m0, LOC270, check0); } break; default: { NimStringDesc* LOC272; LOC272 = (NimStringDesc*)0; LOC272 = rawNewString(reprEnum((NI)(*t_534942_839829468).kind, (&NTI292244))->Sup.len + 16); appendString(LOC272, ((NimStringDesc*) &T839829468_108)); appendString(LOC272, reprEnum((NI)(*t_534942_839829468).kind, (&NTI292244))); appendChar(LOC272, 41); internalerror_196113_155036129(LOC272); result0 = NIM_NIL; } break; } excl_268841_2627731572(check0, (*t_534942_839829468).Sup.id); }BeforeRet: ; return result0; } static N_INLINE(NIM_BOOL, iscompiletimeonly_328706_3876443242)(Ttype292840* t0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = ((IL64(576460752303423744) &((NU64)1<<((NU)((*t0).kind)&63U)))!=0); return result0; } N_NIMCALL(Tstorageloc292812, paramstorageloc_534098_839829468)(Tsym292834* param0) { Tstorageloc292812 result0; result0 = (Tstorageloc292812)0; { Ttype292840* LOC3; LOC3 = (Ttype292840*)0; LOC3 = skiptypes_296099_850551059((*param0).typ, 8388864); if 
(!!(((IL64(281475110993936) &((NU64)1<<((NU)((*LOC3).kind)&63U)))!=0))) goto LA4; result0 = ((Tstorageloc292812) 2); } goto LA1; LA4: ; { result0 = ((Tstorageloc292812) 0); } LA1: ; return result0; } N_NIMCALL(NIM_BOOL, ccgintroducedptr_533611_839829468)(Tsym292834* s0) { NIM_BOOL result0; Ttype292840* pt0; { result0 = (NIM_BOOL)0; pt0 = skiptypes_296099_850551059((*s0).typ, IL64(211106232576256)); { if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 13))&31U)))!=0)) goto LA3; result0 = NIM_TRUE; goto BeforeRet; } goto LA1; LA3: ; { if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 12))&31U)))!=0)) goto LA6; result0 = NIM_FALSE; goto BeforeRet; } goto LA1; LA6: ; LA1: ; switch ((*pt0).kind) { case ((Ttypekind292244) 17): { { NIM_BOOL LOC11; NI64 LOC13; LOC11 = (NIM_BOOL)0; LOC11 = (((*s0).options &(1U<<((NU)(((Toption169009) 18))&31U)))!=0); if (LOC11) goto LA12; LOC13 = (NI64)0; LOC13 = getsize_320135_3876443242(pt0); LOC11 = (((NI64) ((NI)(floatsize_176642_4151366050 * ((NI) 2)))) < LOC13); LA12: ; if (!LOC11) goto LA14; result0 = NIM_TRUE; } goto LA9; LA14: ; { NIM_BOOL LOC17; LOC17 = (NIM_BOOL)0; LOC17 = (((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0); if (!(LOC17)) goto LA18; LOC17 = ((*pt0).sons->data[((NI) 0)] == NIM_NIL); LA18: ; if (!LOC17) goto LA19; result0 = NIM_FALSE; } goto LA9; LA19: ; { result0 = NIM_TRUE; } LA9: ; } break; case ((Ttypekind292244) 18): { NIM_BOOL LOC23; NI64 LOC24; LOC23 = (NIM_BOOL)0; LOC24 = (NI64)0; LOC24 = getsize_320135_3876443242(pt0); LOC23 = (((NI64) ((NI)(floatsize_176642_4151366050 * ((NI) 2)))) < LOC24); if (LOC23) goto LA25; LOC23 = (((*s0).options &(1U<<((NU)(((Toption169009) 18))&31U)))!=0); LA25: ; result0 = LOC23; } break; default: { result0 = NIM_FALSE; } break; } }BeforeRet: ; return result0; } N_NIMCALL(Tctypekind529007, mapreturntype_533447_839829468)(Ttype292840* typ0) { Tctypekind529007 result0; result0 = (Tctypekind529007)0; result0 = maptype_533394_839829468(typ0); return result0; } 
N_NIMCALL(void, genprocparams_534115_839829468)(Tcgen529027* m0, Ttype292840* t0, Ropeobj178006** rettype0, Ropeobj178006** params0, Intset268030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0) { unsureAsgnRef((void**) (&(*params0)), NIM_NIL); { NIM_BOOL LOC3; TY533289 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).sons->data[((NI) 0)] == NIM_NIL); if (LOC3) goto LA4; LOC3 = isinvalidreturntype_533550_839829468((*t0).sons->data[((NI) 0)]); LA4: ; if (!LOC3) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); unsureAsgnRef((void**) (&(*rettype0)), HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC7, 0)); } goto LA1; LA5: ; { unsureAsgnRef((void**) (&(*rettype0)), gettypedescaux_533505_839829468(m0, (*t0).sons->data[((NI) 0)], check0)); } LA1: ; { NI i_534152_839829468; NI HEX3Atmp_534353_839829468; NI LOC10; NI res_534356_839829468; i_534152_839829468 = (NI)0; HEX3Atmp_534353_839829468 = (NI)0; LOC10 = (NI)0; LOC10 = sonslen_295351_850551059((*t0).n); HEX3Atmp_534353_839829468 = (NI)(LOC10 - ((NI) 1)); res_534356_839829468 = ((NI) 1); { while (1) { if (!(res_534356_839829468 <= HEX3Atmp_534353_839829468)) goto LA12; i_534152_839829468 = res_534356_839829468; { Tsym292834* param0; Ropeobj178006* LOC29; Tstorageloc292812 LOC30; TY533289 LOC45; Ropeobj178006* LOC46; Ttype292840* arr0; NI j0; { if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_534152_839829468]).kind == ((Tnodekind292020) 3)))) goto LA16; internalerror_196100_155036129((*(*t0).n).info, ((NimStringDesc*) &T839829468_109)); } LA16: ; param0 = (*(*(*t0).n).kindU.S6.sons->data[i_534152_839829468]).kindU.S4.sym; { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = iscompiletimeonly_328706_3876443242((*param0).typ); if (!LOC20) goto LA21; goto LA13; } LA21: ; { TY533289 LOC27; Ropeobj178006* LOC28; if (!!(((*params0) == NIM_NIL))) goto LA25; memset((void*)LOC27, 0, sizeof(LOC27)); LOC28 = (Ropeobj178006*)0; LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC27, 0); 
add_178482_2381377266(params0, LOC28); } LA25: ; LOC29 = (Ropeobj178006*)0; LOC29 = manglename_533205_839829468(param0); LOC30 = (Tstorageloc292812)0; LOC30 = paramstorageloc_534098_839829468(param0); fillloc_532282_839829468((&(*param0).loc), ((Tlockind292808) 4), (*param0).typ, LOC29, LOC30); { NIM_BOOL LOC33; Ropeobj178006* LOC36; TY533289 LOC37; Ropeobj178006* LOC38; LOC33 = (NIM_BOOL)0; LOC33 = ccgintroducedptr_533611_839829468(param0); if (!LOC33) goto LA34; LOC36 = (Ropeobj178006*)0; LOC36 = gettypedescweak_534079_839829468(m0, (*param0).typ, check0); add_178482_2381377266(params0, LOC36); memset((void*)LOC37, 0, sizeof(LOC37)); LOC38 = (Ropeobj178006*)0; LOC38 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_53), LOC37, 0); add_178482_2381377266(params0, LOC38); (*param0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8)); (*param0).loc.s = ((Tstorageloc292812) 0); } goto LA31; LA34: ; { Ropeobj178006* LOC42; if (!weakdep0) goto LA40; LOC42 = (Ropeobj178006*)0; LOC42 = gettypedescweak_534079_839829468(m0, (*param0).typ, check0); add_178482_2381377266(params0, LOC42); } goto LA31; LA40: ; { Ropeobj178006* LOC44; LOC44 = (Ropeobj178006*)0; LOC44 = gettypedescaux_533505_839829468(m0, (*param0).typ, check0); add_178482_2381377266(params0, LOC44); } LA31: ; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (Ropeobj178006*)0; LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC45, 0); add_178482_2381377266(params0, LOC46); add_178482_2381377266(params0, (*param0).loc.r); arr0 = (*param0).typ; { if (!((*arr0).kind == ((Ttypekind292244) 23))) goto LA49; arr0 = (*arr0).sons->data[((NI) 0)]; } LA49: ; j0 = ((NI) 0); { while (1) { TY532811 LOC57; if (!((IL64(281475110928384) &((NU64)1<<((NU)((*arr0).kind)&63U)))!=0)) goto LA52; { if (!((*(*param0).typ).kind == ((Ttypekind292244) 23))) goto LA55; (*param0).loc.s = ((Tstorageloc292812) 0); } LA55: ; memset((void*)LOC57, 0, sizeof(LOC57)); LOC57[0] = (*param0).loc.r; LOC57[1] = 
rope_178401_2381377266(((NI64) (j0))); addf_179205_2381377266(params0, ((NimStringDesc*) &T839829468_112), LOC57, 2); j0 += ((NI) 1); arr0 = (*arr0).sons->data[((NI) 0)]; } LA52: ; } } LA13: ; res_534356_839829468 += ((NI) 1); } LA12: ; } } { NIM_BOOL LOC60; Ttype292840* arr0; TY533289 LOC76; LOC60 = (NIM_BOOL)0; LOC60 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL)); if (!(LOC60)) goto LA61; LOC60 = isinvalidreturntype_533550_839829468((*t0).sons->data[((NI) 0)]); LA61: ; if (!LOC60) goto LA62; arr0 = (*t0).sons->data[((NI) 0)]; { if (!!(((*params0) == NIM_NIL))) goto LA66; add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110)); } LA66: ; { Tctypekind529007 LOC70; Ropeobj178006* LOC73; LOC70 = (Tctypekind529007)0; LOC70 = mapreturntype_533447_839829468((*t0).sons->data[((NI) 0)]); if (!!((LOC70 == ((Tctypekind529007) 17)))) goto LA71; LOC73 = (Ropeobj178006*)0; LOC73 = gettypedescweak_534079_839829468(m0, arr0, check0); add_178482_2381377266(params0, LOC73); add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_53)); } goto LA68; LA71: ; { Ropeobj178006* LOC75; LOC75 = (Ropeobj178006*)0; LOC75 = gettypedescaux_533505_839829468(m0, arr0, check0); add_178482_2381377266(params0, LOC75); } LA68: ; memset((void*)LOC76, 0, sizeof(LOC76)); addf_179205_2381377266(params0, ((NimStringDesc*) &T839829468_113), LOC76, 0); } LA62: ; { NIM_BOOL LOC79; LOC79 = (NIM_BOOL)0; LOC79 = ((*t0).callconv == ((Tcallingconvention292002) 8)); if (!(LOC79)) goto LA80; LOC79 = declareenvironment0; LA80: ; if (!LOC79) goto LA81; { if (!!(((*params0) == NIM_NIL))) goto LA85; add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110)); } LA85: ; add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_114)); } LA81: ; { if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)) goto LA89; { if (!!(((*params0) == NIM_NIL))) goto LA93; add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110)); } LA93: ; add_178487_2381377266(params0, ((NimStringDesc*) 
&T839829468_115)); } LA89: ; { if (!((*params0) == NIM_NIL)) goto LA97; add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_116)); } goto LA95; LA97: ; { add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_117)); } LA95: ; unsureAsgnRef((void**) (&(*params0)), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_118), (*params0))); } N_NIMCALL(Ropeobj178006*, genprocheader_535867_839829468)(Tcgen529027* m0, Tsym292834* prc0) { Ropeobj178006* result0; Ropeobj178006* rettype0; Ropeobj178006* params0; Intset268030 check0; Ropeobj178006* LOC13; result0 = (Ropeobj178006*)0; rettype0 = (Ropeobj178006*)0; params0 = (Ropeobj178006*)0; genclinedir_532813_839829468(&result0, (*prc0).info); { if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 5))&15U)))!=0)) goto LA3; { if (!(((*m0).flags &(1U<<((NU)(((Codegenflag529025) 3))&7U)))!=0)) goto LA7; add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_22)); } goto LA5; LA7: ; { add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_23)); } LA5: ; } goto LA1; LA3: ; { if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA11; add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_24)); } goto LA1; LA11: ; LA1: ; memset((void*)(&check0), 0, sizeof(check0)); chckNil((void*)(&check0)); memset((void*)(&check0), 0, sizeof(check0)); initintset_268885_2627731572((&check0)); LOC13 = (Ropeobj178006*)0; LOC13 = manglename_533205_839829468(prc0); fillloc_532282_839829468((&(*prc0).loc), ((Tlockind292808) 7), (*prc0).typ, LOC13, ((Tstorageloc292812) 0)); genprocparams_534115_839829468(m0, (*prc0).typ, &rettype0, &params0, (&check0), NIM_TRUE, NIM_FALSE); { TY535235 LOC18; if (!(*prc0).constraint == 0) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rope_178277_2381377266(Callingconvtostr_533587_839829468[((*(*prc0).typ).callconv)- 0]); LOC18[1] = rettype0; LOC18[2] = (*prc0).loc.r; LOC18[3] = params0; addf_179205_2381377266(&result0, ((NimStringDesc*) 
&T839829468_119), LOC18, 4); } goto LA14; LA16: ; { TY535238 LOC20; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = rettype0; LOC20[1] = (*prc0).loc.r; LOC20[2] = params0; result0 = HEX25_178905_2381377266((*(*prc0).constraint).kindU.S3.strval, LOC20, 3); } LA14: ; return result0; } static N_INLINE(Tnode292802*, HEX5BHEX5D_293238_850551059)(Tnode292802* n0, NI i0) { Tnode292802* result0; result0 = (Tnode292802*)0; result0 = (*n0).kindU.S6.sons->data[i0]; return result0; } N_NIMCALL(Tnode292802*, easyresultasgn_560191_839829468)(Tnode292802* n0) { Tnode292802* result0; { result0 = (Tnode292802*)0; switch ((*n0).kind) { case ((Tnodekind292020) 115): case ((Tnodekind292020) 126): { NI i0; i0 = ((NI) 0); { while (1) { NIM_BOOL LOC4; NI LOC5; Tnode292802* LOC7; LOC4 = (NIM_BOOL)0; LOC5 = (NI)0; LOC5 = len_293081_850551059(n0); LOC4 = (i0 < LOC5); if (!(LOC4)) goto LA6; LOC7 = (Tnode292802*)0; LOC7 = HEX5BHEX5D_293238_850551059(n0, i0); LOC4 = ((*LOC7).kind == ((Tnodekind292020) 1) || (*LOC7).kind >= ((Tnodekind292020) 79) && (*LOC7).kind <= ((Tnodekind292020) 81) || (*LOC7).kind == ((Tnodekind292020) 84) || (*LOC7).kind == ((Tnodekind292020) 98) || (*LOC7).kind == ((Tnodekind292020) 101) || (*LOC7).kind == ((Tnodekind292020) 125)); LA6: ; if (!LOC4) goto LA3; i0 += ((NI) 1); } LA3: ; } { NI LOC10; Tnode292802* LOC13; LOC10 = (NI)0; LOC10 = len_293081_850551059(n0); if (!(i0 < LOC10)) goto LA11; LOC13 = (Tnode292802*)0; LOC13 = HEX5BHEX5D_293238_850551059(n0, i0); result0 = easyresultasgn_560191_839829468(LOC13); } LA11: ; } break; case ((Tnodekind292020) 73): case ((Tnodekind292020) 74): { { NIM_BOOL LOC17; Tnode292802* LOC18; Tnode292802* LOC20; LOC17 = (NIM_BOOL)0; LOC18 = (Tnode292802*)0; LOC18 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0)); LOC17 = ((*LOC18).kind == ((Tnodekind292020) 3)); if (!(LOC17)) goto LA19; LOC20 = (Tnode292802*)0; LOC20 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0)); LOC17 = (((Tsymkind292435) 11) == (*(*LOC20).kindU.S4.sym).kind); LA19: ; if 
(!LOC17) goto LA21; (*n0).flags |= ((NU16)1)<<((((Tnodeflag292427) 14))%(sizeof(NU16)*8)); result0 = HEX5BHEX5D_293238_850551059(n0, ((NI) 1)); goto BeforeRet; } LA21: ; } break; case ((Tnodekind292020) 109): { { NI LOC26; Tnode292802* LOC29; LOC26 = (NI)0; LOC26 = len_293081_850551059(n0); if (!(((NI) 0) < LOC26)) goto LA27; LOC29 = (Tnode292802*)0; LOC29 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0)); result0 = easyresultasgn_560191_839829468(LOC29); { if (!!((result0 == NIM_NIL))) goto LA32; (*n0).flags |= ((NU16)1)<<((((Tnodeflag292427) 14))%(sizeof(NU16)*8)); } LA32: ; } LA27: ; } break; default: { } break; } }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj178006*, gettypedesc_535673_839829468)(Tcgen529027* m0, Ttype292840* typ0) { Ropeobj178006* result0; Intset268030 check0; result0 = (Ropeobj178006*)0; memset((void*)(&check0), 0, sizeof(check0)); chckNil((void*)(&check0)); memset((void*)(&check0), 0, sizeof(check0)); initintset_268885_2627731572((&check0)); result0 = gettypedescaux_533505_839829468(m0, typ0, (&check0)); return result0; } N_NIMCALL(Ropeobj178006*, localvardecl_538532_839829468)(Tcproc529021* p0, Tsym292834* s0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { Ropeobj178006* LOC5; if (!((*s0).loc.k == ((Tlockind292808) 0))) goto LA3; LOC5 = (Ropeobj178006*)0; LOC5 = manglename_533205_839829468(s0); fillloc_532282_839829468((&(*s0).loc), ((Tlockind292808) 2), (*s0).typ, LOC5, ((Tstorageloc292812) 2)); { if (!((*s0).kind == ((Tsymkind292435) 9))) goto LA8; (*s0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 2))%(sizeof(NU16)*8)); } LA8: ; } LA3: ; result0 = gettypedesc_535673_839829468((*p0).module, (*s0).loc.t); { if (!(*s0).constraint == 0) goto LA12; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA16; add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_121)); } LA16: ; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA20; add_178487_2381377266(&result0, ((NimStringDesc*) 
&T839829468_122)); } LA20: ; add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_111)); add_178482_2381377266(&result0, (*s0).loc.r); } goto LA10; LA12: ; { TY532811 LOC23; memset((void*)LOC23, 0, sizeof(LOC23)); LOC23[0] = result0; LOC23[1] = (*s0).loc.r; result0 = HEX25_178905_2381377266((*(*s0).constraint).kindU.S3.strval, LOC23, 2); } LA10: ; return result0; } N_NIMCALL(void, initloc_532273_839829468)(Tloc292816* result0, Tlockind292808 k0, Ttype292840* typ0, Tstorageloc292812 s0) { (*result0).k = k0; (*result0).s = s0; unsureAsgnRef((void**) (&(*result0).t), typ0); unsureAsgnRef((void**) (&(*result0).r), NIM_NIL); (*result0).flags = 0; } N_NIMCALL(void, initlocexprsingleuse_539289_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0) { initloc_532273_839829468(result0, ((Tlockind292808) 0), (*e0).typ, ((Tstorageloc292812) 0)); (*result0).flags |= ((NU16)1)<<((((Tlocflag292810) 8))%(sizeof(NU16)*8)); expr_539248_839829468(p0, e0, result0); } static N_INLINE(Ropeobj178006**, s_529179_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0) { Ropeobj178006** result0; result0 = (Ropeobj178006**)0; result0 = &(*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].sections[(s0)- 0]; return result0; } N_NIMCALL(Ropeobj178006*, indentline_532656_839829468)(Tcproc529021* p0, Ropeobj178006* r0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = r0; { NI i_532680_839829468; NI HEX3Atmp_532683_839829468; NI res_532686_839829468; i_532680_839829468 = (NI)0; HEX3Atmp_532683_839829468 = (NI)0; HEX3Atmp_532683_839829468 = (NI)(((*p0).blocks ? 
(*p0).blocks->Sup.len : 0) - ((NI) 1)); res_532686_839829468 = ((NI) 0); { while (1) { if (!(res_532686_839829468 <= HEX3Atmp_532683_839829468)) goto LA3; i_532680_839829468 = res_532686_839829468; prepend_178893_2381377266(&result0, indent_532655_839829468); res_532686_839829468 += ((NI) 1); } LA3: ; } } return result0; } N_NIMCALL(void, linefmt_532714_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) { Ropeobj178006** LOC1; Ropeobj178006* LOC2; Ropeobj178006* LOC3; LOC1 = (Ropeobj178006**)0; LOC1 = s_529179_3723162438(p0, s0); LOC2 = (Ropeobj178006*)0; LOC2 = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0); LOC3 = (Ropeobj178006*)0; LOC3 = indentline_532656_839829468(p0, LOC2); add_178482_2381377266(LOC1, LOC3); } N_NIMCALL(Ropeobj178006*, rdloc_538188_839829468)(Tloc292816* a0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = (*a0).r; { TY178507 LOC5; if (!(((*a0).flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = result0; result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC5, 1); } LA3: ; return result0; } N_NIMCALL(void, line_532690_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, Ropeobj178006* r0) { Ropeobj178006** LOC1; Ropeobj178006* LOC2; LOC1 = (Ropeobj178006**)0; LOC1 = s_529179_3723162438(p0, s0); LOC2 = (Ropeobj178006*)0; LOC2 = indentline_532656_839829468(p0, r0); add_178482_2381377266(LOC1, LOC2); } N_NIMCALL(void, linef_532700_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) { Ropeobj178006** LOC1; Ropeobj178006* LOC2; Ropeobj178006* LOC3; LOC1 = (Ropeobj178006**)0; LOC1 = s_529179_3723162438(p0, s0); LOC2 = (Ropeobj178006*)0; LOC2 = HEX25_178905_2381377266(frmt0, args0, args0Len0); LOC3 = (Ropeobj178006*)0; LOC3 = indentline_532656_839829468(p0, LOC2); add_178482_2381377266(LOC1, LOC3); } N_NIMCALL(void, 
gentypeinfoauxbase_535960_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0, Ropeobj178006* base0) { NI nimtypekind0; Ropeobj178006* size0; TY535235 LOC17; NI flags0; Ropeobj178006* LOC33; TY532811 LOC34; NimStringDesc* LOC35; nimtypekind0 = (NI)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = isobjlackingtypefield_533515_839829468(typ0); if (!LOC3) goto LA4; nimtypekind0 = ((NI) 18); } goto LA1; LA4: ; { nimtypekind0 = ((NI) ((*typ0).kind)); } LA1: ; size0 = (Ropeobj178006*)0; { if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)) goto LA9; size0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_133)); } goto LA7; LA9: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC12) goto LA13; LOC12 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA13: ; if (!LOC12) goto LA14; size0 = gettypedesc_535673_839829468(m0, origtype0); } goto LA7; LA14: ; { size0 = gettypedesc_535673_839829468(m0, typ0); } LA7: ; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = name0; LOC17[1] = size0; LOC17[2] = rope_178401_2381377266(((NI64) (nimtypekind0))); LOC17[3] = base0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_134), LOC17, 4); flags0 = ((NI) 0); { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = containsgarbagecollectedref_320117_3876443242(typ0); if (!!(LOC20)) goto LA21; flags0 = (NI)(flags0 | ((NI) 1)); } LA21: ; { NIM_BOOL LOC25; LOC25 = (NIM_BOOL)0; LOC25 = canformacycle_320123_3876443242(typ0); if (!!(LOC25)) goto LA26; flags0 = (NI)(flags0 | ((NI) 2)); } LA26: ; { TY532811 LOC32; if (!!((flags0 == ((NI) 0)))) goto LA30; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = name0; LOC32[1] = rope_178401_2381377266(((NI64) (flags0))); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_135), LOC32, 2); } LA30: ; LOC33 = (Ropeobj178006*)0; LOC33 = 
cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_129)); memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = name0; LOC35 = (NimStringDesc*)0; LOC35 = typetostring_320017_3876443242(typ0, ((Tprefereddesc320011) 0)); LOC34[1] = rope_178277_2381377266(LOC35); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_136), LOC34, 2); } N_NIMCALL(Ropeobj178006*, getnimnode_535945_839829468)(Tcgen529027* m0) { Ropeobj178006* result0; TY532811 LOC1; result0 = (Ropeobj178006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = (*m0).typenodesname; LOC1[1] = rope_178401_2381377266(((NI64) ((*m0).typenodes))); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_138), LOC1, 2); (*m0).typenodes += ((NI) 1); return result0; } N_NIMCALL(void, gentupleinfo_536551_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) { Ropeobj178006* LOC1; Ropeobj178006* expr0; NI length0; TY532811 LOC15; LOC1 = (Ropeobj178006*)0; LOC1 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18)); gentypeinfoauxbase_535960_839829468(m0, typ0, typ0, name0, LOC1); expr0 = getnimnode_535945_839829468(m0); length0 = sonslen_295327_850551059(typ0); { Ropeobj178006* tmp0; TY532811 LOC6; TY535238 LOC12; if (!(((NI) 0) < length0)) goto LA4; tmp0 = gettempname_533598_839829468(m0); memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = tmp0; LOC6[1] = rope_178401_2381377266(((NI64) (length0))); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC6, 2); { NI i_536573_839829468; NI HEX3Atmp_536592_839829468; NI res_536595_839829468; i_536573_839829468 = (NI)0; HEX3Atmp_536592_839829468 = (NI)0; HEX3Atmp_536592_839829468 = (NI)(length0 - ((NI) 1)); res_536595_839829468 = ((NI) 0); { while (1) { Ttype292840* a0; Ropeobj178006* tmp20; TY535238 LOC10; TY535235 LOC11; if (!(res_536595_839829468 <= HEX3Atmp_536592_839829468)) goto LA9; i_536573_839829468 = res_536595_839829468; a0 = 
(*typ0).sons->data[i_536573_839829468]; tmp20 = getnimnode_535945_839829468(m0); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = tmp0; LOC10[1] = rope_178401_2381377266(((NI64) (i_536573_839829468))); LOC10[2] = tmp20; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC10, 3); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = tmp20; LOC11[1] = gettypedesc_535673_839829468(m0, typ0); LOC11[2] = rope_178401_2381377266(((NI64) (i_536573_839829468))); LOC11[3] = gentypeinfo_535941_839829468(m0, a0); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_141), LOC11, 4); res_536595_839829468 += ((NI) 1); } LA9: ; } } memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = expr0; LOC12[1] = rope_178401_2381377266(((NI64) (length0))); LOC12[2] = tmp0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC12, 3); } goto LA2; LA4: ; { TY532811 LOC14; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = expr0; LOC14[1] = rope_178401_2381377266(((NI64) (length0))); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC14, 2); } LA2: ; memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = name0; LOC15[1] = expr0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC15, 2); } N_NIMCALL(Ttype292840*, fakeclosuretype_537010_839829468)(Tsym292834* owner0) { Ttype292840* result0; Ttype292840* LOC1; Ttype292840* r0; Ttype292840* LOC2; result0 = (Ttype292840*)0; result0 = newtype_295107_850551059(((Ttypekind292244) 18), owner0); LOC1 = (Ttype292840*)0; LOC1 = newtype_295107_850551059(((Ttypekind292244) 26), owner0); rawaddson_296394_850551059(result0, LOC1); r0 = newtype_295107_850551059(((Ttypekind292244) 22), owner0); LOC2 = (Ttype292840*)0; LOC2 = newtype_295107_850551059(((Ttypekind292244) 18), owner0); rawaddson_296394_850551059(r0, LOC2); 
rawaddson_296394_850551059(result0, r0); return result0; } N_NIMCALL(void, gentypeinfoaux_536027_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0) { Ropeobj178006* base0; base0 = (Ropeobj178006*)0; { NIM_BOOL LOC3; NI LOC4; Ttype292840* x0; LOC3 = (NIM_BOOL)0; LOC4 = (NI)0; LOC4 = sonslen_295327_850551059(typ0); LOC3 = (((NI) 0) < LOC4); if (!(LOC3)) goto LA5; LOC3 = !(((*typ0).sons->data[((NI) 0)] == NIM_NIL)); LA5: ; if (!LOC3) goto LA6; x0 = (*typ0).sons->data[((NI) 0)]; { if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA10; x0 = skiptypes_296099_850551059(x0, IL64(211106247215360)); } LA10: ; base0 = gentypeinfo_535941_839829468(m0, x0); } goto LA1; LA6: ; { base0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18)); } LA1: ; gentypeinfoauxbase_535960_839829468(m0, typ0, origtype0, name0, base0); } static N_INLINE(NIM_BOOL, iscomplexvaluetype_538317_839829468)(Ttype292840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC3; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = ((983056 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0); if (LOC1) goto LA2; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).kind == ((Ttypekind292244) 25)); if (!(LOC3)) goto LA4; LOC3 = ((*t0).callconv == ((Tcallingconvention292002) 8)); LA4: ; LOC1 = LOC3; LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, usestringh_532345_839829468)(Tcgen529027* m0) { { NIM_BOOL LOC5; if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 4))&7U)))!=0))) goto LA3; (*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 4))%(sizeof(NU8)*8)); LOC5 = (NIM_BOOL)0; LOC5 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_151)); } LA3: ; } N_NIMCALL(Ropeobj178006*, addrloc_538204_839829468)(Tloc292816* a0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = (*a0).r; { NIM_BOOL LOC3; Tctypekind529007 LOC5; Ropeobj178006* LOC8; LOC3 = (NIM_BOOL)0; LOC3 = !((((*a0).flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0)); if (!(LOC3)) goto 
LA4; LOC5 = (Tctypekind529007)0; LOC5 = maptype_533394_839829468((*a0).t); LOC3 = !((LOC5 == ((Tctypekind529007) 17))); LA4: ; if (!LOC3) goto LA6; LOC8 = (Ropeobj178006*)0; LOC8 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_128), result0); result0 = HEX26_178447_2381377266(LOC8, ((NimStringDesc*) &T839829468_117)); } LA6: ; return result0; } N_NIMCALL(void, genobjectinit_538242_839829468)(Tcproc529021* p0, Tcprocsection529011 section0, Ttype292840* t0, Tloc292816* a0, NIM_BOOL takeaddr0) { Ttypefieldresult320145 LOC1; LOC1 = (Ttypefieldresult320145)0; LOC1 = analyseobjectwithtypefield_320149_3876443242(t0); switch (LOC1) { case ((Ttypefieldresult320145) 0): { } break; case ((Ttypefieldresult320145) 1): { Ropeobj178006* r0; Ttype292840* s0; TY532811 LOC19; r0 = rdloc_538188_839829468(a0); { TY178507 LOC8; if (!!(takeaddr0)) goto LA6; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = r0; r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC8, 1); } LA6: ; s0 = skiptypes_296099_850551059(t0, IL64(211106232576256)); { NIM_BOOL LOC11; LOC11 = (NIM_BOOL)0; LOC11 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC11) goto LA12; LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA12: ; if (!!(LOC11)) goto LA13; { while (1) { NIM_BOOL LOC17; LOC17 = (NIM_BOOL)0; LOC17 = ((*s0).kind == ((Ttypekind292244) 17)); if (!(LOC17)) goto LA18; LOC17 = !(((*s0).sons->data[((NI) 0)] == NIM_NIL)); LA18: ; if (!LOC17) goto LA16; add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); s0 = skiptypes_296099_850551059((*s0).sons->data[((NI) 0)], IL64(211106247215360)); } LA16: ; } } LA13: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = r0; LOC19[1] = gentypeinfo_535941_839829468((*p0).module, t0); linefmt_532714_839829468(p0, section0, ((NimStringDesc*) &T839829468_154), LOC19, 2); } break; case ((Ttypefieldresult320145) 2): { Ropeobj178006* r0; TY532811 LOC26; { if (!takeaddr0) goto LA23; r0 = 
addrloc_538204_839829468(a0); } goto LA21; LA23: ; { r0 = rdloc_538188_839829468(a0); } LA21: ; memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = r0; LOC26[1] = gentypeinfo_535941_839829468((*p0).module, t0); linefmt_532714_839829468(p0, section0, ((NimStringDesc*) &T839829468_155), LOC26, 2); } break; } } N_NIMCALL(void, constructloc_538388_839829468)(Tcproc529021* p0, Tloc292816* loc0, NIM_BOOL istemp0) { Ttype292840* typ0; typ0 = skiptypes_296099_850551059((*loc0).t, IL64(211106233624832)); { NIM_BOOL LOC3; TY532811 LOC6; LOC3 = (NIM_BOOL)0; LOC3 = iscomplexvaluetype_538317_839829468(typ0); if (!!(LOC3)) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rdloc_538188_839829468(loc0); LOC6[1] = gettypedesc_535673_839829468((*p0).module, typ0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_150), LOC6, 2); } goto LA1; LA4: ; { { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; LOC10 = !(istemp0); if (LOC10) goto LA11; LOC10 = containsgarbagecollectedref_320117_3876443242((*loc0).t); LA11: ; if (!LOC10) goto LA12; { NIM_BOOL LOC16; TY532811 LOC19; LOC16 = (NIM_BOOL)0; LOC16 = isimportedcpptype_533478_839829468(typ0); if (!!(LOC16)) goto LA17; usestringh_532345_839829468((*p0).module); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = addrloc_538204_839829468(loc0); LOC19[1] = rdloc_538188_839829468(loc0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_152), LOC19, 2); } LA17: ; } LA12: ; genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, loc0, NIM_TRUE); } LA1: ; } N_NIMCALL(void, gettemp_537032_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* result0, NIM_BOOL needsinit0) { Ropeobj178006* LOC1; TY532811 LOC2; (*p0).labels += ((NI) 1); LOC1 = (Ropeobj178006*)0; LOC1 = rope_178401_2381377266(((NI64) ((*p0).labels))); unsureAsgnRef((void**) (&(*result0).r), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_149), LOC1)); memset((void*)LOC2, 0, sizeof(LOC2)); 
LOC2[0] = gettypedesc_535673_839829468((*p0).module, t0); LOC2[1] = (*result0).r; linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_54), LOC2, 2); (*result0).k = ((Tlockind292808) 1); unsureAsgnRef((void**) (&(*result0).t), t0); (*result0).s = ((Tstorageloc292812) 2); (*result0).flags = 0; constructloc_538388_839829468(p0, (&(*result0)), !(needsinit0)); } static N_INLINE(Ropeobj178006*, parentobj_537257_839829468)(Ropeobj178006* accessor0, Tcgen529027* m0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { NIM_BOOL LOC3; TY178507 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA4: ; if (!!(LOC3)) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = accessor0; result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_161), LOC7, 1); } goto LA1; LA5: ; { result0 = accessor0; } LA1: ; return result0; } N_NIMCALL(Ropeobj178006*, intliteral_539270_839829468)(NI64 i0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (IL64(-2147483648) < i0); if (!(LOC3)) goto LA4; LOC3 = (i0 <= IL64(2147483647)); LA4: ; if (!LOC3) goto LA5; result0 = rope_178401_2381377266(i0); } goto LA1; LA5: ; { TY533289 LOC10; if (!(i0 == IL64(-2147483648))) goto LA8; memset((void*)LOC10, 0, sizeof(LOC10)); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_166), LOC10, 0); } goto LA1; LA8: ; { TY178507 LOC14; if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rope_178401_2381377266(i0); result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC14, 1); } goto LA1; LA12: ; { TY533289 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_168), LOC16, 0); } LA1: ; return result0; } N_NIMCALL(Ropeobj178006*, 
int64literal_549430_839829468)(NI64 i0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { TY178507 LOC5; if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_178401_2381377266(i0); result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC5, 1); } goto LA1; LA3: ; { TY533289 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_168), LOC7, 0); } LA1: ; return result0; } N_NIMCALL(Ropeobj178006*, uint64literal_549442_839829468)(NU64 i0) { Ropeobj178006* result0; NimStringDesc* LOC1; NimStringDesc* LOC2; result0 = (Ropeobj178006*)0; LOC1 = (NimStringDesc*)0; LOC2 = (NimStringDesc*)0; LOC2 = HEX24_8401_1689653243(i0); LOC1 = rawNewString(LOC2->Sup.len + 3); appendString(LOC1, LOC2); appendString(LOC1, ((NimStringDesc*) &T839829468_171)); result0 = rope_178277_2381377266(LOC1); return result0; } N_NIMCALL(Ropeobj178006*, getstrlit_549468_839829468)(Tcgen529027* m0, NimStringDesc* s0) { Ropeobj178006* result0; Ropeobj178006* LOC1; TY535238 LOC2; result0 = (Ropeobj178006*)0; LOC1 = (Ropeobj178006*)0; LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_79)); result0 = gettempname_533598_839829468(m0); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = result0; LOC2[1] = makecstring_191638_155036129(s0); LOC2[2] = rope_178401_2381377266(((NI64) ((s0 ? s0->Sup.len : 0)))); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_177), LOC2, 3); return result0; } N_NIMCALL(Ropeobj178006*, genliteral_549476_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* ty0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { if (!(ty0 == NIM_NIL)) goto LA3; internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_165)); } LA3: ; switch ((*n0).kind) { case ((Tnodekind292020) 5) ... 
((Tnodekind292020) 15): { Ttype292840* LOC6; LOC6 = (Ttype292840*)0; LOC6 = skiptypes_296099_850551059(ty0, IL64(211106242013440)); switch ((*LOC6).kind) { case ((Ttypekind292244) 2): case ((Ttypekind292244) 5): { result0 = intliteral_539270_839829468((*n0).kindU.S1.intval); } break; case ((Ttypekind292244) 1): { { TY533289 LOC13; if (!!(((*n0).kindU.S1.intval == IL64(0)))) goto LA11; memset((void*)LOC13, 0, sizeof(LOC13)); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_169), LOC13, 0); } goto LA9; LA11: ; { TY533289 LOC15; memset((void*)LOC15, 0, sizeof(LOC15)); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_170), LOC15, 0); } LA9: ; } break; case ((Ttypekind292244) 35): { result0 = int64literal_549430_839829468((*n0).kindU.S1.intval); } break; case ((Ttypekind292244) 44): { result0 = uint64literal_549442_839829468(((NU64) ((*n0).kindU.S1.intval))); } break; default: { TY532811 LOC19; Ttype292840* LOC20; memset((void*)LOC19, 0, sizeof(LOC19)); LOC20 = (Ttype292840*)0; LOC20 = skiptypes_296099_850551059(ty0, IL64(211106242013440)); LOC19[0] = gettypedesc_535673_839829468((*p0).module, LOC20); LOC19[1] = intliteral_539270_839829468((*n0).kindU.S1.intval); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_172), LOC19, 2); } break; } } break; case ((Tnodekind292020) 23): { Ttype292840* t0; t0 = skiptypes_296099_850551059(ty0, IL64(211106242013440)); { NIM_BOOL LOC24; NI id0; Ropeobj178006* LOC28; LOC24 = (NIM_BOOL)0; LOC24 = ((*t0).kind == ((Ttypekind292244) 25)); if (!(LOC24)) goto LA25; LOC24 = ((*t0).callconv == ((Tcallingconvention292002) 8)); LA25: ; if (!LOC24) goto LA26; id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC28 = (Ropeobj178006*)0; LOC28 = rope_178401_2381377266(((NI64) (id0))); result0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC28); { TY532811 LOC33; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA31; 
(*(*p0).module).labels += ((NI) 1); memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = gettypedesc_535673_839829468((*p0).module, t0); LOC33[1] = result0; addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_173), LOC33, 2); } LA31: ; } goto LA22; LA26: ; { result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_174)); } LA22: ; } break; case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22): { { TY533289 LOC40; if (!(*n0).kindU.S3.strval == 0) goto LA38; memset((void*)LOC40, 0, sizeof(LOC40)); result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_175), LOC40, 0); } goto LA36; LA38: ; { Ttype292840* LOC42; NI id0; LOC42 = (Ttype292840*)0; LOC42 = skiptypes_296099_850551059(ty0, IL64(211106242013440)); if (!((*LOC42).kind == ((Ttypekind292244) 28))) goto LA43; id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); { TY178507 LOC49; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA47; memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = getstrlit_549468_839829468((*p0).module, (*n0).kindU.S3.strval); result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_176), LOC49, 1); } goto LA45; LA47: ; { TY532811 LOC51; memset((void*)LOC51, 0, sizeof(LOC51)); LOC51[0] = (*(*p0).module).tmpbase; LOC51[1] = rope_178401_2381377266(((NI64) (id0))); result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_178), LOC51, 2); } LA45: ; } goto LA36; LA43: ; { result0 = makecstring_191638_155036129((*n0).kindU.S3.strval); } LA36: ; } break; case ((Tnodekind292020) 16) ... 
((Tnodekind292020) 18): { NimStringDesc* LOC54; LOC54 = (NimStringDesc*)0; LOC54 = tostrmaxprecision_298007_3471544153((*n0).kindU.S2.floatval); result0 = rope_178277_2381377266(LOC54); } break; default: { NimStringDesc* LOC56; LOC56 = (NimStringDesc*)0; LOC56 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI292020))->Sup.len + 12); appendString(LOC56, ((NimStringDesc*) &T839829468_179)); appendString(LOC56, reprEnum((NI)(*n0).kind, (&NTI292020))); appendChar(LOC56, 41); internalerror_196100_155036129((*n0).info, LOC56); result0 = NIM_NIL; } break; } return result0; } N_NIMCALL(Ropeobj178006*, genliteral_539273_839829468)(Tcproc529021* p0, Tnode292802* n0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = genliteral_549476_839829468(p0, n0, (*n0).typ); return result0; } N_NIMCALL(void, gencaserange_537028_839829468)(Tcproc529021* p0, Tnode292802* branch0) { NI length0; length0 = len_293081_850551059(branch0); { NI j_547677_839829468; NI HEX3Atmp_547718_839829468; NI res_547721_839829468; j_547677_839829468 = (NI)0; HEX3Atmp_547718_839829468 = (NI)0; HEX3Atmp_547718_839829468 = (NI)(length0 - ((NI) 2)); res_547721_839829468 = ((NI) 0); { while (1) { if (!(res_547721_839829468 <= HEX3Atmp_547718_839829468)) goto LA3; j_547677_839829468 = res_547721_839829468; { Tnode292802* LOC6; LOC6 = (Tnode292802*)0; LOC6 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468); if (!((*LOC6).kind == ((Tnodekind292020) 44))) goto LA7; { TY532811 LOC13; Tnode292802* LOC14; Tnode292802* LOC15; Tnode292802* LOC16; Tnode292802* LOC17; if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 0))&7U)))!=0)) goto LA11; memset((void*)LOC13, 0, sizeof(LOC13)); LOC14 = (Tnode292802*)0; LOC14 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468); LOC15 = (Tnode292802*)0; LOC15 = HEX5BHEX5D_293238_850551059(LOC14, ((NI) 0)); LOC13[0] = genliteral_539273_839829468(p0, LOC15); LOC16 = (Tnode292802*)0; LOC16 = 
/* Continuation of genCaseRange: range with compiler support → single 'case a ... b'
 * line; otherwise the while-loop below emits one 'case v' per value in the range. */
HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468); LOC17 = (Tnode292802*)0; LOC17 = HEX5BHEX5D_293238_850551059(LOC16, ((NI) 1)); LOC13[1] = genliteral_539273_839829468(p0, LOC17); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_164), LOC13, 2); } goto LA9; LA11: ; { Tnode292802* v0; Tnode292802* LOC19; Tnode292802* LOC20; LOC19 = (Tnode292802*)0; LOC19 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468); LOC20 = (Tnode292802*)0; LOC20 = HEX5BHEX5D_293238_850551059(LOC19, ((NI) 0)); v0 = copynode_296528_850551059(LOC20); { while (1) { Tnode292802* LOC23; Tnode292802* LOC24; TY178507 LOC25; LOC23 = (Tnode292802*)0; LOC23 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468); LOC24 = (Tnode292802*)0; LOC24 = HEX5BHEX5D_293238_850551059(LOC23, ((NI) 1)); if (!((*v0).kindU.S1.intval <= (*LOC24).kindU.S1.intval)) goto LA22; memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = genliteral_539273_839829468(p0, v0); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_180), LOC25, 1); (*v0).kindU.S1.intval += ((NI) 1); } LA22: ; } } LA9: ; } goto LA4; LA7: ; { TY178507 LOC27; Tnode292802* LOC28; memset((void*)LOC27, 0, sizeof(LOC27)); LOC28 = (Tnode292802*)0; LOC28 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468); LOC27[0] = genliteral_539273_839829468(p0, LOC28); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_180), LOC27, 1); } LA4: ; res_547721_839829468 += ((NI) 1); } LA3: ; } } }
/* genTraverseProc(c, accessor, n) — record-node variant: walks an object type's field
 * tree (TNode) and emits GC-traversal statements for every field reachable through
 * 'accessor'.  Returns early (BeforeRet) for a nil node. */
N_NIMCALL(void, gentraverseproc_537039_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Tnode292802* n0) { { { if (!(n0 == NIM_NIL)) goto LA3; goto BeforeRet; } LA3: ; switch ((*n0).kind) { case ((Tnodekind292020) 138): { { NI i_537068_839829468; NI HEX3Atmp_537239_839829468; NI LOC7; NI res_537242_839829468; i_537068_839829468 = (NI)0; HEX3Atmp_537239_839829468 = (NI)0; LOC7 = (NI)0; LOC7 = sonslen_295351_850551059(n0); HEX3Atmp_537239_839829468 = 
/* Continuation of genTraverseProc (node variant): recursion over record-list sons,
 * then the record-case (variant object) branch — emits a C switch on the discriminator
 * field, calling genCaseRange per of-branch and recursing into each branch body. */
(NI)(LOC7 - ((NI) 1)); res_537242_839829468 = ((NI) 0); { while (1) { if (!(res_537242_839829468 <= HEX3Atmp_537239_839829468)) goto LA9; i_537068_839829468 = res_537242_839829468; gentraverseproc_537039_839829468(c0, accessor0, (*n0).kindU.S6.sons->data[i_537068_839829468]); res_537242_839829468 += ((NI) 1); } LA9: ; } } } break; case ((Tnodekind292020) 139): { Tcproc529021* p0; Tsym292834* disc0; TY532811 LOC15; TY533289 LOC28; { if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)))) goto LA13; internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_162)); } LA13: ; p0 = (*c0).p; disc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = accessor0; LOC15[1] = (*disc0).loc.r; linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_163), LOC15, 2); { NI i_537098_839829468; NI HEX3Atmp_537249_839829468; NI LOC17; NI res_537252_839829468; i_537098_839829468 = (NI)0; HEX3Atmp_537249_839829468 = (NI)0; LOC17 = (NI)0; LOC17 = sonslen_295351_850551059(n0); HEX3Atmp_537249_839829468 = (NI)(LOC17 - ((NI) 1)); res_537252_839829468 = ((NI) 1); { while (1) { Tnode292802* branch0; Tnode292802* LOC26; TY533289 LOC27; if (!(res_537252_839829468 <= HEX3Atmp_537249_839829468)) goto LA19; i_537098_839829468 = res_537252_839829468; branch0 = (*n0).kindU.S6.sons->data[i_537098_839829468]; { if (!((*branch0).kind == ((Tnodekind292020) 85))) goto LA22; gencaserange_537028_839829468((*c0).p, branch0); } goto LA20; LA22: ; { TY533289 LOC25; memset((void*)LOC25, 0, sizeof(LOC25)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_181), LOC25, 0); } LA20: ; LOC26 = (Tnode292802*)0; LOC26 = lastson_295364_850551059(branch0); gentraverseproc_537039_839829468(c0, accessor0, LOC26); memset((void*)LOC27, 0, sizeof(LOC27)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_182), LOC27, 0); 
/* Continuation of genTraverseProc (node variant): closes the discriminator switch,
 * then the single-field case (kind 3) — formats 'accessor.field' via a %-format and
 * recurses into the field's type; unknown node kinds are internal errors. */
res_537252_839829468 += ((NI) 1); } LA19: ; } } memset((void*)LOC28, 0, sizeof(LOC28)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_183), LOC28, 0); } break; case ((Tnodekind292020) 3): { Tsym292834* field0; TY532811 LOC34; Ropeobj178006* LOC35; field0 = (*n0).kindU.S4.sym; { if (!((*field0).loc.t == NIM_NIL)) goto LA32; internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184)); } LA32: ; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = accessor0; LOC34[1] = (*field0).loc.r; LOC35 = (Ropeobj178006*)0; LOC35 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC34, 2); gentraverseproc_537022_839829468(c0, LOC35, (*field0).loc.t); } break; default: { internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184)); } break; } }BeforeRet: ; }
/* lineCg(p, s, frmt, args): formats 'frmt' with ropecg, indents it for the current
 * proc, and appends it to proc section 's'. */
N_NIMCALL(void, linecg_532707_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) { Ropeobj178006** LOC1; Ropeobj178006* LOC2; Ropeobj178006* LOC3; LOC1 = (Ropeobj178006**)0; LOC1 = s_529179_3723162438(p0, s0); LOC2 = (Ropeobj178006*)0; LOC2 = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0); LOC3 = (Ropeobj178006*)0; LOC3 = indentline_532656_839829468(p0, LOC2); add_178482_2381377266(LOC1, LOC3); }
/* genTraverseProc(c, accessor, typ) — type variant: dispatches on the (unique'd) type
 * kind and emits traversal code: recurse through distinct/alias types, loop over array
 * elements with a fresh temp index, walk object/tuple fields, and invoke the closure's
 * visitor format for ref-like kinds.  Nil types return early via BeforeRet. */
N_NIMCALL(void, gentraverseproc_537022_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ_537027_839829468) { Ttype292840* typ_537302_839829468; Tcproc529021* p0; { { if (!(typ_537027_839829468 == NIM_NIL)) goto LA3; goto BeforeRet; } LA3: ; typ_537302_839829468 = getuniquetype_528640_2036603609(typ_537027_839829468); p0 = (*c0).p; switch ((*typ_537302_839829468).kind) { case ((Ttypekind292244) 11): case ((Ttypekind292244) 10): case ((Ttypekind292244) 8): { Ttype292840* LOC6; LOC6 = (Ttype292840*)0; LOC6 = lastson_295377_850551059(typ_537302_839829468); gentraverseproc_537022_839829468(c0, accessor0, LOC6); } break; case ((Ttypekind292244) 4): 
/* Continuation of genTraverseProc (type variant): array case — allocates a temp loop
 * counter, emits a for-loop over lengthOrd(index type), recurses into the element
 * type with 'accessor[i]'; then the object case — walks base types and the field tree. */
case ((Ttypekind292244) 16): { NI64 arraysize0; Tloc292816 i0; Ttype292840* LOC8; TY532811 LOC9; TY532811 LOC10; Ropeobj178006* LOC11; TY533289 LOC12; arraysize0 = lengthord_320007_3876443242((*typ_537302_839829468).sons->data[((NI) 0)]); memset((void*)(&i0), 0, sizeof(i0)); LOC8 = (Ttype292840*)0; LOC8 = getsystype_338150_3937434831(((Ttypekind292244) 31)); gettemp_537032_839829468(p0, LOC8, (&i0), NIM_FALSE); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = i0.r; LOC9[1] = rope_178401_2381377266(arraysize0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_159), LOC9, 2); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = accessor0; LOC10[1] = i0.r; LOC11 = (Ropeobj178006*)0; LOC11 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC10, 2); gentraverseproc_537022_839829468(c0, LOC11, (*typ_537302_839829468).sons->data[((NI) 1)]); memset((void*)LOC12, 0, sizeof(LOC12)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC12, 0); } break; case ((Ttypekind292244) 17): { { NI i_537325_839829468; NI HEX3Atmp_537384_839829468; NI LOC15; NI res_537387_839829468; i_537325_839829468 = (NI)0; HEX3Atmp_537384_839829468 = (NI)0; LOC15 = (NI)0; LOC15 = sonslen_295327_850551059(typ_537302_839829468); HEX3Atmp_537384_839829468 = (NI)(LOC15 - ((NI) 1)); res_537387_839829468 = ((NI) 0); { while (1) { Ttype292840* x0; Ropeobj178006* LOC22; if (!(res_537387_839829468 <= HEX3Atmp_537384_839829468)) goto LA17; i_537325_839829468 = res_537387_839829468; x0 = (*typ_537302_839829468).sons->data[i_537325_839829468]; { if (!!((x0 == NIM_NIL))) goto LA20; x0 = skiptypes_296099_850551059(x0, IL64(211106247215360)); } LA20: ; LOC22 = (Ropeobj178006*)0; LOC22 = parentobj_537257_839829468(accessor0, (*(*c0).p).module); gentraverseproc_537022_839829468(c0, LOC22, x0); res_537387_839829468 += ((NI) 1); } LA17: ; } } { if (!!(((*typ_537302_839829468).n == NIM_NIL))) goto LA25; 
/* Continuation of genTraverseProc (type variant): tuple case — loops over sons and
 * recurses with 'accessor.Field<i>'; ref/ptr-like kinds (22/28/24) apply the closure's
 * visitor format to the accessor; closure type (25) with calling convention 8 visits
 * the captured environment pointer.  Other kinds need no traversal. */
gentraverseproc_537039_839829468(c0, accessor0, (*typ_537302_839829468).n); } LA25: ; } break; case ((Ttypekind292244) 18): { Ttype292840* typ0; typ0 = getuniquetype_528640_2036603609(typ_537302_839829468); { NI i_537363_839829468; NI HEX3Atmp_537392_839829468; NI LOC29; NI res_537395_839829468; i_537363_839829468 = (NI)0; HEX3Atmp_537392_839829468 = (NI)0; LOC29 = (NI)0; LOC29 = sonslen_295327_850551059(typ0); HEX3Atmp_537392_839829468 = (NI)(LOC29 - ((NI) 1)); res_537395_839829468 = ((NI) 0); { while (1) { TY532811 LOC32; Ropeobj178006* LOC33; if (!(res_537395_839829468 <= HEX3Atmp_537392_839829468)) goto LA31; i_537363_839829468 = res_537395_839829468; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = accessor0; LOC32[1] = rope_178401_2381377266(((NI64) (i_537363_839829468))); LOC33 = (Ropeobj178006*)0; LOC33 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_185), LOC32, 2); gentraverseproc_537022_839829468(c0, LOC33, (*typ0).sons->data[i_537363_839829468]); res_537395_839829468 += ((NI) 1); } LA31: ; } } } break; case ((Ttypekind292244) 22): case ((Ttypekind292244) 28): case ((Ttypekind292244) 24): { TY178507 LOC35; memset((void*)LOC35, 0, sizeof(LOC35)); LOC35[0] = accessor0; linecg_532707_839829468(p0, ((Tcprocsection529011) 2), (*c0).visitorfrmt, LOC35, 1); } break; case ((Ttypekind292244) 25): { { TY178507 LOC41; TY178507 LOC42; if (!((*typ_537302_839829468).callconv == ((Tcallingconvention292002) 8))) goto LA39; memset((void*)LOC41, 0, sizeof(LOC41)); memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = accessor0; LOC41[0] = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_186), LOC42, 1); linecg_532707_839829468(p0, ((Tcprocsection529011) 2), (*c0).visitorfrmt, LOC41, 1); } LA39: ; } break; default: { } break; } }BeforeRet: ; }
/* genTraverseProcSeq(c, accessor, typ): emits a loop over a seq's elements (length
 * expression chosen below depends on compiler command/flags) and traverses each
 * element type; uses a fresh temp as the loop counter. */
N_NIMCALL(void, gentraverseprocseq_537399_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ0) { Tcproc529021* p0; Tloc292816 i0; Ttype292840* LOC1; TY535238 LOC2; 
/* Continuation of genTraverseProcSeq: picks the length-field spelling (T..._157 vs
 * T..._158) based on gcmd/module flags — presumably the --compileToCpp / sfCompileToCpp
 * distinction; TODO confirm against the Nim compiler sources. */
NimStringDesc* LOC3; TY532811 LOC11; Ropeobj178006* LOC12; TY533289 LOC13; p0 = (*c0).p; memset((void*)(&i0), 0, sizeof(i0)); LOC1 = (Ttype292840*)0; LOC1 = getsystype_338150_3937434831(((Ttypekind292244) 31)); gettemp_537032_839829468(p0, LOC1, (&i0), NIM_FALSE); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = i0.r; LOC2[1] = accessor0; LOC3 = (NimStringDesc*)0; { NIM_BOOL LOC6; LOC6 = (NIM_BOOL)0; LOC6 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC6) goto LA7; LOC6 = (((*(*(*(*c0).p).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA7: ; if (!LOC6) goto LA8; LOC3 = copyString(((NimStringDesc*) &T839829468_157)); } goto LA4; LA8: ; { LOC3 = copyString(((NimStringDesc*) &T839829468_158)); } LA4: ; LOC2[2] = rope_178277_2381377266(LOC3); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_156), LOC2, 3); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = accessor0; LOC11[1] = i0.r; LOC12 = (Ropeobj178006*)0; LOC12 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_187), LOC11, 2); gentraverseproc_537022_839829468(c0, LOC12, (*typ0).sons->data[((NI) 0)]); memset((void*)LOC13, 0, sizeof(LOC13)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC13, 0); }
/* genTraverseProc(m, typ, reason): generates a complete GC marker/traversal C function
 * for 'typ' in module 'm' and returns its name rope.  Builds header and prototype,
 * fills the body via the seq/type traversal helpers above, assembles the function from
 * the proc's sections, registers the prototype in module section 7 and the body in
 * section 10. */
N_NIMCALL(Ropeobj178006*, gentraverseproc_537632_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttypeinforeason537016 reason0) { Ropeobj178006* result0; Ttraversalclosure537019 c0; Tcproc529021* p0; Ropeobj178006* header0; TY178507 LOC3; Ropeobj178006* t0; TY178507 LOC4; TY178507 LOC5; Ropeobj178006* generatedproc0; TY535235 LOC20; Ropeobj178006** LOC21; Ropeobj178006** LOC22; Ropeobj178006** LOC23; TY178507 LOC24; result0 = (Ropeobj178006*)0; memset((void*)(&c0), 0, sizeof(c0)); p0 = newproc_529206_3723162438(NIM_NIL, m0); result0 = gettempname_533598_839829468(m0); switch (reason0) { case ((Ttypeinforeason537016) 0): { c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_145)); } break; default: { 
/* Continuation of genTraverseProc (generator): emits the function header and local
 * declarations, then chooses the traversal entry point — seq types (kind 24) go
 * through genTraverseProcSeq; otherwise the element type decides whether the 'a'
 * accessor is dereferenced (T..._188) or used directly (T..._189). */
} break; } memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = result0; header0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_146), LOC3, 1); t0 = gettypedesc_535673_839829468(m0, typ0); memset((void*)LOC4, 0, sizeof(LOC4)); LOC4[0] = t0; linef_532700_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_147), LOC4, 1); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = t0; linef_532700_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_148), LOC5, 1); c0.p = p0; { Ropeobj178006* LOC10; if (!((*typ0).kind == ((Ttypekind292244) 24))) goto LA8; LOC10 = (Ropeobj178006*)0; LOC10 = rope_178277_2381377266(((NimStringDesc*) &T839829468_188)); gentraverseprocseq_537399_839829468((&c0), LOC10, typ0); } goto LA6; LA8: ; { { Ttype292840* LOC14; Ropeobj178006* LOC17; LOC14 = (Ttype292840*)0; LOC14 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106232576256)); if (!((65552 &((NU64)1<<((NU)((*LOC14).kind)&63U)))!=0)) goto LA15; LOC17 = (Ropeobj178006*)0; LOC17 = rope_178277_2381377266(((NimStringDesc*) &T839829468_188)); gentraverseproc_537022_839829468((&c0), LOC17, (*typ0).sons->data[((NI) 0)]); } goto LA12; LA15: ; { Ropeobj178006* LOC19; LOC19 = (Ropeobj178006*)0; LOC19 = rope_178277_2381377266(((NimStringDesc*) &T839829468_189)); gentraverseproc_537022_839829468((&c0), LOC19, (*typ0).sons->data[((NI) 0)]); } LA12: ; } LA6: ; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = header0; LOC21 = (Ropeobj178006**)0; LOC21 = s_529179_3723162438(p0, ((Tcprocsection529011) 0)); LOC20[1] = (*LOC21); LOC22 = (Ropeobj178006**)0; LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 1)); LOC20[2] = (*LOC22); LOC23 = (Ropeobj178006**)0; LOC23 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); LOC20[3] = (*LOC23); generatedproc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_190), LOC20, 4); memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = header0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], 
/* Tail of genTraverseProc (generator): prototype into section 7, body into section 10. */
((NimStringDesc*) &T839829468_191), LOC24, 1); add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0); return result0; }
/* genArrayInfo(m, typ, name): RTTI for array types — element typeinfo plus the common
 * typeinfo base record. */
N_NIMCALL(void, genarrayinfo_537005_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) { Ropeobj178006* LOC1; LOC1 = (Ropeobj178006*)0; LOC1 = gentypeinfo_535941_839829468(m0, (*typ0).sons->data[((NI) 1)]); gentypeinfoauxbase_535960_839829468(m0, typ0, typ0, name0, LOC1); }
/* genSetInfo(m, typ, name): RTTI for set types — records firstOrd(typ) into the
 * typeinfo node emitted into module section 14. */
N_NIMCALL(void, gensetinfo_536867_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) { Ropeobj178006* tmp0; TY535238 LOC1; NI64 LOC2; gentypeinfoaux_536027_839829468(m0, typ0, typ0, name0); tmp0 = getnimnode_535945_839829468(m0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = tmp0; LOC2 = (NI64)0; LOC2 = firstord_320001_3876443242(typ0); LOC1[1] = rope_178401_2381377266(LOC2); LOC1[2] = name0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_193), LOC1, 3); }
/* genEnumInfo(m, typ, name): RTTI for enum types — emits a node-pointer array, the
 * C-string name of every enum member (preferring the member's ast string when set),
 * special-case position fixups for enums with holes, and the final enum typeinfo node. */
N_NIMCALL(void, genenuminfo_536599_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) { Ropeobj178006* nodeptrs0; NI length0; TY532811 LOC1; Ropeobj178006* enumnames0; Ropeobj178006* specialcases0; NI firstnimnode0; NIM_BOOL hasholes0; Ropeobj178006* enumarray0; Ropeobj178006* counter0; TY178507 LOC24; TY535238 LOC25; TY536847 LOC26; TY535235 LOC27; gentypeinfoaux_536027_839829468(m0, typ0, typ0, name0); nodeptrs0 = gettempname_533598_839829468(m0); length0 = sonslen_295351_850551059((*typ0).n); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = nodeptrs0; LOC1[1] = rope_178401_2381377266(((NI64) (length0))); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC1, 2); enumnames0 = (Ropeobj178006*)0; specialcases0 = (Ropeobj178006*)0; firstnimnode0 = (*m0).typenodes; hasholes0 = NIM_FALSE; { NI i_536624_839829468; NI HEX3Atmp_536860_839829468; NI res_536863_839829468; i_536624_839829468 = (NI)0; HEX3Atmp_536860_839829468 = (NI)0; 
/* Continuation of genEnumInfo: per-member loop — collects the member name (field name,
 * or its ast string literal when present), joins names with ",\n" except after the
 * last, and records a special case (and hasholes=true) whenever a member's position
 * differs from its index or the type is flagged as having holes. */
HEX3Atmp_536860_839829468 = (NI)(length0 - ((NI) 1)); res_536863_839829468 = ((NI) 0); { while (1) { Tsym292834* field0; Ropeobj178006* elemnode0; if (!(res_536863_839829468 <= HEX3Atmp_536860_839829468)) goto LA4; i_536624_839829468 = res_536863_839829468; field0 = (*(*(*typ0).n).kindU.S6.sons->data[i_536624_839829468]).kindU.S4.sym; elemnode0 = getnimnode_535945_839829468(m0); { Ropeobj178006* LOC9; if (!((*field0).ast == NIM_NIL)) goto LA7; LOC9 = (Ropeobj178006*)0; LOC9 = makecstring_191638_155036129((*(*field0).name).s); add_178482_2381377266(&enumnames0, LOC9); } goto LA5; LA7: ; { Ropeobj178006* LOC11; LOC11 = (Ropeobj178006*)0; LOC11 = makecstring_191638_155036129((*(*field0).ast).kindU.S3.strval); add_178482_2381377266(&enumnames0, LOC11); } LA5: ; { NimStringDesc* LOC16; if (!(i_536624_839829468 < (NI)(length0 - ((NI) 1)))) goto LA14; LOC16 = (NimStringDesc*)0; LOC16 = rawNewString(tnl_176644_4151366050->Sup.len + 2); appendString(LOC16, ((NimStringDesc*) &T839829468_110)); appendString(LOC16, tnl_176644_4151366050); add_178487_2381377266(&enumnames0, LOC16); } LA14: ; { NIM_BOOL LOC19; TY532811 LOC23; LOC19 = (NIM_BOOL)0; LOC19 = !(((*field0).position == i_536624_839829468)); if (LOC19) goto LA20; LOC19 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 5))&31U)))!=0); LA20: ; if (!LOC19) goto LA21; memset((void*)LOC23, 0, sizeof(LOC23)); LOC23[0] = elemnode0; LOC23[1] = rope_178401_2381377266(((NI64) ((*field0).position))); addf_179205_2381377266(&specialcases0, ((NimStringDesc*) &T839829468_194), LOC23, 2); hasholes0 = NIM_TRUE; } LA21: ; res_536863_839829468 += ((NI) 1); } LA4: ; } } enumarray0 = gettempname_533598_839829468(m0); counter0 = gettempname_533598_839829468(m0); memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = counter0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_195), LOC24, 1); memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = enumarray0; LOC25[1] = rope_178401_2381377266(((NI64) 
/* Tail of genEnumInfo: emits the name array (section 12), the node-initialization loop
 * and special-case fixups (section 14), the enum typeinfo node, and — for enums with
 * holes — an extra flag-setting statement (T..._199). */
(length0))); LOC25[2] = enumnames0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_196), LOC25, 3); memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = counter0; LOC26[1] = rope_178401_2381377266(((NI64) (length0))); LOC26[2] = (*m0).typenodesname; LOC26[3] = rope_178401_2381377266(((NI64) (firstnimnode0))); LOC26[4] = enumarray0; LOC26[5] = nodeptrs0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_197), LOC26, 6); add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], specialcases0); memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = getnimnode_535945_839829468(m0); LOC27[1] = rope_178401_2381377266(((NI64) (length0))); LOC27[2] = nodeptrs0; LOC27[3] = name0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_198), LOC27, 4); { TY178507 LOC32; if (!hasholes0) goto LA30; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = name0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_199), LOC32, 1); } LA30: ; }
/* discriminatorTableName(m, objtype, d): rope name for the per-discriminator lookup
 * table of a variant object.  Walks up the inheritance chain (sons[0]) until the
 * object that actually declares field 'd' is found; internal error if that object has
 * no symbol.  Name is built from the object's unique id plus the mangled field name. */
N_NIMCALL(Ropeobj178006*, discriminatortablename_536057_839829468)(Tcgen529027* m0, Ttype292840* objtype_536060_839829468, Tsym292834* d0) { Ropeobj178006* result0; Ttype292840* objtype0; TY532811 LOC8; NimStringDesc* LOC9; result0 = (Ropeobj178006*)0; objtype0 = objtype_536060_839829468; { while (1) { Tsym292834* LOC3; LOC3 = (Tsym292834*)0; LOC3 = lookupinrecord_299119_2984716966((*objtype0).n, (*d0).name); if (!(LOC3 == NIM_NIL)) goto LA2; objtype0 = (*objtype0).sons->data[((NI) 0)]; } LA2: ; } { if (!((*objtype0).sym == NIM_NIL)) goto LA6; internalerror_196100_155036129((*d0).info, ((NimStringDesc*) &T839829468_200)); } LA6: ; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rope_178401_2381377266(((NI64) ((*objtype0).Sup.id))); LOC9 = (NimStringDesc*)0; LOC9 = mangle_528847_2036603609((*(*d0).name).s); LOC8[1] = rope_178277_2381377266(LOC9); result0 = 
/* Tail of discriminatorTableName, then genObjectFields. */
HEX25_178905_2381377266(((NimStringDesc*) &T839829468_201), LOC8, 2); return result0; }
/* genObjectFields(m, typ, n, expr): recursively emits the TNimNode field tree for an
 * object's record AST into module sections 12/14.  Record lists (kind 138) with one
 * son recurse directly; with several sons they allocate a node-pointer array and fill
 * one entry per son; empty lists emit a zero-length node. */
N_NIMCALL(void, genobjectfields_536104_839829468)(Tcgen529027* m0, Ttype292840* typ0, Tnode292802* n0, Ropeobj178006* expr0) { switch ((*n0).kind) { case ((Tnodekind292020) 138): { NI L0; L0 = sonslen_295351_850551059(n0); { if (!(L0 == ((NI) 1))) goto LA4; genobjectfields_536104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[((NI) 0)], expr0); } goto LA2; LA4: ; { Ropeobj178006* tmp0; TY532811 LOC9; TY535238 LOC14; if (!(((NI) 0) < L0)) goto LA7; tmp0 = gettempname_533598_839829468(m0); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = tmp0; LOC9[1] = rope_178401_2381377266(((NI64) (L0))); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC9, 2); { NI i_536127_839829468; NI HEX3Atmp_536482_839829468; NI res_536485_839829468; i_536127_839829468 = (NI)0; HEX3Atmp_536482_839829468 = (NI)0; HEX3Atmp_536482_839829468 = (NI)(L0 - ((NI) 1)); res_536485_839829468 = ((NI) 0); { while (1) { Ropeobj178006* tmp20; TY535238 LOC13; if (!(res_536485_839829468 <= HEX3Atmp_536482_839829468)) goto LA12; i_536127_839829468 = res_536485_839829468; tmp20 = getnimnode_535945_839829468(m0); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = tmp0; LOC13[1] = rope_178401_2381377266(((NI64) (i_536127_839829468))); LOC13[2] = tmp20; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC13, 3); genobjectfields_536104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[i_536127_839829468], tmp20); res_536485_839829468 += ((NI) 1); } LA12: ; } } memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = expr0; LOC14[1] = rope_178401_2381377266(((NI64) (L0))); LOC14[2] = tmp0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC14, 3); } goto LA2; LA7: ; { TY532811 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = expr0; LOC16[1] = rope_178401_2381377266(((NI64) 
/* Continuation of genObjectFields: record-case (kind 139) — emits the discriminator's
 * node entry (7 format args incl. the discriminator lookup table, sized lengthOrd+1),
 * then walks every of-branch; range values (kind 44) fill table slots from the
 * branch's getOrdValue bounds. */
(L0))); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC16, 2); } LA2: ; } break; case ((Tnodekind292020) 139): { Tsym292834* field0; Ropeobj178006* tmp0; NI64 L0; TY536401 LOC18; TY532811 LOC19; field0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; tmp0 = discriminatortablename_536057_839829468(m0, typ0, field0); L0 = lengthord_320007_3876443242((*field0).typ); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = expr0; LOC18[1] = gettypedesc_535673_839829468(m0, typ0); LOC18[2] = (*field0).loc.r; LOC18[3] = gentypeinfo_535941_839829468(m0, (*field0).typ); LOC18[4] = makecstring_191638_155036129((*(*field0).name).s); LOC18[5] = tmp0; LOC18[6] = rope_178401_2381377266(L0); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_202), LOC18, 7); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = tmp0; LOC19[1] = rope_178401_2381377266((NI64)(L0 + IL64(1))); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_203), LOC19, 2); { NI i_536421_839829468; NI HEX3Atmp_536501_839829468; NI LOC21; NI res_536504_839829468; i_536421_839829468 = (NI)0; HEX3Atmp_536501_839829468 = (NI)0; LOC21 = (NI)0; LOC21 = sonslen_295351_850551059(n0); HEX3Atmp_536501_839829468 = (NI)(LOC21 - ((NI) 1)); res_536504_839829468 = ((NI) 1); { while (1) { Tnode292802* b0; Ropeobj178006* tmp20; Tnode292802* LOC24; if (!(res_536504_839829468 <= HEX3Atmp_536501_839829468)) goto LA23; i_536421_839829468 = res_536504_839829468; b0 = (*n0).kindU.S6.sons->data[i_536421_839829468]; tmp20 = getnimnode_535945_839829468(m0); LOC24 = (Tnode292802*)0; LOC24 = lastson_295364_850551059(b0); genobjectfields_536104_839829468(m0, typ0, LOC24, tmp20); switch ((*b0).kind) { case ((Tnodekind292020) 85): { { NI LOC28; LOC28 = (NI)0; LOC28 = sonslen_295351_850551059(b0); if (!(LOC28 < ((NI) 2))) goto LA29; internalerror_196100_155036129((*b0).info, ((NimStringDesc*) 
/* Continuation of genObjectFields: of-branch value loop — each range value fills table
 * slots x..y with the branch node; single values fill one slot; the else branch
 * (kind 88) fills the lengthOrd slot.  Unknown branch kinds are internal errors. */
&T839829468_204)); } LA29: ; { NI j_536436_839829468; NI HEX3Atmp_536494_839829468; NI LOC32; NI res_536497_839829468; j_536436_839829468 = (NI)0; HEX3Atmp_536494_839829468 = (NI)0; LOC32 = (NI)0; LOC32 = sonslen_295351_850551059(b0); HEX3Atmp_536494_839829468 = (NI)(LOC32 - ((NI) 2)); res_536497_839829468 = ((NI) 0); { while (1) { if (!(res_536497_839829468 <= HEX3Atmp_536494_839829468)) goto LA34; j_536436_839829468 = res_536497_839829468; { NI x0; NI64 LOC39; NI y0; NI64 LOC40; if (!((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kind == ((Tnodekind292020) 44))) goto LA37; LOC39 = (NI64)0; LOC39 = getordvalue_320129_3876443242((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kindU.S6.sons->data[((NI) 0)]); x0 = ((NI) (LOC39)); LOC40 = (NI64)0; LOC40 = getordvalue_320129_3876443242((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kindU.S6.sons->data[((NI) 1)]); y0 = ((NI) (LOC40)); { while (1) { TY535238 LOC43; if (!(x0 <= y0)) goto LA42; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = tmp0; LOC43[1] = rope_178401_2381377266(((NI64) (x0))); LOC43[2] = tmp20; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC43, 3); x0 += ((NI) 1); } LA42: ; } } goto LA35; LA37: ; { TY535238 LOC45; NI64 LOC46; memset((void*)LOC45, 0, sizeof(LOC45)); LOC45[0] = tmp0; LOC46 = (NI64)0; LOC46 = getordvalue_320129_3876443242((*b0).kindU.S6.sons->data[j_536436_839829468]); LOC45[1] = rope_178401_2381377266(LOC46); LOC45[2] = tmp20; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC45, 3); } LA35: ; res_536497_839829468 += ((NI) 1); } LA34: ; } } } break; case ((Tnodekind292020) 88): { TY535238 LOC48; memset((void*)LOC48, 0, sizeof(LOC48)); LOC48[0] = tmp0; LOC48[1] = rope_178401_2381377266(L0); LOC48[2] = tmp20; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC48, 3); } break; default: { 
/* Tail of genObjectFields: single-field case (kind 3) — emitted only for fields with
 * bitsize == 0 (bitfields have no addressable RTTI entry); other node kinds error. */
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_205)); } break; } res_536504_839829468 += ((NI) 1); } LA23: ; } } } break; case ((Tnodekind292020) 3): { Tsym292834* field0; field0 = (*n0).kindU.S4.sym; { TY536475 LOC55; if (!((*field0).kindU.S4.bitsize == ((NI) 0))) goto LA53; memset((void*)LOC55, 0, sizeof(LOC55)); LOC55[0] = expr0; LOC55[1] = gettypedesc_535673_839829468(m0, typ0); LOC55[2] = (*field0).loc.r; LOC55[3] = gentypeinfo_535941_839829468(m0, (*field0).typ); LOC55[4] = makecstring_191638_155036129((*(*field0).name).s); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_206), LOC55, 5); } LA53: ; } break; default: { internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_207)); } break; } }
/* genObjectInfo(m, typ, origtype, name): RTTI for object types.  Plain objects (kind
 * 17) use gentypeinfoaux; others get an aux base with the "_18" marker string.  Field
 * tree is emitted unless the type is an imported C++ type.  Finally marks every base
 * type in the inheritance chain (sons[0] loop) with type flag 5 — presumably
 * tfEnumHasHoles/needs-typeinfo propagation; TODO confirm against the Nim sources. */
N_NIMCALL(void, genobjectinfo_536508_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0) { Ropeobj178006* tmp0; TY532811 LOC12; Ttype292840* t0; { if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA3; gentypeinfoaux_536027_839829468(m0, typ0, origtype0, name0); } goto LA1; LA3: ; { Ropeobj178006* LOC6; LOC6 = (Ropeobj178006*)0; LOC6 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18)); gentypeinfoauxbase_535960_839829468(m0, typ0, origtype0, name0, LOC6); } LA1: ; tmp0 = getnimnode_535945_839829468(m0); { NIM_BOOL LOC9; LOC9 = (NIM_BOOL)0; LOC9 = isimportedcpptype_533478_839829468(typ0); if (!!(LOC9)) goto LA10; genobjectfields_536104_839829468(m0, typ0, (*typ0).n, tmp0); } LA10: ; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = name0; LOC12[1] = tmp0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC12, 2); t0 = (*typ0).sons->data[((NI) 0)]; { while (1) { if (!!((t0 == NIM_NIL))) goto LA14; t0 = skiptypes_296099_850551059(t0, IL64(211106247215360)); (*t0).flags |= ((NU32)1)<<((((Ttypeflag292431) 5))%(sizeof(NU32)*8)); t0 = (*t0).sons->data[((NI) 0)]; } LA14: ; } } 
/* genDeepCopyProc(m, s, result): generates the proc 's' and wires its C symbol into
 * the typeinfo record named by 'result' (module section 14). */
N_NIMCALL(void, gendeepcopyproc_538066_839829468)(Tcgen529027* m0, Tsym292834* s0, Ropeobj178006* result0) { TY532811 LOC1; genproc_532951_839829468(m0, s0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = result0; LOC1[1] = (*s0).loc.r; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_208), LOC1, 2); }
/* genTypeInfo(m, t): returns a rope referencing the RTTI object for type 't',
 * generating it on first use.  Dedupes via the module's typeinfomarker set; for types
 * owned by another module it emits an extern declaration instead and delegates
 * generation to that module's Tcgen.  Skips distinct wrappers (kind 13), then
 * dispatches per type kind to the gen*Info helpers above. */
N_NIMCALL(Ropeobj178006*, gentypeinfo_535941_839829468)(Tcgen529027* m0, Ttype292840* t_535944_839829468) { Ropeobj178006* result0; Ttype292840* origtype0; Ttype292840* t0; TY178507 LOC1; Tsym292834* owner0; Ttype292840* LOC12; Ropeobj178006* LOC66; Ropeobj178006* LOC67; Ropeobj178006* LOC68; { result0 = (Ropeobj178006*)0; origtype0 = t_535944_839829468; t0 = getuniquetype_528640_2036603609(t_535944_839829468); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rope_178401_2381377266(((NI64) ((*t0).Sup.id))); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_127), LOC1, 1); { NIM_BOOL LOC4; Ropeobj178006* LOC7; Ropeobj178006* LOC8; Ropeobj178006* LOC9; LOC4 = (NIM_BOOL)0; LOC4 = containsorincl_268862_2627731572((&(*m0).typeinfomarker), (*t0).Sup.id); if (!LOC4) goto LA5; LOC7 = (Ropeobj178006*)0; LOC7 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128)); LOC8 = (Ropeobj178006*)0; LOC8 = HEX26_178418_2381377266(LOC7, result0); LOC9 = (Ropeobj178006*)0; LOC9 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117)); result0 = HEX26_178418_2381377266(LOC8, LOC9); goto BeforeRet; } LA5: ; { while (1) { if (!((*t0).kind == ((Ttypekind292244) 13))) goto LA11; t0 = lastson_295377_850551059(t0); } LA11: ; } LOC12 = (Ttype292840*)0; LOC12 = skiptypes_296099_850551059(t0, IL64(211106247256320)); owner0 = getmodule_299123_2984716966((*LOC12).owner); { Tcgen529027* LOC17; Ropeobj178006* LOC18; Ropeobj178006* LOC19; Ropeobj178006* LOC20; TY532811 LOC21; NimStringDesc* LOC22; Ropeobj178006* LOC23; Ropeobj178006* LOC24; Ropeobj178006* LOC25; if (!!((owner0 == (*m0).module))) goto LA15; LOC17 = (Tcgen529027*)0; 
/* Continuation of genTypeInfo: foreign-module path — generate the info in the owner
 * module, pull in runtime symbols (T..._129/_130) via cgsym, declare the extern RTTI
 * with the type's rendered name in section 9, wrap the result rope, and return.
 * Afterwards: the per-kind dispatch begins (void-like kinds use the "_132" marker). */
LOC17 = bmod_529201_3723162438(owner0); LOC18 = (Ropeobj178006*)0; LOC18 = gentypeinfo_535941_839829468(LOC17, t0); LOC19 = (Ropeobj178006*)0; LOC19 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_129)); LOC20 = (Ropeobj178006*)0; LOC20 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_130)); memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = result0; LOC22 = (NimStringDesc*)0; LOC22 = typetostring_320017_3876443242(t0, ((Tprefereddesc320011) 0)); LOC21[1] = rope_178277_2381377266(LOC22); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_131), LOC21, 2); LOC23 = (Ropeobj178006*)0; LOC23 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128)); LOC24 = (Ropeobj178006*)0; LOC24 = HEX26_178418_2381377266(LOC23, result0); LOC25 = (Ropeobj178006*)0; LOC25 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117)); result0 = HEX26_178418_2381377266(LOC24, LOC25); goto BeforeRet; } LA15: ; switch ((*t0).kind) { case ((Ttypekind292244) 3): case ((Ttypekind292244) 62): { result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132)); } break; case ((Ttypekind292244) 26): case ((Ttypekind292244) 1): case ((Ttypekind292244) 2): case ((Ttypekind292244) 29): case ((Ttypekind292244) 28): case ((Ttypekind292244) 31) ... 
/* Continuation of genTypeInfo dispatch: scalar kinds get a base-only info record;
 * kind 59 resolves through its lastson when instantiated (else internal error with a
 * "(kind)" message); proc types (25) are base-only unless calling convention 8
 * (closure), which gets tuple info from a synthesized fake closure type; seq/ref-like
 * kinds (24/22) additionally attach a GC marker proc when gselectedgc >= mode 4. */
((Ttypekind292244) 44): case ((Ttypekind292244) 23): { Ropeobj178006* LOC28; LOC28 = (Ropeobj178006*)0; LOC28 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132)); gentypeinfoauxbase_535960_839829468(m0, t0, t0, result0, LOC28); } break; case ((Ttypekind292244) 59): { { Ttype292840* LOC34; if (!!(((*t0).n == NIM_NIL))) goto LA32; LOC34 = (Ttype292840*)0; LOC34 = lastson_295377_850551059(t0); result0 = gentypeinfo_535941_839829468(m0, LOC34); } goto LA30; LA32: ; { NimStringDesc* LOC36; LOC36 = (NimStringDesc*)0; LOC36 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI292244))->Sup.len + 13); appendString(LOC36, ((NimStringDesc*) &T839829468_137)); appendString(LOC36, reprEnum((NI)(*t0).kind, (&NTI292244))); appendChar(LOC36, 41); internalerror_196113_155036129(LOC36); } LA30: ; } break; case ((Ttypekind292244) 25): { { Ropeobj178006* LOC42; if (!!(((*t0).callconv == ((Tcallingconvention292002) 8)))) goto LA40; LOC42 = (Ropeobj178006*)0; LOC42 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132)); gentypeinfoauxbase_535960_839829468(m0, t0, t0, result0, LOC42); } goto LA38; LA40: ; { Ttype292840* LOC44; LOC44 = (Ttype292840*)0; LOC44 = fakeclosuretype_537010_839829468((*t0).owner); gentupleinfo_536551_839829468(m0, LOC44, result0); } LA38: ; } break; case ((Ttypekind292244) 24): case ((Ttypekind292244) 22): { gentypeinfoaux_536027_839829468(m0, t0, t0, result0); { Ropeobj178006* markerproc0; TY532811 LOC50; if (!(((Tgcmode169080) 4) <= gselectedgc_169133_2607990831)) goto LA48; markerproc0 = gentraverseproc_537632_839829468(m0, t0, ((Ttypeinforeason537016) 0)); memset((void*)LOC50, 0, sizeof(LOC50)); LOC50[0] = result0; LOC50[1] = markerproc0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_192), LOC50, 2); } LA48: ; } break; case ((Ttypekind292244) 21): case ((Ttypekind292244) 20): { gentypeinfoaux_536027_839829468(m0, t0, t0, result0); } break; case ((Ttypekind292244) 4): case ((Ttypekind292244) 16): { 
genarrayinfo_537005_839829468(m0, t0, result0); } break; case ((Ttypekind292244) 19): { gensetinfo_536867_839829468(m0, t0, result0); } break; case ((Ttypekind292244) 14): { genenuminfo_536599_839829468(m0, t0, result0); } break; case ((Ttypekind292244) 17): { genobjectinfo_536508_839829468(m0, t0, origtype0, result0); } break; case ((Ttypekind292244) 18): { gentupleinfo_536551_839829468(m0, t0, result0); } break; default: { NimStringDesc* LOC58; LOC58 = (NimStringDesc*)0; LOC58 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI292244))->Sup.len + 13); appendString(LOC58, ((NimStringDesc*) &T839829468_137)); appendString(LOC58, reprEnum((NI)(*t0).kind, (&NTI292244))); appendChar(LOC58, 41); internalerror_196113_155036129(LOC58); } break; } { if (!!(((*t0).deepcopy == NIM_NIL))) goto LA61; gendeepcopyproc_538066_839829468(m0, (*t0).deepcopy, result0); } goto LA59; LA61: ; { if (!!(((*origtype0).deepcopy == NIM_NIL))) goto LA64; gendeepcopyproc_538066_839829468(m0, (*origtype0).deepcopy, result0); } goto LA59; LA64: ; LA59: ; LOC66 = (Ropeobj178006*)0; LOC66 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128)); LOC67 = (Ropeobj178006*)0; LOC67 = HEX26_178418_2381377266(LOC66, result0); LOC68 = (Ropeobj178006*)0; LOC68 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117)); result0 = HEX26_178418_2381377266(LOC67, LOC68); }BeforeRet: ; return result0; } N_NIMCALL(void, localdebuginfo_538449_839829468)(Tcproc529021* p0, Tsym292834* s0) { Ropeobj178006* a0; TY535235 LOC16; NimStringDesc* LOC17; { { if (!!(((163840 & (*p0).options) == 163840))) goto LA3; goto BeforeRet; } LA3: ; { Ttype292840* LOC7; LOC7 = (Ttype292840*)0; LOC7 = skiptypes_296099_850551059((*s0).typ, IL64(211106240964864)); if (!((IL64(281475110928384) &((NU64)1<<((NU)((*LOC7).kind)&63U)))!=0)) goto LA8; goto BeforeRet; } LA8: ; a0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), (*s0).loc.r); { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = ((*s0).kind == ((Tsymkind292435) 3)); if 
/* NOTE(review): machine-generated C from the Nim compiler's C backend (nimcache
   artifact). Hand edits will be lost on regeneration; only comments are added
   here, code bytes are untouched. This line carries the tail of
   localdebuginfo_538449 plus the functions below. */
(!(LOC12)) goto LA13; LOC12 = ccgintroducedptr_533611_839829468(s0); LA13: ; if (!LOC12) goto LA14; a0 = (*s0).loc.r; } LA14: ;
/* localdebuginfo tail: builds a 4-element format array — current frame slot
   index, the normalized symbol name as a C string literal, the access rope a0,
   and the symbol's runtime type info — and emits it via linef into proc
   section 1; then bumps maxframelen and the innermost block's framelen. */
memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rope_178401_2381377266(((NI64) ((*p0).maxframelen))); LOC17 = (NimStringDesc*)0; LOC17 = nsuNormalize((*(*s0).name).s); LOC16[1] = makecstring_191638_155036129(LOC17); LOC16[2] = a0; LOC16[3] = gentypeinfo_535941_839829468((*p0).module, (*s0).loc.t); linef_532700_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_126), LOC16, 4); (*p0).maxframelen += ((NI) 1); (*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].framelen += ((NI) 1); }BeforeRet: ; }
/* assignlocalvar: renders the C declaration for local symbol s0
   (localvardecl + terminator string + tnl newline), appends it to proc
   section 0 — presumably the declaration section, confirm against
   Tcprocsection — and then registers debug info for the local. */
N_NIMCALL(void, assignlocalvar_538614_839829468)(Tcproc529021* p0, Tsym292834* s0) { Ropeobj178006* decl0; Ropeobj178006* LOC1; Ropeobj178006* LOC2; LOC1 = (Ropeobj178006*)0; LOC1 = localvardecl_538532_839829468(p0, s0); LOC2 = (Ropeobj178006*)0; LOC2 = HEX26_178447_2381377266(LOC1, ((NimStringDesc*) &T839829468_125)); decl0 = HEX26_178447_2381377266(LOC2, tnl_176644_4151366050); line_532690_839829468(p0, ((Tcprocsection529011) 0), decl0); localdebuginfo_538449_839829468(p0, s0); }
/* initlocalvar: unless flag bit 12 is set on v0 (skip-init — TODO confirm
   which Tsymflag that is), and unless the variable gets an immediate
   assignment, default-constructs its location via constructloc. */
N_NIMCALL(void, initlocalvar_538398_839829468)(Tcproc529021* p0, Tsym292834* v0, NIM_BOOL immediateasgn0) { { if (!!((((*v0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0))) goto LA3; { if (!!(immediateasgn0)) goto LA7; constructloc_538388_839829468(p0, (&(*v0).loc), NIM_FALSE); } LA7: ; } LA3: ; }
/* fillresult (continues on the next physical line): fills the `result`
   parameter's loc with kind 4 / storage 2 and a rope built from format
   T839829468_210. */
N_NIMCALL(void, fillresult_533865_839829468)(Tsym292834* param0) { TY533289 LOC1; Ropeobj178006* LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (Ropeobj178006*)0; LOC2 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_210), LOC1, 0); fillloc_532282_839829468((&(*param0).loc), ((Tlockind292808) 4), (*param0).typ, LOC2, ((Tstorageloc292812) 2)); { NIM_BOOL LOC5; Tctypekind529007 LOC6; LOC5 = (NIM_BOOL)0; LOC6 = (Tctypekind529007)0; LOC6 = mapreturntype_533447_839829468((*param0).typ); LOC5 = !((LOC6 ==
/* fillresult tail: if the mapped return ctype is not kind 17 and the return
   type is "invalid" for plain C return, mark loc flag bit 0 and reset storage
   to 0 (i.e. the result is passed differently — TODO confirm semantics). */
((Tctypekind529007) 17))); if (!(LOC5)) goto LA7; LOC5 = isinvalidreturntype_533550_839829468((*param0).typ); LA7: ; if (!LOC5) goto LA8; (*param0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8)); (*param0).loc.s = ((Tstorageloc292812) 0); } LA8: ; }
/* assignparam: parameters need no declaration emitted here — only local
   debug info is recorded. */
N_NIMCALL(void, assignparam_538994_839829468)(Tcproc529021* p0, Tsym292834* s0) { localdebuginfo_538449_839829468(p0, s0); }
/* closuresetup: when the proc's type has type-flag bit 11 set (closure env —
   TODO confirm flag name), fetch ast[3]'s last son, require node kind 3
   (internalerror T839829468_211 otherwise), declare its sym as the env local
   and emit an init line (format T839829468_212) with the env's loc and type
   descriptor into proc section 2. */
N_NIMCALL(void, closuresetup_560158_839829468)(Tcproc529021* p0, Tsym292834* prc0) { Tnode292802* ls0; Tnode292802* LOC5; Tsym292834* env0; TY532811 LOC10; { { if (!!((((*(*prc0).typ).flags &(1U<<((NU)(((Ttypeflag292431) 11))&31U)))!=0))) goto LA3; goto BeforeRet; } LA3: ; LOC5 = (Tnode292802*)0; LOC5 = HEX5BHEX5D_293238_850551059((*prc0).ast, ((NI) 3)); ls0 = lastson_295364_850551059(LOC5); { if (!!(((*ls0).kind == ((Tnodekind292020) 3)))) goto LA8; internalerror_196100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_211)); } LA8: ; env0 = (*ls0).kindU.S4.sym; assignlocalvar_538614_839829468(p0, env0); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdloc_538188_839829468((&(*env0).loc)); LOC10[1] = gettypedesc_535673_839829468((*p0).module, (*env0).typ); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_212), LOC10, 2); }BeforeRet: ; }
/* initgcframe: if this proc allocated GC-frame slots (gcframeid > 0), render
   the frame-setup prologue (format T839829468_217) with the accumulated
   gcframetype; otherwise returns nil rope. */
N_NIMCALL(Ropeobj178006*, initgcframe_538435_839829468)(Tcproc529021* p0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { TY178507 LOC5; if (!(((NI) 0) < ((NI) ((*p0).gcframeid)))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = (*p0).gcframetype; result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_217), LOC5, 1); } LA3: ; return result0; }
/* initframe (continues on the next physical line): emits the stack-trace
   frame prologue; first pulls in compilerproc T839829468_218 via cgsym. */
N_NIMCALL(Ropeobj178006*, initframe_560140_839829468)(Tcproc529021* p0, Ropeobj178006* procname0, Ropeobj178006* filename0) { Ropeobj178006* result0; Ropeobj178006* LOC1; result0 = (Ropeobj178006*)0; LOC1 = (Ropeobj178006*)0; LOC1 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_218)); { Ropeobj178006* LOC6;
/* initframe tail: when the proc registered local-variable frame slots
   (maxframelen > 0) use the 4-argument variant (T839829468_220) which also
   pulls in compilerproc T839829468_219; otherwise the 2-argument variant
   (T839829468_221) with just proc name and file name. */
TY535235 LOC7; if (!(((NI) 0) < (*p0).maxframelen)) goto LA4; LOC6 = (Ropeobj178006*)0; LOC6 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_219)); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = procname0; LOC7[1] = filename0; LOC7[2] = rope_178401_2381377266(((NI64) ((*p0).maxframelen))); LOC7[3] = rope_178401_2381377266(((NI64) ((*p0).blocks->data[((NI) 0)].framelen))); result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_220), LOC7, 4); } goto LA2; LA4: ; { TY532811 LOC9; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = procname0; LOC9[1] = filename0; result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_221), LOC9, 2); } LA2: ; return result0; }
/* appcg (proc-section variant): formats frmt0/args via ropecg and appends the
   result rope to the chosen section of proc p0. */
N_NIMCALL(void, appcg_532648_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) { Ropeobj178006** LOC1; Ropeobj178006* LOC2; LOC1 = (Ropeobj178006**)0; LOC1 = s_529179_3723162438(p0, s0); LOC2 = (Ropeobj178006*)0; LOC2 = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0); add_178482_2381377266(LOC1, LOC2); }
/* deinitgcframe: counterpart of initgcframe — emits fixed epilogue
   T839829468_225 only when gcframeid > 0. */
N_NIMCALL(Ropeobj178006*, deinitgcframe_538441_839829468)(Tcproc529021* p0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { TY533289 LOC5; if (!(((NI) 0) < ((NI) ((*p0).gcframeid)))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_225), LOC5, 0); } LA3: ; return result0; }
/* deinitframe: unconditional stack-trace frame epilogue (T839829468_226). */
N_NIMCALL(Ropeobj178006*, deinitframe_560150_839829468)(Tcproc529021* p0) { Ropeobj178006* result0; TY533289 LOC1; result0 = (Ropeobj178006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_226), LOC1, 0); return result0; }
/* genprocaux (continues over the next several physical lines): generates the
   full C body for prc0 into module m0 — header, result handling, parameter
   debug info, closure setup, body statements, frame/GC-frame pro-/epilogue. */
N_NIMCALL(void, genprocaux_560284_839829468)(Tcgen529027* m0, Tsym292834* prc0) { Tcproc529021* p0; Ropeobj178006* header0; Ropeobj178006* returnstmt0; Tnode292802* LOC51; Ropeobj178006* generatedproc0; p0 = newproc_529206_3723162438(prc0,
m0); header0 = genprocheader_535867_839829468(m0, prc0); returnstmt0 = NIM_NIL; { NIM_BOOL LOC3; Tsym292834* res0; LOC3 = (NIM_BOOL)0; LOC3 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)); if (!(LOC3)) goto LA4; LOC3 = !(((*(*prc0).typ).sons->data[((NI) 0)] == NIM_NIL)); LA4: ; if (!LOC3) goto LA5; { NI LOC9; LOC9 = (NI)0; LOC9 = len_293081_850551059((*prc0).ast); if (!(LOC9 <= ((NI) 7))) goto LA10; internalerror_196100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_120)); } LA10: ; res0 = (*(*(*prc0).ast).kindU.S6.sons->data[((NI) 7)]).kindU.S4.sym; { NIM_BOOL LOC14; TY178507 LOC34; LOC14 = (NIM_BOOL)0; LOC14 = isinvalidreturntype_533550_839829468((*(*prc0).typ).sons->data[((NI) 0)]); if (!!(LOC14)) goto LA15; { if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA19; (*res0).flags |= ((NU32)1)<<((((Tsymflag292184) 12))%(sizeof(NU32)*8)); } LA19: ; { NIM_BOOL LOC23; NIM_BOOL LOC24; NIM_BOOL LOC26; Tnode292802* val0; Tnode292802* LOC29; Ropeobj178006* decl0; Tloc292816 a0; TY532811 LOC32; LOC23 = (NIM_BOOL)0; LOC24 = (NIM_BOOL)0; LOC24 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0); if (!(LOC24)) goto LA25; LOC26 = (NIM_BOOL)0; LOC26 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC26) goto LA27; LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA27: ; LOC24 = LOC26; LA25: ; LOC23 = LOC24; if (!(LOC23)) goto LA28; LOC29 = (Tnode292802*)0; LOC29 = getbody_335226_1724185294(prc0); val0 = easyresultasgn_560191_839829468(LOC29); LOC23 = !((val0 == NIM_NIL)); LA28: ; if (!LOC23) goto LA30; decl0 = localvardecl_538532_839829468(p0, res0); memset((void*)(&a0), 0, sizeof(a0)); initlocexprsingleuse_539289_839829468(p0, val0, (&a0)); memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = decl0; LOC32[1] = rdloc_538188_839829468((&a0)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC32, 2); } goto LA21; LA30: ; { 
assignlocalvar_538614_839829468(p0, res0); initlocalvar_538398_839829468(p0, res0, NIM_FALSE); } LA21: ; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = rdloc_538188_839829468((&(*res0).loc)); returnstmt0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_209), LOC34, 1); } goto LA12; LA15: ; { fillresult_533865_839829468(res0); assignparam_538994_839829468(p0, res0); { Ttype292840* LOC38; LOC38 = (Ttype292840*)0; LOC38 = skiptypes_296099_850551059((*res0).typ, IL64(211106232576256)); if (!((*LOC38).kind == ((Ttypekind292244) 16))) goto LA39; (*res0).loc.s = ((Tstorageloc292812) 0); } LA39: ; } LA12: ; } LA5: ; { NI i_560627_839829468; NI HEX3Atmp_560743_839829468; NI LOC42; NI res_560746_839829468; i_560627_839829468 = (NI)0; HEX3Atmp_560743_839829468 = (NI)0; LOC42 = (NI)0; LOC42 = sonslen_295351_850551059((*(*prc0).typ).n); HEX3Atmp_560743_839829468 = (NI)(LOC42 - ((NI) 1)); res_560746_839829468 = ((NI) 1); { while (1) { if (!(res_560746_839829468 <= HEX3Atmp_560743_839829468)) goto LA44; i_560627_839829468 = res_560746_839829468; { Tsym292834* param0; param0 = (*(*(*(*prc0).typ).n).kindU.S6.sons->data[i_560627_839829468]).kindU.S4.sym; { NIM_BOOL LOC48; LOC48 = (NIM_BOOL)0; LOC48 = iscompiletimeonly_328706_3876443242((*param0).typ); if (!LOC48) goto LA49; goto LA45; } LA49: ; assignparam_538994_839829468(p0, param0); } LA45: ; res_560746_839829468 += ((NI) 1); } LA44: ; } } closuresetup_560158_839829468(p0, prc0); LOC51 = (Tnode292802*)0; LOC51 = getbody_335226_1724185294(prc0); genstmts_539244_839829468(p0, LOC51); generatedproc0 = (Ropeobj178006*)0; { if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0)) goto LA54; { if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0)) goto LA58; header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_213), header0); } LA58: ; } LA54: ; { TY535235 LOC68; Ropeobj178006** LOC69; Ropeobj178006** LOC70; Ropeobj178006** LOC71; 
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)) goto LA62; { if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0)) goto LA66; header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_214), header0); } LA66: ; memset((void*)LOC68, 0, sizeof(LOC68)); LOC68[0] = header0; LOC69 = (Ropeobj178006**)0; LOC69 = s_529179_3723162438(p0, ((Tcprocsection529011) 0)); LOC68[1] = (*LOC69); LOC70 = (Ropeobj178006**)0; LOC70 = s_529179_3723162438(p0, ((Tcprocsection529011) 1)); LOC68[2] = (*LOC70); LOC71 = (Ropeobj178006**)0; LOC71 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); LOC68[3] = (*LOC71); generatedproc0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_215), LOC68, 4); } goto LA60; LA62: ; { TY178507 LOC73; Ropeobj178006* LOC74; Ropeobj178006** LOC93; Ropeobj178006** LOC94; Ropeobj178006* LOC101; TY533289 LOC107; Ropeobj178006* LOC108; memset((void*)LOC73, 0, sizeof(LOC73)); LOC73[0] = header0; generatedproc0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_216), LOC73, 1); LOC74 = (Ropeobj178006*)0; LOC74 = initgcframe_538435_839829468(p0); add_178482_2381377266(&generatedproc0, LOC74); { Ropeobj178006** LOC79; Ropeobj178006* procname0; Ropeobj178006* LOC80; Ropeobj178006* LOC81; if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA77; LOC79 = (Ropeobj178006**)0; LOC79 = s_529179_3723162438(p0, ((Tcprocsection529011) 0)); add_178482_2381377266(&generatedproc0, (*LOC79)); procname0 = makecstring_191638_155036129((*(*prc0).name).s); LOC80 = (Ropeobj178006*)0; LOC80 = quotedfilename_196818_155036129((*prc0).info); LOC81 = (Ropeobj178006*)0; LOC81 = initframe_560140_839829468(p0, procname0, LOC80); add_178482_2381377266(&generatedproc0, LOC81); } goto LA75; LA77: ; { Ropeobj178006** LOC83; LOC83 = (Ropeobj178006**)0; LOC83 = s_529179_3723162438(p0, ((Tcprocsection529011) 0)); add_178482_2381377266(&generatedproc0, (*LOC83)); } 
/* genprocaux tail: assembles the proc epilogue — optional section emitted
   under option bit 19 (format T839829468_222), the BeforeRet label
   (T839829468_223) when any `goto BeforeRet` was generated, the init/var
   sections, a label epilogue (T839829468_224) when needed, GC-frame and
   stack-frame teardown, the return statement and closing brace
   (T839829468_227) — then appends the finished proc to module section 10. */
LA75: ; { TY533289 LOC88; if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 19))&31U)))!=0)) goto LA86; memset((void*)LOC88, 0, sizeof(LOC88)); appcg_532648_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_222), LOC88, 0); } LA86: ; { if (!(*p0).beforeretneeded) goto LA91; add_178487_2381377266(&generatedproc0, ((NimStringDesc*) &T839829468_223)); } LA91: ; LOC93 = (Ropeobj178006**)0; LOC93 = s_529179_3723162438(p0, ((Tcprocsection529011) 1)); add_178482_2381377266(&generatedproc0, (*LOC93)); LOC94 = (Ropeobj178006**)0; LOC94 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); add_178482_2381377266(&generatedproc0, (*LOC94)); { TY533289 LOC99; Ropeobj178006* LOC100; if (!(*p0).beforeretneeded) goto LA97; memset((void*)LOC99, 0, sizeof(LOC99)); LOC100 = (Ropeobj178006*)0; LOC100 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_224), LOC99, 0); add_178482_2381377266(&generatedproc0, LOC100); } LA97: ; LOC101 = (Ropeobj178006*)0; LOC101 = deinitgcframe_538441_839829468(p0); add_178482_2381377266(&generatedproc0, LOC101); { Ropeobj178006* LOC106; if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA104; LOC106 = (Ropeobj178006*)0; LOC106 = deinitframe_560150_839829468(p0); add_178482_2381377266(&generatedproc0, LOC106); } LA104: ; add_178482_2381377266(&generatedproc0, returnstmt0); memset((void*)LOC107, 0, sizeof(LOC107)); LOC108 = (Ropeobj178006*)0; LOC108 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_227), LOC107, 0); add_178482_2381377266(&generatedproc0, LOC108); } LA60: ; add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0); }
/* findpendingmodule: maps a symbol to the code-gen module object of the
   module that owns it, via the global gmodules table indexed by module
   position. */
N_NIMCALL(Tcgen529027*, findpendingmodule_532241_839829468)(Tcgen529027* m0, Tsym292834* s0) { Tcgen529027* result0; Tsym292834* ms0; result0 = (Tcgen529027*)0; ms0 = getmodule_299123_2984716966(s0); result0 = gmodules_529170_3723162438->data[(*ms0).position]; return result0; }
/* isgetprocaddr (continues on the next physical line): true when the dynlib
   path expression is a call-like node with a non-nil type whose kind is in
   bitmask 100663296 — i.e. the "dynlib: getProcAddr(...)" pattern. */
N_NIMCALL(NIM_BOOL, isgetprocaddr_559443_839829468)(Tlib292820* lib0) {
/* isgetprocaddr body: node kind must be one of the call kinds
   {26,27,28,29,30,31,32}, typ must be non-nil, and the type's kind must fall
   in bitmask 100663296 (two kinds — TODO confirm which Ttypekind values). */
NIM_BOOL result0; Tnode292802* n0; NIM_BOOL LOC1; NIM_BOOL LOC2; result0 = (NIM_BOOL)0; n0 = (*lib0).path; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC2 = ((*n0).kind == ((Tnodekind292020) 27) || (*n0).kind == ((Tnodekind292020) 29) || (*n0).kind == ((Tnodekind292020) 30) || (*n0).kind == ((Tnodekind292020) 31) || (*n0).kind == ((Tnodekind292020) 26) || (*n0).kind == ((Tnodekind292020) 28) || (*n0).kind == ((Tnodekind292020) 32)); if (!(LOC2)) goto LA3; LOC2 = !(((*n0).typ == NIM_NIL)); LA3: ; LOC1 = LOC2; if (!(LOC1)) goto LA4; LOC1 = ((100663296 &((NU64)1<<((NU)((*(*n0).typ).kind)&63U)))!=0); LA4: ; result0 = LOC1; return result0; }
/* initlocexpr: initializes a fresh loc of kind 0 for expression e0's type and
   evaluates the expression into it. */
N_NIMCALL(void, initlocexpr_539283_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0) { initloc_532273_839829468(result0, ((Tlockind292808) 0), (*e0).typ, ((Tstorageloc292812) 0)); expr_539248_839829468(p0, e0, result0); }
/* loaddynamiclib (continues over the next physical lines): once per lib
   (guarded by lib.generated), allocates a temp handle name and declares it
   (format T839829468_228). If the path is a string literal (node kinds
   20..22) it expands library name candidates and emits a load attempt per
   candidate; otherwise it evaluates the path expression at init time. */
N_NIMCALL(void, loaddynamiclib_559481_839829468)(Tcgen529027* m0, Tlib292820* lib0) { { Ropeobj178006* tmp0; TY178507 LOC5; if (!!((*lib0).generated)) goto LA3; (*lib0).generated = NIM_TRUE; tmp0 = gettempname_533598_839829468(m0); asgnRefNoCycle((void**) (&(*lib0).name), tmp0); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = tmp0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_228), LOC5, 1); { TY135002* s0; Ropeobj178006* loadlib0; TY532811 LOC18; if (!((*(*lib0).path).kind >= ((Tnodekind292020) 20) && (*(*lib0).path).kind <= ((Tnodekind292020) 22))) goto LA8; s0 = (TY135002*) newSeq((&NTI135002), 0); libcandidates_170605_2607990831((*(*lib0).path).kindU.S3.strval, (&s0)); rawmessage_194612_155036129(((Tmsgkind191002) 286), (*(*lib0).path).kindU.S3.strval); loadlib0 = NIM_NIL; { NI i_559847_839829468; NI HEX3Atmp_559902_839829468; NI res_559905_839829468; i_559847_839829468 = (NI)0; HEX3Atmp_559902_839829468 = (NI)0; HEX3Atmp_559902_839829468 = (s0 ?
(s0->Sup.len-1) : -1); res_559905_839829468 = ((NI) 0); { while (1) { TY532811 LOC17; if (!(res_559905_839829468 <= HEX3Atmp_559902_839829468)) goto LA12; i_559847_839829468 = res_559905_839829468; (*m0).labels += ((NI) 1); { if (!(((NI) 0) < i_559847_839829468)) goto LA15; add_178487_2381377266(&loadlib0, ((NimStringDesc*) &T839829468_229)); } LA15: ; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = tmp0; LOC17[1] = getstrlit_549468_839829468(m0, s0->data[i_559847_839829468]); appcg_532632_839829468(m0, &loadlib0, ((NimStringDesc*) &T839829468_230), LOC17, 2); res_559905_839829468 += ((NI) 1); } LA12: ; } } memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = loadlib0; LOC18[1] = getstrlit_549468_839829468(m0, (*(*lib0).path).kindU.S3.strval); appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_231), LOC18, 2); } goto LA6; LA8: ; { Tcproc529021* p0; Tloc292816 dest0; Ropeobj178006** LOC20; Ropeobj178006** LOC21; Ropeobj178006** LOC22; TY532811 LOC23; p0 = newproc_529206_3723162438(NIM_NIL, m0); (*p0).options = ((*p0).options & ~ 163840); memset((void*)(&dest0), 0, sizeof(dest0)); initlocexpr_539283_839829468(p0, (*lib0).path, (&dest0)); LOC20 = (Ropeobj178006**)0; LOC20 = s_529179_3723162438(p0, ((Tcprocsection529011) 0)); add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], (*LOC20)); LOC21 = (Ropeobj178006**)0; LOC21 = s_529179_3723162438(p0, ((Tcprocsection529011) 1)); add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 16))- 0], (*LOC21)); LOC22 = (Ropeobj178006**)0; LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 16))- 0], (*LOC22)); memset((void*)LOC23, 0, sizeof(LOC23)); LOC23[0] = tmp0; LOC23[1] = rdloc_538188_839829468((&dest0)); appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_232), LOC23, 2); } LA6: ; } LA3: ; { if (!((*lib0).name == NIM_NIL)) goto LA26; 
/* loaddynamiclib tail: after generation the lib must have a name — otherwise
   internal error T839829468_233. */
internalerror_196113_155036129(((NimStringDesc*) &T839829468_233)); } LA26: ; }
/* mangledynlibproc: the C name used for a dynlib-imported symbol — the plain
   Nim name when symbol flag bit 16 is set (presumably sfCompilerProc/exported
   — TODO confirm), otherwise a unique name built from the symbol id via
   format T839829468_234. */
N_NIMCALL(Ropeobj178006*, mangledynlibproc_538816_839829468)(Tsym292834* sym0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 16))&31U)))!=0)) goto LA3; result0 = rope_178277_2381377266((*(*sym0).name).s); } goto LA1; LA3: ; { TY178507 LOC6; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rope_178401_2381377266(((NI64) ((*sym0).Sup.id))); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_234), LOC6, 1); } LA1: ; return result0; }
/* symindynamiclib (continues over the next physical lines): binds sym0 to its
   dynamic library — loads the lib (unless the getProcAddr pattern is used),
   rewrites sym0.loc.r to the mangled proxy variable, and emits the code that
   resolves the symbol at program init. */
N_NIMCALL(void, symindynamiclib_559929_839829468)(Tcgen529027* m0, Tsym292834* sym0) { Tlib292820* lib0; NIM_BOOL iscall0; Ropeobj178006* extname0; Ropeobj178006* tmp0; TY532811 LOC43; lib0 = (*sym0).annex; iscall0 = isgetprocaddr_559443_839829468(lib0); extname0 = (*sym0).loc.r; { if (!!(iscall0)) goto LA3; loaddynamiclib_559481_839829468(m0, lib0); } LA3: ; tmp0 = mangledynlibproc_538816_839829468(sym0); asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0); asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL); (*m0).labels += ((NI) 2); { Tnode292802* n0; Tloc292816 a0; Tnode292802* LOC9; Ropeobj178006* params0; Ropeobj178006* LOC10; Ropeobj178006* load0; TY535235 LOC17; NimStringDesc* LOC18; Tnode292802* last0; NimStringDesc* idx0; if (!iscall0) goto LA7; n0 = (*lib0).path; memset((void*)(&a0), 0, sizeof(a0)); LOC9 = (Tnode292802*)0; LOC9 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0)); initlocexpr_539283_839829468((*m0).initproc, LOC9, (&a0)); LOC10 = (Ropeobj178006*)0; LOC10 = rdloc_538188_839829468((&a0)); params0 = HEX26_178447_2381377266(LOC10, ((NimStringDesc*) &T839829468_118)); { NI i_559964_839829468; NI HEX3Atmp_560025_839829468; NI LOC12; NI res_560028_839829468; i_559964_839829468 = (NI)0; HEX3Atmp_560025_839829468 = (NI)0; LOC12 = (NI)0; LOC12 = len_293081_850551059(n0); HEX3Atmp_560025_839829468 = (NI)(LOC12 - ((NI) 2)); res_560028_839829468 = ((NI) 1); { while (1) {
Tnode292802* LOC15; Ropeobj178006* LOC16; if (!(res_560028_839829468 <= HEX3Atmp_560025_839829468)) goto LA14; i_559964_839829468 = res_560028_839829468; LOC15 = (Tnode292802*)0; LOC15 = HEX5BHEX5D_293238_850551059(n0, i_559964_839829468); initlocexpr_539283_839829468((*m0).initproc, LOC15, (&a0)); LOC16 = (Ropeobj178006*)0; LOC16 = rdloc_538188_839829468((&a0)); add_178482_2381377266(&params0, LOC16); add_178487_2381377266(&params0, ((NimStringDesc*) &T839829468_110)); res_560028_839829468 += ((NI) 1); } LA14: ; } } memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = tmp0; LOC17[1] = gettypedesc_535673_839829468(m0, (*sym0).typ); LOC17[2] = params0; LOC18 = (NimStringDesc*)0; LOC18 = HEX24_178856_2381377266(extname0); LOC17[3] = makecstring_191638_155036129(LOC18); load0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_235), LOC17, 4); last0 = lastson_295364_850551059(n0); { if (!((*last0).kind == ((Tnodekind292020) 58))) goto LA21; last0 = (*last0).kindU.S6.sons->data[((NI) 1)]; } LA21: ; { NimStringDesc* LOC27; if (!!(((*last0).kind == ((Tnodekind292020) 20)))) goto LA25; LOC27 = (NimStringDesc*)0; LOC27 = HEX24_196185_1689653243(T839829468_236); internalerror_196113_155036129(LOC27); } LA25: ; idx0 = (*last0).kindU.S3.strval; { Ropeobj178006** LOC32; if (!((idx0 ? idx0->Sup.len : 0) == ((NI) 0))) goto LA30; LOC32 = (Ropeobj178006**)0; LOC32 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 2)); add_178482_2381377266(LOC32, load0); } goto LA28; LA30: ; { NIM_BOOL LOC34; LOC34 = (NIM_BOOL)0; LOC34 = ((idx0 ? 
idx0->Sup.len : 0) == ((NI) 1)); if (!(LOC34)) goto LA35; LOC34 = (((NU8)(idx0->data[((NI) 0)])) >= ((NU8)(48)) && ((NU8)(idx0->data[((NI) 0)])) <= ((NU8)(57))); LA35: ; if (!LOC34) goto LA36; add_178482_2381377266(&(*m0).extensionloaders[(((NU8)(idx0->data[((NI) 0)])))- 48], load0); } goto LA28; LA36: ; { NimStringDesc* LOC39; LOC39 = (NimStringDesc*)0; LOC39 = rawNewString(idx0->Sup.len + 13); appendString(LOC39, ((NimStringDesc*) &T839829468_237)); appendString(LOC39, idx0); internalerror_196100_155036129((*sym0).info, LOC39); } LA28: ; } goto LA5; LA7: ; { TY535235 LOC41; NimStringDesc* LOC42; memset((void*)LOC41, 0, sizeof(LOC41)); LOC41[0] = tmp0; LOC41[1] = gettypedesc_535673_839829468(m0, (*sym0).typ); LOC41[2] = (*lib0).name; LOC42 = (NimStringDesc*)0; LOC42 = HEX24_178856_2381377266(extname0); LOC41[3] = makecstring_191638_155036129(LOC42); appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_238), LOC41, 4); } LA5: ; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = (*sym0).loc.r; LOC43[1] = gettypedesc_535673_839829468(m0, (*sym0).loc.t); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_239), LOC43, 2); } N_NIMCALL(void, symindynamiclibpartial_560071_839829468)(Tcgen529027* m0, Tsym292834* sym0) { asgnRefNoCycle((void**) (&(*sym0).loc.r), mangledynlibproc_538816_839829468(sym0)); asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL); } N_NIMCALL(void, genprocnoforward_560906_839829468)(Tcgen529027* m0, Tsym292834* prc0) { { fillprocloc_539201_839829468(prc0); useheader_532369_839829468(m0, prc0); { Ropeobj178006* LOC5; if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 7))&15U)))!=0)) goto LA3; LOC5 = (Ropeobj178006*)0; LOC5 = cgsym_532403_839829468(m0, (*(*prc0).name).s); goto BeforeRet; } LA3: ; genprocprototype_539254_839829468(m0, prc0); { if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA8; } goto LA6; LA8: ; { if 
(!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA11; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*prc0).Sup.id); if (!!(LOC15)) goto LA16; genprocaux_560284_839829468(m0, prc0); } LA16: ; } goto LA6; LA11: ; { Tcgen529027* q0; if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA19; q0 = findpendingmodule_532241_839829468(m0, prc0); { NIM_BOOL LOC23; NIM_BOOL LOC25; LOC23 = (NIM_BOOL)0; LOC23 = !((q0 == NIM_NIL)); if (!(LOC23)) goto LA24; LOC25 = (NIM_BOOL)0; LOC25 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id); LOC23 = !(LOC25); LA24: ; if (!LOC23) goto LA26; symindynamiclib_559929_839829468(q0, prc0); } goto LA21; LA26: ; { symindynamiclibpartial_560071_839829468(m0, prc0); } LA21: ; } goto LA6; LA19: ; { Tcgen529027* q0; if (!!((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0))) goto LA30; q0 = findpendingmodule_532241_839829468(m0, prc0); { NIM_BOOL LOC34; NIM_BOOL LOC36; LOC34 = (NIM_BOOL)0; LOC34 = !((q0 == NIM_NIL)); if (!(LOC34)) goto LA35; LOC36 = (NIM_BOOL)0; LOC36 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id); LOC34 = !(LOC36); LA35: ; if (!LOC34) goto LA37; genprocaux_560284_839829468(q0, prc0); } LA37: ; } goto LA6; LA30: ; LA6: ; }BeforeRet: ; } N_NIMCALL(void, genproc_532951_839829468)(Tcgen529027* m0, Tsym292834* prc0) { { { NIM_BOOL LOC3; NIM_BOOL LOC5; LOC3 = (NIM_BOOL)0; LOC3 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 26))&31U)))!=0); if (LOC3) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = isactivated_561431_839829468(prc0); LOC3 = !(LOC5); LA4: ; if (!LOC3) goto LA6; goto BeforeRet; } LA6: ; fillprocloc_539201_839829468(prc0); { if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 4))&31U)))!=0)) goto LA10; addforwardedproc_532203_839829468(m0, prc0); } goto LA8; LA10: ; { genprocnoforward_560906_839829468(m0, prc0); { NIM_BOOL LOC15; NIM_BOOL LOC16; LOC15 = (NIM_BOOL)0; LOC16 
/* genproc tail: for exported, non-forwarded procs with a generated header
   (flag mask 65600 == 64), also emit the prototype into the header module,
   and for callconv 5 (inline — TODO confirm) the full body as well, once per
   header module via containsorincl. */
= (NIM_BOOL)0; LOC16 = ((65600 & (*prc0).flags) == 64); if (!(LOC16)) goto LA17; LOC16 = !((generatedheader_532201_839829468 == NIM_NIL)); LA17: ; LOC15 = LOC16; if (!(LOC15)) goto LA18; LOC15 = !((((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)); LA18: ; if (!LOC15) goto LA19; genprocprototype_539254_839829468(generatedheader_532201_839829468, prc0); { if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA23; { NIM_BOOL LOC27; LOC27 = (NIM_BOOL)0; LOC27 = containsorincl_268862_2627731572((&(*generatedheader_532201_839829468).declaredthings), (*prc0).Sup.id); if (!!(LOC27)) goto LA28; genprocaux_560284_839829468(generatedheader_532201_839829468, prc0); } LA28: ; } LA23: ; } LA19: ; } LA8: ; }BeforeRet: ; }
/* emulatedthreadvars: true when both global-option bits in mask 71303168 are
   set (thread-var emulation mode — TODO confirm which Tglobaloption bits). */
static N_INLINE(NIM_BOOL, emulatedthreadvars_532949_839829468)(void) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = ((71303168 & ~ gglobaloptions_169130_2607990831)==0); return result0; }
/* declarethreadvar (continues over the next physical lines): in emulation
   mode, registers the variable's type in the global nimtv dependency seq and
   appends a field to the emulated TLS struct (format T839829468_54), once per
   symbol id; otherwise emits a real (possibly extern / __declspec) thread-var
   declaration. */
N_NIMCALL(void, declarethreadvar_538676_839829468)(Tcgen529027* m0, Tsym292834* s0, NIM_BOOL isextern0) { { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = emulatedthreadvars_532949_839829468(); if (!LOC3) goto LA4; { NIM_BOOL LOC8; TY532811 LOC11; LOC8 = (NIM_BOOL)0; LOC8 = containsorincl_268862_2627731572((&nimtvdeclared_538675_839829468), (*s0).Sup.id); if (!!(LOC8)) goto LA9; nimtvdeps_538674_839829468 = (Ttypeseq292836*) incrSeqV2(&(nimtvdeps_538674_839829468)->Sup, sizeof(Ttype292840*)); asgnRefNoCycle((void**) (&nimtvdeps_538674_839829468->data[nimtvdeps_538674_839829468->Sup.len]), (*s0).loc.t); ++nimtvdeps_538674_839829468->Sup.len; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = gettypedesc_535673_839829468(m0, (*s0).loc.t); LOC11[1] = (*s0).loc.r; addf_179205_2381377266(&nimtv_538656_839829468, ((NimStringDesc*) &T839829468_54), LOC11, 2); } LA9: ; } goto LA1; LA4: ; { Ropeobj178006* LOC21; TY178507 LOC22; { if (!isextern0) goto LA15; add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*)
&T839829468_240)); } LA15: ; { if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 22))&63U)))!=0)) goto LA19; add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_241)); } LA19: ; LOC21 = (Ropeobj178006*)0; LOC21 = gettypedesc_535673_839829468(m0, (*s0).loc.t); add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC21); memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = (*s0).loc.r; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC22, 1); } LA1: ; } N_NIMCALL(void, genvarprototypeaux_544254_839829468)(Tcgen529027* m0, Tsym292834* sym0) { Ropeobj178006* LOC1; { useheader_532369_839829468(m0, sym0); LOC1 = (Ropeobj178006*)0; LOC1 = manglename_533205_839829468(sym0); fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 3), (*sym0).typ, LOC1, ((Tstorageloc292812) 3)); { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = (((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0); if (LOC4) goto LA5; LOC4 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id); LA5: ; if (!LOC4) goto LA6; goto BeforeRet; } LA6: ; { if (!!(((*(*sym0).owner).Sup.id == (*(*m0).module).Sup.id))) goto LA10; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA14; declarethreadvar_538676_839829468(m0, sym0, NIM_TRUE); } goto LA12; LA14: ; { Ropeobj178006* LOC17; TY178507 LOC30; add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_240)); LOC17 = (Ropeobj178006*)0; LOC17 = gettypedesc_535673_839829468(m0, (*sym0).loc.t); add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC17); { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA20; add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_53)); } LA20: ; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA24; 
/* genvarprototypeaux tail: finish the extern variable declaration — optional
   qualifier strings for symbol flag bits 8 and 7, then the name and
   terminator via format T839829468_242 into module section 9. */
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_121)); } LA24: ; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA28; add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_122)); } LA28: ; memset((void*)LOC30, 0, sizeof(LOC30)); LOC30[0] = (*sym0).loc.r; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC30, 1); } LA12: ; } LA10: ; }BeforeRet: ; }
/* genvarprototype: thin public wrapper over genvarprototypeaux. */
N_NIMCALL(void, genvarprototype_539236_839829468)(Tcgen529027* m0, Tsym292834* sym0) { genvarprototypeaux_544254_839829468(m0, sym0); }
/* cgsym: ensures the compilerproc `name0` is generated in module m0 and
   returns its C name rope. Proc-like kinds (12..15) -> genproc; var-like
   kinds (8,9,11) -> genvarprototype; type kind 7 -> force its type
   descriptor (result discarded); anything else is an internal error.
   NOTE(review): when the compilerproc is unknown, rawmessage (Tmsgkind 68) is
   invoked and then `(*sym0).loc.r` is still read with sym0 == NIM_NIL —
   presumably rawmessage raises/aborts so the deref is unreachable; verify. */
N_NIMCALL(Ropeobj178006*, cgsym_532403_839829468)(Tcgen529027* m0, NimStringDesc* name0) { Ropeobj178006* result0; Tsym292834* sym0; result0 = (Ropeobj178006*)0; sym0 = getcompilerproc_338748_3937434831(name0); { if (!!((sym0 == NIM_NIL))) goto LA3; switch ((*sym0).kind) { case ((Tsymkind292435) 12): case ((Tsymkind292435) 13): case ((Tsymkind292435) 15): case ((Tsymkind292435) 14): { genproc_532951_839829468(m0, sym0); } break; case ((Tsymkind292435) 8): case ((Tsymkind292435) 11): case ((Tsymkind292435) 9): { genvarprototype_539236_839829468(m0, sym0); } break; case ((Tsymkind292435) 7): { Ropeobj178006* LOC8; LOC8 = (Ropeobj178006*)0; LOC8 = gettypedesc_535673_839829468(m0, (*sym0).typ); } break; default: { NimStringDesc* LOC10; LOC10 = (NimStringDesc*)0; LOC10 = rawNewString(name0->Sup.len + reprEnum((NI)(*sym0).kind, (&NTI292435))->Sup.len + 9); appendString(LOC10, ((NimStringDesc*) &T839829468_243)); appendString(LOC10, name0); appendString(LOC10, ((NimStringDesc*) &T839829468_244)); appendString(LOC10, reprEnum((NI)(*sym0).kind, (&NTI292435))); internalerror_196113_155036129(LOC10); } break; } } goto LA1; LA3: ; { rawmessage_194612_155036129(((Tmsgkind191002) 68), name0); } LA1: ; result0 = (*sym0).loc.r; return result0; }
/* ropecg (body continues past this view): mini-formatter for generated C
   snippets; `$`-directives substitute args / pull in compilerprocs. */
N_NIMCALL(Ropeobj178006*, ropecg_532407_839829468)(Tcgen529027* m0, NimStringDesc* frmt0,
Ropeobj178006** args0, NI args0Len0) { Ropeobj178006* result0; NI i0; NI length0; NI num0; result0 = (Ropeobj178006*)0; i0 = ((NI) 0); length0 = (frmt0 ? frmt0->Sup.len : 0); result0 = NIM_NIL; num0 = ((NI) 0); { while (1) { NI start0; if (!(i0 < length0)) goto LA2; { if (!((NU8)(frmt0->data[i0]) == (NU8)(36))) goto LA5; i0 += ((NI) 1); switch (((NU8)(frmt0->data[i0]))) { case 36: { add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_19)); i0 += ((NI) 1); } break; case 35: { i0 += ((NI) 1); add_178482_2381377266(&result0, args0[num0]); num0 += ((NI) 1); } break; case 48 ... 57: { NI j0; j0 = ((NI) 0); { while (1) { j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48)); i0 += ((NI) 1); { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = (length0 <= i0); if (LOC14) goto LA15; LOC14 = !((((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))); LA15: ; if (!LOC14) goto LA16; goto LA10; } LA16: ; } } LA10: ; num0 = j0; { NimStringDesc* LOC22; NimStringDesc* LOC23; if (!((NI)((args0Len0-1) + ((NI) 1)) < j0)) goto LA20; LOC22 = (NimStringDesc*)0; LOC23 = (NimStringDesc*)0; LOC23 = nimIntToStr(j0); LOC22 = rawNewString(LOC23->Sup.len + 30); appendString(LOC22, ((NimStringDesc*) &T839829468_20)); appendString(LOC22, LOC23); internalerror_196113_155036129(LOC22); } LA20: ; add_178482_2381377266(&result0, args0[(NI)(j0 - ((NI) 1))]); } break; case 110: { { if (!!(((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 10))&31U)))!=0))) goto LA27; add_178482_2381377266(&result0, rnl_178903_2381377266); } LA27: ; i0 += ((NI) 1); } break; case 78: { add_178482_2381377266(&result0, rnl_178903_2381377266); i0 += ((NI) 1); } break; default: { NimStringDesc* LOC31; LOC31 = (NimStringDesc*)0; LOC31 = rawNewString(31); appendString(LOC31, ((NimStringDesc*) &T839829468_20)); appendChar(LOC31, frmt0->data[i0]); internalerror_196113_155036129(LOC31); } break; } } goto LA3; LA5: ; { NIM_BOOL LOC33; NI j0; NimStringDesc* 
ident0; Ropeobj178006* LOC39; LOC33 = (NIM_BOOL)0; LOC33 = ((NU8)(frmt0->data[i0]) == (NU8)(35)); if (!(LOC33)) goto LA34; LOC33 = (((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(97)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(122)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(65)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(90)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(95))); LA34: ; if (!LOC33) goto LA35; i0 += ((NI) 1); j0 = i0; { while (1) { if (!(((NU8)(frmt0->data[j0])) >= ((NU8)(97)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(122)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(65)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(90)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(57)) || ((NU8)(frmt0->data[j0])) == ((NU8)(95)))) goto LA38; j0 += ((NI) 1); } LA38: ; } ident0 = copyStrLast(frmt0, i0, (NI)(j0 - ((NI) 1))); i0 = j0; LOC39 = (Ropeobj178006*)0; LOC39 = cgsym_532403_839829468(m0, ident0); add_178482_2381377266(&result0, LOC39); } goto LA3; LA35: ; { NIM_BOOL LOC41; NI j0; NimStringDesc* LOC47; Ropeobj178006* LOC48; LOC41 = (NIM_BOOL)0; LOC41 = ((NU8)(frmt0->data[i0]) == (NU8)(35)); if (!(LOC41)) goto LA42; LOC41 = ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(36)); LA42: ; if (!LOC41) goto LA43; i0 += ((NI) 2); j0 = ((NI) 0); { while (1) { if (!(((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))) goto LA46; j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48)); i0 += ((NI) 1); } LA46: ; } LOC47 = (NimStringDesc*)0; LOC47 = HEX24_178856_2381377266(args0[(NI)(j0 - ((NI) 1))]); LOC48 = (Ropeobj178006*)0; LOC48 = cgsym_532403_839829468(m0, LOC47); add_178482_2381377266(&result0, LOC48); } goto LA3; LA43: ; LA3: ; start0 = i0; { while (1) { if (!(i0 < length0)) goto LA50; { NIM_BOOL LOC53; LOC53 = (NIM_BOOL)0; LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(36))); if (!(LOC53)) goto LA54; LOC53 = !(((NU8)(frmt0->data[i0]) == 
(NU8)(35))); LA54: ; if (!LOC53) goto LA55; i0 += ((NI) 1); } goto LA51; LA55: ; { goto LA49; } LA51: ; } LA50: ; } LA49: ; { NimStringDesc* LOC62; if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA60; LOC62 = (NimStringDesc*)0; LOC62 = copyStrLast(frmt0, start0, (NI)(i0 - ((NI) 1))); add_178487_2381377266(&result0, LOC62); } LA60: ; } LA2: ; } return result0; } static N_INLINE(NIM_BOOL, crossescppboundary_560754_839829468)(Tcgen529027* m0, Tsym292834* sym0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC2; Tsym292834* LOC4; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC2 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); if (!(LOC2)) goto LA3; LOC4 = (Tsym292834*)0; LOC4 = getmodule_299123_2984716966(sym0); LOC2 = !((((*LOC4).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0)); LA3: ; LOC1 = LOC2; if (!(LOC1)) goto LA5; LOC1 = !((gcmd_169132_2607990831 == ((Tcommands169076) 2))); LA5: ; result0 = LOC1; return result0; } N_NIMCALL(void, genprocprototype_539254_839829468)(Tcgen529027* m0, Tsym292834* sym0) { { useheader_532369_839829468(m0, sym0); { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA3; goto BeforeRet; } LA3: ; { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA7; { NIM_BOOL LOC11; Tsym292834* LOC12; NIM_BOOL LOC14; TY532811 LOC17; Ropeobj178006* LOC18; LOC11 = (NIM_BOOL)0; LOC12 = (Tsym292834*)0; LOC12 = getmodule_299123_2984716966(sym0); LOC11 = !(((*LOC12).Sup.id == (*(*m0).module).Sup.id)); if (!(LOC11)) goto LA13; LOC14 = (NIM_BOOL)0; LOC14 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id); LOC11 = !(LOC14); LA13: ; if (!LOC11) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_535673_839829468(m0, (*sym0).loc.t); LOC17[1] = mangledynlibproc_538816_839829468(sym0); LOC18 = (Ropeobj178006*)0; LOC18 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_245), LOC17, 2); 
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC18); } LA15: ; } goto LA5; LA7: ; { NIM_BOOL LOC20; Ropeobj178006* header0; TY178507 LOC47; Ropeobj178006* LOC48; LOC20 = (NIM_BOOL)0; LOC20 = containsorincl_268862_2627731572((&(*m0).declaredprotos), (*sym0).Sup.id); if (!!(LOC20)) goto LA21; header0 = genprocheader_535867_839829468(m0, sym0); { NIM_BOOL LOC25; LOC25 = (NIM_BOOL)0; LOC25 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0); if (!(LOC25)) goto LA26; LOC25 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0); LA26: ; if (!LOC25) goto LA27; header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_213), header0); } LA27: ; { NIM_BOOL LOC31; LOC31 = (NIM_BOOL)0; LOC31 = !(((*(*sym0).typ).callconv == ((Tcallingconvention292002) 5))); if (!(LOC31)) goto LA32; LOC31 = crossescppboundary_560754_839829468(m0, sym0); LA32: ; if (!LOC31) goto LA33; header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_246), header0); } LA33: ; { NIM_BOOL LOC37; LOC37 = (NIM_BOOL)0; LOC37 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0); if (!(LOC37)) goto LA38; LOC37 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 7))&7U)))!=0); LA38: ; if (!LOC37) goto LA39; add_178487_2381377266(&header0, ((NimStringDesc*) &T839829468_247)); } LA39: ; { NIM_BOOL LOC43; LOC43 = (NIM_BOOL)0; LOC43 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0); if (!(LOC43)) goto LA44; LOC43 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 7))&7U)))!=0); LA44: ; if (!LOC43) goto LA45; add_178487_2381377266(&header0, ((NimStringDesc*) &T839829468_248)); } LA45: ; memset((void*)LOC47, 0, sizeof(LOC47)); LOC47[0] = header0; LOC48 = (Ropeobj178006*)0; LOC48 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_191), LOC47, 1); 
/* NOTE(review): machine-generated Nim compiler output (nimcache); tokens are
 * byte-identical to the original, only whitespace/comments added. */

/* ...tail of genprocprototype_539254: append the finished prototype rope to
 * C-file section 7 and fall through to the shared epilogue. */
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], LOC48);
}
goto LA5;
LA21: ;
LA5: ;
}BeforeRet: ;
}

/* Generated from Nim `usesNativeGC()`: true when the selected GC mode
 * (gselectedgc) is at least Tgcmode value 5. NOTE(review): presumably the
 * threshold separating the native ref-counting/mark-sweep GCs from
 * gcNone/boehm-style modes — TODO confirm against the Nim options enum. */
static N_INLINE(NIM_BOOL, usesnativegc_169177_2607990831)(void) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = (((Tgcmode169080) 5) <= gselectedgc_169133_2607990831);
return result0;
}

/* Generated from Nim `genRefAssign(p, dest, src, flags)`: emit the C code for
 * assigning a GC'ed ref. Three strategies keyed on dest's storage location:
 *  - storage 2 ("stack"-like) or no native GC  -> plain assignment fmt T..._123;
 *  - storage 3 ("heap"-like)                   -> write-barrier call, cycle-aware
 *    (T..._249 when the type can form a cycle, else T..._250);
 *  - anything else (globals etc.)              -> unsure-asgn barrier T..._251.
 * All output goes to cproc section 2 (the statement list). */
N_NIMCALL(void, genrefassign_538311_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY532811 LOC8;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*dest0).s == ((Tstorageloc292812) 2));
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = usesnativegc_169177_2607990831();
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
/* No barrier needed: plain `dest = src`. */
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(dest0);
LOC8[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC8, 2);
}
goto LA1;
LA6: ;
{
if (!((*dest0).s == ((Tstorageloc292812) 3))) goto LA10;
{
NIM_BOOL LOC14;
TY532811 LOC17;
LOC14 = (NIM_BOOL)0;
/* Cycle-capable ref types need the cycle-aware barrier. */
LOC14 = canformacycle_320123_3876443242((*dest0).t);
if (!LOC14) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_249), LOC17, 2);
}
goto LA12;
LA15: ;
{
TY532811 LOC19;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(dest0);
LOC19[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_250), LOC19, 2);
}
LA12: ;
}
goto LA1;
LA10: ;
{
/* Unknown/other storage: conservative "unsure" assignment barrier. */
TY532811 LOC21;
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = addrloc_538204_839829468(dest0);
LOC21[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_251), LOC21, 2);
}
LA1: ;
}

/* Generated from Nim `optAsgnLoc(a, t, field)` — signature continues on the
 * next line: builds a Tloc naming `a.field` for member-wise assignment. */
N_NIMCALL(void, optasgnloc_549789_839829468)(Tloc292816* a0, Ttype292840* t0, Ropeobj178006* field0,
Tloc292816* Result) { Ropeobj178006* LOC1; Ropeobj178006* LOC2; (*Result).k = ((Tlockind292808) 5); (*Result).s = (*a0).s; unsureAsgnRef((void**) (&(*Result).t), t0); LOC1 = (Ropeobj178006*)0; LOC1 = rdloc_538188_839829468(a0); LOC2 = (Ropeobj178006*)0; LOC2 = HEX26_178447_2381377266(LOC1, ((NimStringDesc*) &T839829468_257)); unsureAsgnRef((void**) (&(*Result).r), HEX26_178418_2381377266(LOC2, field0)); } N_NIMCALL(void, genoptasgntuple_550001_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) { Tassignmentflag538302Set newflags0; Ttype292840* t_550053_839829468; Ttype292840* LOC9; { if (!((*src0).s == ((Tstorageloc292812) 1))) goto LA3; newflags0 = (flags0 | 1); } goto LA1; LA3: ; { if (!(((*(*dest0).t).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0)) goto LA6; newflags0 = (flags0 & ~ 1); } goto LA1; LA6: ; { newflags0 = flags0; } LA1: ; LOC9 = (Ttype292840*)0; LOC9 = skiptypes_296099_850551059((*dest0).t, IL64(211106232576256)); t_550053_839829468 = getuniquetype_528640_2036603609(LOC9); { NI i_550071_839829468; NI HEX3Atmp_550077_839829468; NI LOC11; NI res_550080_839829468; i_550071_839829468 = (NI)0; HEX3Atmp_550077_839829468 = (NI)0; LOC11 = (NI)0; LOC11 = len_295339_850551059(t_550053_839829468); HEX3Atmp_550077_839829468 = (LOC11 - 1); res_550080_839829468 = ((NI) 0); { while (1) { Ttype292840* t0; Ropeobj178006* field0; TY178507 LOC14; Tloc292816 LOC15; Tloc292816 LOC16; if (!(res_550080_839829468 <= HEX3Atmp_550077_839829468)) goto LA13; i_550071_839829468 = res_550080_839829468; t0 = (*t_550053_839829468).sons->data[i_550071_839829468]; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rope_178401_2381377266(((NI64) (i_550071_839829468))); field0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_260), LOC14, 1); memset((void*)(&LOC15), 0, sizeof(LOC15)); optasgnloc_549789_839829468(dest0, t0, field0, (&LOC15)); memset((void*)(&LOC16), 0, sizeof(LOC16)); optasgnloc_549789_839829468(src0, t0, 
field0, (&LOC16)); genassignment_539264_839829468(p0, (&LOC15), (&LOC16), newflags0); res_550080_839829468 += ((NI) 1); } LA13: ; } } } N_NIMCALL(void, gengenericasgn_550167_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) { { NIM_BOOL LOC3; Ttype292840* LOC5; LOC3 = (NIM_BOOL)0; LOC3 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0)); if (LOC3) goto LA4; LOC5 = (Ttype292840*)0; LOC5 = skiptypes_296099_850551059((*dest0).t, IL64(211106242013440)); LOC3 = (((*LOC5).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0); LA4: ; if (!LOC3) goto LA6; { NIM_BOOL LOC10; NIM_BOOL LOC12; TY535238 LOC15; LOC10 = (NIM_BOOL)0; LOC10 = ((*dest0).s == ((Tstorageloc292812) 2)); if (LOC10) goto LA11; LOC12 = (NIM_BOOL)0; LOC12 = usesnativegc_169177_2607990831(); LOC10 = !(LOC12); LA11: ; if (!LOC10) goto LA13; usestringh_532345_839829468((*p0).module); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = addrloc_538204_839829468(dest0); LOC15[1] = addrloc_538204_839829468(src0); LOC15[2] = rdloc_538188_839829468(dest0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_261), LOC15, 3); } goto LA8; LA13: ; { TY535238 LOC17; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = addrloc_538204_839829468(dest0); LOC17[1] = addrloc_538204_839829468(src0); LOC17[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_262), LOC17, 3); } LA8: ; } goto LA1; LA6: ; { TY535238 LOC19; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = addrloc_538204_839829468(dest0); LOC19[1] = addrloc_538204_839829468(src0); LOC19[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_263), LOC19, 3); } LA1: ; } N_NIMCALL(NI, asgncomplexity_549751_839829468)(Tnode292802* n0) { NI result0; result0 = (NI)0; { if (!!((n0 == NIM_NIL))) goto 
LA3; switch ((*n0).kind) { case ((Tnodekind292020) 3): { result0 = ((NI) 1); } break; case ((Tnodekind292020) 139): { result0 = ((NI) 100); } break; case ((Tnodekind292020) 138): { { Tnode292802* t_549768_839829468; t_549768_839829468 = (Tnode292802*)0; { NI i_549782_839829468; NI HEX3Atmp_549784_839829468; NI LOC10; NI res_549786_839829468; i_549782_839829468 = (NI)0; HEX3Atmp_549784_839829468 = (NI)0; LOC10 = (NI)0; LOC10 = len_293081_850551059(n0); HEX3Atmp_549784_839829468 = (LOC10 - 1); res_549786_839829468 = ((NI) 0); { while (1) { NI LOC13; if (!(res_549786_839829468 <= HEX3Atmp_549784_839829468)) goto LA12; i_549782_839829468 = res_549786_839829468; t_549768_839829468 = (*n0).kindU.S6.sons->data[i_549782_839829468]; LOC13 = (NI)0; LOC13 = asgncomplexity_549751_839829468(t_549768_839829468); result0 += LOC13; res_549786_839829468 += ((NI) 1); } LA12: ; } } } } break; default: { } break; } } LA3: ; return result0; } N_NIMCALL(void, genoptasgnobject_550084_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0, Tnode292802* t0) { Tassignmentflag538302Set newflags0; { { if (!(t0 == NIM_NIL)) goto LA3; goto BeforeRet; } LA3: ; { if (!((*src0).s == ((Tstorageloc292812) 1))) goto LA7; newflags0 = (flags0 | 1); } goto LA5; LA7: ; { if (!(((*(*dest0).t).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0)) goto LA10; newflags0 = (flags0 & ~ 1); } goto LA5; LA10: ; { newflags0 = flags0; } LA5: ; switch ((*t0).kind) { case ((Tnodekind292020) 3): { Tsym292834* field0; Tloc292816 LOC14; Tloc292816 LOC15; field0 = (*t0).kindU.S4.sym; memset((void*)(&LOC14), 0, sizeof(LOC14)); optasgnloc_549789_839829468(dest0, (*field0).typ, (*field0).loc.r, (&LOC14)); memset((void*)(&LOC15), 0, sizeof(LOC15)); optasgnloc_549789_839829468(src0, (*field0).typ, (*field0).loc.r, (&LOC15)); genassignment_539264_839829468(p0, (&LOC14), (&LOC15), newflags0); } break; case ((Tnodekind292020) 138): { { Tnode292802* child_550155_839829468; 
child_550155_839829468 = (Tnode292802*)0; { NI i_550160_839829468; NI HEX3Atmp_550162_839829468; NI LOC19; NI res_550164_839829468; i_550160_839829468 = (NI)0; HEX3Atmp_550162_839829468 = (NI)0; LOC19 = (NI)0; LOC19 = len_293081_850551059(t0); HEX3Atmp_550162_839829468 = (LOC19 - 1); res_550164_839829468 = ((NI) 0); { while (1) { if (!(res_550164_839829468 <= HEX3Atmp_550162_839829468)) goto LA21; i_550160_839829468 = res_550164_839829468; child_550155_839829468 = (*t0).kindU.S6.sons->data[i_550160_839829468]; genoptasgnobject_550084_839829468(p0, dest0, src0, newflags0, child_550155_839829468); res_550164_839829468 += ((NI) 1); } LA21: ; } } } } break; default: { } break; } }BeforeRet: ; } N_NIMCALL(void, genassignment_539264_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) { Ttype292840* ty0; { { NIM_BOOL LOC3; TY532811 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = !(((*src0).t == NIM_NIL)); if (!(LOC3)) goto LA4; LOC3 = ((*(*src0).t).kind == ((Ttypekind292244) 21)); LA4: ; if (!LOC3) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_538188_839829468(dest0); LOC7[1] = rdloc_538188_839829468(src0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC7, 2); goto BeforeRet; } LA5: ; ty0 = skiptypes_296099_850551059((*dest0).t, IL64(211106233624832)); switch ((*ty0).kind) { case ((Ttypekind292244) 22): { genrefassign_538311_839829468(p0, dest0, src0, flags0); } break; case ((Ttypekind292244) 24): { { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0)); if (!(LOC12)) goto LA13; LOC12 = !(((*src0).s == ((Tstorageloc292812) 1))); LA13: ; if (!LOC12) goto LA14; genrefassign_538311_839829468(p0, dest0, src0, flags0); } goto LA10; LA14: ; { TY535238 LOC17; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = addrloc_538204_839829468(dest0); LOC17[1] = rdloc_538188_839829468(src0); LOC17[2] = 
gentypeinfo_535941_839829468((*p0).module, (*dest0).t); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_252), LOC17, 3); } LA10: ; } break; case ((Ttypekind292244) 28): { { NIM_BOOL LOC21; LOC21 = (NIM_BOOL)0; LOC21 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0)); if (!(LOC21)) goto LA22; LOC21 = !(((*src0).s == ((Tstorageloc292812) 1))); LA22: ; if (!LOC21) goto LA23; genrefassign_538311_839829468(p0, dest0, src0, flags0); } goto LA19; LA23: ; { { NIM_BOOL LOC28; NIM_BOOL LOC30; TY532811 LOC33; LOC28 = (NIM_BOOL)0; LOC28 = ((*dest0).s == ((Tstorageloc292812) 2)); if (LOC28) goto LA29; LOC30 = (NIM_BOOL)0; LOC30 = usesnativegc_169177_2607990831(); LOC28 = !(LOC30); LA29: ; if (!LOC28) goto LA31; memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = rdloc_538188_839829468(dest0); LOC33[1] = rdloc_538188_839829468(src0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_253), LOC33, 2); } goto LA26; LA31: ; { Tloc292816 tmp0; TY535238 LOC37; TY178507 LOC38; if (!((*dest0).s == ((Tstorageloc292812) 3))) goto LA35; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_537032_839829468(p0, ty0, (&tmp0), NIM_FALSE); memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = rdloc_538188_839829468(dest0); LOC37[1] = rdloc_538188_839829468(src0); LOC37[2] = rdloc_538188_839829468((&tmp0)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_254), LOC37, 3); memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = rdloc_538188_839829468((&tmp0)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC38, 1); } goto LA26; LA35: ; { TY532811 LOC40; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = addrloc_538204_839829468(dest0); LOC40[1] = rdloc_538188_839829468(src0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_256), LOC40, 2); } LA26: ; } LA19: ; } break; case ((Ttypekind292244) 25): { { 
NIM_BOOL LOC44; Tloc292816 a0; Ropeobj178006* LOC47; Tloc292816 LOC48; Tloc292816 b0; Ropeobj178006* LOC49; Tloc292816 LOC50; TY532811 LOC51; LOC44 = (NIM_BOOL)0; LOC44 = needscomplexassignment_533511_839829468((*dest0).t); if (!LOC44) goto LA45; memset((void*)(&a0), 0, sizeof(a0)); LOC47 = (Ropeobj178006*)0; LOC47 = rope_178277_2381377266(((NimStringDesc*) &T839829468_258)); memset((void*)(&LOC48), 0, sizeof(LOC48)); optasgnloc_549789_839829468(dest0, (*dest0).t, LOC47, (&LOC48)); memcpy((void*)(&a0), (NIM_CONST void*)(&LOC48), sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); LOC49 = (Ropeobj178006*)0; LOC49 = rope_178277_2381377266(((NimStringDesc*) &T839829468_258)); memset((void*)(&LOC50), 0, sizeof(LOC50)); optasgnloc_549789_839829468(src0, (*dest0).t, LOC49, (&LOC50)); memcpy((void*)(&b0), (NIM_CONST void*)(&LOC50), sizeof(b0)); genrefassign_538311_839829468(p0, (&a0), (&b0), flags0); memset((void*)LOC51, 0, sizeof(LOC51)); LOC51[0] = rdloc_538188_839829468(dest0); LOC51[1] = rdloc_538188_839829468(src0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_259), LOC51, 2); } goto LA42; LA45: ; { TY532811 LOC53; memset((void*)LOC53, 0, sizeof(LOC53)); LOC53[0] = rdloc_538188_839829468(dest0); LOC53[1] = rdloc_538188_839829468(src0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC53, 2); } LA42: ; } break; case ((Ttypekind292244) 18): { { NIM_BOOL LOC57; LOC57 = (NIM_BOOL)0; LOC57 = needscomplexassignment_533511_839829468((*dest0).t); if (!LOC57) goto LA58; { NI LOC62; LOC62 = (NI)0; LOC62 = len_295339_850551059((*dest0).t); if (!(LOC62 <= ((NI) 4))) goto LA63; genoptasgntuple_550001_839829468(p0, dest0, src0, flags0); } goto LA60; LA63: ; { gengenericasgn_550167_839829468(p0, dest0, src0, flags0); } LA60: ; } goto LA55; LA58: ; { TY532811 LOC67; memset((void*)LOC67, 0, sizeof(LOC67)); LOC67[0] = rdloc_538188_839829468(dest0); LOC67[1] = rdloc_538188_839829468(src0); 
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC67, 2); } LA55: ; } break; case ((Ttypekind292244) 17): { { NIM_BOOL LOC71; TY532811 LOC74; LOC71 = (NIM_BOOL)0; LOC71 = isimportedcpptype_533478_839829468(ty0); if (!LOC71) goto LA72; memset((void*)LOC74, 0, sizeof(LOC74)); LOC74[0] = rdloc_538188_839829468(dest0); LOC74[1] = rdloc_538188_839829468(src0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC74, 2); } goto LA69; LA72: ; { NIM_BOOL LOC76; LOC76 = (NIM_BOOL)0; LOC76 = isobjlackingtypefield_533515_839829468(ty0); if (!!(LOC76)) goto LA77; gengenericasgn_550167_839829468(p0, dest0, src0, flags0); } goto LA69; LA77: ; { NIM_BOOL LOC80; LOC80 = (NIM_BOOL)0; LOC80 = needscomplexassignment_533511_839829468(ty0); if (!LOC80) goto LA81; { NIM_BOOL LOC85; NI LOC87; Ropeobj178006* LOC90; LOC85 = (NIM_BOOL)0; LOC85 = (*ty0).sons->data[((NI) 0)] == 0; if (!(LOC85)) goto LA86; LOC87 = (NI)0; LOC87 = asgncomplexity_549751_839829468((*ty0).n); LOC85 = (LOC87 <= ((NI) 4)); LA86: ; if (!LOC85) goto LA88; LOC90 = (Ropeobj178006*)0; LOC90 = gettypedesc_535673_839829468((*p0).module, ty0); ty0 = getuniquetype_528640_2036603609(ty0); { NimStringDesc* LOC95; if (!!(!(((*ty0).n == NIM_NIL)))) goto LA93; LOC95 = (NimStringDesc*)0; LOC95 = HEX24_196185_1689653243(T839829468_264); internalerror_196113_155036129(LOC95); } LA93: ; genoptasgnobject_550084_839829468(p0, dest0, src0, flags0, (*ty0).n); } goto LA83; LA88: ; { gengenericasgn_550167_839829468(p0, dest0, src0, flags0); } LA83: ; } goto LA69; LA81: ; { TY532811 LOC98; memset((void*)LOC98, 0, sizeof(LOC98)); LOC98[0] = rdloc_538188_839829468(dest0); LOC98[1] = rdloc_538188_839829468(src0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC98, 2); } LA69: ; } break; case ((Ttypekind292244) 16): case ((Ttypekind292244) 4): { { NIM_BOOL LOC102; LOC102 = (NIM_BOOL)0; LOC102 = 
needscomplexassignment_533511_839829468((*dest0).t); if (!LOC102) goto LA103; gengenericasgn_550167_839829468(p0, dest0, src0, flags0); } goto LA100; LA103: ; { TY535238 LOC106; usestringh_532345_839829468((*p0).module); memset((void*)LOC106, 0, sizeof(LOC106)); LOC106[0] = rdloc_538188_839829468(dest0); LOC106[1] = rdloc_538188_839829468(src0); LOC106[2] = gettypedesc_535673_839829468((*p0).module, ty0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_261), LOC106, 3); } LA100: ; } break; case ((Ttypekind292244) 27): case ((Ttypekind292244) 48): { { NIM_BOOL LOC110; TY535238 LOC113; LOC110 = (NIM_BOOL)0; LOC110 = needscomplexassignment_533511_839829468((*dest0).t); if (!LOC110) goto LA111; memset((void*)LOC113, 0, sizeof(LOC113)); LOC113[0] = addrloc_538204_839829468(dest0); LOC113[1] = addrloc_538204_839829468(src0); LOC113[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_266), LOC113, 3); } goto LA108; LA111: ; { TY532811 LOC115; usestringh_532345_839829468((*p0).module); memset((void*)LOC115, 0, sizeof(LOC115)); LOC115[0] = rdloc_538188_839829468(dest0); LOC115[1] = rdloc_538188_839829468(src0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_267), LOC115, 2); } LA108: ; } break; case ((Ttypekind292244) 19): { { Tctypekind529007 LOC119; TY535238 LOC122; NI64 LOC123; LOC119 = (Tctypekind529007)0; LOC119 = maptype_533394_839829468(ty0); if (!(LOC119 == ((Tctypekind529007) 17))) goto LA120; usestringh_532345_839829468((*p0).module); memset((void*)LOC122, 0, sizeof(LOC122)); LOC122[0] = rdloc_538188_839829468(dest0); LOC122[1] = rdloc_538188_839829468(src0); LOC123 = (NI64)0; LOC123 = getsize_320135_3876443242((*dest0).t); LOC122[2] = rope_178401_2381377266(LOC123); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_268), LOC122, 3); } goto LA117; LA120: ; { 
TY532811 LOC125; memset((void*)LOC125, 0, sizeof(LOC125)); LOC125[0] = rdloc_538188_839829468(dest0); LOC125[1] = rdloc_538188_839829468(src0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC125, 2); } LA117: ; } break; case ((Ttypekind292244) 21): case ((Ttypekind292244) 26): case ((Ttypekind292244) 2): case ((Ttypekind292244) 1): case ((Ttypekind292244) 14): case ((Ttypekind292244) 29): case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44): case ((Ttypekind292244) 20): case ((Ttypekind292244) 23): { TY532811 LOC127; memset((void*)LOC127, 0, sizeof(LOC127)); LOC127[0] = rdloc_538188_839829468(dest0); LOC127[1] = rdloc_538188_839829468(src0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC127, 2); } break; default: { NimStringDesc* LOC129; LOC129 = (NimStringDesc*)0; LOC129 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 15); appendString(LOC129, ((NimStringDesc*) &T839829468_269)); appendString(LOC129, reprEnum((NI)(*ty0).kind, (&NTI292244))); internalerror_196113_155036129(LOC129); } break; } }BeforeRet: ; } N_NIMCALL(void, putlocintodest_539258_839829468)(Tcproc529021* p0, Tloc292816* d0, Tloc292816* s0) { { if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3; { if (!(((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0)) goto LA7; genassignment_539264_839829468(p0, (&(*d0)), s0, 0); } goto LA5; LA7: ; { genassignment_539264_839829468(p0, (&(*d0)), s0, 1); } LA5: ; } goto LA1; LA3: ; { genericAssign((void*)(&(*d0)), (void*)s0, (&NTI292816)); } LA1: ; } N_NIMCALL(NIM_BOOL, issimpleconst_532311_839829468)(Ttype292840* typ0) { NIM_BOOL result0; Ttype292840* t0; NIM_BOOL LOC1; NIM_BOOL LOC3; result0 = (NIM_BOOL)0; t0 = skiptypes_296099_850551059(typ0, IL64(211106240964864)); LOC1 = (NIM_BOOL)0; LOC1 = !(((17760272 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0)); if (!(LOC1)) goto LA2; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).kind == ((Ttypekind292244) 25)); if 
(!(LOC3)) goto LA4; LOC3 = ((*t0).callconv == ((Tcallingconvention292002) 8)); LA4: ; LOC1 = !(LOC3); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, putintodest_550468_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0, Tstorageloc292812 s0) { Tloc292816 a0; memset((void*)(&a0), 0, sizeof(a0)); { if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3; initloc_532273_839829468((&a0), ((Tlockind292808) 6), t0, s0); a0.r = r0; { if (!(((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0)) goto LA7; genassignment_539264_839829468(p0, (&(*d0)), (&a0), 0); } goto LA5; LA7: ; { genassignment_539264_839829468(p0, (&(*d0)), (&a0), 1); } LA5: ; } goto LA1; LA3: ; { (*d0).k = ((Tlockind292808) 6); unsureAsgnRef((void**) (&(*d0).t), t0); unsureAsgnRef((void**) (&(*d0).r), r0); } LA1: ; } N_NIMCALL(NI64, bitsettoword_549578_839829468)(Tbitset339004* s0, NI size0) { NI64 result0; result0 = (NI64)0; result0 = IL64(0); { NI j_549612_839829468; NI HEX3Atmp_549622_839829468; NI res_549625_839829468; j_549612_839829468 = (NI)0; HEX3Atmp_549622_839829468 = (NI)0; HEX3Atmp_549622_839829468 = (NI)(size0 - ((NI) 1)); res_549625_839829468 = ((NI) 0); { while (1) { if (!(res_549625_839829468 <= HEX3Atmp_549622_839829468)) goto LA3; j_549612_839829468 = res_549625_839829468; { if (!(j_549612_839829468 < (s0 ? 
s0->Sup.len : 0))) goto LA6; result0 = (NI64)(result0 | (NI64)((NU64)(((NI64)(NU64)(NU8)(s0->data[j_549612_839829468]))) << (NU64)(((NI64) ((NI)(j_549612_839829468 * ((NI) 8))))))); } LA6: ; res_549625_839829468 += ((NI) 1); } LA3: ; } } return result0; } N_NIMCALL(Ropeobj178006*, genrawsetdata_549629_839829468)(Tbitset339004* cs0, NI size0) { Ropeobj178006* result0; NimStringDesc* frmt0; result0 = (Ropeobj178006*)0; frmt0 = (NimStringDesc*)0; { TY533289 LOC5; if (!(((NI) 8) < size0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_273), LOC5, 0); { NI i_549649_839829468; NI HEX3Atmp_549657_839829468; NI res_549660_839829468; i_549649_839829468 = (NI)0; HEX3Atmp_549657_839829468 = (NI)0; HEX3Atmp_549657_839829468 = (NI)(size0 - ((NI) 1)); res_549660_839829468 = ((NI) 0); { while (1) { TY178507 LOC19; NimStringDesc* LOC20; if (!(res_549660_839829468 <= HEX3Atmp_549657_839829468)) goto LA8; i_549649_839829468 = res_549660_839829468; { if (!(i_549649_839829468 < (NI)(size0 - ((NI) 1)))) goto LA11; { if (!(((NI) ((NI)((NI)(i_549649_839829468 + ((NI) 1)) % ((NI) 8)))) == ((NI) 0))) goto LA15; frmt0 = copyString(((NimStringDesc*) &T839829468_274)); } goto LA13; LA15: ; { frmt0 = copyString(((NimStringDesc*) &T839829468_275)); } LA13: ; } goto LA9; LA11: ; { frmt0 = copyString(((NimStringDesc*) &T839829468_276)); } LA9: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC20 = (NimStringDesc*)0; LOC20 = nsuToHex(((NI64)(NU64)(NU8)(cs0->data[i_549649_839829468])), ((NI) 2)); LOC19[0] = rope_178277_2381377266(LOC20); addf_179205_2381377266(&result0, frmt0, LOC19, 1); res_549660_839829468 += ((NI) 1); } LA8: ; } } } goto LA1; LA3: ; { NI64 LOC22; LOC22 = (NI64)0; LOC22 = bitsettoword_549578_839829468(cs0, size0); result0 = intliteral_539270_839829468(LOC22); } LA1: ; return result0; } N_NIMCALL(void, appcg_532640_839829468)(Tcgen529027* m0, Tcfilesection529005 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI 
args0Len0) { Ropeobj178006* LOC1; LOC1 = (Ropeobj178006*)0; LOC1 = ropecg_532407_839829468(m0, frmt0, args0, args0Len0); add_178482_2381377266(&(*m0).s[(s0)- 0], LOC1); } N_NIMCALL(Ropeobj178006*, genconstseq_559371_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* t0) { Ropeobj178006* result0; Ropeobj178006* data0; TY178507 LOC1; NI LOC2; TY535235 LOC18; NI LOC19; TY532811 LOC20; result0 = (Ropeobj178006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (NI)0; LOC2 = len_293081_850551059(n0); LOC1[0] = rope_178401_2381377266(((NI64) (LOC2))); data0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_277), LOC1, 1); { NI LOC5; LOC5 = (NI)0; LOC5 = len_293081_850551059(n0); if (!(((NI) 0) < LOC5)) goto LA6; add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_278)); { NI i_559395_839829468; NI HEX3Atmp_559411_839829468; NI LOC9; NI res_559414_839829468; i_559395_839829468 = (NI)0; HEX3Atmp_559411_839829468 = (NI)0; LOC9 = (NI)0; LOC9 = len_293081_850551059(n0); HEX3Atmp_559411_839829468 = (NI)(LOC9 - ((NI) 1)); res_559414_839829468 = ((NI) 0); { while (1) { Ropeobj178006* LOC17; if (!(res_559414_839829468 <= HEX3Atmp_559411_839829468)) goto LA11; i_559395_839829468 = res_559414_839829468; { TY533289 LOC16; if (!(((NI) 0) < i_559395_839829468)) goto LA14; memset((void*)LOC16, 0, sizeof(LOC16)); addf_179205_2381377266(&data0, ((NimStringDesc*) &T839829468_279), LOC16, 0); } LA14: ; LOC17 = (Ropeobj178006*)0; LOC17 = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[i_559395_839829468]); add_178482_2381377266(&data0, LOC17); res_559414_839829468 += ((NI) 1); } LA11: ; } } add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_280)); } LA6: ; add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_280)); result0 = gettempname_533598_839829468((*p0).module); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = gettypedesc_535673_839829468((*p0).module, (*t0).sons->data[((NI) 0)]); LOC19 = (NI)0; LOC19 = len_293081_850551059(n0); 
/* Continuation: tail of genconstseq — emits the constant's declaration into
   file section 8 via appcg and returns a cast/reference expression built
   from the temp name. */
LOC18[1] = rope_178401_2381377266(((NI64) (LOC19))); LOC18[2] = result0; LOC18[3] = data0; appcg_532640_839829468((*p0).module, ((Tcfilesection529005) 8), ((NimStringDesc*) &T839829468_281), LOC18, 4); memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = gettypedesc_535673_839829468((*p0).module, t0); LOC20[1] = result0; result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_282), LOC20, 2); return result0; } 
/* genNamedConstExpr: if n0 is node kind 34 (presumably nkExprColonExpr —
   a "name: value" pair), generate the const expr of its second son,
   otherwise of n0 itself. */
N_NIMCALL(Ropeobj178006*, gennamedconstexpr_559284_839829468)(Tcproc529021* p0, Tnode292802* n0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { if (!((*n0).kind == ((Tnodekind292020) 34))) goto LA3; result0 = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]); } goto LA1; LA3: ; { result0 = genconstexpr_554849_839829468(p0, n0); } LA1: ; return result0; } 
/* genConstSimpleList: renders "{son, son, ...}" for a flat constant
   aggregate. The start index skips son 0 when n0 is node kind 38
   (looks like nkObjConstr, whose first son is the type — TODO confirm);
   the last son is emitted separately (continues on the next line). */
N_NIMCALL(Ropeobj178006*, genconstsimplelist_559299_839829468)(Tcproc529021* p0, Tnode292802* n0) { Ropeobj178006* result0; NI length0; TY533289 LOC10; result0 = (Ropeobj178006*)0; length0 = sonslen_295351_850551059(n0); result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_223)); { NI i_559333_839829468; NI HEX3Atmp_559362_839829468; NI HEX3Atmp_559363_839829468; NI res_559366_839829468; i_559333_839829468 = (NI)0; HEX3Atmp_559362_839829468 = (NI)0; HEX3Atmp_559363_839829468 = (NI)0; HEX3Atmp_559362_839829468 = ((*n0).kind == ((Tnodekind292020) 38)); HEX3Atmp_559363_839829468 = (NI)(length0 - ((NI) 2)); res_559366_839829468 = ((NI) (HEX3Atmp_559362_839829468)); { while (1) { TY178507 LOC4; if (!(res_559366_839829468 <= HEX3Atmp_559363_839829468)) goto LA3; i_559333_839829468 = res_559366_839829468; memset((void*)LOC4, 0, sizeof(LOC4)); LOC4[0] = gennamedconstexpr_559284_839829468(p0, (*n0).kindU.S6.sons->data[i_559333_839829468]); addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_283), LOC4, 1); res_559366_839829468 += ((NI) 1); } LA3: ; } } { Ropeobj178006* LOC9; if (!(((NI) (((*n0).kind == ((Tnodekind292020) 38)))) < length0)) goto LA7; LOC9 = 
/* Continuation: tail of genconstsimplelist — append the final son (without a
   trailing separator) and the closing brace. */
(Ropeobj178006*)0; LOC9 = gennamedconstexpr_559284_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))]); add_178482_2381377266(&result0, LOC9); } LA7: ; memset((void*)LOC10, 0, sizeof(LOC10)); addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_160), LOC10, 0); return result0; } 
/* genConstExpr: top-level dispatcher for constant-expression code generation,
   switching on node kind: 58/59 (conversion-like wrappers — recurse into
   son 1), 39 (set literal — convert to a bitset and render via
   genrawsetdata), 41/37/155/38 (aggregate literals — seq type (kind 24) goes
   to genconstseq, everything else to genconstsimplelist), default: evaluate
   as an ordinary expression and read its location. */
N_NIMCALL(Ropeobj178006*, genconstexpr_554849_839829468)(Tcproc529021* p0, Tnode292802* n0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; switch ((*n0).kind) { case ((Tnodekind292020) 58): case ((Tnodekind292020) 59): { result0 = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]); } break; case ((Tnodekind292020) 39): { Tbitset339004* cs0; NI64 LOC3; cs0 = (Tbitset339004*)0; tobitset_340001_452470228(n0, (&cs0)); LOC3 = (NI64)0; LOC3 = getsize_320135_3876443242((*n0).typ); result0 = genrawsetdata_549629_839829468(cs0, ((NI) (LOC3))); } break; case ((Tnodekind292020) 41): case ((Tnodekind292020) 37): case ((Tnodekind292020) 155): case ((Tnodekind292020) 38): { Ttype292840* t0; t0 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256)); { if (!((*t0).kind == ((Ttypekind292244) 24))) goto LA7; result0 = genconstseq_559371_839829468(p0, n0, t0); } goto LA5; LA7: ; { result0 = genconstsimplelist_559299_839829468(p0, n0); } LA5: ; } break; default: { Tloc292816 d0; memset((void*)(&d0), 0, sizeof(d0)); initlocexpr_539283_839829468(p0, n0, (&d0)); result0 = rdloc_538188_839829468((&d0)); } break; } return result0; } 
/* requestConstImpl: ensures const symbol sym0 has a C-level definition. Sets
   up its loc (mangled name) if unset, bails out early if a loc flag (bit 3)
   is already set, then (next line) emits the definition into the owning
   module and an extern declaration into dependent modules/headers. */
N_NIMCALL(void, requestconstimpl_539240_839829468)(Tcproc529021* p0, Tsym292834* sym0) { Tcgen529027* m0; Tcgen529027* q0; { m0 = (*p0).module; useheader_532369_839829468(m0, sym0); { Ropeobj178006* LOC5; if (!((*sym0).loc.k == ((Tlockind292808) 0))) goto LA3; LOC5 = (Ropeobj178006*)0; LOC5 = manglename_533205_839829468(sym0); fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 8), (*sym0).typ, LOC5, ((Tstorageloc292812) 1)); } LA3: ; { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA8; 
/* Continuation: rest of requestconstimpl — find the pending module q0 that
   owns sym0; if not yet declared there, emit the const definition (type,
   mangled name, genconstexpr of sym0.ast) into q0's section 8; if the
   current module differs and hasn't declared it, add an extern header
   declaration (and mirror it into the generated header when the symbol has
   the "exported to header" flag, bit 6). */
goto BeforeRet; } LA8: ; q0 = findpendingmodule_532241_839829468(m0, sym0); { NIM_BOOL LOC12; NIM_BOOL LOC14; TY535238 LOC17; LOC12 = (NIM_BOOL)0; LOC12 = !((q0 == NIM_NIL)); if (!(LOC12)) goto LA13; LOC14 = (NIM_BOOL)0; LOC14 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*sym0).Sup.id); LOC12 = !(LOC14); LA13: ; if (!LOC12) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_535673_839829468(q0, (*sym0).typ); LOC17[1] = (*sym0).loc.r; LOC17[2] = genconstexpr_554849_839829468((*q0).initproc, (*sym0).ast); addf_179205_2381377266(&(*q0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3); } LA15: ; { NIM_BOOL LOC20; NIM_BOOL LOC22; Ropeobj178006* headerdecl0; TY532811 LOC25; LOC20 = (NIM_BOOL)0; LOC20 = !((q0 == m0)); if (!(LOC20)) goto LA21; LOC22 = (NIM_BOOL)0; LOC22 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id); LOC20 = !(LOC22); LA21: ; if (!LOC20) goto LA23; memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = gettypedesc_535673_839829468(m0, (*sym0).loc.t); LOC25[1] = (*sym0).loc.r; headerdecl0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_284), LOC25, 2); add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], headerdecl0); { NIM_BOOL LOC28; LOC28 = (NIM_BOOL)0; LOC28 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0); if (!(LOC28)) goto LA29; LOC28 = !((generatedheader_532201_839829468 == NIM_NIL)); LA29: ; if (!LOC28) goto LA30; add_178482_2381377266(&(*generatedheader_532201_839829468).s[(((Tcfilesection529005) 8))- 0], headerdecl0); } LA30: ; } LA23: ; }BeforeRet: ; } 
/* genComplexConst: request the const's C definition, then copy its location
   into the destination loc d0. */
N_NIMCALL(void, gencomplexconst_558249_839829468)(Tcproc529021* p0, Tsym292834* sym0, Tloc292816* d0) { requestconstimpl_539240_839829468(p0, sym0); putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc)); } 
/* procSec: returns the address of section rope s0 of the proc's outermost
   block (blocks->data[0]). Body completes on the next line. */
static N_INLINE(Ropeobj178006**, procsec_529194_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0) { Ropeobj178006** result0; result0 = (Ropeobj178006**)0; 
/* Continuation: tail of procsec — index into the outermost block's sections. */
result0 = &(*p0).blocks->data[((NI) 0)].sections[(s0)- 0]; return result0; } 
/* accessThreadLocalVar: when thread vars are emulated and this proc has not
   yet touched one, mark the proc (threadvaraccessed) and module (flag bit 1),
   then emit a one-time declaration into proc section 0 and an init snippet
   into proc section 1. Runs at most once per proc. */
N_NIMCALL(void, accessthreadlocalvar_532945_839829468)(Tcproc529021* p0, Tsym292834* s0) { { NIM_BOOL LOC3; Ropeobj178006** LOC7; TY533289 LOC8; Ropeobj178006** LOC9; TY533289 LOC10; Ropeobj178006* LOC11; LOC3 = (NIM_BOOL)0; LOC3 = emulatedthreadvars_532949_839829468(); if (!(LOC3)) goto LA4; LOC3 = !((*p0).threadvaraccessed); LA4: ; if (!LOC3) goto LA5; (*p0).threadvaraccessed = NIM_TRUE; (*(*p0).module).flags |= ((NU8)1)<<((((Codegenflag529025) 1))%(sizeof(NU8)*8)); LOC7 = (Ropeobj178006**)0; LOC7 = procsec_529194_3723162438(p0, ((Tcprocsection529011) 0)); memset((void*)LOC8, 0, sizeof(LOC8)); addf_179205_2381377266(LOC7, ((NimStringDesc*) &T839829468_286), LOC8, 0); LOC9 = (Ropeobj178006**)0; LOC9 = procsec_529194_3723162438(p0, ((Tcprocsection529011) 1)); memset((void*)LOC10, 0, sizeof(LOC10)); LOC11 = (Ropeobj178006*)0; LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_287), LOC10, 0); add_178482_2381377266(LOC9, LOC11); } LA5: ; } 
/* isEmptyType: true when t0 is nil or its kind is in a 64-bit mask of
   "empty" type kinds (the IL64 constant is a kind bitset). */
static N_INLINE(NIM_BOOL, isemptytype_297441_850551059)(Ttype292840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = (t0 == NIM_NIL); if (LOC1) goto LA2; LOC1 = ((IL64(4611686018427388032) &((NU64)1<<((NU)((*t0).kind)&63U)))!=0); LA2: ; result0 = LOC1; return result0; } 
/* putDataIntoDest: stores expression text r0 (of type t0) into destination
   loc d0. If d0 is already initialized, wrap r0 in a temporary loc and
   generate an assignment (flag bit 2 selects the assignment variant);
   otherwise just fill d0 in place (tail continues on the next line). */
N_NIMCALL(void, putdataintodest_550436_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0) { Tloc292816 a0; memset((void*)(&a0), 0, sizeof(a0)); { if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3; initloc_532273_839829468((&a0), ((Tlockind292808) 8), t0, ((Tstorageloc292812) 1)); a0.r = r0; { if (!(((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0)) goto LA7; genassignment_539264_839829468(p0, (&(*d0)), (&a0), 0); } goto LA5; LA7: ; { genassignment_539264_839829468(p0, (&(*d0)), (&a0), 1); } LA5: ; } goto LA1; LA3: ; { (*d0).k = ((Tlockind292808) 8); unsureAsgnRef((void**) (&(*d0).t), 
/* Continuation: tail of putdataintodest — the "d0 was empty" branch: store
   type and rope directly into d0 via the GC write barrier. */
t0); unsureAsgnRef((void**) (&(*d0).r), r0); } LA1: ; } 
/* freshLineInfo: returns true (and records info0 as p0->lastlineinfo) iff
   line or fileindex differs from the last emitted line info; used to avoid
   duplicate #line/debug directives. */
N_NIMCALL(NIM_BOOL, freshlineinfo_532818_839829468)(Tcproc529021* p0, Tlineinfo191336 info0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = !(((*p0).lastlineinfo.line == info0.line)); if (LOC3) goto LA4; LOC3 = !(((*p0).lastlineinfo.fileindex == info0.fileindex)); LA4: ; if (!LOC3) goto LA5; (*p0).lastlineinfo.line = info0.line; (*p0).lastlineinfo.fileindex = info0.fileindex; result0 = NIM_TRUE; } LA5: ; return result0; } 
/* genLineDir: emits source-line bookkeeping for node t0 into proc section 2
   (statements): optionally an echoed source-line comment when global option
   bit 28 is set, then a C #line directive (genclinedir), then — depending on
   p0->options masks 163840 / 98304 and the enclosing proc's flags — a
   debug/stacktrace line-tracking statement, guarded by freshlineinfo
   (branches continue on the next line). */
N_NIMCALL(void, genlinedir_532823_839829468)(Tcproc529021* p0, Tnode292802* t0) { NI line0; Ropeobj178006** LOC11; NimStringDesc* LOC12; line0 = safelinenm_532721_839829468((*t0).info); { Ropeobj178006** LOC5; TY533289 LOC6; Ropeobj178006* LOC7; Ropeobj178006* LOC8; Ropeobj178006* LOC9; Ropeobj178006* LOC10; if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 28))&63U)))!=0)) goto LA3; LOC5 = (Ropeobj178006**)0; LOC5 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); memset((void*)LOC6, 0, sizeof(LOC6)); LOC7 = (Ropeobj178006*)0; LOC7 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_293), LOC6, 0); LOC8 = (Ropeobj178006*)0; LOC8 = sourceline_192065_155036129((*t0).info); LOC9 = (Ropeobj178006*)0; LOC9 = HEX26_178418_2381377266(LOC7, LOC8); LOC10 = (Ropeobj178006*)0; LOC10 = HEX26_178418_2381377266(LOC9, rnl_178903_2381377266); add_178482_2381377266(LOC5, LOC10); } LA3: ; LOC11 = (Ropeobj178006**)0; LOC11 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); LOC12 = (NimStringDesc*)0; LOC12 = tofullpath_192261_155036129((*t0).info.fileindex); genclinedir_532725_839829468(LOC11, LOC12, line0); { NIM_BOOL LOC15; NIM_BOOL LOC17; LOC15 = (NIM_BOOL)0; LOC15 = ((163840 & (*p0).options) == 163840); if (!(LOC15)) goto LA16; LOC17 = (NIM_BOOL)0; LOC17 = ((*p0).prc == NIM_NIL); if (LOC17) goto LA18; LOC17 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)); LA18: ; LOC15 = LOC17; LA16: ; if (!LOC15) goto 
/* Continuation: tail of genlinedir — two alternative line-tracking emissions
   (format strings T839829468_294 vs _295, the latter using a quoted
   filename), each guarded by freshlineinfo so a line is reported once. */
LA19; { NIM_BOOL LOC23; TY532811 LOC26; NimStringDesc* LOC27; LOC23 = (NIM_BOOL)0; LOC23 = freshlineinfo_532818_839829468(p0, (*t0).info); if (!LOC23) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = rope_178401_2381377266(((NI64) (line0))); LOC27 = (NimStringDesc*)0; LOC27 = tofilename_192257_155036129((*t0).info.fileindex); LOC26[1] = makecstring_191638_155036129(LOC27); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_294), LOC26, 2); } LA24: ; } goto LA13; LA19: ; { NIM_BOOL LOC29; NIM_BOOL LOC30; NIM_BOOL LOC32; LOC29 = (NIM_BOOL)0; LOC30 = (NIM_BOOL)0; LOC30 = ((98304 & (*p0).options) == 98304); if (!(LOC30)) goto LA31; LOC32 = (NIM_BOOL)0; LOC32 = ((*p0).prc == NIM_NIL); if (LOC32) goto LA33; LOC32 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)); LA33: ; LOC30 = LOC32; LA31: ; LOC29 = LOC30; if (!(LOC29)) goto LA34; LOC29 = (((NI32) 0) <= (*t0).info.fileindex); LA34: ; if (!LOC29) goto LA35; { NIM_BOOL LOC39; TY532811 LOC42; LOC39 = (NIM_BOOL)0; LOC39 = freshlineinfo_532818_839829468(p0, (*t0).info); if (!LOC39) goto LA40; memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = rope_178401_2381377266(((NI64) (line0))); LOC42[1] = quotedfilename_196818_155036129((*t0).info); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_295), LOC42, 2); } LA40: ; } goto LA13; LA35: ; LA13: ; } 
/* getLabel: returns a fresh C label rope — prefix string T839829468_296
   concatenated with the incremented per-proc label counter. */
N_NIMCALL(Ropeobj178006*, getlabel_539217_839829468)(Tcproc529021* p0) { Ropeobj178006* result0; Ropeobj178006* LOC1; result0 = (Ropeobj178006*)0; (*p0).labels += ((NI) 1); LOC1 = (Ropeobj178006*)0; LOC1 = rope_178401_2381377266(((NI64) ((*p0).labels))); result0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC1); return result0; } 
/* fixLabel: emits the label definition (format T839829468_299, completed on
   the next line) into proc section 2 so a previously issued getLabel target
   exists in the output. */
N_NIMCALL(void, fixlabel_539230_839829468)(Tcproc529021* p0, Ropeobj178006* labl0) { TY178507 LOC1; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = labl0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) 
/* Continuation: final argument list of fixlabel's linef call. */
&T839829468_299), LOC1, 1); } 
/* genAndOr: short-circuit `and`/`or` codegen. Evaluates son 1 into a temp,
   then emits a conditional jump to a fresh label — magic 127 selects one
   jump form (T839829468_297) vs the other (_298), i.e. jump-if-false vs
   jump-if-true — evaluates son 2 into the same temp, places the label, and
   finally copies the temp into d0 (genericAssign when d0 was empty).
   splitdecls is bumped around the whole sequence. */
N_NIMCALL(void, genandor_554311_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) { Ropeobj178006* L0; Tloc292816 tmp0; L0 = (Ropeobj178006*)0; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_537032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE); (*p0).splitdecls += ((NI) 1); expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0)); L0 = getlabel_539217_839829468(p0); { TY532811 LOC5; if (!(m0 == ((Tmagic292524) 127))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_538188_839829468((&tmp0)); LOC5[1] = L0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_297), LOC5, 2); } goto LA1; LA3: ; { TY532811 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_538188_839829468((&tmp0)); LOC7[1] = L0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_298), LOC7, 2); } LA1: ; expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&tmp0)); fixlabel_539230_839829468(p0, L0); { if (!((*d0).k == ((Tlockind292808) 0))) goto LA10; genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816)); } goto LA8; LA10: ; { genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0); } LA8: ; (*p0).splitdecls -= ((NI) 1); } 
/* unaryArith: unchecked unary arithmetic. Looks up the format string in
   unarithtab indexed by (op0 - 99) and instantiates it with the operand,
   the type's bit size (getsize * 8), and the simple C type descriptor
   (result emission continues on the next line). */
N_NIMCALL(void, unaryarith_552646_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) { Tloc292816 a0; Ttype292840* t0; TY535238 LOC1; NI64 LOC2; Ropeobj178006* LOC3; memset((void*)(&a0), 0, sizeof(a0)); t0 = (Ttype292840*)0; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_538188_839829468((&a0)); LOC2 = (NI64)0; LOC2 = getsize_320135_3876443242(t0); LOC1[1] = rope_178401_2381377266((NI64)(LOC2 * IL64(8))); LOC1[2] = getsimpletypedesc_533936_839829468((*p0).module, (*e0).typ); LOC3 = (Ropeobj178006*)0; LOC3 = 
/* Continuation: tail of unaryarith — format the table entry and put the
   expression into the destination. */
HEX25_178905_2381377266(unarithtab_552653_839829468[(op0)- 99], LOC1, 3); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC3, ((Tstorageloc292812) 0)); } 
/* unaryArithOverflow: unary arithmetic (e.g. negation) with an overflow
   guard. When option bit 5 (overflow checks — presumably optOverflowCheck)
   is set, first emits a check comparing the operand against the type's
   firstord (format T839829468_317); then emits the operation itself from the
   opr table indexed by (m0 - 96), parameterized with the bit size. */
N_NIMCALL(void, unaryarithoverflow_551633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) { Tloc292816 a0; Ttype292840* t0; TY532811 LOC7; NI64 LOC8; Ropeobj178006* LOC9; memset((void*)(&a0), 0, sizeof(a0)); t0 = (Ttype292840*)0; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832)); { TY532811 LOC5; NI64 LOC6; if (!(((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_538188_839829468((&a0)); LOC6 = (NI64)0; LOC6 = firstord_320001_3876443242(t0); LOC5[1] = intliteral_539270_839829468(LOC6); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_317), LOC5, 2); } LA3: ; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_538188_839829468((&a0)); LOC8 = (NI64)0; LOC8 = getsize_320135_3876443242(t0); LOC7[1] = rope_178401_2381377266((NI64)(LOC8 * IL64(8))); LOC9 = (Ropeobj178006*)0; LOC9 = HEX25_178905_2381377266(opr_551640_839829468[(m0)- 96], LOC7, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC9, ((Tstorageloc292812) 0)); } 
/* binaryArith: unchecked binary arithmetic. Evaluates both operands, takes
   the larger operand size in bits as the operation width, and instantiates
   the format from binarithtab indexed by (op0 - 52) (continues on the next
   line). */
N_NIMCALL(void, binaryarith_551819_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) { Tloc292816 a0; Tloc292816 b0; NI64 s0; NI64 LOC1; NI64 LOC2; TY535235 LOC3; Ropeobj178006* LOC4; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); s0 = (NI64)0; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); LOC1 = (NI64)0; LOC1 = getsize_320135_3876443242(a0.t); LOC2 = (NI64)0; LOC2 = getsize_320135_3876443242(b0.t); s0 = (NI64)(((LOC1 >= LOC2) ? 
/* Continuation: tail of binaryarith — build the argument array (both
   operands, width in bits, simple type descriptor), format the table entry
   and store into the destination. */
LOC1 : LOC2) * IL64(8)); memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = rdloc_538188_839829468((&a0)); LOC3[1] = rdloc_538188_839829468((&b0)); LOC3[2] = rope_178401_2381377266(s0); LOC3[3] = getsimpletypedesc_533936_839829468((*p0).module, (*e0).typ); LOC4 = (Ropeobj178006*)0; LOC4 = HEX25_178905_2381377266(binarithtab_551826_839829468[(op0)- 52], LOC3, 4); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC4, ((Tstorageloc292812) 0)); } 
/* binaryFloatArith: binary float arithmetic. When any of the option bits in
   mask 384 are set (float-check options — presumably NaN/Inf checks), emits
   the operation via an operator string from opr[(m0-52)] plus, per option
   bit 7 and bit 8, a post-check on the result (formats T839829468_323/_324,
   finishing on the next line); otherwise falls back to plain binaryarith. */
N_NIMCALL(void, binaryfloatarith_556729_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) { { Tloc292816 a0; Tloc292816 b0; TY535235 LOC5; Tnode292802* LOC6; Ropeobj178006* LOC7; if (!!(((384 & (*p0).options) == 0))) goto LA3; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_178277_2381377266(opr_556763_839829468[(m0)- 52]); LOC5[1] = rdloc_538188_839829468((&a0)); LOC5[2] = rdloc_538188_839829468((&b0)); LOC6 = (Tnode292802*)0; LOC6 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1)); LOC5[3] = getsimpletypedesc_533936_839829468((*p0).module, (*LOC6).typ); LOC7 = (Ropeobj178006*)0; LOC7 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_319), LOC5, 4); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc292812) 0)); { TY178507 LOC12; if (!(((*p0).options &(1U<<((NU)(((Toption169009) 7))&31U)))!=0)) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_538188_839829468((&(*d0))); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_323), LOC12, 1); } LA10: ; { TY178507 LOC17; if (!(((*p0).options &(1U<<((NU)(((Toption169009) 8))&31U)))!=0)) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rdloc_538188_839829468((&(*d0))); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), 
/* Continuation: tail of binaryfloatarith — second post-check emission, then
   the fallback branch delegating to binaryarith. */
((NimStringDesc*) &T839829468_324), LOC17, 1); } LA15: ; } goto LA1; LA3: ; { binaryarith_551819_839829468(p0, e0, d0, m0); } LA1: ; } 
/* genEqProc: `==` for proc values. If the operand's (skipped) type uses
   calling convention 8 (looks like closures — TODO confirm against the
   ccClosure ordinal), formats the two-field comparison T839829468_352;
   otherwise the plain pointer comparison T839829468_341. */
N_NIMCALL(void, geneqproc_552214_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Tloc292816 a0; Tloc292816 b0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); { Ttype292840* LOC3; TY532811 LOC6; Ropeobj178006* LOC7; LOC3 = (Ttype292840*)0; LOC3 = skiptypes_296099_850551059(a0.t, IL64(211106232576256)); if (!((*LOC3).callconv == ((Tcallingconvention292002) 8))) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rdloc_538188_839829468((&a0)); LOC6[1] = rdloc_538188_839829468((&b0)); LOC7 = (Ropeobj178006*)0; LOC7 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_352), LOC6, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc292812) 0)); } goto LA1; LA4: ; { TY532811 LOC9; Ropeobj178006* LOC10; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = rdloc_538188_839829468((&a0)); LOC9[1] = rdloc_538188_839829468((&b0)); LOC10 = (Ropeobj178006*)0; LOC10 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_341), LOC9, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC10, ((Tstorageloc292812) 0)); } LA1: ; } 
/* rdCharLoc: reads a location like rdloc, but when the (skipped) type is
   kind 2 (char — presumably tyChar) wraps the expression with format
   T839829468_358 (a cast to an unsigned byte, by convention). */
N_NIMCALL(Ropeobj178006*, rdcharloc_538227_839829468)(Tloc292816* a0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = rdloc_538188_839829468(a0); { Ttype292840* LOC3; TY178507 LOC6; LOC3 = (Ttype292840*)0; LOC3 = skiptypes_296099_850551059((*a0).t, IL64(211106233624832)); if (!((*LOC3).kind == ((Ttypekind292244) 2))) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = result0; result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_358), LOC6, 1); } LA4: ; return result0; } 
/* binaryArithOverflowRaw: helper for overflow-checked binary ops; signature
   continues on the next line (t0 is the result type, frmt0 the op format). */
N_NIMCALL(Ropeobj178006*, binaryarithoverflowraw_551235_839829468)(Tcproc529021* p0, Ttype292840* t0, 
/* Continuation: body of binaryarithoverflowraw — declares a temp of either
   the platform int (when the type is narrower than intsize) or the exact C
   type, emits the checked operation `frmt0` with both operands read via
   rdcharloc, then, for narrow or range-restricted kinds (mask 1064960),
   emits a firstord/lastord range check (format T839829468_359). Returns the
   temp's name rope. */
Tloc292816* a0, Tloc292816* b0, NimStringDesc* frmt0) { Ropeobj178006* result0; NI64 size0; Ropeobj178006* storage0; TY532811 LOC6; TY535238 LOC7; result0 = (Ropeobj178006*)0; size0 = getsize_320135_3876443242(t0); { if (!(size0 < ((NI64) (intsize_176641_4151366050)))) goto LA3; storage0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_36)); } goto LA1; LA3: ; { storage0 = gettypedesc_535673_839829468((*p0).module, t0); } LA1: ; result0 = gettempname_533598_839829468((*p0).module); memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = storage0; LOC6[1] = result0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_54), LOC6, 2); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = result0; LOC7[1] = rdcharloc_538227_839829468(a0); LOC7[2] = rdcharloc_538227_839829468(b0); linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC7, 3); { NIM_BOOL LOC10; TY535238 LOC14; NI64 LOC15; NI64 LOC16; LOC10 = (NIM_BOOL)0; LOC10 = (size0 < ((NI64) (intsize_176641_4151366050))); if (LOC10) goto LA11; LOC10 = ((1064960 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0); LA11: ; if (!LOC10) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = result0; LOC15 = (NI64)0; LOC15 = firstord_320001_3876443242(t0); LOC14[1] = intliteral_539270_839829468(LOC15); LOC16 = (NI64)0; LOC16 = lastord_320004_3876443242(t0); LOC14[2] = intliteral_539270_839829468(LOC16); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_359), LOC14, 3); } LA12: ; return result0; } 
/* binaryArithOverflow: overflow-checked binary arithmetic dispatcher.
   Evaluates both operands; branch bodies continue on the next line (fast
   unchecked path when option bit 5 is clear, otherwise the checked path
   through binaryarithoverflowraw). */
N_NIMCALL(void, binaryarithoverflow_551262_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) { Tloc292816 a0; Tloc292816 b0; Ttype292840* t0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832)); { 
/* Continuation: branches of binaryarithoverflow — unchecked path uses
   opr[(m0-45)] directly; checked path picks a 64-bit helper name from prc64
   for type kind 35 (int64, presumably) else from prc, runs it through
   binaryarithoverflowraw, and casts the result (format T839829468_370). */
Ropeobj178006* res0; TY535238 LOC5; if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = gettypedesc_535673_839829468((*p0).module, t0); LOC5[1] = rdloc_538188_839829468((&a0)); LOC5[2] = rdloc_538188_839829468((&b0)); res0 = HEX25_178905_2381377266(opr_551279_839829468[(m0)- 45], LOC5, 3); putintodest_550468_839829468(p0, d0, (*e0).typ, res0, ((Tstorageloc292812) 0)); } goto LA1; LA3: ; { Ropeobj178006* res0; NimStringDesc* LOC7; TY532811 LOC13; Ropeobj178006* LOC14; LOC7 = (NimStringDesc*)0; { if (!((*t0).kind == ((Ttypekind292244) 35))) goto LA10; LOC7 = copyString(prc64_551274_839829468[(m0)- 45]); } goto LA8; LA10: ; { LOC7 = copyString(prc_551269_839829468[(m0)- 45]); } LA8: ; res0 = binaryarithoverflowraw_551235_839829468(p0, t0, (&a0), (&b0), LOC7); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = gettypedesc_535673_839829468((*p0).module, t0); LOC13[1] = res0; LOC14 = (Ropeobj178006*)0; LOC14 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_370), LOC13, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC14, ((Tstorageloc292812) 0)); } LA1: ; } 
/* lenField: returns the rope for the string/seq length field access text.
   Chooses string T839829468_157 when compiling for command 2 or when the
   module has symbol flag 27 set (looks like the compatibility/old-field
   naming switch — TODO confirm), else T839829468_158. */
N_NIMCALL(Ropeobj178006*, lenfield_539305_839829468)(Tcproc529021* p0) { Ropeobj178006* result0; NimStringDesc* LOC1; result0 = (Ropeobj178006*)0; LOC1 = (NimStringDesc*)0; { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC4) goto LA5; LOC4 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA5: ; if (!LOC4) goto LA6; LOC1 = copyString(((NimStringDesc*) &T839829468_157)); } goto LA2; LA6: ; { LOC1 = copyString(((NimStringDesc*) &T839829468_158)); } LA2: ; result0 = rope_178277_2381377266(LOC1); return result0; } 
/* gcUsage: when the selected GC mode is 0 (looks like gc:none), reports
   message 263 (a "GC memory required" style warning, presumably) with the
   rendered offending expression (message call completes on the next line). */
N_NIMCALL(void, gcusage_554439_839829468)(Tnode292802* n0) { { NimStringDesc* LOC5; if (!(gselectedgc_169133_2607990831 == ((Tgcmode169080) 0))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = rendertree_311044_382274130(n0, 0); 
/* Continuation: tail of gcusage — emit the diagnostic message. */
message_196095_155036129((*n0).info, ((Tmsgkind191002) 263), LOC5); } LA3: ; } 
/* genRepr: code generation for the `repr` magic. Evaluates the argument,
   skips abstract types, then dispatches on the type kind to a family of
   runtime repr helpers (format strings T839829468_371.._383), passing the
   value (or its address) and, where needed, the generated RTTI node
   (gentypeinfo). Open-array-like kinds (27/48) first normalize the argument
   into a (data, length) pair via an inner switch, erroring on unexpected
   shapes; kinds 3/62 are rejected with a local error. Ends (on the last
   line of this span) with a gcusage check on the whole expression. The
   numeric kind ordinals are Nim's Ttypekind values — semantics inferred
   from the helper grouping; verify against the compiler's type-kind enum. */
N_NIMCALL(void, genrepr_555339_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Tloc292816 a0; Ttype292840* t0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); switch ((*t0).kind) { case ((Ttypekind292244) 31) ... ((Ttypekind292244) 35): case ((Ttypekind292244) 40) ... ((Ttypekind292244) 44): { TY178507 LOC2; Ropeobj178006* LOC3; memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rdloc_538188_839829468((&a0)); LOC3 = (Ropeobj178006*)0; LOC3 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_371), LOC2, 1); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC3, a0.s); } break; case ((Ttypekind292244) 36) ... ((Ttypekind292244) 39): { TY178507 LOC5; Ropeobj178006* LOC6; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_538188_839829468((&a0)); LOC6 = (Ropeobj178006*)0; LOC6 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_372), LOC5, 1); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC6, a0.s); } break; case ((Ttypekind292244) 1): { TY178507 LOC8; Ropeobj178006* LOC9; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_538188_839829468((&a0)); LOC9 = (Ropeobj178006*)0; LOC9 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_373), LOC8, 1); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC9, a0.s); } break; case ((Ttypekind292244) 2): { TY178507 LOC11; Ropeobj178006* LOC12; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rdloc_538188_839829468((&a0)); LOC12 = (Ropeobj178006*)0; LOC12 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_374), LOC11, 1); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC12, a0.s); } break; case ((Ttypekind292244) 14): case ((Ttypekind292244) 15): { TY532811 LOC14; Ropeobj178006* LOC15; 
memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_538188_839829468((&a0)); LOC14[1] = gentypeinfo_535941_839829468((*p0).module, t0); LOC15 = (Ropeobj178006*)0; LOC15 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_375), LOC14, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC15, a0.s); } break; case ((Ttypekind292244) 28): { TY178507 LOC17; Ropeobj178006* LOC18; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rdloc_538188_839829468((&a0)); LOC18 = (Ropeobj178006*)0; LOC18 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_376), LOC17, 1); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC18, a0.s); } break; case ((Ttypekind292244) 19): { TY532811 LOC20; Ropeobj178006* LOC21; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = addrloc_538204_839829468((&a0)); LOC20[1] = gentypeinfo_535941_839829468((*p0).module, t0); LOC21 = (Ropeobj178006*)0; LOC21 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_377), LOC20, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC21, a0.s); } break; case ((Ttypekind292244) 27): case ((Ttypekind292244) 48): { Tloc292816 b0; TY532811 LOC34; Ttype292840* LOC35; Ropeobj178006* LOC36; memset((void*)(&b0), 0, sizeof(b0)); switch ((*a0.t).kind) { case ((Ttypekind292244) 27): case ((Ttypekind292244) 48): { TY178507 LOC24; Ropeobj178006* LOC25; memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = rdloc_538188_839829468((&a0)); LOC25 = (Ropeobj178006*)0; LOC25 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_378), LOC24, 1); putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC25, a0.s); } break; case ((Ttypekind292244) 28): case ((Ttypekind292244) 24): { TY532811 LOC27; Ropeobj178006* LOC28; memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = rdloc_538188_839829468((&a0)); LOC27[1] = lenfield_539305_839829468(p0); LOC28 = (Ropeobj178006*)0; LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_379), LOC27, 2); putintodest_550468_839829468(p0, (&b0), 
(*e0).typ, LOC28, a0.s); } break; case ((Ttypekind292244) 16): case ((Ttypekind292244) 4): { TY532811 LOC30; NI64 LOC31; Ropeobj178006* LOC32; memset((void*)LOC30, 0, sizeof(LOC30)); LOC30[0] = rdloc_538188_839829468((&a0)); LOC31 = (NI64)0; LOC31 = lengthord_320007_3876443242(a0.t); LOC30[1] = rope_178401_2381377266(LOC31); LOC32 = (Ropeobj178006*)0; LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC30, 2); putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC32, a0.s); } break; default: { internalerror_196100_155036129((*(*e0).kindU.S6.sons->data[((NI) 0)]).info, ((NimStringDesc*) &T839829468_381)); } break; } memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = rdloc_538188_839829468((&b0)); LOC35 = (Ttype292840*)0; LOC35 = elemtype_320394_3876443242(t0); LOC34[1] = gentypeinfo_535941_839829468((*p0).module, LOC35); LOC36 = (Ropeobj178006*)0; LOC36 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_382), LOC34, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC36, a0.s); } break; case ((Ttypekind292244) 29): case ((Ttypekind292244) 16): case ((Ttypekind292244) 4): case ((Ttypekind292244) 22): case ((Ttypekind292244) 21): case ((Ttypekind292244) 26): case ((Ttypekind292244) 5): case ((Ttypekind292244) 24): { TY532811 LOC38; Ropeobj178006* LOC39; memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = rdloc_538188_839829468((&a0)); LOC38[1] = gentypeinfo_535941_839829468((*p0).module, t0); LOC39 = (Ropeobj178006*)0; LOC39 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC38, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC39, a0.s); } break; case ((Ttypekind292244) 3): case ((Ttypekind292244) 62): { localerror_196085_155036129((*e0).info, ((NimStringDesc*) &T839829468_384)); } break; default: { TY532811 LOC42; Ropeobj178006* LOC43; memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = addrloc_538204_839829468((&a0)); LOC42[1] = gentypeinfo_535941_839829468((*p0).module, t0); LOC43 = 
/* Continuation: genrepr's default branch (repr by address + RTTI) and the
   trailing gcusage check. */
(Ropeobj178006*)0; LOC43 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC42, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC43, a0.s); } break; } gcusage_554439_839829468(e0); } 
/* genGetTypeInfo: codegen for the getTypeInfo magic — resolve the argument's
   (skipped) type, generate/reference its RTTI node, store into d0. */
N_NIMCALL(void, gengettypeinfo_555383_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Ttype292840* t0; Ropeobj178006* LOC1; t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); LOC1 = (Ropeobj178006*)0; LOC1 = gentypeinfo_535941_839829468((*p0).module, t0); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC1, ((Tstorageloc292812) 0)); } 
/* genSwap: codegen for swap(a, b) — classic three-assignment swap through a
   temporary of the operands' type; d0 is unused (swap yields no value). */
N_NIMCALL(void, genswap_555638_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Tloc292816 a0; Tloc292816 b0; Tloc292816 tmp0; Ttype292840* LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&tmp0), 0, sizeof(tmp0)); LOC1 = (Ttype292840*)0; LOC1 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); gettemp_537032_839829468(p0, LOC1, (&tmp0), NIM_FALSE); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); genassignment_539264_839829468(p0, (&tmp0), (&a0), 0); genassignment_539264_839829468(p0, (&a0), (&b0), 0); genassignment_539264_839829468(p0, (&b0), (&tmp0), 0); } 
/* unaryExpr: generic one-operand expression codegen — evaluates son 1 and
   instantiates the caller-supplied format frmt0 with it. */
N_NIMCALL(void, unaryexpr_551209_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) { Tloc292816 a0; TY178507 LOC1; Ropeobj178006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_538188_839829468((&a0)); LOC2 = (Ropeobj178006*)0; LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0)); } 
/* binaryStmt: generic two-operand statement codegen; signature continues on
   the next line (internal-errors if d0 is already initialized, since a
   statement produces no value). */
N_NIMCALL(void, binarystmt_550501_839829468)(Tcproc529021* p0, 
/* Continuation: body of binarystmt — evaluates sons 1 and 2 and emits the
   caller-supplied statement format frmt0 with both operands. */
Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) { Tloc292816 a0; Tloc292816 b0; TY532811 LOC5; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); { if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3; internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_387)); } LA3: ; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_538188_839829468((&a0)); LOC5[1] = rdloc_538188_839829468((&b0)); linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC5, 2); } 
/* genStrConcat: codegen for string concatenation (`&`). First pass over the
   operands (sons 1..len-1) accumulates: L0 — the statically known length
   contribution (1 per char operand, literal length for string literals,
   node kinds 20..22) — and lens0, a rope of "+ len(x)" terms for dynamic
   operands (format T839829468_391); appends0 collects the per-operand
   append calls (char: T839829468_390, string: T839829468_392) targeting the
   temp. The allocation of the temp with the computed total length and the
   final copy into d0 happen on the next line. */
N_NIMCALL(void, genstrconcat_554452_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Tloc292816 a0; Tloc292816 tmp0; NI L0; Ropeobj178006* appends0; Ropeobj178006* lens0; TY535238 LOC21; Ropeobj178006** LOC22; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_537032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE); L0 = ((NI) 0); appends0 = NIM_NIL; lens0 = NIM_NIL; { NI i_554475_839829468; NI HEX3Atmp_554547_839829468; NI LOC2; NI res_554550_839829468; i_554475_839829468 = (NI)0; HEX3Atmp_554547_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_295351_850551059(e0); HEX3Atmp_554547_839829468 = (NI)(LOC2 - ((NI) 2)); res_554550_839829468 = ((NI) 0); { while (1) { if (!(res_554550_839829468 <= HEX3Atmp_554547_839829468)) goto LA4; i_554475_839829468 = res_554550_839829468; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))], (&a0)); { Ttype292840* LOC7; TY532811 LOC10; Ropeobj178006* LOC11; LOC7 = (Ttype292840*)0; LOC7 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).typ, IL64(211106242013440)); if (!((*LOC7).kind == ((Ttypekind292244) 2))) goto LA8; L0 += ((NI) 1); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = tmp0.r; LOC10[1] = rdloc_538188_839829468((&a0)); LOC11 = 
(Ropeobj178006*)0; LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2); add_178482_2381377266(&appends0, LOC11); } goto LA5; LA8: ; { TY532811 LOC19; Ropeobj178006* LOC20; { if (!((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kind >= ((Tnodekind292020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kind <= ((Tnodekind292020) 22))) goto LA15; L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kindU.S3.strval->Sup.len : 0); } goto LA13; LA15: ; { TY532811 LOC18; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_538188_839829468((&a0)); LOC18[1] = lenfield_539305_839829468(p0); addf_179205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2); } LA13: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = tmp0.r; LOC19[1] = rdloc_538188_839829468((&a0)); LOC20 = (Ropeobj178006*)0; LOC20 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2); add_178482_2381377266(&appends0, LOC20); } LA5: ; res_554550_839829468 += ((NI) 1); } LA4: ; } } memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = tmp0.r; LOC21[1] = lens0; LOC21[2] = rope_178401_2381377266(((NI64) (L0))); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_393), LOC21, 3); LOC22 = (Ropeobj178006**)0; LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); add_178482_2381377266(LOC22, appends0); { if (!((*d0).k == ((Tlockind292808) 0))) goto LA25; genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816)); } goto LA23; LA25: ; { genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0); } LA23: ; gcusage_554439_839829468(e0); } N_NIMCALL(void, genstrappend_554554_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Tloc292816 a0; Tloc292816 dest0; Ropeobj178006* appends0; Ropeobj178006* lens0; NI L0; TY535238 LOC21; 
Ropeobj178006** LOC22; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&dest0), 0, sizeof(dest0)); appends0 = (Ropeobj178006*)0; lens0 = (Ropeobj178006*)0; L0 = ((NI) 0); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&dest0)); { NI i_554615_839829468; NI HEX3Atmp_554676_839829468; NI LOC2; NI res_554679_839829468; i_554615_839829468 = (NI)0; HEX3Atmp_554676_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_295351_850551059(e0); HEX3Atmp_554676_839829468 = (NI)(LOC2 - ((NI) 3)); res_554679_839829468 = ((NI) 0); { while (1) { if (!(res_554679_839829468 <= HEX3Atmp_554676_839829468)) goto LA4; i_554615_839829468 = res_554679_839829468; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))], (&a0)); { Ttype292840* LOC7; TY532811 LOC10; Ropeobj178006* LOC11; LOC7 = (Ttype292840*)0; LOC7 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).typ, IL64(211106242013440)); if (!((*LOC7).kind == ((Ttypekind292244) 2))) goto LA8; L0 += ((NI) 1); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdloc_538188_839829468((&dest0)); LOC10[1] = rdloc_538188_839829468((&a0)); LOC11 = (Ropeobj178006*)0; LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2); add_178482_2381377266(&appends0, LOC11); } goto LA5; LA8: ; { TY532811 LOC19; Ropeobj178006* LOC20; { if (!((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kind >= ((Tnodekind292020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kind <= ((Tnodekind292020) 22))) goto LA15; L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kindU.S3.strval ? 
(*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kindU.S3.strval->Sup.len : 0); } goto LA13; LA15: ; { TY532811 LOC18; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_538188_839829468((&a0)); LOC18[1] = lenfield_539305_839829468(p0); addf_179205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2); } LA13: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_538188_839829468((&dest0)); LOC19[1] = rdloc_538188_839829468((&a0)); LOC20 = (Ropeobj178006*)0; LOC20 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2); add_178482_2381377266(&appends0, LOC20); } LA5: ; res_554679_839829468 += ((NI) 1); } LA4: ; } } memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = rdloc_538188_839829468((&dest0)); LOC21[1] = lens0; LOC21[2] = rope_178401_2381377266(((NI64) (L0))); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_395), LOC21, 3); LOC22 = (Ropeobj178006**)0; LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); add_178482_2381377266(LOC22, appends0); gcusage_554439_839829468(e0); } N_NIMCALL(void, genseqelemappend_554683_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { NimStringDesc* seqappendpattern0; Tloc292816 a0; Tloc292816 b0; Tloc292816 dest0; Ttype292840* bt0; TY535238 LOC8; Ttype292840* LOC9; TY532811 LOC10; TY532811 LOC11; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA4: ; if (!!(LOC3)) goto LA5; seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_396)); } goto LA1; LA5: ; { seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_397)); } LA1: ; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&dest0), 0, sizeof(dest0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); 
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); bt0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 2)]).typ, IL64(211106240964864)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_538188_839829468((&a0)); LOC9 = (Ttype292840*)0; LOC9 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); LOC8[1] = gettypedesc_535673_839829468((*p0).module, LOC9); LOC8[2] = gettypedesc_535673_839829468((*p0).module, bt0); linecg_532707_839829468(p0, ((Tcprocsection529011) 2), seqappendpattern0, LOC8, 3); initloc_532273_839829468((&dest0), ((Tlockind292808) 6), bt0, ((Tstorageloc292812) 3)); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdloc_538188_839829468((&a0)); LOC10[1] = lenfield_539305_839829468(p0); dest0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_398), LOC10, 2); genassignment_539264_839829468(p0, (&dest0), (&b0), 3); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rdloc_538188_839829468((&a0)); LOC11[1] = lenfield_539305_839829468(p0); linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_399), LOC11, 2); gcusage_554439_839829468(e0); } N_NIMCALL(void, binaryexpr_550549_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) { Tloc292816 a0; Tloc292816 b0; TY532811 LOC1; Ropeobj178006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_538188_839829468((&a0)); LOC1[1] = rdloc_538188_839829468((&b0)); LOC2 = (Ropeobj178006*)0; LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0)); } N_NIMCALL(void, genstrequals_556667_839829468)(Tcproc529021* p0, Tnode292802* e0, 
Tloc292816* d0) { Tloc292816 x0; Tnode292802* a0; Tnode292802* b0; memset((void*)(&x0), 0, sizeof(x0)); a0 = (*e0).kindU.S6.sons->data[((NI) 1)]; b0 = (*e0).kindU.S6.sons->data[((NI) 2)]; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((*a0).kind == ((Tnodekind292020) 23)); if (LOC3) goto LA4; LOC3 = ((*b0).kind == ((Tnodekind292020) 23)); LA4: ; if (!LOC3) goto LA5; binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341)); } goto LA1; LA5: ; { NIM_BOOL LOC8; TY532811 LOC12; Ropeobj178006* LOC13; LOC8 = (NIM_BOOL)0; LOC8 = ((*a0).kind >= ((Tnodekind292020) 20) && (*a0).kind <= ((Tnodekind292020) 22)); if (!(LOC8)) goto LA9; LOC8 = (((*a0).kindU.S3.strval) && ((*a0).kindU.S3.strval)->Sup.len == 0); LA9: ; if (!LOC8) goto LA10; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&x0)); memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_538188_839829468((&x0)); LOC12[1] = lenfield_539305_839829468(p0); LOC13 = (Ropeobj178006*)0; LOC13 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC12, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC13, ((Tstorageloc292812) 0)); } goto LA1; LA10: ; { NIM_BOOL LOC15; TY532811 LOC19; Ropeobj178006* LOC20; LOC15 = (NIM_BOOL)0; LOC15 = ((*b0).kind >= ((Tnodekind292020) 20) && (*b0).kind <= ((Tnodekind292020) 22)); if (!(LOC15)) goto LA16; LOC15 = (((*b0).kindU.S3.strval) && ((*b0).kindU.S3.strval)->Sup.len == 0); LA16: ; if (!LOC15) goto LA17; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&x0)); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_538188_839829468((&x0)); LOC19[1] = lenfield_539305_839829468(p0); LOC20 = (Ropeobj178006*)0; LOC20 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC19, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC20, ((Tstorageloc292812) 0)); } goto LA1; LA17: ; { binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_401)); } LA1: ; } N_NIMCALL(void, 
genisnil_552620_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Ttype292840* t0; t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).kind == ((Ttypekind292244) 25)); if (!(LOC3)) goto LA4; LOC3 = ((*t0).callconv == ((Tcallingconvention292002) 8)); LA4: ; if (!LOC3) goto LA5; unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_404)); } goto LA1; LA5: ; { unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_405)); } LA1: ; } N_NIMCALL(void, gendollar_555391_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* frmt0) { Tloc292816 a0; TY178507 LOC1; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_538188_839829468((&a0)); a0.r = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1); { if (!((*d0).k == ((Tlockind292808) 0))) goto LA4; gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA4: ; genassignment_539264_839829468(p0, (&(*d0)), (&a0), 0); gcusage_554439_839829468(n0); } N_NIMCALL(Ropeobj178006*, genofhelper_555140_839829468)(Tcproc529021* p0, Ttype292840* dest0, Ropeobj178006* a0) { Ropeobj178006* result0; Ropeobj178006* ti0; result0 = (Ropeobj178006*)0; ti0 = gentypeinfo_535941_839829468((*p0).module, dest0); { NIM_BOOL LOC3; NIM_BOOL LOC5; TY532811 LOC9; LOC3 = (NIM_BOOL)0; LOC3 = (((*dest0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0); if (LOC3) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = (((*(*p0).module).flags &(1U<<((NU)(((Codegenflag529025) 5))&7U)))!=0); if (!(LOC5)) goto LA6; LOC5 = !((((*dest0).flags &(1U<<((NU)(((Ttypeflag292431) 5))&31U)))!=0)); LA6: ; LOC3 = LOC5; LA4: ; if (!LOC3) goto LA7; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = a0; LOC9[1] = ti0; result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_414), LOC9, 2); } goto LA1; LA7: ; { 
Ropeobj178006* LOC11; Ropeobj178006* cache0; Ropeobj178006* LOC12; TY178507 LOC13; TY535238 LOC14; LOC11 = (Ropeobj178006*)0; LOC11 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_129)); (*(*p0).module).labels += ((NI) 1); LOC12 = (Ropeobj178006*)0; LOC12 = rope_178401_2381377266(((NI64) ((*(*p0).module).labels))); cache0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_415), LOC12); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = cache0; addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_416), LOC13, 1); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = a0; LOC14[1] = ti0; LOC14[2] = cache0; result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_417), LOC14, 3); } LA1: ; return result0; } N_NIMCALL(void, genof_555201_839829468)(Tcproc529021* p0, Tnode292802* x0, Ttype292840* typ0, Tloc292816* d0) { Tloc292816 a0; Ttype292840* dest0; Ropeobj178006* r0; Ropeobj178006* nilcheck0; Ttype292840* t0; Ttype292840* LOC41; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, x0, (&a0)); dest0 = skiptypes_296099_850551059(typ0, IL64(211106247256320)); r0 = rdloc_538188_839829468((&a0)); nilcheck0 = NIM_NIL; t0 = skiptypes_296099_850551059(a0.t, IL64(211106232576256)); { while (1) { Ttype292840* LOC16; if (!((14680064 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0)) goto LA2; { if (!!(((*t0).kind == ((Ttypekind292244) 23)))) goto LA5; nilcheck0 = r0; } LA5: ; { NIM_BOOL LOC9; NIM_BOOL LOC11; TY178507 LOC15; LOC9 = (NIM_BOOL)0; LOC9 = !(((*t0).kind == ((Ttypekind292244) 23))); if (LOC9) goto LA10; LOC11 = (NIM_BOOL)0; LOC11 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC11) goto LA12; LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA12: ; LOC9 = !(LOC11); LA10: ; if (!LOC9) goto LA13; memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = r0; r0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) 
&T839829468_124), LOC15, 1); } LA13: ; LOC16 = (Ttype292840*)0; LOC16 = lastson_295377_850551059(t0); t0 = skiptypes_296099_850551059(LOC16, IL64(211106232576256)); } LA2: ; } { NIM_BOOL LOC19; LOC19 = (NIM_BOOL)0; LOC19 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC19) goto LA20; LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA20: ; if (!!(LOC19)) goto LA21; { while (1) { NIM_BOOL LOC25; TY533289 LOC27; Ropeobj178006* LOC28; LOC25 = (NIM_BOOL)0; LOC25 = ((*t0).kind == ((Ttypekind292244) 17)); if (!(LOC25)) goto LA26; LOC25 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL)); LA26: ; if (!LOC25) goto LA24; memset((void*)LOC27, 0, sizeof(LOC27)); LOC28 = (Ropeobj178006*)0; LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_153), LOC27, 0); add_178482_2381377266(&r0, LOC28); t0 = skiptypes_296099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360)); } LA24: ; } } LA21: ; { NIM_BOOL LOC31; LOC31 = (NIM_BOOL)0; LOC31 = isobjlackingtypefield_533515_839829468(t0); if (!LOC31) goto LA32; globalerror_196071_155036129((*x0).info, ((Tmsgkind191002) 4), ((NimStringDesc*) &T839829468_412)); } LA32: ; { TY532811 LOC38; if (!!((nilcheck0 == NIM_NIL))) goto LA36; memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = nilcheck0; LOC38[1] = genofhelper_555140_839829468(p0, dest0, r0); r0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_413), LOC38, 2); } goto LA34; LA36: ; { TY178507 LOC40; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = genofhelper_555140_839829468(p0, dest0, r0); r0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_418), LOC40, 1); } LA34: ; LOC41 = (Ttype292840*)0; LOC41 = getsystype_338150_3937434831(((Ttypekind292244) 1)); putintodest_550468_839829468(p0, d0, LOC41, r0, a0.s); } N_NIMCALL(void, genof_555331_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { genof_555201_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], 
(*(*n0).kindU.S6.sons->data[((NI) 2)]).typ, d0); } N_NIMCALL(void, rawgennew_554741_839829468)(Tcproc529021* p0, Tloc292816* a0, Ropeobj178006* sizeexpr_554745_839829468) { Ropeobj178006* sizeexpr0; Ttype292840* reftype0; Tloc292816 b0; TY535238 args0; Ttype292840* bt0; sizeexpr0 = sizeexpr_554745_839829468; reftype0 = skiptypes_296099_850551059((*a0).t, IL64(211106242013440)); memset((void*)(&b0), 0, sizeof(b0)); initloc_532273_839829468((&b0), ((Tlockind292808) 6), (*a0).t, ((Tstorageloc292812) 3)); { TY178507 LOC5; Ttype292840* LOC6; if (!sizeexpr0 == 0) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC6 = (Ttype292840*)0; LOC6 = skiptypes_296099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832)); LOC5[0] = gettypedesc_535673_839829468((*p0).module, LOC6); sizeexpr0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_419), LOC5, 1); } LA3: ; memset((void*)args0, 0, sizeof(args0)); args0[0] = gettypedesc_535673_839829468((*p0).module, reftype0); args0[1] = gentypeinfo_535941_839829468((*p0).module, reftype0); args0[2] = sizeexpr0; { NIM_BOOL LOC9; TY532811 LOC21; LOC9 = (NIM_BOOL)0; LOC9 = ((*a0).s == ((Tstorageloc292812) 3)); if (!(LOC9)) goto LA10; LOC9 = usesnativegc_169177_2607990831(); LA10: ; if (!LOC9) goto LA11; { NIM_BOOL LOC15; TY178507 LOC18; LOC15 = (NIM_BOOL)0; LOC15 = canformacycle_320123_3876443242((*a0).t); if (!LOC15) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_538188_839829468(a0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_420), LOC18, 1); } goto LA13; LA16: ; { TY178507 LOC20; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = rdloc_538188_839829468(a0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC20, 1); } LA13: ; b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_421), args0, 3); memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = rdloc_538188_839829468(a0); LOC21[1] = 
rdloc_538188_839829468((&b0)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC21, 2); } goto LA7; LA11: ; { b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_422), args0, 3); genassignment_539264_839829468(p0, a0, (&b0), 0); } LA7: ; bt0 = skiptypes_296099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832)); genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), bt0, a0, NIM_FALSE); } N_NIMCALL(void, gennew_554782_839829468)(Tcproc529021* p0, Tnode292802* e0) { Tloc292816 a0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); { NI LOC3; Tloc292816 se0; Ropeobj178006* LOC6; LOC3 = (NI)0; LOC3 = len_293081_850551059(e0); if (!(LOC3 == ((NI) 3))) goto LA4; memset((void*)(&se0), 0, sizeof(se0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&se0)); LOC6 = (Ropeobj178006*)0; LOC6 = rdloc_538188_839829468((&se0)); rawgennew_554741_839829468(p0, (&a0), LOC6); } goto LA1; LA4: ; { rawgennew_554741_839829468(p0, (&a0), NIM_NIL); } LA1: ; gcusage_554439_839829468(e0); } N_NIMCALL(void, gennewfinalize_555111_839829468)(Tcproc529021* p0, Tnode292802* e0) { Tloc292816 a0; Tloc292816 b0; Tloc292816 f0; Ttype292840* reftype0; Ttype292840* bt0; Ropeobj178006* ti0; TY532811 LOC1; TY535238 LOC2; Ttype292840* LOC3; Ttype292840* LOC4; Ttype292840* LOC5; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&f0), 0, sizeof(f0)); reftype0 = (Ttype292840*)0; bt0 = (Ttype292840*)0; ti0 = (Ropeobj178006*)0; reftype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&f0)); initloc_532273_839829468((&b0), ((Tlockind292808) 6), a0.t, ((Tstorageloc292812) 3)); ti0 = 
gentypeinfo_535941_839829468((*p0).module, reftype0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = ti0; LOC1[1] = rdloc_538188_839829468((&f0)); addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_423), LOC1, 2); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = gettypedesc_535673_839829468((*p0).module, reftype0); LOC2[1] = ti0; LOC3 = (Ttype292840*)0; LOC3 = lastson_295377_850551059(reftype0); LOC4 = (Ttype292840*)0; LOC4 = skiptypes_296099_850551059(LOC3, IL64(211106233624832)); LOC2[2] = gettypedesc_535673_839829468((*p0).module, LOC4); b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_424), LOC2, 3); genassignment_539264_839829468(p0, (&a0), (&b0), 0); LOC5 = (Ttype292840*)0; LOC5 = lastson_295377_850551059(reftype0); bt0 = skiptypes_296099_850551059(LOC5, IL64(211106233624832)); genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), bt0, (&a0), NIM_FALSE); gcusage_554439_839829468(e0); } N_NIMCALL(void, gennewseqaux_554795_839829468)(Tcproc529021* p0, Tloc292816* dest0, Ropeobj178006* length0) { Ttype292840* seqtype0; TY535238 args0; Tloc292816 call0; seqtype0 = skiptypes_296099_850551059((*dest0).t, IL64(211106242013440)); memset((void*)args0, 0, sizeof(args0)); args0[0] = gettypedesc_535673_839829468((*p0).module, seqtype0); args0[1] = gentypeinfo_535941_839829468((*p0).module, seqtype0); args0[2] = length0; memset((void*)(&call0), 0, sizeof(call0)); initloc_532273_839829468((&call0), ((Tlockind292808) 6), (*dest0).t, ((Tstorageloc292812) 3)); { NIM_BOOL LOC3; TY532811 LOC15; LOC3 = (NIM_BOOL)0; LOC3 = ((*dest0).s == ((Tstorageloc292812) 3)); if (!(LOC3)) goto LA4; LOC3 = usesnativegc_169177_2607990831(); LA4: ; if (!LOC3) goto LA5; { NIM_BOOL LOC9; TY178507 LOC12; LOC9 = (NIM_BOOL)0; LOC9 = canformacycle_320123_3876443242((*dest0).t); if (!LOC9) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_538188_839829468(dest0); linefmt_532714_839829468(p0, 
((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_420), LOC12, 1); } goto LA7; LA10: ; { TY178507 LOC14; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_538188_839829468(dest0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC14, 1); } LA7: ; call0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_425), args0, 3); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = rdloc_538188_839829468(dest0); LOC15[1] = rdloc_538188_839829468((&call0)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC15, 2); } goto LA1; LA5: ; { call0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_426), args0, 3); genassignment_539264_839829468(p0, dest0, (&call0), 0); } LA1: ; } N_NIMCALL(void, gennewseq_554824_839829468)(Tcproc529021* p0, Tnode292802* e0) { Tloc292816 a0; Tloc292816 b0; Ropeobj178006* LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); LOC1 = (Ropeobj178006*)0; LOC1 = rdloc_538188_839829468((&b0)); gennewseqaux_554795_839829468(p0, (&a0), LOC1); gcusage_554439_839829468(e0); } N_NIMCALL(void, gennewseqofcap_554836_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Ttype292840* seqtype0; Tloc292816 a0; TY535238 LOC1; Ropeobj178006* LOC2; seqtype0 = skiptypes_296099_850551059((*e0).typ, IL64(211106242013440)); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = gettypedesc_535673_839829468((*p0).module, seqtype0); LOC1[1] = gentypeinfo_535941_839829468((*p0).module, seqtype0); LOC1[2] = rdloc_538188_839829468((&a0)); LOC2 = (Ropeobj178006*)0; LOC2 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_427), 
LOC1, 3); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0)); gcusage_554439_839829468(e0); } N_NIMCALL(Ropeobj178006*, getclosuretype_535685_839829468)(Tcgen529027* m0, Ttype292840* t0, Tclosuretypekind535681 kind0) { Ropeobj178006* result0; Intset268030 check0; Ropeobj178006* rettype0; Ropeobj178006* desc0; result0 = (Ropeobj178006*)0; memset((void*)(&check0), 0, sizeof(check0)); chckNil((void*)(&check0)); memset((void*)(&check0), 0, sizeof(check0)); initintset_268885_2627731572((&check0)); result0 = gettempname_533598_839829468(m0); rettype0 = (Ropeobj178006*)0; desc0 = (Ropeobj178006*)0; genprocparams_534115_839829468(m0, t0, &rettype0, &desc0, (&check0), !((kind0 == ((Tclosuretypekind535681) 0))), NIM_FALSE); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = isimportedtype_533451_839829468(t0); if (!!(LOC3)) goto LA4; { NIM_BOOL LOC8; TY535235 LOC12; LOC8 = (NIM_BOOL)0; LOC8 = !(((*t0).callconv == ((Tcallingconvention292002) 8))); if (LOC8) goto LA9; LOC8 = !((kind0 == ((Tclosuretypekind535681) 2))); LA9: ; if (!LOC8) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rope_178277_2381377266(Callingconvtostr_533587_839829468[((*t0).callconv)- 0]); LOC12[1] = rettype0; LOC12[2] = result0; LOC12[3] = desc0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC12, 4); } goto LA6; LA10: ; { TY535238 LOC14; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = result0; LOC14[1] = rettype0; LOC14[2] = desc0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC14, 3); } LA6: ; } LA4: ; return result0; } N_NIMCALL(void, gensomecast_556481_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Tloc292816 a0; Ttype292840* etyp0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); etyp0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832)); { NIM_BOOL LOC3; 
TY532811 LOC7; Ropeobj178006* LOC8; LOC3 = (NIM_BOOL)0; LOC3 = ((IL64(281475111387152) &((NU64)1<<((NU)((*etyp0).kind)&63U)))!=0); if (!(LOC3)) goto LA4; LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0)); LA4: ; if (!LOC3) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = gettypedesc_535673_839829468((*p0).module, (*e0).typ); LOC7[1] = addrloc_538204_839829468((&a0)); LOC8 = (Ropeobj178006*)0; LOC8 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_429), LOC7, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC8, a0.s); } goto LA1; LA5: ; { NIM_BOOL LOC10; TY532811 LOC14; Ropeobj178006* LOC15; LOC10 = (NIM_BOOL)0; LOC10 = ((*etyp0).kind == ((Ttypekind292244) 25)); if (!(LOC10)) goto LA11; LOC10 = ((*etyp0).callconv == ((Tcallingconvention292002) 8)); LA11: ; if (!LOC10) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = getclosuretype_535685_839829468((*p0).module, etyp0, ((Tclosuretypekind535681) 1)); LOC14[1] = rdcharloc_538227_839829468((&a0)); LOC15 = (Ropeobj178006*)0; LOC15 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC14, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC15, a0.s); } goto LA1; LA12: ; { TY532811 LOC17; Ropeobj178006* LOC18; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_535673_839829468((*p0).module, (*e0).typ); LOC17[1] = rdcharloc_538227_839829468((&a0)); LOC18 = (Ropeobj178006*)0; LOC18 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC17, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC18, a0.s); } LA1: ; } N_NIMCALL(void, unaryexprchar_551222_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) { Tloc292816 a0; TY178507 LOC1; Ropeobj178006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdcharloc_538227_839829468((&a0)); LOC2 = (Ropeobj178006*)0; LOC2 = 
ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0)); } N_NIMCALL(void, genord_556475_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_301)); } N_NIMCALL(void, genarraylen_555415_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) { Tnode292802* a0; Ttype292840* typ0; a0 = (*e0).kindU.S6.sons->data[((NI) 1)]; { if (!((*a0).kind == ((Tnodekind292020) 64))) goto LA3; a0 = (*a0).kindU.S6.sons->data[((NI) 0)]; } LA3: ; typ0 = skiptypes_296099_850551059((*a0).typ, IL64(211106240964864)); switch ((*typ0).kind) { case ((Ttypekind292244) 27): case ((Ttypekind292244) 48): { { if (!(op0 == ((Tmagic292524) 8))) goto LA8; unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_431)); } goto LA6; LA8: ; { unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_432)); } LA6: ; } break; case ((Ttypekind292244) 29): { usestringh_532345_839829468((*p0).module); { if (!(op0 == ((Tmagic292524) 8))) goto LA14; unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_433)); } goto LA12; LA14: ; { unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_434)); } LA12: ; } break; case ((Ttypekind292244) 28): case ((Ttypekind292244) 24): { { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC20) goto LA21; LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA21: ; if (!!(LOC20)) goto LA22; { if (!(op0 == ((Tmagic292524) 8))) goto LA26; unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_435)); } goto LA24; LA26: ; { unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_436)); } LA24: ; } goto LA18; LA22: ; { { if (!(op0 == ((Tmagic292524) 8))) goto LA32; unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) 
&T839829468_437)); } goto LA30; LA32: ; { unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_438)); } LA30: ; } LA18: ; } break; case ((Ttypekind292244) 16): case ((Ttypekind292244) 4): { { NI64 LOC40; Ropeobj178006* LOC41; if (!(op0 == ((Tmagic292524) 8))) goto LA38; LOC40 = (NI64)0; LOC40 = lastord_320004_3876443242(typ0); LOC41 = (Ropeobj178006*)0; LOC41 = rope_178401_2381377266(LOC40); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC41, ((Tstorageloc292812) 0)); } goto LA36; LA38: ; { NI64 LOC43; Ropeobj178006* LOC44; LOC43 = (NI64)0; LOC43 = lengthord_320007_3876443242(typ0); LOC44 = (Ropeobj178006*)0; LOC44 = rope_178401_2381377266(LOC43); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC44, ((Tstorageloc292812) 0)); } LA36: ; } break; default: { internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_439)); } break; } } N_NIMCALL(void, unarystmt_550527_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) { Tloc292816 a0; TY178507 LOC5; memset((void*)(&a0), 0, sizeof(a0)); { if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3; internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_442)); } LA3: ; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_538188_839829468((&a0)); linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC5, 1); } N_NIMCALL(void, gensetlengthstr_555632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { binarystmt_550501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_445)); gcusage_554439_839829468(e0); } N_NIMCALL(void, gensetlengthseq_555500_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Tloc292816 a0; Tloc292816 b0; Ttype292840* t0; NimStringDesc* setlenpattern0; TY535235 LOC8; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); 
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA4: ; if (!!(LOC3)) goto LA5; setlenpattern0 = copyString(((NimStringDesc*) &T839829468_446)); } goto LA1; LA5: ; { setlenpattern0 = copyString(((NimStringDesc*) &T839829468_447)); } LA1: ; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_538188_839829468((&a0)); LOC8[1] = rdloc_538188_839829468((&b0)); LOC8[2] = gettypedesc_535673_839829468((*p0).module, t0); LOC8[3] = gettypedesc_535673_839829468((*p0).module, (*t0).sons->data[((NI) 0)]); linecg_532707_839829468(p0, ((Tcprocsection529011) 2), setlenpattern0, LOC8, 4); gcusage_554439_839829468(e0); } N_NIMCALL(Ropeobj178006*, rdsetelemloc_555662_839829468)(Tloc292816* a0, Ttype292840* settype0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = rdcharloc_538227_839829468(a0); { NI64 LOC3; TY532811 LOC6; NI64 LOC7; LOC3 = (NI64)0; LOC3 = firstord_320001_3876443242(settype0); if (!!((LOC3 == IL64(0)))) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = result0; LOC7 = (NI64)0; LOC7 = firstord_320001_3876443242(settype0); LOC6[1] = rope_178401_2381377266(LOC7); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_448), LOC6, 2); } LA4: ; return result0; } N_NIMCALL(void, binarystmtinexcl_555858_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) { Tloc292816 a0; Tloc292816 b0; TY532811 LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = 
rdloc_538188_839829468((&a0)); LOC1[1] = rdsetelemloc_555662_839829468((&b0), a0.t); linef_532700_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC1, 2); } N_NIMCALL(void, binaryexprchar_550809_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) { Tloc292816 a0; Tloc292816 b0; TY532811 LOC1; Ropeobj178006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdcharloc_538227_839829468((&a0)); LOC1[1] = rdcharloc_538227_839829468((&b0)); LOC2 = (Ropeobj178006*)0; LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0)); } N_NIMCALL(NIM_BOOL, fewcmps_555803_839829468)(Tnode292802* s0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { if (!!(((*s0).kind == ((Tnodekind292020) 39)))) goto LA3; internalerror_196100_155036129((*s0).info, ((NimStringDesc*) &T839829468_463)); } LA3: ; { NIM_BOOL LOC7; NI64 LOC8; LOC7 = (NIM_BOOL)0; LOC8 = (NI64)0; LOC8 = getsize_320135_3876443242((*s0).typ); LOC7 = (LOC8 <= ((NI64) (intsize_176641_4151366050))); if (!(LOC7)) goto LA9; LOC7 = (((*s0).flags &(1U<<((NU)(((Tnodeflag292427) 4))&15U)))!=0); LA9: ; if (!LOC7) goto LA10; result0 = NIM_FALSE; } goto LA5; LA10: ; { Ttype292840* LOC13; LOC13 = (Ttype292840*)0; LOC13 = elemtype_320394_3876443242((*s0).typ); if (!((IL64(62277025792) &((NU64)1<<((NU)((*LOC13).kind)&63U)))!=0)) goto LA14; result0 = NIM_TRUE; } goto LA5; LA14: ; { NI LOC17; LOC17 = (NI)0; LOC17 = sonslen_295351_850551059(s0); result0 = (LOC17 <= ((NI) 8)); } LA5: ; return result0; } N_NIMCALL(void, binaryexprin_555837_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0, NimStringDesc* frmt0) { TY532811 LOC1; Ropeobj178006* LOC2; 
memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_538188_839829468((&(*a0))); LOC1[1] = rdsetelemloc_555662_839829468((&(*b0)), (*a0).t); LOC2 = (Ropeobj178006*)0; LOC2 = HEX25_178905_2381377266(frmt0, LOC1, 2); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0)); } N_NIMCALL(void, geninexpraux_553496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0) { Ttype292840* LOC1; NI64 LOC2; LOC1 = (Ttype292840*)0; LOC1 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); LOC2 = (NI64)0; LOC2 = getsize_320135_3876443242(LOC1); switch (((NI) (LOC2))) { case ((NI) 1): { binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_467)); } break; case ((NI) 2): { binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_468)); } break; case ((NI) 4): { binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_469)); } break; case ((NI) 8): { binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_470)); } break; default: { binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_471)); } break; } } N_NIMCALL(void, geninop_556009_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Tloc292816 a0; Tloc292816 b0; Tloc292816 x0; Tloc292816 y0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&x0), 0, sizeof(x0)); memset((void*)(&y0), 0, sizeof(y0)); { NIM_BOOL LOC3; Tnode292802* ea0; NI length0; LOC3 = (NIM_BOOL)0; LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 39)); if (!(LOC3)) goto LA4; LOC3 = fewcmps_555803_839829468((*e0).kindU.S6.sons->data[((NI) 1)]); LA4: ; if (!LOC3) goto LA5; { if (!((*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 70) || (*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 69))) goto LA9; ea0 = 
(*(*e0).kindU.S6.sons->data[((NI) 2)]).kindU.S6.sons->data[((NI) 0)]; } goto LA7; LA9: ; { ea0 = (*e0).kindU.S6.sons->data[((NI) 2)]; } LA7: ; initlocexpr_539283_839829468(p0, ea0, (&a0)); initloc_532273_839829468((&b0), ((Tlockind292808) 6), (*e0).typ, ((Tstorageloc292812) 0)); b0.r = rope_178277_2381377266(((NimStringDesc*) &T839829468_118)); length0 = sonslen_295351_850551059((*e0).kindU.S6.sons->data[((NI) 1)]); { NI i_556061_839829468; NI HEX3Atmp_556412_839829468; NI res_556415_839829468; i_556061_839829468 = (NI)0; HEX3Atmp_556412_839829468 = (NI)0; HEX3Atmp_556412_839829468 = (NI)(length0 - ((NI) 1)); res_556415_839829468 = ((NI) 0); { while (1) { if (!(res_556415_839829468 <= HEX3Atmp_556412_839829468)) goto LA14; i_556061_839829468 = res_556415_839829468; { TY535238 LOC19; if (!((*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kind == ((Tnodekind292020) 44))) goto LA17; initlocexpr_539283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0)); initlocexpr_539283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0)); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdcharloc_538227_839829468((&a0)); LOC19[1] = rdcharloc_538227_839829468((&x0)); LOC19[2] = rdcharloc_538227_839829468((&y0)); addf_179205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_464), LOC19, 3); } goto LA15; LA17: ; { TY532811 LOC21; initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468], (&x0)); memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = rdcharloc_538227_839829468((&a0)); LOC21[1] = rdcharloc_538227_839829468((&x0)); addf_179205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_465), LOC21, 2); } LA15: ; { if (!(i_556061_839829468 < (NI)(length0 - ((NI) 1)))) goto LA24; add_178487_2381377266(&b0.r, ((NimStringDesc*) 
&T839829468_466)); } LA24: ; res_556415_839829468 += ((NI) 1); } LA14: ; } } add_178487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_117)); putintodest_550468_839829468(p0, d0, (*e0).typ, b0.r, ((Tstorageloc292812) 0)); } goto LA1; LA5: ; { initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); geninexpraux_553496_839829468(p0, e0, (&a0), (&b0), d0); } LA1: ; } N_NIMCALL(void, gensetop_556419_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) { Tloc292816 a0; Tloc292816 b0; Tloc292816 i0; Ttype292840* settype0; NI size0; NI64 LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&i0), 0, sizeof(i0)); settype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); LOC1 = (NI64)0; LOC1 = getsize_320135_3876443242(settype0); size0 = ((NI) (LOC1)); switch (size0) { case ((NI) 1): case ((NI) 2): case ((NI) 4): case ((NI) 8): { switch (op0) { case ((Tmagic292524) 39): { NimStringDesc* ts0; NimStringDesc* LOC4; NimStringDesc* LOC5; NimStringDesc* LOC6; LOC4 = (NimStringDesc*)0; LOC5 = (NimStringDesc*)0; LOC5 = nimIntToStr((NI)(size0 * ((NI) 8))); LOC4 = rawNewString(LOC5->Sup.len + 2); appendString(LOC4, ((NimStringDesc*) &T839829468_45)); appendString(LOC4, LOC5); ts0 = LOC4; LOC6 = (NimStringDesc*)0; LOC6 = rawNewString(ts0->Sup.len + ts0->Sup.len + 35); appendString(LOC6, ((NimStringDesc*) &T839829468_449)); appendString(LOC6, ts0); appendString(LOC6, ((NimStringDesc*) &T839829468_450)); appendString(LOC6, ts0); appendString(LOC6, ((NimStringDesc*) &T839829468_451)); binarystmtinexcl_555858_839829468(p0, e0, d0, LOC6); } break; case ((Tmagic292524) 40): { NimStringDesc* ts0; NimStringDesc* LOC8; NimStringDesc* LOC9; NimStringDesc* LOC10; LOC8 = (NimStringDesc*)0; LOC9 = (NimStringDesc*)0; LOC9 = nimIntToStr((NI)(size0 * ((NI) 8))); LOC8 = 
rawNewString(LOC9->Sup.len + 2); appendString(LOC8, ((NimStringDesc*) &T839829468_45)); appendString(LOC8, LOC9); ts0 = LOC8; LOC10 = (NimStringDesc*)0; LOC10 = rawNewString(ts0->Sup.len + ts0->Sup.len + 42); appendString(LOC10, ((NimStringDesc*) &T839829468_452)); appendString(LOC10, ts0); appendString(LOC10, ((NimStringDesc*) &T839829468_453)); appendString(LOC10, ts0); appendString(LOC10, ((NimStringDesc*) &T839829468_454)); binarystmtinexcl_555858_839829468(p0, e0, d0, LOC10); } break; case ((Tmagic292524) 41): { { if (!(size0 <= ((NI) 4))) goto LA14; unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_455)); } goto LA12; LA14: ; { unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_456)); } LA12: ; } break; case ((Tmagic292524) 133): { binaryexprchar_550809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_457)); } break; case ((Tmagic292524) 132): { binaryexprchar_550809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_458)); } break; case ((Tmagic292524) 131): { binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341)); } break; case ((Tmagic292524) 134): { binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_459)); } break; case ((Tmagic292524) 135): { binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_460)); } break; case ((Tmagic292524) 136): { binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_461)); } break; case ((Tmagic292524) 137): { binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_462)); } break; case ((Tmagic292524) 148): { geninop_556009_839829468(p0, e0, d0); } break; default: { internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_472)); } break; } } break; default: { switch (op0) { case ((Tmagic292524) 39): { binarystmtinexcl_555858_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_473)); } break; case ((Tmagic292524) 40): { binarystmtinexcl_555858_839829468(p0, e0, d0, 
((NimStringDesc*) &T839829468_474)); } break; case ((Tmagic292524) 41): { NimStringDesc* LOC30; NimStringDesc* LOC31; LOC30 = (NimStringDesc*)0; LOC31 = (NimStringDesc*)0; LOC31 = nimIntToStr(size0); LOC30 = rawNewString(LOC31->Sup.len + 14); appendString(LOC30, ((NimStringDesc*) &T839829468_475)); appendString(LOC30, LOC31); appendChar(LOC30, 41); unaryexprchar_551222_839829468(p0, e0, d0, LOC30); } break; case ((Tmagic292524) 133): case ((Tmagic292524) 132): { Ttype292840* LOC33; TY536475 LOC39; LOC33 = (Ttype292840*)0; LOC33 = getsystype_338150_3937434831(((Ttypekind292244) 31)); gettemp_537032_839829468(p0, LOC33, (&i0), NIM_FALSE); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); { Ttype292840* LOC38; if (!((*d0).k == ((Tlockind292808) 0))) goto LA36; LOC38 = (Ttype292840*)0; LOC38 = getsystype_338150_3937434831(((Ttypekind292244) 1)); gettemp_537032_839829468(p0, LOC38, d0, NIM_FALSE); } LA36: ; memset((void*)LOC39, 0, sizeof(LOC39)); LOC39[0] = rdloc_538188_839829468((&i0)); LOC39[1] = rope_178401_2381377266(((NI64) (size0))); LOC39[2] = rdloc_538188_839829468((&(*d0))); LOC39[3] = rdloc_538188_839829468((&a0)); LOC39[4] = rdloc_538188_839829468((&b0)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), lookupopr_556426_839829468[(op0)- 132], LOC39, 5); } break; case ((Tmagic292524) 131): { NimStringDesc* LOC41; NimStringDesc* LOC42; usestringh_532345_839829468((*p0).module); LOC41 = (NimStringDesc*)0; LOC42 = (NimStringDesc*)0; LOC42 = nimIntToStr(size0); LOC41 = rawNewString(LOC42->Sup.len + 21); appendString(LOC41, ((NimStringDesc*) &T839829468_481)); appendString(LOC41, LOC42); appendString(LOC41, ((NimStringDesc*) &T839829468_482)); binaryexprchar_550809_839829468(p0, e0, d0, LOC41); } break; case ((Tmagic292524) 134): case ((Tmagic292524) 135): case ((Tmagic292524) 136): case ((Tmagic292524) 137): { Ttype292840* LOC44; TY536847 LOC49; LOC44 = 
(Ttype292840*)0; LOC44 = getsystype_338150_3937434831(((Ttypekind292244) 31)); gettemp_537032_839829468(p0, LOC44, (&i0), NIM_FALSE); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); { if (!((*d0).k == ((Tlockind292808) 0))) goto LA47; gettemp_537032_839829468(p0, a0.t, d0, NIM_FALSE); } LA47: ; memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = rdloc_538188_839829468((&i0)); LOC49[1] = rope_178401_2381377266(((NI64) (size0))); LOC49[2] = rdloc_538188_839829468((&(*d0))); LOC49[3] = rdloc_538188_839829468((&a0)); LOC49[4] = rdloc_538188_839829468((&b0)); LOC49[5] = rope_178277_2381377266(lookupopr_556426_839829468[(op0)- 132]); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_483), LOC49, 6); } break; case ((Tmagic292524) 148): { geninop_556009_839829468(p0, e0, d0); } break; default: { internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_484)); } break; } } break; } } static N_INLINE(Ropeobj178006*, genargstringtocstring_539776_839829468)(Tcproc529021* p0, Tnode292802* n0) { Ropeobj178006* result0; Tloc292816 a0; TY178507 LOC1; result0 = (Ropeobj178006*)0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_538188_839829468((&a0)); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_485), LOC1, 1); return result0; } N_NIMCALL(Ropeobj178006*, openarrayloc_539665_839829468)(Tcproc529021* p0, Tnode292802* n0) { Ropeobj178006* result0; Tloc292816 a0; Tnode292802* q0; result0 = (Ropeobj178006*)0; memset((void*)(&a0), 0, sizeof(a0)); q0 = skipconv_328882_3876443242(n0); { Tmagic292524 LOC3; Tloc292816 b0; Tloc292816 c0; Tnode292802* LOC6; Tnode292802* LOC7; Tnode292802* LOC8; NimStringDesc* fmt0; Ttype292840* LOC9; TY535238 LOC25; LOC3 = (Tmagic292524)0; LOC3 = 
getmagic_318502_2616423590(q0); if (!(LOC3 == ((Tmagic292524) 139))) goto LA4; memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&c0), 0, sizeof(c0)); LOC6 = (Tnode292802*)0; LOC6 = HEX5BHEX5D_293238_850551059(q0, ((NI) 1)); initlocexpr_539283_839829468(p0, LOC6, (&a0)); LOC7 = (Tnode292802*)0; LOC7 = HEX5BHEX5D_293238_850551059(q0, ((NI) 2)); initlocexpr_539283_839829468(p0, LOC7, (&b0)); LOC8 = (Tnode292802*)0; LOC8 = HEX5BHEX5D_293238_850551059(q0, ((NI) 3)); initlocexpr_539283_839829468(p0, LOC8, (&c0)); LOC9 = (Ttype292840*)0; LOC9 = skiptypes_296099_850551059(a0.t, IL64(211106243062016)); switch ((*LOC9).kind) { case ((Ttypekind292244) 27): case ((Ttypekind292244) 48): case ((Ttypekind292244) 16): case ((Ttypekind292244) 4): { fmt0 = copyString(((NimStringDesc*) &T839829468_486)); } break; case ((Ttypekind292244) 28): case ((Ttypekind292244) 24): { { NIM_BOOL LOC14; Ttype292840* LOC15; NIM_BOOL LOC17; LOC14 = (NIM_BOOL)0; LOC15 = (Ttype292840*)0; LOC15 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256)); LOC14 = ((*LOC15).kind == ((Ttypekind292244) 23)); if (!(LOC14)) goto LA16; LOC17 = (NIM_BOOL)0; LOC17 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC17) goto LA18; LOC17 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA18: ; LOC14 = !(LOC17); LA16: ; if (!LOC14) goto LA19; fmt0 = copyString(((NimStringDesc*) &T839829468_487)); } goto LA12; LA19: ; { fmt0 = copyString(((NimStringDesc*) &T839829468_488)); } LA12: ; } break; default: { NimStringDesc* LOC23; NimStringDesc* LOC24; LOC23 = (NimStringDesc*)0; LOC24 = (NimStringDesc*)0; LOC24 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0)); LOC23 = rawNewString(LOC24->Sup.len + 14); appendString(LOC23, ((NimStringDesc*) &T839829468_489)); appendString(LOC23, LOC24); internalerror_196113_155036129(LOC23); fmt0 = copyString(((NimStringDesc*) &T839829468_490)); } break; } memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = 
rdloc_538188_839829468((&a0)); LOC25[1] = rdloc_538188_839829468((&b0)); LOC25[2] = rdloc_538188_839829468((&c0)); result0 = HEX25_178905_2381377266(fmt0, LOC25, 3); } goto LA1; LA4: ; { Ttype292840* LOC27; initlocexpr_539283_839829468(p0, n0, (&a0)); LOC27 = (Ttype292840*)0; LOC27 = skiptypes_296099_850551059(a0.t, IL64(211106240964864)); switch ((*LOC27).kind) { case ((Ttypekind292244) 27): case ((Ttypekind292244) 48): { TY178507 LOC29; memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = rdloc_538188_839829468((&a0)); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_378), LOC29, 1); } break; case ((Ttypekind292244) 28): case ((Ttypekind292244) 24): { { NIM_BOOL LOC33; Ttype292840* LOC34; NIM_BOOL LOC36; TY532811 LOC40; LOC33 = (NIM_BOOL)0; LOC34 = (Ttype292840*)0; LOC34 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256)); LOC33 = ((*LOC34).kind == ((Ttypekind292244) 23)); if (!(LOC33)) goto LA35; LOC36 = (NIM_BOOL)0; LOC36 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC36) goto LA37; LOC36 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA37: ; LOC33 = !(LOC36); LA35: ; if (!LOC33) goto LA38; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = rdloc_538188_839829468((&a0)); LOC40[1] = lenfield_539305_839829468(p0); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_491), LOC40, 2); } goto LA31; LA38: ; { TY532811 LOC42; memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = rdloc_538188_839829468((&a0)); LOC42[1] = lenfield_539305_839829468(p0); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_379), LOC42, 2); } LA31: ; } break; case ((Ttypekind292244) 16): case ((Ttypekind292244) 4): { TY532811 LOC44; NI64 LOC45; memset((void*)LOC44, 0, sizeof(LOC44)); LOC44[0] = rdloc_538188_839829468((&a0)); LOC45 = (NI64)0; LOC45 = lengthord_320007_3876443242(a0.t); LOC44[1] = rope_178401_2381377266(LOC45); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), 
LOC44, 2); } break; case ((Ttypekind292244) 21): case ((Ttypekind292244) 22): { Ttype292840* LOC47; LOC47 = (Ttype292840*)0; LOC47 = lastson_295377_850551059(a0.t); switch ((*LOC47).kind) { case ((Ttypekind292244) 28): case ((Ttypekind292244) 24): { TY532811 LOC49; memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = rdloc_538188_839829468((&a0)); LOC49[1] = lenfield_539305_839829468(p0); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_491), LOC49, 2); } break; case ((Ttypekind292244) 16): case ((Ttypekind292244) 4): { TY532811 LOC51; Ttype292840* LOC52; NI64 LOC53; memset((void*)LOC51, 0, sizeof(LOC51)); LOC51[0] = rdloc_538188_839829468((&a0)); LOC52 = (Ttype292840*)0; LOC52 = lastson_295377_850551059(a0.t); LOC53 = (NI64)0; LOC53 = lengthord_320007_3876443242(LOC52); LOC51[1] = rope_178401_2381377266(LOC53); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC51, 2); } break; default: { NimStringDesc* LOC55; NimStringDesc* LOC56; LOC55 = (NimStringDesc*)0; LOC56 = (NimStringDesc*)0; LOC56 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0)); LOC55 = rawNewString(LOC56->Sup.len + 14); appendString(LOC55, ((NimStringDesc*) &T839829468_489)); appendString(LOC55, LOC56); internalerror_196113_155036129(LOC55); } break; } } break; default: { NimStringDesc* LOC58; NimStringDesc* LOC59; LOC58 = (NimStringDesc*)0; LOC59 = (NimStringDesc*)0; LOC59 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0)); LOC58 = rawNewString(LOC59->Sup.len + 14); appendString(LOC58, ((NimStringDesc*) &T839829468_489)); appendString(LOC58, LOC59); internalerror_196113_155036129(LOC58); } break; } } LA1: ; return result0; } N_NIMCALL(Ropeobj178006*, genarg_539787_839829468)(Tcproc529021* p0, Tnode292802* n_539790_839829468, Tsym292834* param0, Tnode292802* call0) { Ropeobj178006* result0; Tloc292816 a0; result0 = (Ropeobj178006*)0; memset((void*)(&a0), 0, sizeof(a0)); { if (!((*n_539790_839829468).kind == ((Tnodekind292020) 71))) goto 
LA3; result0 = genargstringtocstring_539776_839829468(p0, n_539790_839829468); } goto LA1; LA3: ; { Ttype292840* LOC6; Tnode292802* n0; LOC6 = (Ttype292840*)0; LOC6 = skiptypes_296099_850551059((*param0).typ, IL64(211106240964864)); if (!((IL64(281475110928384) &((NU64)1<<((NU)((*LOC6).kind)&63U)))!=0)) goto LA7; { if (!!(((*n_539790_839829468).kind == ((Tnodekind292020) 64)))) goto LA11; n0 = n_539790_839829468; } goto LA9; LA11: ; { n0 = (*n_539790_839829468).kindU.S6.sons->data[((NI) 0)]; } LA9: ; result0 = openarrayloc_539665_839829468(p0, n0); } goto LA1; LA7: ; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = ccgintroducedptr_533611_839829468(param0); if (!LOC15) goto LA16; initlocexpr_539283_839829468(p0, n_539790_839829468, (&a0)); result0 = addrloc_538204_839829468((&a0)); } goto LA1; LA16: ; { NIM_BOOL LOC19; NIM_BOOL LOC20; NIM_BOOL LOC21; Tnode292802* callee0; LOC19 = (NIM_BOOL)0; LOC20 = (NIM_BOOL)0; LOC21 = (NIM_BOOL)0; LOC21 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC21) goto LA22; LOC21 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA22: ; LOC20 = LOC21; if (!(LOC20)) goto LA23; LOC20 = ((*(*param0).typ).kind == ((Ttypekind292244) 23)); LA23: ; LOC19 = LOC20; if (!(LOC19)) goto LA24; LOC19 = ((*n_539790_839829468).kind == ((Tnodekind292020) 64)); LA24: ; if (!LOC19) goto LA25; initlocexprsingleuse_539289_839829468(p0, (*n_539790_839829468).kindU.S6.sons->data[((NI) 0)], (&a0)); callee0 = (*call0).kindU.S6.sons->data[((NI) 0)]; { NIM_BOOL LOC29; NIM_BOOL LOC30; LOC29 = (NIM_BOOL)0; LOC30 = (NIM_BOOL)0; LOC30 = ((*callee0).kind == ((Tnodekind292020) 3)); if (!(LOC30)) goto LA31; LOC30 = ((134283296 & (*(*callee0).kindU.S4.sym).flags) == 32); LA31: ; LOC29 = LOC30; if (!(LOC29)) goto LA32; LOC29 = !(((72 & (*(*callee0).kindU.S4.sym).loc.flags) == 0)); LA32: ; if (!LOC29) goto LA33; result0 = addrloc_538204_839829468((&a0)); } goto LA27; LA33: ; { result0 = rdloc_538188_839829468((&a0)); } LA27: ; } 
goto LA1; LA25: ; { initlocexprsingleuse_539289_839829468(p0, n_539790_839829468, (&a0)); result0 = rdloc_538188_839829468((&a0)); } LA1: ; return result0; } N_NIMCALL(Ropeobj178006*, genargnoparam_539938_839829468)(Tcproc529021* p0, Tnode292802* n0) { Ropeobj178006* result0; Tloc292816 a0; result0 = (Ropeobj178006*)0; memset((void*)(&a0), 0, sizeof(a0)); { if (!((*n0).kind == ((Tnodekind292020) 71))) goto LA3; result0 = genargstringtocstring_539776_839829468(p0, n0); } goto LA1; LA3: ; { initlocexprsingleuse_539289_839829468(p0, n0, (&a0)); result0 = rdloc_538188_839829468((&a0)); } LA1: ; return result0; } N_NIMCALL(Ropeobj178006*, getrawproctype_540459_839829468)(Tcproc529021* p0, Ttype292840* t0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = getclosuretype_535685_839829468((*p0).module, t0, ((Tclosuretypekind535681) 0)); return result0; } N_NIMCALL(NIM_BOOL, leftappearsonrightside_539329_839829468)(Tnode292802* le0, Tnode292802* ri0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { if (!!((le0 == NIM_NIL))) goto LA3; { NI i_539364_839829468; NI HEX3Atmp_539376_839829468; NI LOC6; NI res_539379_839829468; i_539364_839829468 = (NI)0; HEX3Atmp_539376_839829468 = (NI)0; LOC6 = (NI)0; LOC6 = len_293081_850551059(ri0); HEX3Atmp_539376_839829468 = (LOC6 - 1); res_539379_839829468 = ((NI) 1); { while (1) { Tnode292802* r0; if (!(res_539379_839829468 <= HEX3Atmp_539376_839829468)) goto LA8; i_539364_839829468 = res_539379_839829468; r0 = HEX5BHEX5D_293238_850551059(ri0, i_539364_839829468); { Tanalysisresult473003 LOC11; LOC11 = (Tanalysisresult473003)0; LOC11 = ispartof_473340_788060399(le0, r0); if (!!((LOC11 == ((Tanalysisresult473003) 0)))) goto LA12; result0 = NIM_TRUE; goto BeforeRet; } LA12: ; res_539379_839829468 += ((NI) 1); } LA8: ; } } } LA3: ; }BeforeRet: ; return result0; } static N_INLINE(NIM_BOOL, hasnoinit_539383_839829468)(Tnode292802* call0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = 
((*(*call0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)); if (!(LOC1)) goto LA2; LOC1 = (((*(*(*call0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, resetloc_538350_839829468)(Tcproc529021* p0, Tloc292816* loc0) { NIM_BOOL containsgcref0; Ttype292840* typ0; { containsgcref0 = containsgarbagecollectedref_320117_3876443242((*loc0).t); typ0 = skiptypes_296099_850551059((*loc0).t, IL64(211106242013440)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = isimportedcpptype_533478_839829468(typ0); if (!LOC3) goto LA4; goto BeforeRet; } LA4: ; { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = iscomplexvaluetype_538317_839829468(typ0); if (!!(LOC8)) goto LA9; { Tloc292816 nilloc0; if (!containsgcref0) goto LA13; memset((void*)(&nilloc0), 0, sizeof(nilloc0)); initloc_532273_839829468((&nilloc0), ((Tlockind292808) 1), (*loc0).t, ((Tstorageloc292812) 2)); nilloc0.r = rope_178277_2381377266(((NimStringDesc*) &T839829468_174)); genrefassign_538311_839829468(p0, (&(*loc0)), (&nilloc0), 8); } goto LA11; LA13: ; { TY178507 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_538188_839829468((&(*loc0))); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_494), LOC16, 1); } LA11: ; } goto LA6; LA9: ; { { TY178507 LOC22; if (!(((*p0).options &(1U<<((NU)(((Toption169009) 6))&31U)))!=0)) goto LA20; memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = addrloc_538204_839829468((&(*loc0))); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_495), LOC22, 1); } LA20: ; { TY532811 LOC27; if (!!(((*loc0).s == ((Tstorageloc292812) 2)))) goto LA25; memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = addrloc_538204_839829468((&(*loc0))); LOC27[1] = gentypeinfo_535941_839829468((*p0).module, (*loc0).t); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_496), LOC27, 2); 
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, (&(*loc0)), NIM_TRUE); } goto LA23; LA25: ; { TY532811 LOC29; usestringh_532345_839829468((*p0).module); memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = addrloc_538204_839829468((&(*loc0))); LOC29[1] = rdloc_538188_839829468((&(*loc0))); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_152), LOC29, 2); genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, (&(*loc0)), NIM_TRUE); } LA23: ; } LA6: ; }BeforeRet: ; } N_NIMCALL(Ropeobj178006*, addcomma_540464_839829468)(Ropeobj178006* r0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { if (!(r0 == NIM_NIL)) goto LA3; result0 = r0; } goto LA1; LA3: ; { TY533289 LOC6; Ropeobj178006* LOC7; memset((void*)LOC6, 0, sizeof(LOC6)); LOC7 = (Ropeobj178006*)0; LOC7 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC6, 0); result0 = HEX26_178418_2381377266(r0, LOC7); } LA1: ; return result0; } N_NIMCALL(void, genclosurecall_540452_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) { Tloc292816 op0; Ropeobj178006* pl0; Ttype292840* typ0; NI length0; Ropeobj178006* rawproc0; NimStringDesc* callpattern0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); pl0 = (Ropeobj178006*)0; typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_295351_850551059(ri0); { NI i_540613_839829468; NI HEX3Atmp_541214_839829468; NI res_541217_839829468; i_540613_839829468 = (NI)0; HEX3Atmp_541214_839829468 = (NI)0; HEX3Atmp_541214_839829468 = (NI)(length0 - ((NI) 1)); res_541217_839829468 = ((NI) 1); { while (1) { if (!(res_541217_839829468 <= HEX3Atmp_541214_839829468)) goto LA3; i_540613_839829468 = res_541217_839829468; { NI LOC6; Tnode292802* paramtype0; LOC6 = (NI)0; LOC6 = sonslen_295327_850551059(typ0); if (!(i_540613_839829468 < 
LOC6)) goto LA7; paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_540613_839829468]; { NIM_BOOL LOC11; Ropeobj178006* LOC20; LOC11 = (NIM_BOOL)0; LOC11 = iscompiletimeonly_328706_3876443242((*paramtype0).typ); if (!!(LOC11)) goto LA12; { TY533289 LOC18; Ropeobj178006* LOC19; if (!!((pl0 == NIM_NIL))) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC19 = (Ropeobj178006*)0; LOC19 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0); add_178482_2381377266(&pl0, LOC19); } LA16: ; LOC20 = (Ropeobj178006*)0; LOC20 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_540613_839829468], (*paramtype0).kindU.S4.sym, ri0); add_178482_2381377266(&pl0, LOC20); } LA12: ; } goto LA4; LA7: ; { Ropeobj178006* LOC28; { TY533289 LOC26; Ropeobj178006* LOC27; if (!!((pl0 == NIM_NIL))) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC27 = (Ropeobj178006*)0; LOC27 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0); add_178482_2381377266(&pl0, LOC27); } LA24: ; LOC28 = (Ropeobj178006*)0; LOC28 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i_540613_839829468]); add_178482_2381377266(&pl0, LOC28); } LA4: ; res_541217_839829468 += ((NI) 1); } LA3: ; } } rawproc0 = getrawproctype_540459_839829468(p0, typ0); { if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 14))&31U)))!=0)) goto LA31; callpattern0 = copyString(((NimStringDesc*) &T839829468_492)); } goto LA29; LA31: ; { callpattern0 = copyString(((NimStringDesc*) &T839829468_493)); } LA29: ; { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA36; { NIM_BOOL LOC40; LOC40 = (NIM_BOOL)0; LOC40 = isinvalidreturntype_533550_839829468((*typ0).sons->data[((NI) 0)]); if (!LOC40) goto LA41; { NI LOC45; TY533289 LOC48; Ropeobj178006* LOC49; LOC45 = (NI)0; LOC45 = sonslen_295351_850551059(ri0); if (!(((NI) 1) < LOC45)) goto LA46; memset((void*)LOC48, 0, sizeof(LOC48)); LOC49 = (Ropeobj178006*)0; LOC49 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC48, 
0); add_178482_2381377266(&pl0, LOC49); } LA46: ; { NIM_BOOL LOC52; NIM_BOOL LOC54; Ropeobj178006* LOC67; NimStringDesc* LOC68; TY535235 LOC69; LOC52 = (NIM_BOOL)0; LOC52 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0); if (LOC52) goto LA53; LOC54 = (NIM_BOOL)0; LOC54 = leftappearsonrightside_539329_839829468(le0, ri0); LOC52 = !(LOC54); LA53: ; if (!LOC52) goto LA55; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA59; gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE); } goto LA57; LA59: ; { NIM_BOOL LOC62; NIM_BOOL LOC64; LOC62 = (NIM_BOOL)0; LOC62 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0)); if (!(LOC62)) goto LA63; LOC64 = (NIM_BOOL)0; LOC64 = hasnoinit_539383_839829468(ri0); LOC62 = !(LOC64); LA63: ; if (!LOC62) goto LA65; resetloc_538350_839829468(p0, d0); } goto LA57; LA65: ; LA57: ; LOC67 = (Ropeobj178006*)0; LOC67 = addrloc_538204_839829468((&(*d0))); add_178482_2381377266(&pl0, LOC67); LOC68 = (NimStringDesc*)0; LOC68 = rawNewString(callpattern0->Sup.len + 3); appendString(LOC68, callpattern0); appendString(LOC68, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC69, 0, sizeof(LOC69)); LOC69[0] = op0.r; LOC69[1] = pl0; LOC69[2] = addcomma_540464_839829468(pl0); LOC69[3] = rawproc0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC68, LOC69, 4); } goto LA50; LA55: ; { Tloc292816 tmp0; Ropeobj178006* LOC71; NimStringDesc* LOC72; TY535235 LOC73; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE); LOC71 = (Ropeobj178006*)0; LOC71 = addrloc_538204_839829468((&tmp0)); add_178482_2381377266(&pl0, LOC71); LOC72 = (NimStringDesc*)0; LOC72 = rawNewString(callpattern0->Sup.len + 3); appendString(LOC72, callpattern0); appendString(LOC72, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC73, 0, sizeof(LOC73)); LOC73[0] = op0.r; LOC73[1] = pl0; LOC73[2] = addcomma_540464_839829468(pl0); LOC73[3] = rawproc0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC72, LOC73, 
4); genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0); } LA50: ; } goto LA38; LA41: ; { Tloc292816 list0; TY535235 LOC79; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA77; gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA77: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0)); memset((void*)LOC79, 0, sizeof(LOC79)); LOC79[0] = op0.r; LOC79[1] = pl0; LOC79[2] = addcomma_540464_839829468(pl0); LOC79[3] = rawproc0; list0.r = HEX25_178905_2381377266(callpattern0, LOC79, 4); genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0); } LA38: ; } goto LA34; LA36: ; { NimStringDesc* LOC81; TY535235 LOC82; LOC81 = (NimStringDesc*)0; LOC81 = rawNewString(callpattern0->Sup.len + 3); appendString(LOC81, callpattern0); appendString(LOC81, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC82, 0, sizeof(LOC82)); LOC82[0] = op0.r; LOC82[1] = pl0; LOC82[2] = addcomma_540464_839829468(pl0); LOC82[3] = rawproc0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC81, LOC82, 4); } LA34: ; } N_NIMCALL(Ropeobj178006*, genotherarg_539277_839829468)(Tcproc529021* p0, Tnode292802* ri0, NI i0, Ttype292840* typ0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { NI LOC3; Tnode292802* paramtype0; LOC3 = (NI)0; LOC3 = sonslen_295327_850551059(typ0); if (!(i0 < LOC3)) goto LA4; paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i0]; { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = iscompiletimeonly_328706_3876443242((*paramtype0).typ); if (!LOC8) goto LA9; result0 = NIM_NIL; } goto LA6; LA9: ; { NIM_BOOL LOC12; Tnode292802* LOC16; LOC12 = (NIM_BOOL)0; LOC12 = ((*(*typ0).sons->data[i0]).kind == ((Ttypekind292244) 23)); if (!(LOC12)) goto LA13; LOC12 = ((*(*ri0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 64)); LA13: ; if (!LOC12) goto LA14; LOC16 = (Tnode292802*)0; LOC16 = HEX5BHEX5D_293238_850551059((*ri0).kindU.S6.sons->data[i0], ((NI) 0)); result0 = 
genargnoparam_539938_839829468(p0, LOC16); } goto LA6; LA14: ; { result0 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]); } LA6: ; } goto LA1; LA4: ; { { if (!!((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0))) goto LA21; localerror_196085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_501)); result0 = NIM_NIL; } goto LA19; LA21: ; { result0 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]); } LA19: ; } LA1: ; return result0; } N_NIMCALL(Tnode292802*, skipaddrderef_541433_839829468)(Tnode292802* node0) { Tnode292802* result0; Tnode292802* n0; NIM_BOOL isaddr0; { result0 = (Tnode292802*)0; n0 = node0; isaddr0 = NIM_FALSE; switch ((*n0).kind) { case ((Tnodekind292020) 63): case ((Tnodekind292020) 64): { n0 = (*n0).kindU.S6.sons->data[((NI) 0)]; isaddr0 = NIM_TRUE; } break; case ((Tnodekind292020) 47): case ((Tnodekind292020) 65): { n0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } break; default: { result0 = n0; goto BeforeRet; } break; } { if (!((*n0).kind == ((Tnodekind292020) 66))) goto LA6; n0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } LA6: ; { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; LOC10 = isaddr0; if (!(LOC10)) goto LA11; LOC10 = ((*n0).kind == ((Tnodekind292020) 47) || (*n0).kind == ((Tnodekind292020) 65)); LA11: ; if (!LOC10) goto LA12; result0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } goto LA8; LA12: ; { if (!((*n0).kind == ((Tnodekind292020) 63) || (*n0).kind == ((Tnodekind292020) 64))) goto LA15; result0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } goto LA8; LA15: ; { result0 = node0; } LA8: ; }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj178006*, genthisarg_541475_839829468)(Tcproc529021* p0, Tnode292802* ri_541478_839829468, NI i0, Ttype292840* typ0) { Ropeobj178006* result0; Tnode292802* ri0; Ttype292840* t0; result0 = (Ropeobj178006*)0; { NI LOC3; NimStringDesc* LOC6; LOC3 = (NI)0; LOC3 = sonslen_295327_850551059(typ0); if (!!((i0 < LOC3))) goto LA4; LOC6 = (NimStringDesc*)0; LOC6 = 
HEX24_196185_1689653243(T839829468_503); internalerror_196113_155036129(LOC6); } LA4: ; ri0 = HEX5BHEX5D_293238_850551059(ri_541478_839829468, i0); { while (1) { if (!((*ri0).kind == ((Tnodekind292020) 66))) goto LA8; ri0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0)); } LA8: ; } t0 = skiptypes_296099_850551059((*typ0).sons->data[i0], 2048); { Tnode292802* x0; if (!((*t0).kind == ((Ttypekind292244) 23))) goto LA11; { if (!((*ri0).kind == ((Tnodekind292020) 64))) goto LA15; x0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0)); } goto LA13; LA15: ; { x0 = ri0; } LA13: ; { if (!((*(*x0).typ).kind == ((Ttypekind292244) 21))) goto LA20; result0 = genargnoparam_539938_839829468(p0, x0); add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504)); } goto LA18; LA20: ; { NIM_BOOL LOC23; Tnode292802* LOC25; Tnode292802* LOC28; LOC23 = (NIM_BOOL)0; LOC23 = ((*x0).kind == ((Tnodekind292020) 65) || (*x0).kind == ((Tnodekind292020) 47)); if (!(LOC23)) goto LA24; LOC25 = (Tnode292802*)0; LOC25 = HEX5BHEX5D_293238_850551059(x0, ((NI) 0)); LOC23 = ((*(*LOC25).typ).kind == ((Ttypekind292244) 21)); LA24: ; if (!LOC23) goto LA26; LOC28 = (Tnode292802*)0; LOC28 = HEX5BHEX5D_293238_850551059(x0, ((NI) 0)); result0 = genargnoparam_539938_839829468(p0, LOC28); add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504)); } goto LA18; LA26: ; { result0 = genargnoparam_539938_839829468(p0, x0); add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257)); } LA18: ; } goto LA9; LA11: ; { if (!((*t0).kind == ((Ttypekind292244) 21))) goto LA31; { Tnode292802* LOC37; if (!((*ri0).kind == ((Tnodekind292020) 63) || (*ri0).kind == ((Tnodekind292020) 64))) goto LA35; LOC37 = (Tnode292802*)0; LOC37 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0)); result0 = genargnoparam_539938_839829468(p0, LOC37); add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257)); } goto LA33; LA35: ; { result0 = genargnoparam_539938_839829468(p0, ri0); add_178487_2381377266(&result0, 
((NimStringDesc*) &T839829468_504)); } LA33: ; } goto LA9; LA31: ; { ri0 = skipaddrderef_541433_839829468(ri0); { if (!((*ri0).kind == ((Tnodekind292020) 63) || (*ri0).kind == ((Tnodekind292020) 64))) goto LA42; ri0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0)); } LA42: ; result0 = genargnoparam_539938_839829468(p0, ri0); add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257)); } LA9: ; return result0; } N_NIMCALL(Ropeobj178006*, genpatterncall_541699_839829468)(Tcproc529021* p0, Tnode292802* ri_541702_839829468, NimStringDesc* pat0, Ttype292840* typ_541704_839829468) { Ropeobj178006* result0; NI i0; NI j0; result0 = (Ropeobj178006*)0; i0 = ((NI) 0); j0 = ((NI) 1); { while (1) { if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA2; switch (((NU8)(pat0->data[i0]))) { case 64: { { NI LOC6; Ropeobj178006* LOC9; LOC6 = (NI)0; LOC6 = len_293081_850551059(ri_541702_839829468); if (!(j0 < LOC6)) goto LA7; LOC9 = (Ropeobj178006*)0; LOC9 = genotherarg_539277_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468); add_178482_2381377266(&result0, LOC9); { NI k_541728_839829468; NI HEX3Atmp_541904_839829468; NI HEX3Atmp_541905_839829468; NI LOC11; NI res_541908_839829468; k_541728_839829468 = (NI)0; HEX3Atmp_541904_839829468 = (NI)0; HEX3Atmp_541905_839829468 = (NI)0; HEX3Atmp_541904_839829468 = (NI)(j0 + ((NI) 1)); LOC11 = (NI)0; LOC11 = len_293081_850551059(ri_541702_839829468); HEX3Atmp_541905_839829468 = (LOC11 - 1); res_541908_839829468 = HEX3Atmp_541904_839829468; { while (1) { TY533289 LOC14; Ropeobj178006* LOC15; Ropeobj178006* LOC16; if (!(res_541908_839829468 <= HEX3Atmp_541905_839829468)) goto LA13; k_541728_839829468 = res_541908_839829468; memset((void*)LOC14, 0, sizeof(LOC14)); LOC15 = (Ropeobj178006*)0; LOC15 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC14, 0); add_178482_2381377266(&result0, LOC15); LOC16 = (Ropeobj178006*)0; LOC16 = genotherarg_539277_839829468(p0, ri_541702_839829468, k_541728_839829468, typ_541704_839829468); 
add_178482_2381377266(&result0, LOC16); res_541908_839829468 += ((NI) 1); } LA13: ; } } } LA7: ; i0 += ((NI) 1); } break; case 35: { { Tnode292802* ri0; if (!(((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(43)) || ((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(64)))) goto LA20; ri0 = HEX5BHEX5D_293238_850551059(ri_541702_839829468, j0); { Ttype292840* typ0; TY533289 LOC31; Ropeobj178006* LOC32; TY533289 LOC46; Ropeobj178006* LOC47; if (!((*ri0).kind == ((Tnodekind292020) 27) || (*ri0).kind == ((Tnodekind292020) 29) || (*ri0).kind == ((Tnodekind292020) 30) || (*ri0).kind == ((Tnodekind292020) 31) || (*ri0).kind == ((Tnodekind292020) 26) || (*ri0).kind == ((Tnodekind292020) 28) || (*ri0).kind == ((Tnodekind292020) 32))) goto LA24; typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { Ropeobj178006* LOC30; if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(43))) goto LA28; LOC30 = (Ropeobj178006*)0; LOC30 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)]); add_178482_2381377266(&result0, LOC30); } LA28: ; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ropeobj178006*)0; LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_118), LOC31, 0); add_178482_2381377266(&result0, LOC32); { NI LOC35; Ropeobj178006* LOC38; LOC35 = (NI)0; LOC35 = len_293081_850551059(ri0); if (!(((NI) 1) < LOC35)) goto LA36; LOC38 = (Ropeobj178006*)0; LOC38 = genotherarg_539277_839829468(p0, ri0, ((NI) 1), typ0); add_178482_2381377266(&result0, LOC38); } LA36: ; { NI k_541793_839829468; NI HEX3Atmp_541915_839829468; NI HEX3Atmp_541916_839829468; NI LOC40; NI res_541919_839829468; k_541793_839829468 = (NI)0; HEX3Atmp_541915_839829468 = (NI)0; HEX3Atmp_541916_839829468 = (NI)0; HEX3Atmp_541915_839829468 = (NI)(j0 + ((NI) 1)); LOC40 = (NI)0; LOC40 = len_293081_850551059(ri0); HEX3Atmp_541916_839829468 = (LOC40 - 1); res_541919_839829468 = HEX3Atmp_541915_839829468; { while (1) { TY533289 LOC43; 
Ropeobj178006* LOC44; Ropeobj178006* LOC45; if (!(res_541919_839829468 <= HEX3Atmp_541916_839829468)) goto LA42; k_541793_839829468 = res_541919_839829468; memset((void*)LOC43, 0, sizeof(LOC43)); LOC44 = (Ropeobj178006*)0; LOC44 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC43, 0); add_178482_2381377266(&result0, LOC44); LOC45 = (Ropeobj178006*)0; LOC45 = genotherarg_539277_839829468(p0, ri0, k_541793_839829468, typ0); add_178482_2381377266(&result0, LOC45); res_541919_839829468 += ((NI) 1); } LA42: ; } } memset((void*)LOC46, 0, sizeof(LOC46)); LOC47 = (Ropeobj178006*)0; LOC47 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_117), LOC46, 0); add_178482_2381377266(&result0, LOC47); } goto LA22; LA24: ; { localerror_196085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_502)); } LA22: ; i0 += ((NI) 1); } goto LA18; LA20: ; { Ropeobj178006* LOC52; if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(46))) goto LA50; LOC52 = (Ropeobj178006*)0; LOC52 = genthisarg_541475_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468); add_178482_2381377266(&result0, LOC52); i0 += ((NI) 1); } goto LA18; LA50: ; { Tnode292802* arg0; Ropeobj178006* LOC58; if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(91))) goto LA54; arg0 = skipaddrderef_541433_839829468((*ri_541702_839829468).kindU.S6.sons->data[j0]); { while (1) { if (!((*arg0).kind == ((Tnodekind292020) 63) || (*arg0).kind == ((Tnodekind292020) 64) || (*arg0).kind == ((Tnodekind292020) 66))) goto LA57; arg0 = HEX5BHEX5D_293238_850551059(arg0, ((NI) 0)); } LA57: ; } LOC58 = (Ropeobj178006*)0; LOC58 = genargnoparam_539938_839829468(p0, arg0); add_178482_2381377266(&result0, LOC58); } goto LA18; LA54: ; { Ropeobj178006* LOC60; LOC60 = (Ropeobj178006*)0; LOC60 = genotherarg_539277_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468); add_178482_2381377266(&result0, LOC60); } LA18: ; j0 += ((NI) 1); i0 += ((NI) 1); } break; case 39: { NI idx0; NI stars0; idx0 = (NI)0; stars0 = 
(NI)0; { NIM_BOOL LOC64; Ttype292840* t0; LOC64 = (NIM_BOOL)0; LOC64 = scancppgenericslot_534827_839829468(pat0, (&i0), (&idx0), (&stars0)); if (!LOC64) goto LA65; t0 = resolvestarsincpptype_534891_839829468(typ_541704_839829468, idx0, stars0); { TY533289 LOC71; Ropeobj178006* LOC72; if (!(t0 == NIM_NIL)) goto LA69; memset((void*)LOC71, 0, sizeof(LOC71)); LOC72 = (Ropeobj178006*)0; LOC72 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC71, 0); add_178482_2381377266(&result0, LOC72); } goto LA67; LA69: ; { Ropeobj178006* LOC74; LOC74 = (Ropeobj178006*)0; LOC74 = gettypedesc_535673_839829468((*p0).module, t0); add_178482_2381377266(&result0, LOC74); } LA67: ; } LA65: ; } break; default: { NI start0; start0 = i0; { while (1) { if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA77; { if (!!((((NU8)(pat0->data[i0])) == ((NU8)(64)) || ((NU8)(pat0->data[i0])) == ((NU8)(35)) || ((NU8)(pat0->data[i0])) == ((NU8)(39))))) goto LA80; i0 += ((NI) 1); } goto LA78; LA80: ; { goto LA76; } LA78: ; } LA77: ; } LA76: ; { NimStringDesc* LOC87; if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA85; LOC87 = (NimStringDesc*)0; LOC87 = copyStrLast(pat0, start0, (NI)(i0 - ((NI) 1))); add_178487_2381377266(&result0, LOC87); } LA85: ; } break; } } LA2: ; } return result0; } N_NIMCALL(void, fixupcall_539410_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0, Ropeobj178006* callee0, Ropeobj178006* params0) { Ropeobj178006* pl0; TY533289 LOC1; Ropeobj178006* LOC2; Ropeobj178006* LOC3; Ttype292840* typ0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (Ropeobj178006*)0; LOC2 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_118), LOC1, 0); LOC3 = (Ropeobj178006*)0; LOC3 = HEX26_178418_2381377266(callee0, LOC2); pl0 = HEX26_178418_2381377266(LOC3, params0); typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA6; { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; 
LOC10 = isinvalidreturntype_533550_839829468((*typ0).sons->data[((NI) 0)]); if (!LOC10) goto LA11; { TY533289 LOC17; Ropeobj178006* LOC18; if (!!((params0 == NIM_NIL))) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC18 = (Ropeobj178006*)0; LOC18 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC17, 0); add_178482_2381377266(&pl0, LOC18); } LA15: ; { NIM_BOOL LOC21; NIM_BOOL LOC23; Ropeobj178006* LOC36; TY533289 LOC37; Ropeobj178006* LOC38; LOC21 = (NIM_BOOL)0; LOC21 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0); if (LOC21) goto LA22; LOC23 = (NIM_BOOL)0; LOC23 = leftappearsonrightside_539329_839829468(le0, ri0); LOC21 = !(LOC23); LA22: ; if (!LOC21) goto LA24; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA28; gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE); } goto LA26; LA28: ; { NIM_BOOL LOC31; NIM_BOOL LOC33; LOC31 = (NIM_BOOL)0; LOC31 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0)); if (!(LOC31)) goto LA32; LOC33 = (NIM_BOOL)0; LOC33 = hasnoinit_539383_839829468(ri0); LOC31 = !(LOC33); LA32: ; if (!LOC31) goto LA34; resetloc_538350_839829468(p0, d0); } goto LA26; LA34: ; LA26: ; LOC36 = (Ropeobj178006*)0; LOC36 = addrloc_538204_839829468((&(*d0))); add_178482_2381377266(&pl0, LOC36); memset((void*)LOC37, 0, sizeof(LOC37)); LOC38 = (Ropeobj178006*)0; LOC38 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC37, 0); add_178482_2381377266(&pl0, LOC38); line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0); } goto LA19; LA24: ; { Tloc292816 tmp0; Ropeobj178006* LOC40; TY533289 LOC41; Ropeobj178006* LOC42; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE); LOC40 = (Ropeobj178006*)0; LOC40 = addrloc_538204_839829468((&tmp0)); add_178482_2381377266(&pl0, LOC40); memset((void*)LOC41, 0, sizeof(LOC41)); LOC42 = (Ropeobj178006*)0; LOC42 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC41, 0); add_178482_2381377266(&pl0, LOC42); 
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0); genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0); } LA19: ; } goto LA8; LA11: ; { TY533289 LOC44; Ropeobj178006* LOC45; memset((void*)LOC44, 0, sizeof(LOC44)); LOC45 = (Ropeobj178006*)0; LOC45 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_117), LOC44, 0); add_178482_2381377266(&pl0, LOC45); { NIM_BOOL LOC48; NIM_BOOL LOC49; LOC48 = (NIM_BOOL)0; LOC49 = (NIM_BOOL)0; LOC49 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC49) goto LA50; LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA50: ; LOC48 = LOC49; if (!(LOC48)) goto LA51; LOC48 = (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 8))&15U)))!=0); LA51: ; if (!LOC48) goto LA52; (*d0).k = ((Tlockind292808) 9); unsureAsgnRef((void**) (&(*d0).r), pl0); (*d0).flags &= ~(((NU16)1) << ((((Tlocflag292810) 8)) % (sizeof(NU16)*8))); } goto LA46; LA52: ; { Tloc292816 list0; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA57; gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA57: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0)); list0.r = pl0; genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0); } LA46: ; } LA8: ; } goto LA4; LA6: ; { TY533289 LOC60; Ropeobj178006* LOC61; memset((void*)LOC60, 0, sizeof(LOC60)); LOC61 = (Ropeobj178006*)0; LOC61 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC60, 0); add_178482_2381377266(&pl0, LOC61); line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0); } LA4: ; } N_NIMCALL(void, geninfixcall_541929_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) { Tloc292816 op0; Ttype292840* typ_541940_839829468; NI length0; NimStringDesc* pat0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); typ_541940_839829468 = 
skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_295351_850551059(ri0); pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data; { NimStringDesc* LOC5; if (!!(!((pat0 == NIM_NIL)))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = HEX24_196185_1689653243(T839829468_498); internalerror_196113_155036129(LOC5); } LA3: ; { NIM_BOOL LOC8; Ropeobj178006* pl0; Ttype292840* typ0; LOC8 = (NIM_BOOL)0; LOC8 = contains_110056_4286263276(pat0, T839829468_500); if (!LOC8) goto LA9; pl0 = genpatterncall_541699_839829468(p0, ri0, pat0, typ_541940_839829468); typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA13; { NIM_BOOL LOC17; NIM_BOOL LOC18; LOC17 = (NIM_BOOL)0; LOC18 = (NIM_BOOL)0; LOC18 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC18) goto LA19; LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA19: ; LOC17 = LOC18; if (!(LOC17)) goto LA20; LOC17 = (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 8))&15U)))!=0); LA20: ; if (!LOC17) goto LA21; (*d0).k = ((Tlockind292808) 9); unsureAsgnRef((void**) (&(*d0).r), pl0); (*d0).flags &= ~(((NU16)1) << ((((Tlocflag292810) 8)) % (sizeof(NU16)*8))); } goto LA15; LA21: ; { Tloc292816 list0; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA26; gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA26: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0)); list0.r = pl0; genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0); } LA15: ; } goto LA11; LA13: ; { TY533289 LOC29; Ropeobj178006* LOC30; memset((void*)LOC29, 0, sizeof(LOC29)); LOC30 = (Ropeobj178006*)0; LOC30 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_497), LOC29, 0); add_178482_2381377266(&pl0, LOC30); 
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0); } LA11: ; } goto LA6; LA9: ; { Ropeobj178006* pl0; Ropeobj178006* params0; pl0 = NIM_NIL; { NI LOC34; Ropeobj178006* LOC37; LOC34 = (NI)0; LOC34 = len_293081_850551059(ri0); if (!(((NI) 1) < LOC34)) goto LA35; LOC37 = (Ropeobj178006*)0; LOC37 = genthisarg_541475_839829468(p0, ri0, ((NI) 1), typ_541940_839829468); add_178482_2381377266(&pl0, LOC37); } LA35: ; add_178482_2381377266(&pl0, op0.r); params0 = (Ropeobj178006*)0; { NI i_542425_839829468; NI HEX3Atmp_542609_839829468; NI res_542612_839829468; i_542425_839829468 = (NI)0; HEX3Atmp_542609_839829468 = (NI)0; HEX3Atmp_542609_839829468 = (NI)(length0 - ((NI) 1)); res_542612_839829468 = ((NI) 2); { while (1) { Ropeobj178006* LOC47; if (!(res_542612_839829468 <= HEX3Atmp_542609_839829468)) goto LA40; i_542425_839829468 = res_542612_839829468; { TY533289 LOC45; Ropeobj178006* LOC46; if (!!((params0 == NIM_NIL))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (Ropeobj178006*)0; LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC45, 0); add_178482_2381377266(&params0, LOC46); } LA43: ; LOC47 = (Ropeobj178006*)0; LOC47 = genotherarg_539277_839829468(p0, ri0, i_542425_839829468, typ_541940_839829468); add_178482_2381377266(&params0, LOC47); res_542612_839829468 += ((NI) 1); } LA40: ; } } fixupcall_539410_839829468(p0, le0, ri0, d0, pl0, params0); } LA6: ; } N_NIMCALL(void, gennamedparamcall_542616_839829468)(Tcproc529021* p0, Tnode292802* ri0, Tloc292816* d0) { Tloc292816 op0; Ropeobj178006* pl0; TY533289 LOC1; Ttype292840* typ0; NI length0; NimStringDesc* pat0; NI start0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); memset((void*)LOC1, 0, sizeof(LOC1)); pl0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_506), LOC1, 0); typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = 
sonslen_295351_850551059(ri0); pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data; { NimStringDesc* LOC6; if (!!(!((pat0 == NIM_NIL)))) goto LA4; LOC6 = (NimStringDesc*)0; LOC6 = HEX24_196185_1689653243(T839829468_507); internalerror_196113_155036129(LOC6); } LA4: ; start0 = ((NI) 3); { NIM_BOOL LOC9; LOC9 = (NIM_BOOL)0; LOC9 = contains_110046_4286263276(pat0, 32); if (!LOC9) goto LA10; start0 = ((NI) 1); add_178482_2381377266(&pl0, op0.r); { TY533289 LOC16; Ropeobj178006* LOC17; Ropeobj178006* LOC18; if (!(((NI) 1) < length0)) goto LA14; memset((void*)LOC16, 0, sizeof(LOC16)); LOC17 = (Ropeobj178006*)0; LOC17 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC16, 0); add_178482_2381377266(&pl0, LOC17); LOC18 = (Ropeobj178006*)0; LOC18 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0); add_178482_2381377266(&pl0, LOC18); start0 = ((NI) 2); } LA14: ; } goto LA7; LA10: ; { { Ropeobj178006* LOC24; TY533289 LOC25; Ropeobj178006* LOC26; if (!(((NI) 1) < length0)) goto LA22; LOC24 = (Ropeobj178006*)0; LOC24 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0); add_178482_2381377266(&pl0, LOC24); memset((void*)LOC25, 0, sizeof(LOC25)); LOC26 = (Ropeobj178006*)0; LOC26 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC25, 0); add_178482_2381377266(&pl0, LOC26); } LA22: ; add_178482_2381377266(&pl0, op0.r); { TY533289 LOC31; Ropeobj178006* LOC32; Ropeobj178006* LOC33; if (!(((NI) 2) < length0)) goto LA29; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ropeobj178006*)0; LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC31, 0); add_178482_2381377266(&pl0, LOC32); LOC33 = (Ropeobj178006*)0; LOC33 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 2)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 2)]).kindU.S4.sym, ri0); 
add_178482_2381377266(&pl0, LOC33); } LA29: ; } LA7: ; { NI i_543051_839829468; NI HEX3Atmp_543617_839829468; NI res_543620_839829468; i_543051_839829468 = (NI)0; HEX3Atmp_543617_839829468 = (NI)0; HEX3Atmp_543617_839829468 = (NI)(length0 - ((NI) 1)); res_543620_839829468 = start0; { while (1) { Tsym292834* param0; TY533289 LOC42; Ropeobj178006* LOC43; TY533289 LOC44; Ropeobj178006* LOC45; Ropeobj178006* LOC46; if (!(res_543620_839829468 <= HEX3Atmp_543617_839829468)) goto LA36; i_543051_839829468 = res_543620_839829468; { NI LOC39; LOC39 = (NI)0; LOC39 = sonslen_295327_850551059(typ0); if (!(LOC39 <= i_543051_839829468)) goto LA40; internalerror_196100_155036129((*ri0).info, ((NimStringDesc*) &T839829468_508)); } LA40: ; param0 = (*(*(*typ0).n).kindU.S6.sons->data[i_543051_839829468]).kindU.S4.sym; memset((void*)LOC42, 0, sizeof(LOC42)); LOC43 = (Ropeobj178006*)0; LOC43 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC42, 0); add_178482_2381377266(&pl0, LOC43); add_178487_2381377266(&pl0, (*(*param0).name).s); memset((void*)LOC44, 0, sizeof(LOC44)); LOC45 = (Ropeobj178006*)0; LOC45 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC44, 0); add_178482_2381377266(&pl0, LOC45); LOC46 = (Ropeobj178006*)0; LOC46 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_543051_839829468], param0, ri0); add_178482_2381377266(&pl0, LOC46); res_543620_839829468 += ((NI) 1); } LA36: ; } } { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA49; { NIM_BOOL LOC53; LOC53 = (NIM_BOOL)0; LOC53 = isinvalidreturntype_533550_839829468((*typ0).sons->data[((NI) 0)]); if (!LOC53) goto LA54; { NI LOC58; TY533289 LOC61; Ropeobj178006* LOC62; LOC58 = (NI)0; LOC58 = sonslen_295351_850551059(ri0); if (!(((NI) 1) < LOC58)) goto LA59; memset((void*)LOC61, 0, sizeof(LOC61)); LOC62 = (Ropeobj178006*)0; LOC62 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC61, 0); add_178482_2381377266(&pl0, LOC62); } LA59: ; { TY533289 LOC71; 
Ropeobj178006* LOC72; Ropeobj178006* LOC73; TY533289 LOC74; Ropeobj178006* LOC75; if (!((3 &(1U<<((NU)((*d0).k)&15U)))!=0)) goto LA65; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA69; gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE); } LA69: ; memset((void*)LOC71, 0, sizeof(LOC71)); LOC72 = (Ropeobj178006*)0; LOC72 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_509), LOC71, 0); add_178482_2381377266(&pl0, LOC72); LOC73 = (Ropeobj178006*)0; LOC73 = addrloc_538204_839829468((&(*d0))); add_178482_2381377266(&pl0, LOC73); memset((void*)LOC74, 0, sizeof(LOC74)); LOC75 = (Ropeobj178006*)0; LOC75 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC74, 0); add_178482_2381377266(&pl0, LOC75); line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0); } goto LA63; LA65: ; { Tloc292816 tmp0; Ropeobj178006* LOC77; TY533289 LOC78; Ropeobj178006* LOC79; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE); LOC77 = (Ropeobj178006*)0; LOC77 = addrloc_538204_839829468((&tmp0)); add_178482_2381377266(&pl0, LOC77); memset((void*)LOC78, 0, sizeof(LOC78)); LOC79 = (Ropeobj178006*)0; LOC79 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC78, 0); add_178482_2381377266(&pl0, LOC79); line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0); genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0); } LA63: ; } goto LA51; LA54: ; { TY533289 LOC81; Ropeobj178006* LOC82; Tloc292816 list0; memset((void*)LOC81, 0, sizeof(LOC81)); LOC82 = (Ropeobj178006*)0; LOC82 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_511), LOC81, 0); add_178482_2381377266(&pl0, LOC82); { if (!((*d0).k == ((Tlockind292808) 0))) goto LA85; gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA85: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_532273_839829468((&list0), ((Tlockind292808) 9), NIM_NIL, ((Tstorageloc292812) 0)); list0.r = pl0; 
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0); } LA51: ; } goto LA47; LA49: ; { TY533289 LOC88; Ropeobj178006* LOC89; memset((void*)LOC88, 0, sizeof(LOC88)); LOC89 = (Ropeobj178006*)0; LOC89 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC88, 0); add_178482_2381377266(&pl0, LOC89); line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0); } LA47: ; } N_NIMCALL(void, genprefixcall_539960_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) { Tloc292816 op0; Ropeobj178006* params0; Ttype292840* typ0; NI length0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); params0 = (Ropeobj178006*)0; typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_295351_850551059(ri0); { NI i_540213_839829468; NI HEX3Atmp_540445_839829468; NI res_540448_839829468; i_540213_839829468 = (NI)0; HEX3Atmp_540445_839829468 = (NI)0; HEX3Atmp_540445_839829468 = (NI)(length0 - ((NI) 1)); res_540448_839829468 = ((NI) 1); { while (1) { if (!(res_540448_839829468 <= HEX3Atmp_540445_839829468)) goto LA3; i_540213_839829468 = res_540448_839829468; { NI LOC6; Tnode292802* paramtype0; LOC6 = (NI)0; LOC6 = sonslen_295327_850551059(typ0); if (!(i_540213_839829468 < LOC6)) goto LA7; paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_540213_839829468]; { NIM_BOOL LOC11; Ropeobj178006* LOC20; LOC11 = (NIM_BOOL)0; LOC11 = iscompiletimeonly_328706_3876443242((*paramtype0).typ); if (!!(LOC11)) goto LA12; { TY533289 LOC18; Ropeobj178006* LOC19; if (!!((params0 == NIM_NIL))) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC19 = (Ropeobj178006*)0; LOC19 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0); add_178482_2381377266(&params0, LOC19); } LA16: ; LOC20 = (Ropeobj178006*)0; LOC20 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_540213_839829468], (*paramtype0).kindU.S4.sym, ri0); 
add_178482_2381377266(&params0, LOC20); } LA12: ; } goto LA4; LA7: ; { Ropeobj178006* LOC28; { TY533289 LOC26; Ropeobj178006* LOC27; if (!!((params0 == NIM_NIL))) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC27 = (Ropeobj178006*)0; LOC27 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0); add_178482_2381377266(&params0, LOC27); } LA24: ; LOC28 = (Ropeobj178006*)0; LOC28 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i_540213_839829468]); add_178482_2381377266(&params0, LOC28); } LA4: ; res_540448_839829468 += ((NI) 1); } LA3: ; } } fixupcall_539410_839829468(p0, le0, ri0, d0, op0.r, params0); } static N_INLINE(void, poststmtactions_532942_839829468)(Tcproc529021* p0) { Ropeobj178006** LOC1; LOC1 = (Ropeobj178006**)0; LOC1 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); add_178482_2381377266(LOC1, (*(*p0).module).injectstmt); } N_NIMCALL(void, gencall_543632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { { Ttype292840* LOC3; LOC3 = (Ttype292840*)0; LOC3 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, 2048); if (!((*LOC3).callconv == ((Tcallingconvention292002) 8))) goto LA4; genclosurecall_540452_839829468(p0, NIM_NIL, e0, d0); } goto LA1; LA4: ; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)); if (!(LOC7)) goto LA8; LOC7 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA8: ; if (!LOC7) goto LA9; geninfixcall_541929_839829468(p0, NIM_NIL, e0, d0); } goto LA1; LA9: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)); if (!(LOC12)) goto LA13; LOC12 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0); LA13: ; if (!LOC12) goto LA14; gennamedparamcall_542616_839829468(p0, e0, d0); } goto LA1; LA14: ; { 
genprefixcall_539960_839829468(p0, NIM_NIL, e0, d0); } LA1: ; poststmtactions_532942_839829468(p0); } N_NIMCALL(void, genreset_554731_839829468)(Tcproc529021* p0, Tnode292802* n0) { Tloc292816 a0; TY532811 LOC1; Ttype292840* LOC2; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = addrloc_538204_839829468((&a0)); LOC2 = (Ttype292840*)0; LOC2 = skiptypes_296099_850551059(a0.t, IL64(211106242013440)); LOC1[1] = gentypeinfo_535941_839829468((*p0).module, LOC2); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_496), LOC1, 2); } N_NIMCALL(void, genecho_554369_839829468)(Tcproc529021* p0, Tnode292802* n0) { NIM_BOOL LOC6; Ropeobj178006* args0; Tloc292816 a0; TY532811 LOC18; NimStringDesc* LOC19; NI LOC20; NimStringDesc* LOC21; TY533289 LOC22; { NimStringDesc* LOC5; if (!!(((*n0).kind == ((Tnodekind292020) 41)))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = HEX24_196185_1689653243(T839829468_512); internalerror_196113_155036129(LOC5); } LA3: ; LOC6 = (NIM_BOOL)0; LOC6 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_513)); args0 = NIM_NIL; memset((void*)(&a0), 0, sizeof(a0)); { NI i_554404_839829468; NI HEX3Atmp_554431_839829468; NI LOC8; NI res_554434_839829468; i_554404_839829468 = (NI)0; HEX3Atmp_554431_839829468 = (NI)0; LOC8 = (NI)0; LOC8 = len_293081_850551059(n0); HEX3Atmp_554431_839829468 = (NI)(LOC8 - ((NI) 1)); res_554434_839829468 = ((NI) 0); { while (1) { if (!(res_554434_839829468 <= HEX3Atmp_554431_839829468)) goto LA10; i_554404_839829468 = res_554434_839829468; { Tnode292802* LOC13; LOC13 = (Tnode292802*)0; LOC13 = skipconv_328882_3876443242((*n0).kindU.S6.sons->data[i_554404_839829468]); if (!((*LOC13).kind == ((Tnodekind292020) 23))) goto LA14; add_178487_2381377266(&args0, ((NimStringDesc*) &T839829468_514)); } goto LA11; LA14: ; { TY178507 LOC17; 
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[i_554404_839829468], (&a0)); memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rdloc_538188_839829468((&a0)); addf_179205_2381377266(&args0, ((NimStringDesc*) &T839829468_515), LOC17, 1); } LA11: ; res_554434_839829468 += ((NI) 1); } LA10: ; } } memset((void*)LOC18, 0, sizeof(LOC18)); LOC19 = (NimStringDesc*)0; LOC20 = (NI)0; LOC20 = len_293081_850551059(n0); LOC21 = (NimStringDesc*)0; LOC21 = nsuRepeatStr(((NimStringDesc*) &T839829468_517), ((NI) (LOC20))); LOC19 = rawNewString(LOC21->Sup.len + tnl_176644_4151366050->Sup.len + 0); appendString(LOC19, LOC21); appendString(LOC19, tnl_176644_4151366050); LOC18[0] = makecstring_191638_155036129(LOC19); LOC18[1] = args0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_516), LOC18, 2); memset((void*)LOC22, 0, sizeof(LOC22)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_518), LOC22, 0); } N_NIMCALL(void, genseqconstr_555004_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) { Tloc292816 arr0; NI LOC5; Ropeobj178006* LOC6; memset((void*)(&arr0), 0, sizeof(arr0)); { if (!((*d0).k == ((Tlockind292808) 0))) goto LA3; gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA3: ; LOC5 = (NI)0; LOC5 = sonslen_295351_850551059(t0); LOC6 = (Ropeobj178006*)0; LOC6 = intliteral_539270_839829468(((NI64) (LOC5))); gennewseqaux_554795_839829468(p0, (&(*d0)), LOC6); { NI i_555031_839829468; NI HEX3Atmp_555039_839829468; NI LOC8; NI res_555042_839829468; i_555031_839829468 = (NI)0; HEX3Atmp_555039_839829468 = (NI)0; LOC8 = (NI)0; LOC8 = sonslen_295351_850551059(t0); HEX3Atmp_555039_839829468 = (NI)(LOC8 - ((NI) 1)); res_555042_839829468 = ((NI) 0); { while (1) { Ttype292840* LOC11; Ttype292840* LOC12; TY532811 LOC13; if (!(res_555042_839829468 <= HEX3Atmp_555039_839829468)) goto LA10; i_555031_839829468 = res_555042_839829468; LOC11 = (Ttype292840*)0; LOC11 = 
skiptypes_296099_850551059((*t0).typ, IL64(211106232576256)); LOC12 = (Ttype292840*)0; LOC12 = elemtype_320394_3876443242(LOC11); initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC12, ((Tstorageloc292812) 3)); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = rdloc_538188_839829468((&(*d0))); LOC13[1] = intliteral_539270_839829468(((NI64) (i_555031_839829468))); arr0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC13, 2); arr0.s = ((Tstorageloc292812) 3); expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[i_555031_839829468], (&arr0)); res_555042_839829468 += ((NI) 1); } LA10: ; } } gcusage_554439_839829468(t0); } N_NIMCALL(void, genarrtoseq_555046_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) { Tloc292816 elem0; Tloc292816 a0; Tloc292816 arr0; NI L0; NI64 LOC9; Ropeobj178006* LOC10; { memset((void*)(&elem0), 0, sizeof(elem0)); memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&arr0), 0, sizeof(arr0)); { if (!((*t0).kind == ((Tnodekind292020) 41))) goto LA3; asgnRefNoCycle((void**) (&(*(*t0).kindU.S6.sons->data[((NI) 1)]).typ), (*t0).typ); genseqconstr_555004_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], d0); goto BeforeRet; } LA3: ; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA7; gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA7: ; LOC9 = (NI64)0; LOC9 = lengthord_320007_3876443242((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ); L0 = ((NI) (LOC9)); LOC10 = (Ropeobj178006*)0; LOC10 = intliteral_539270_839829468(((NI64) (L0))); gennewseqaux_554795_839829468(p0, (&(*d0)), LOC10); initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], (&a0)); { NI i_555090_839829468; NI HEX3Atmp_555104_839829468; NI res_555107_839829468; i_555090_839829468 = (NI)0; HEX3Atmp_555104_839829468 = (NI)0; HEX3Atmp_555104_839829468 = (NI)(L0 - ((NI) 1)); res_555107_839829468 = ((NI) 0); { while (1) { Ttype292840* LOC14; Ttype292840* LOC15; TY532811 LOC16; Ttype292840* LOC17; Ttype292840* LOC18; TY532811 
LOC19; if (!(res_555107_839829468 <= HEX3Atmp_555104_839829468)) goto LA13; i_555090_839829468 = res_555107_839829468; LOC14 = (Ttype292840*)0; LOC14 = skiptypes_296099_850551059((*t0).typ, IL64(211106232576256)); LOC15 = (Ttype292840*)0; LOC15 = elemtype_320394_3876443242(LOC14); initloc_532273_839829468((&elem0), ((Tlockind292808) 6), LOC15, ((Tstorageloc292812) 3)); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_538188_839829468((&(*d0))); LOC16[1] = intliteral_539270_839829468(((NI64) (i_555090_839829468))); elem0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC16, 2); elem0.s = ((Tstorageloc292812) 3); LOC17 = (Ttype292840*)0; LOC17 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106232576256)); LOC18 = (Ttype292840*)0; LOC18 = elemtype_320394_3876443242(LOC17); initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC18, a0.s); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_538188_839829468((&a0)); LOC19[1] = intliteral_539270_839829468(((NI64) (i_555090_839829468))); arr0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC19, 2); genassignment_539264_839829468(p0, (&elem0), (&arr0), 3); res_555107_839829468 += ((NI) 1); } LA13: ; } } }BeforeRet: ; } N_NIMCALL(void, gendeepcopy_550374_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0) { Ttype292840* ty0; ty0 = skiptypes_296099_850551059((*dest0).t, IL64(211106242013440)); switch ((*ty0).kind) { case ((Ttypekind292244) 21): case ((Ttypekind292244) 22): case ((Ttypekind292244) 25): case ((Ttypekind292244) 18): case ((Ttypekind292244) 17): case ((Ttypekind292244) 16): case ((Ttypekind292244) 4): { TY535238 LOC2; memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = addrloc_538204_839829468(dest0); LOC2[1] = addrloc_538204_839829468(src0); LOC2[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_519), 
LOC2, 3); } break; case ((Ttypekind292244) 24): case ((Ttypekind292244) 28): { TY535238 LOC4; memset((void*)LOC4, 0, sizeof(LOC4)); LOC4[0] = addrloc_538204_839829468(dest0); LOC4[1] = rdloc_538188_839829468(src0); LOC4[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_520), LOC4, 3); } break; case ((Ttypekind292244) 27): case ((Ttypekind292244) 48): { TY535238 LOC6; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = addrloc_538204_839829468(dest0); LOC6[1] = addrloc_538204_839829468(src0); LOC6[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_521), LOC6, 3); } break; case ((Ttypekind292244) 19): { { Tctypekind529007 LOC10; TY535238 LOC13; NI64 LOC14; LOC10 = (Tctypekind529007)0; LOC10 = maptype_533394_839829468(ty0); if (!(LOC10 == ((Tctypekind529007) 17))) goto LA11; usestringh_532345_839829468((*p0).module); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = rdloc_538188_839829468(dest0); LOC13[1] = rdloc_538188_839829468(src0); LOC14 = (NI64)0; LOC14 = getsize_320135_3876443242((*dest0).t); LOC13[2] = rope_178401_2381377266(LOC14); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_268), LOC13, 3); } goto LA8; LA11: ; { TY532811 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_538188_839829468(dest0); LOC16[1] = rdloc_538188_839829468(src0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC16, 2); } LA8: ; } break; case ((Ttypekind292244) 26): case ((Ttypekind292244) 2): case ((Ttypekind292244) 1): case ((Ttypekind292244) 14): case ((Ttypekind292244) 29): case ((Ttypekind292244) 31) ... 
((Ttypekind292244) 44): case ((Ttypekind292244) 20): case ((Ttypekind292244) 23): { TY532811 LOC18; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_538188_839829468(dest0); LOC18[1] = rdloc_538188_839829468(src0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC18, 2); } break; default: { NimStringDesc* LOC20; LOC20 = (NimStringDesc*)0; LOC20 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 13); appendString(LOC20, ((NimStringDesc*) &T839829468_522)); appendString(LOC20, reprEnum((NI)(*ty0).kind, (&NTI292244))); internalerror_196113_155036129(LOC20); } break; } } N_NIMCALL(void, genmagicexpr_557033_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) { switch (op0) { case ((Tmagic292524) 127): case ((Tmagic292524) 126): { genandor_554311_839829468(p0, e0, d0, op0); } break; case ((Tmagic292524) 99) ... ((Tmagic292524) 117): { unaryarith_552646_839829468(p0, e0, d0, op0); } break; case ((Tmagic292524) 96) ... ((Tmagic292524) 98): { unaryarithoverflow_551633_839829468(p0, e0, d0, op0); } break; case ((Tmagic292524) 52) ... ((Tmagic292524) 55): { binaryfloatarith_556729_839829468(p0, e0, d0, op0); } break; case ((Tmagic292524) 56) ... ((Tmagic292524) 93): { binaryarith_551819_839829468(p0, e0, d0, op0); } break; case ((Tmagic292524) 95): { geneqproc_552214_839829468(p0, e0, d0); } break; case ((Tmagic292524) 45) ... 
((Tmagic292524) 51): { binaryarithoverflow_551262_839829468(p0, e0, d0, op0); } break; case ((Tmagic292524) 149): { genrepr_555339_839829468(p0, e0, d0); } break; case ((Tmagic292524) 259): { gengettypeinfo_555383_839829468(p0, e0, d0); } break; case ((Tmagic292524) 156): { genswap_555638_839829468(p0, e0, d0); } break; case ((Tmagic292524) 25): { { if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0))) goto LA14; unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_385)); } goto LA12; LA14: ; { unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_386)); } LA12: ; } break; case ((Tmagic292524) 26): case ((Tmagic292524) 27): { Ttype292840* underlying0; underlying0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 9439232); { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = !((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0)); if (LOC20) goto LA21; LOC20 = ((IL64(34084860461056) &((NU64)1<<((NU)((*underlying0).kind)&63U)))!=0); LA21: ; if (!LOC20) goto LA22; binarystmt_550501_839829468(p0, e0, d0, opr_557050_839829468[(op0)- 26]); } goto LA18; LA22: ; { Tloc292816 a0; Tloc292816 b0; Ttype292840* ranged0; Ropeobj178006* res0; NimStringDesc* LOC25; TY532811 LOC31; Ropeobj178006* LOC32; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); ranged0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 8390656); LOC25 = (NimStringDesc*)0; { if (!((*underlying0).kind == ((Ttypekind292244) 35))) goto LA28; LOC25 = copyString(fun64_557055_839829468[(op0)- 26]); } goto LA26; LA28: ; { LOC25 = copyString(fun_557060_839829468[(op0)- 26]); } LA26: ; res0 = binaryarithoverflowraw_551235_839829468(p0, ranged0, (&a0), (&b0), LOC25); memset((void*)LOC31, 0, sizeof(LOC31)); LOC31[0] = 
gettypedesc_535673_839829468((*p0).module, ranged0); LOC31[1] = res0; LOC32 = (Ropeobj178006*)0; LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_370), LOC31, 2); putintodest_550468_839829468(p0, (&a0), ranged0, LOC32, ((Tstorageloc292812) 0)); } LA18: ; } break; case ((Tmagic292524) 138): { genstrconcat_554452_839829468(p0, e0, d0); } break; case ((Tmagic292524) 144): { binarystmt_550501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_394)); } break; case ((Tmagic292524) 145): { genstrappend_554554_839829468(p0, e0, d0); } break; case ((Tmagic292524) 146): { genseqelemappend_554683_839829468(p0, e0, d0); } break; case ((Tmagic292524) 128): { genstrequals_556667_839829468(p0, e0, d0); } break; case ((Tmagic292524) 129): { binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_402)); } break; case ((Tmagic292524) 130): { binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_403)); } break; case ((Tmagic292524) 157): { genisnil_552620_839829468(p0, e0, d0); } break; case ((Tmagic292524) 120): { gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_406)); } break; case ((Tmagic292524) 121): { gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_407)); } break; case ((Tmagic292524) 119): { gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_408)); } break; case ((Tmagic292524) 118): { gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_409)); } break; case ((Tmagic292524) 122): { gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_410)); } break; case ((Tmagic292524) 123): { gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_411)); } break; case ((Tmagic292524) 124): { expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Tmagic292524) 125): { genrepr_555339_839829468(p0, e0, d0); } break; case ((Tmagic292524) 12): { genof_555331_839829468(p0, e0, d0); } break; case ((Tmagic292524) 29): { 
gennew_554782_839829468(p0, e0); } break; case ((Tmagic292524) 30): { gennewfinalize_555111_839829468(p0, e0); } break; case ((Tmagic292524) 31): { gennewseq_554824_839829468(p0, e0); } break; case ((Tmagic292524) 32): { gennewseqofcap_554836_839829468(p0, e0, d0); } break; case ((Tmagic292524) 9): { Ttype292840* t0; TY178507 LOC55; Ropeobj178006* LOC56; t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 256); memset((void*)LOC55, 0, sizeof(LOC55)); LOC55[0] = gettypedesc_535673_839829468((*p0).module, t0); LOC56 = (Ropeobj178006*)0; LOC56 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_428), LOC55, 1); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC56, ((Tstorageloc292812) 0)); } break; case ((Tmagic292524) 42): { gensomecast_556481_839829468(p0, e0, d0); } break; case ((Tmagic292524) 28): { genord_556475_839829468(p0, e0, d0); } break; case ((Tmagic292524) 35): case ((Tmagic292524) 8): case ((Tmagic292524) 34): case ((Tmagic292524) 36): case ((Tmagic292524) 33): { genarraylen_555415_839829468(p0, e0, d0, op0); } break; case ((Tmagic292524) 37): case ((Tmagic292524) 38): { { NIM_BOOL LOC63; LOC63 = (NIM_BOOL)0; LOC63 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC63) goto LA64; LOC63 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA64: ; if (!!(LOC63)) goto LA65; unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_440)); } goto LA61; LA65: ; { unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_441)); } LA61: ; } break; case ((Tmagic292524) 43): { unarystmt_550527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_443)); } break; case ((Tmagic292524) 44): { unarystmt_550527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_444)); } break; case ((Tmagic292524) 151): { gensetlengthstr_555632_839829468(p0, e0, d0); } break; case ((Tmagic292524) 152): { gensetlengthseq_555500_839829468(p0, e0, d0); } break; case ((Tmagic292524) 39): case 
((Tmagic292524) 40): case ((Tmagic292524) 41): case ((Tmagic292524) 133): case ((Tmagic292524) 132): case ((Tmagic292524) 131): case ((Tmagic292524) 134): case ((Tmagic292524) 135): case ((Tmagic292524) 136): case ((Tmagic292524) 148): { gensetop_556419_839829468(p0, e0, d0, op0); } break; case ((Tmagic292524) 161): case ((Tmagic292524) 162): case ((Tmagic292524) 159): case ((Tmagic292524) 160): case ((Tmagic292524) 150): case ((Tmagic292524) 163): { Tsym292834* opr0; opr0 = (*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { NimStringDesc* LOC78; Ropeobj178006* LOC79; if (!!((((*opr0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0))) goto LA76; LOC78 = (NimStringDesc*)0; LOC78 = HEX24_178856_2381377266((*opr0).loc.r); LOC79 = (Ropeobj178006*)0; LOC79 = cgsym_532403_839829468((*p0).module, LOC78); } LA76: ; gencall_543632_839829468(p0, e0, d0); } break; case ((Tmagic292524) 164): { genreset_554731_839829468(p0, e0); } break; case ((Tmagic292524) 17): { Tnode292802* LOC82; Tnode292802* LOC83; LOC82 = (Tnode292802*)0; LOC82 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1)); LOC83 = (Tnode292802*)0; LOC83 = skipconv_328882_3876443242(LOC82); genecho_554369_839829468(p0, LOC83); } break; case ((Tmagic292524) 158): { genarrtoseq_555046_839829468(p0, e0, d0); } break; case ((Tmagic292524) 223) ... ((Tmagic292524) 257): case ((Tmagic292524) 19) ... 
((Tmagic292524) 24): { localerror_196080_155036129((*e0).info, ((Tmsgkind191002) 229), (*(*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s); } break; case ((Tmagic292524) 208): { Tnode292802* n0; n0 = wrapprocforspawn_435501_2218250499((*(*p0).module).module, e0, (*e0).typ, NIM_NIL, NIM_NIL); expr_539248_839829468(p0, n0, d0); } break; case ((Tmagic292524) 155): { Tnode292802* n0; n0 = liftparallel_478822_1773027539((*(*p0).module).module, e0); expr_539248_839829468(p0, n0, d0); } break; case ((Tmagic292524) 209): { Tloc292816 a0; Tloc292816 b0; Tnode292802* x0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); { Tnode292802* LOC91; Tnode292802* LOC94; LOC91 = (Tnode292802*)0; LOC91 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1)); if (!((*LOC91).kind == ((Tnodekind292020) 63) || (*LOC91).kind == ((Tnodekind292020) 64))) goto LA92; LOC94 = (Tnode292802*)0; LOC94 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1)); x0 = HEX5BHEX5D_293238_850551059(LOC94, ((NI) 0)); } goto LA89; LA92: ; { x0 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1)); } LA89: ; initlocexpr_539283_839829468(p0, x0, (&a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); gendeepcopy_550374_839829468(p0, (&a0), (&b0)); } break; case ((Tmagic292524) 140): case ((Tmagic292524) 94): { gencall_543632_839829468(p0, e0, d0); } break; default: { NimStringDesc* LOC98; LOC98 = (NimStringDesc*)0; LOC98 = rawNewString(reprEnum((NI)op0, (&NTI292524))->Sup.len + 14); appendString(LOC98, ((NimStringDesc*) &T839829468_523)); appendString(LOC98, reprEnum((NI)op0, (&NTI292524))); internalerror_196100_155036129((*e0).info, LOC98); } break; } } N_NIMCALL(Ropeobj178006*, gensetnode_549664_839829468)(Tcproc529021* p0, Tnode292802* n0) { Ropeobj178006* result0; Tbitset339004* cs0; NI size0; NI64 LOC1; result0 = (Ropeobj178006*)0; cs0 = (Tbitset339004*)0; LOC1 = (NI64)0; LOC1 = getsize_320135_3876443242((*n0).typ); size0 = ((NI) (LOC1)); tobitset_340001_452470228(n0, 
(&cs0)); { NI id0; Ropeobj178006* LOC6; if (!(((NI) 8) < size0)) goto LA4; id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC6 = (Ropeobj178006*)0; LOC6 = rope_178401_2381377266(((NI64) (id0))); result0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC6); { TY535238 LOC11; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA9; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = gettypedesc_535673_839829468((*p0).module, (*n0).typ); LOC11[1] = result0; LOC11[2] = genrawsetdata_549629_839829468(cs0, size0); addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC11, 3); } LA9: ; } goto LA2; LA4: ; { result0 = genrawsetdata_549629_839829468(cs0, size0); } LA2: ; return result0; } N_NIMCALL(void, gensetconstr_557496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Tloc292816 a0; Tloc292816 b0; Tloc292816 idx0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&idx0), 0, sizeof(idx0)); { Ropeobj178006* LOC5; if (!(((*e0).flags &(1U<<((NU)(((Tnodeflag292427) 4))&15U)))!=0)) goto LA3; LOC5 = (Ropeobj178006*)0; LOC5 = gensetnode_549664_839829468(p0, e0); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC5, ((Tstorageloc292812) 0)); } goto LA1; LA3: ; { { if (!((*d0).k == ((Tlockind292808) 0))) goto LA9; gettemp_537032_839829468(p0, (*e0).typ, d0, NIM_FALSE); } LA9: ; { NI64 LOC13; TY178507 LOC16; LOC13 = (NI64)0; LOC13 = getsize_320135_3876443242((*e0).typ); if (!(IL64(8) < LOC13)) goto LA14; usestringh_532345_839829468((*p0).module); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_538188_839829468((&(*d0))); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_525), LOC16, 1); { NI i_557537_839829468; NI HEX3Atmp_557603_839829468; NI LOC18; NI res_557606_839829468; i_557537_839829468 = (NI)0; HEX3Atmp_557603_839829468 
= (NI)0; LOC18 = (NI)0; LOC18 = sonslen_295351_850551059(e0); HEX3Atmp_557603_839829468 = (NI)(LOC18 - ((NI) 1)); res_557606_839829468 = ((NI) 0); { while (1) { if (!(res_557606_839829468 <= HEX3Atmp_557603_839829468)) goto LA20; i_557537_839829468 = res_557606_839829468; { Ttype292840* LOC25; TY535235 LOC26; if (!((*(*e0).kindU.S6.sons->data[i_557537_839829468]).kind == ((Tnodekind292020) 44))) goto LA23; LOC25 = (Ttype292840*)0; LOC25 = getsystype_338150_3937434831(((Ttypekind292244) 31)); gettemp_537032_839829468(p0, LOC25, (&idx0), NIM_FALSE); initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557537_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0)); initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557537_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0)); memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = rdloc_538188_839829468((&idx0)); LOC26[1] = rdloc_538188_839829468((&(*d0))); LOC26[2] = rdsetelemloc_555662_839829468((&a0), (*e0).typ); LOC26[3] = rdsetelemloc_555662_839829468((&b0), (*e0).typ); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_526), LOC26, 4); } goto LA21; LA23: ; { TY532811 LOC28; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[i_557537_839829468], (&a0)); memset((void*)LOC28, 0, sizeof(LOC28)); LOC28[0] = rdloc_538188_839829468((&(*d0))); LOC28[1] = rdsetelemloc_555662_839829468((&a0), (*e0).typ); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_527), LOC28, 2); } LA21: ; res_557606_839829468 += ((NI) 1); } LA20: ; } } } goto LA11; LA14: ; { NimStringDesc* ts0; NimStringDesc* LOC30; NI64 LOC31; NimStringDesc* LOC32; TY178507 LOC33; LOC30 = (NimStringDesc*)0; LOC31 = (NI64)0; LOC31 = getsize_320135_3876443242((*e0).typ); LOC32 = (NimStringDesc*)0; LOC32 = nimInt64ToStr((NI64)(LOC31 * IL64(8))); LOC30 = rawNewString(LOC32->Sup.len + 2); appendString(LOC30, ((NimStringDesc*) &T839829468_45)); appendString(LOC30, LOC32); ts0 = LOC30; 
memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = rdloc_538188_839829468((&(*d0))); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_494), LOC33, 1); { NI i_557575_839829468; NI HEX3Atmp_557611_839829468; NI LOC35; NI res_557614_839829468; i_557575_839829468 = (NI)0; HEX3Atmp_557611_839829468 = (NI)0; LOC35 = (NI)0; LOC35 = sonslen_295351_850551059(e0); HEX3Atmp_557611_839829468 = (NI)(LOC35 - ((NI) 1)); res_557614_839829468 = ((NI) 0); { while (1) { if (!(res_557614_839829468 <= HEX3Atmp_557611_839829468)) goto LA37; i_557575_839829468 = res_557614_839829468; { Ttype292840* LOC42; NimStringDesc* LOC43; TY535235 LOC44; if (!((*(*e0).kindU.S6.sons->data[i_557575_839829468]).kind == ((Tnodekind292020) 44))) goto LA40; LOC42 = (Ttype292840*)0; LOC42 = getsystype_338150_3937434831(((Ttypekind292244) 31)); gettemp_537032_839829468(p0, LOC42, (&idx0), NIM_FALSE); initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557575_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0)); initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557575_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0)); LOC43 = (NimStringDesc*)0; LOC43 = rawNewString(ts0->Sup.len + ts0->Sup.len + 68); appendString(LOC43, ((NimStringDesc*) &T839829468_528)); appendString(LOC43, ts0); appendString(LOC43, ((NimStringDesc*) &T839829468_529)); appendString(LOC43, ts0); appendString(LOC43, ((NimStringDesc*) &T839829468_454)); memset((void*)LOC44, 0, sizeof(LOC44)); LOC44[0] = rdloc_538188_839829468((&idx0)); LOC44[1] = rdloc_538188_839829468((&(*d0))); LOC44[2] = rdsetelemloc_555662_839829468((&a0), (*e0).typ); LOC44[3] = rdsetelemloc_555662_839829468((&b0), (*e0).typ); linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC43, LOC44, 4); } goto LA38; LA40: ; { NimStringDesc* LOC46; TY532811 LOC47; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[i_557575_839829468], (&a0)); LOC46 = (NimStringDesc*)0; LOC46 = rawNewString(ts0->Sup.len + ts0->Sup.len 
+ 36); appendString(LOC46, ((NimStringDesc*) &T839829468_530)); appendString(LOC46, ts0); appendString(LOC46, ((NimStringDesc*) &T839829468_531)); appendString(LOC46, ts0); appendString(LOC46, ((NimStringDesc*) &T839829468_454)); memset((void*)LOC47, 0, sizeof(LOC47)); LOC47[0] = rdloc_538188_839829468((&(*d0))); LOC47[1] = rdsetelemloc_555662_839829468((&a0), (*e0).typ); linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC46, LOC47, 2); } LA38: ; res_557614_839829468 += ((NI) 1); } LA37: ; } } } LA11: ; } LA1: ; } N_NIMCALL(void, exprcomplexconst_558684_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { Ttype292840* t0; Ropeobj178006* LOC1; NI id0; Ropeobj178006* tmp0; Ropeobj178006* LOC2; t0 = getuniquetype_528640_2036603609((*n0).typ); LOC1 = (Ropeobj178006*)0; LOC1 = gettypedesc_535673_839829468((*p0).module, t0); id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC2 = (Ropeobj178006*)0; LOC2 = rope_178401_2381377266(((NI64) (id0))); tmp0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC2); { TY535238 LOC7; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA5; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = gettypedesc_535673_839829468((*p0).module, t0); LOC7[1] = tmp0; LOC7[2] = genconstexpr_554849_839829468(p0, n0); addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC7, 3); } LA5: ; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA10; fillloc_532282_839829468(d0, ((Tlockind292808) 8), t0, tmp0, ((Tstorageloc292812) 1)); } goto LA8; LA10: ; { putdataintodest_550436_839829468(p0, d0, t0, tmp0); { if (!!(((285212672 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0))) goto LA15; (*d0).s = ((Tstorageloc292812) 1); } LA15: ; } LA8: ; } N_NIMCALL(NIM_BOOL, handleconstexpr_554853_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { NIM_BOOL 
LOC3; NIM_BOOL LOC4; NI LOC6; Ttype292840* t0; Ropeobj178006* LOC10; NI id0; Ropeobj178006* LOC11; Ropeobj178006* LOC12; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = ((*d0).k == ((Tlockind292808) 0)); if (!(LOC4)) goto LA5; LOC6 = (NI)0; LOC6 = len_293081_850551059(n0); LOC4 = (((NI) (((*n0).kind == ((Tnodekind292020) 38)))) < LOC6); LA5: ; LOC3 = LOC4; if (!(LOC3)) goto LA7; LOC3 = isdeepconstexpr_318566_2616423590(n0); LA7: ; if (!LOC3) goto LA8; t0 = getuniquetype_528640_2036603609((*n0).typ); LOC10 = (Ropeobj178006*)0; LOC10 = gettypedesc_535673_839829468((*p0).module, t0); id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC11 = (Ropeobj178006*)0; LOC11 = rope_178401_2381377266(((NI64) (id0))); LOC12 = (Ropeobj178006*)0; LOC12 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC11); fillloc_532282_839829468(d0, ((Tlockind292808) 8), t0, LOC12, ((Tstorageloc292812) 1)); { TY535238 LOC17; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA15; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_535673_839829468((*p0).module, t0); LOC17[1] = (*d0).r; LOC17[2] = genconstexpr_554849_839829468(p0, n0); addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3); } LA15: ; result0 = NIM_TRUE; } goto LA1; LA8: ; { result0 = NIM_FALSE; } LA1: ; return result0; } N_NIMCALL(void, genarrayconstr_558207_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { Tloc292816 arr0; memset((void*)(&arr0), 0, sizeof(arr0)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = handleconstexpr_554853_839829468(p0, n0, d0); if (!!(LOC3)) goto LA4; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA8; gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA8: ; { NI i_558234_839829468; NI HEX3Atmp_558242_839829468; NI LOC11; NI res_558245_839829468; i_558234_839829468 = (NI)0; HEX3Atmp_558242_839829468 = (NI)0; 
LOC11 = (NI)0; LOC11 = sonslen_295351_850551059(n0); HEX3Atmp_558242_839829468 = (NI)(LOC11 - ((NI) 1)); res_558245_839829468 = ((NI) 0); { while (1) { Ttype292840* LOC14; Ttype292840* LOC15; TY532811 LOC16; if (!(res_558245_839829468 <= HEX3Atmp_558242_839829468)) goto LA13; i_558234_839829468 = res_558245_839829468; LOC14 = (Ttype292840*)0; LOC14 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256)); LOC15 = (Ttype292840*)0; LOC15 = elemtype_320394_3876443242(LOC14); initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC15, (*d0).s); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_538188_839829468((&(*d0))); LOC16[1] = intliteral_539270_839829468(((NI64) (i_558234_839829468))); arr0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_138), LOC16, 2); expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[i_558234_839829468], (&arr0)); res_558245_839829468 += ((NI) 1); } LA13: ; } } } LA4: ; } N_NIMCALL(void, gentupleconstr_557618_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { Tloc292816 rec0; memset((void*)(&rec0), 0, sizeof(rec0)); { NIM_BOOL LOC3; Ttype292840* t0; Ropeobj178006* LOC6; LOC3 = (NIM_BOOL)0; LOC3 = handleconstexpr_554853_839829468(p0, n0, d0); if (!!(LOC3)) goto LA4; t0 = getuniquetype_528640_2036603609((*n0).typ); LOC6 = (Ropeobj178006*)0; LOC6 = gettypedesc_535673_839829468((*p0).module, t0); { if (!((*d0).k == ((Tlockind292808) 0))) goto LA9; gettemp_537032_839829468(p0, t0, d0, NIM_FALSE); } LA9: ; { NI i_557646_839829468; NI HEX3Atmp_557803_839829468; NI LOC12; NI res_557806_839829468; i_557646_839829468 = (NI)0; HEX3Atmp_557803_839829468 = (NI)0; LOC12 = (NI)0; LOC12 = sonslen_295351_850551059(n0); HEX3Atmp_557803_839829468 = (NI)(LOC12 - ((NI) 1)); res_557806_839829468 = ((NI) 0); { while (1) { Tnode292802* it0; TY532811 LOC19; if (!(res_557806_839829468 <= HEX3Atmp_557803_839829468)) goto LA14; i_557646_839829468 = res_557806_839829468; it0 = (*n0).kindU.S6.sons->data[i_557646_839829468]; { if 
(!((*it0).kind == ((Tnodekind292020) 34))) goto LA17; it0 = (*it0).kindU.S6.sons->data[((NI) 1)]; } LA17: ; initloc_532273_839829468((&rec0), ((Tlockind292808) 6), (*it0).typ, (*d0).s); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_538188_839829468((&(*d0))); LOC19[1] = rope_178401_2381377266(((NI64) (i_557646_839829468))); rec0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_185), LOC19, 2); expr_539248_839829468(p0, it0, (&rec0)); res_557806_839829468 += ((NI) 1); } LA14: ; } } } LA4: ; } N_NIMCALL(Tsym292834*, lookupfieldagain_553154_839829468)(Tcproc529021* p0, Ttype292840* ty_553157_839829468, Tsym292834* field0, Ropeobj178006** r0) { Tsym292834* result0; Ttype292840* ty0; result0 = (Tsym292834*)0; ty0 = ty_553157_839829468; { while (1) { if (!!((ty0 == NIM_NIL))) goto LA2; ty0 = skiptypes_296099_850551059(ty0, IL64(211106247215360)); result0 = lookupinrecord_299119_2984716966((*ty0).n, (*field0).name); { if (!!((result0 == NIM_NIL))) goto LA5; goto LA1; } LA5: ; { NIM_BOOL LOC9; LOC9 = (NIM_BOOL)0; LOC9 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC9) goto LA10; LOC9 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA10: ; if (!!(LOC9)) goto LA11; add_178487_2381377266(r0, ((NimStringDesc*) &T839829468_153)); } LA11: ; ty0 = getuniquetype_528640_2036603609((*ty0).sons->data[((NI) 0)]); } LA2: ; } LA1: ; { if (!(result0 == NIM_NIL)) goto LA15; internalerror_196100_155036129((*field0).info, ((NimStringDesc*) &T839829468_532)); } LA15: ; return result0; } N_NIMCALL(void, genfieldcheck_553504_839829468)(Tcproc529021* p0, Tnode292802* e0, Ropeobj178006* obj0, Tsym292834* field0, Ttype292840* origty0) { Tloc292816 test0; Tloc292816 u0; Tloc292816 v0; memset((void*)(&test0), 0, sizeof(test0)); memset((void*)(&u0), 0, sizeof(u0)); memset((void*)(&v0), 0, sizeof(v0)); { NI i_553525_839829468; NI HEX3Atmp_554039_839829468; NI LOC2; NI res_554042_839829468; i_553525_839829468 = (NI)0; 
HEX3Atmp_554039_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_295351_850551059(e0); HEX3Atmp_554039_839829468 = (NI)(LOC2 - ((NI) 1)); res_554042_839829468 = ((NI) 1); { while (1) { Tnode292802* it0; Tsym292834* op0; Tnode292802* disc0; Ropeobj178006* o0; Tsym292834* d0; NI id0; Tnode292802* LOC9; Ropeobj178006* strlit0; if (!(res_554042_839829468 <= HEX3Atmp_554039_839829468)) goto LA4; i_553525_839829468 = res_554042_839829468; it0 = (*e0).kindU.S6.sons->data[i_553525_839829468]; op0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { if (!((*op0).magic == ((Tmagic292524) 99))) goto LA7; it0 = (*it0).kindU.S6.sons->data[((NI) 1)]; } LA7: ; disc0 = skipconv_328882_3876443242((*it0).kindU.S6.sons->data[((NI) 2)]); initloc_532273_839829468((&test0), ((Tlockind292808) 0), (*it0).typ, ((Tstorageloc292812) 2)); initlocexpr_539283_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&u0)); o0 = obj0; d0 = lookupfieldagain_553154_839829468(p0, origty0, (*disc0).kindU.S4.sym, &o0); initloc_532273_839829468((&v0), ((Tlockind292808) 6), (*d0).typ, ((Tstorageloc292812) 0)); v0.r = o0; add_178487_2381377266(&v0.r, ((NimStringDesc*) &T839829468_257)); add_178482_2381377266(&v0.r, (*d0).loc.r); geninexpraux_553496_839829468(p0, it0, (&u0), (&v0), (&test0)); LOC9 = (Tnode292802*)0; LOC9 = newstrnode_293677_850551059(((Tnodekind292020) 20), (*(*field0).name).s); id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), LOC9, ((NI) ((*(*p0).module).labels))); { if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA12; strlit0 = getstrlit_549468_839829468((*p0).module, (*(*field0).name).s); } goto LA10; LA12: ; { Ropeobj178006* LOC15; LOC15 = (Ropeobj178006*)0; LOC15 = rope_178401_2381377266(((NI64) (id0))); strlit0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC15); } LA10: ; { TY532811 LOC20; if (!((*op0).magic == ((Tmagic292524) 99))) goto LA18; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = rdloc_538188_839829468((&test0)); LOC20[1] = strlit0; 
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_534), LOC20, 2); } goto LA16; LA18: ; { TY532811 LOC22; memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = rdloc_538188_839829468((&test0)); LOC22[1] = strlit0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_535), LOC22, 2); } LA16: ; res_554042_839829468 += ((NI) 1); } LA4: ; } } } N_NIMCALL(void, genobjconstr_554903_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Tloc292816 tmp0; Ttype292840* t0; NIM_BOOL isref0; Ropeobj178006* r0; Ropeobj178006* LOC13; Ttype292840* ty0; { { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = handleconstexpr_554853_839829468(p0, e0, d0); if (!LOC3) goto LA4; goto BeforeRet; } LA4: ; memset((void*)(&tmp0), 0, sizeof(tmp0)); t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106232576256)); gettemp_537032_839829468(p0, t0, (&tmp0), NIM_FALSE); isref0 = ((*t0).kind == ((Ttypekind292244) 22)); r0 = rdloc_538188_839829468((&tmp0)); { Ttype292840* LOC10; TY178507 LOC11; if (!isref0) goto LA8; rawgennew_554741_839829468(p0, (&tmp0), NIM_NIL); LOC10 = (Ttype292840*)0; LOC10 = lastson_295377_850551059(t0); t0 = skiptypes_296099_850551059(LOC10, IL64(211106232576256)); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = r0; r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC11, 1); gcusage_554439_839829468(e0); } goto LA6; LA8: ; { constructloc_538388_839829468(p0, (&tmp0), NIM_FALSE); } LA6: ; LOC13 = (Ropeobj178006*)0; LOC13 = gettypedesc_535673_839829468((*p0).module, t0); ty0 = getuniquetype_528640_2036603609(t0); { NI i_554944_839829468; NI HEX3Atmp_554997_839829468; NI LOC15; NI res_555000_839829468; i_554944_839829468 = (NI)0; HEX3Atmp_554997_839829468 = (NI)0; LOC15 = (NI)0; LOC15 = len_293081_850551059(e0); HEX3Atmp_554997_839829468 = (LOC15 - 1); res_555000_839829468 = ((NI) 1); { while (1) { Tnode292802* it0; Tloc292816 tmp20; Tsym292834* field0; if (!(res_555000_839829468 <= 
HEX3Atmp_554997_839829468)) goto LA17; i_554944_839829468 = res_555000_839829468; it0 = (*e0).kindU.S6.sons->data[i_554944_839829468]; memset((void*)(&tmp20), 0, sizeof(tmp20)); tmp20.r = r0; field0 = lookupfieldagain_553154_839829468(p0, ty0, (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym, &tmp20.r); { if (!((*field0).loc.r == NIM_NIL)) goto LA20; internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_533)); } LA20: ; { NIM_BOOL LOC24; NI LOC25; LOC24 = (NIM_BOOL)0; LOC25 = (NI)0; LOC25 = len_293081_850551059(it0); LOC24 = (LOC25 == ((NI) 3)); if (!(LOC24)) goto LA26; LOC24 = (((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0); LA26: ; if (!LOC24) goto LA27; genfieldcheck_553504_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 2)], r0, field0, ty0); } LA27: ; add_178487_2381377266(&tmp20.r, ((NimStringDesc*) &T839829468_257)); add_178482_2381377266(&tmp20.r, (*field0).loc.r); tmp20.k = ((Tlockind292808) 1); tmp20.t = (*field0).loc.t; { if (!isref0) goto LA31; tmp20.s = ((Tstorageloc292812) 3); } goto LA29; LA31: ; { tmp20.s = ((Tstorageloc292812) 2); } LA29: ; expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&tmp20)); res_555000_839829468 += ((NI) 1); } LA17: ; } } { if (!((*d0).k == ((Tlockind292808) 0))) goto LA36; genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816)); } goto LA34; LA36: ; { genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0); } LA34: ; }BeforeRet: ; } N_NIMCALL(void, gencast_556538_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Ttype292840* destt0; Ttype292840* srct0; destt0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832)); srct0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832)); { NIM_BOOL LOC3; Ropeobj178006* lbl0; Tloc292816 tmp0; TY178507 LOC7; TY535238 LOC8; TY178507 LOC9; Ropeobj178006* LOC10; LOC3 = (NIM_BOOL)0; LOC3 = ((IL64(1030792609808) &((NU64)1<<((NU)((*destt0).kind)&63U)))!=0); if (LOC3) goto LA4; 
LOC3 = ((IL64(1030792609808) &((NU64)1<<((NU)((*srct0).kind)&63U)))!=0); LA4: ; if (!LOC3) goto LA5; (*p0).labels += ((NI) 1); lbl0 = rope_178401_2381377266(((NI64) ((*p0).labels))); memset((void*)(&tmp0), 0, sizeof(tmp0)); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = lbl0; tmp0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_536), LOC7, 1); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = gettypedesc_535673_839829468((*p0).module, srct0); LOC8[1] = gettypedesc_535673_839829468((*p0).module, destt0); LOC8[2] = lbl0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_537), LOC8, 3); tmp0.k = ((Tlockind292808) 6); tmp0.t = srct0; tmp0.s = ((Tstorageloc292812) 2); tmp0.flags = 0; expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0)); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = lbl0; LOC10 = (Ropeobj178006*)0; LOC10 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_538), LOC9, 1); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC10, tmp0.s); } goto LA1; LA5: ; { gensomecast_556481_839829468(p0, e0, d0); } LA1: ; } N_NIMCALL(void, genconv_556633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Ttype292840* desttype0; desttype0 = skiptypes_296099_850551059((*e0).typ, 8390656); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = comparetypes_326214_3876443242(desttype0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, ((Tdistinctcompare324427) 1), 0); if (!LOC3) goto LA4; expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0); } goto LA1; LA4: ; { gensomecast_556481_839829468(p0, e0, d0); } LA1: ; } static N_INLINE(NIM_BOOL, iscppref_552807_839829468)(Tcproc529021* p0, Ttype292840* typ0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC2; NIM_BOOL LOC3; Ttype292840* LOC6; Ttype292840* LOC8; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC3) goto LA4; LOC3 = 
(((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA4: ; LOC2 = LOC3; if (!(LOC2)) goto LA5; LOC6 = (Ttype292840*)0; LOC6 = skiptypes_296099_850551059(typ0, IL64(211106232576256)); LOC2 = ((*LOC6).kind == ((Ttypekind292244) 23)); LA5: ; LOC1 = LOC2; if (!(LOC1)) goto LA7; LOC8 = (Ttype292840*)0; LOC8 = skiptypes_296099_850551059(typ0, IL64(211106232576256)); LOC1 = !((((*LOC8).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0)); LA7: ; result0 = LOC1; return result0; } N_NIMCALL(void, genaddr_553051_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { { Ttype292840* LOC3; Tloc292816 a0; Ropeobj178006* LOC6; LOC3 = (Ttype292840*)0; LOC3 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); if (!((6291456 &((NU64)1<<((NU)((*LOC3).kind)&63U)))!=0)) goto LA4; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC6 = (Ropeobj178006*)0; LOC6 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), a0.r); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC6, a0.s); } goto LA1; LA4: ; { NIM_BOOL LOC8; Tctypekind529007 LOC9; LOC8 = (NIM_BOOL)0; LOC9 = (Tctypekind529007)0; LOC9 = maptype_533394_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ); LOC8 = (LOC9 == ((Tctypekind529007) 17)); if (LOC8) goto LA10; LOC8 = iscppref_552807_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 0)]).typ); LA10: ; if (!LOC8) goto LA11; expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0); } goto LA1; LA11: ; { Tloc292816 a0; Ropeobj178006* LOC14; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC14 = (Ropeobj178006*)0; LOC14 = addrloc_538204_839829468((&a0)); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC14, a0.s); } LA1: ; } N_NIMCALL(void, genarrayelem_554093_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) { Tloc292816 
a0; Tloc292816 b0; Ttype292840* ty0; Ttype292840* LOC1; Ropeobj178006* first0; NI64 LOC2; Ttype292840* LOC47; Ttype292840* LOC48; TY535238 LOC49; Ropeobj178006* LOC50; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, x0, (&a0)); initlocexpr_539283_839829468(p0, y0, (&b0)); LOC1 = (Ttype292840*)0; LOC1 = skiptypes_296099_850551059(a0.t, IL64(211106242013440)); ty0 = skiptypes_296099_850551059(LOC1, IL64(211106247256320)); LOC2 = (NI64)0; LOC2 = firstord_320001_3876443242(ty0); first0 = intliteral_539270_839829468(LOC2); { NIM_BOOL LOC5; LOC5 = (NIM_BOOL)0; LOC5 = (((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0); if (!(LOC5)) goto LA6; LOC5 = !((((*ty0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)); LA6: ; if (!LOC5) goto LA7; { NIM_BOOL LOC11; LOC11 = (NIM_BOOL)0; LOC11 = isconstexpr_318510_2616423590(y0); if (!!(LOC11)) goto LA12; { NI64 LOC16; LOC16 = (NI64)0; LOC16 = firstord_320001_3876443242(ty0); if (!(LOC16 == IL64(0))) goto LA17; { NIM_BOOL LOC21; NI64 LOC22; NI64 LOC23; NI64 LOC25; NI64 LOC26; TY532811 LOC29; NI64 LOC30; LOC21 = (NIM_BOOL)0; LOC22 = (NI64)0; LOC22 = firstord_320001_3876443242(b0.t); LOC23 = (NI64)0; LOC23 = firstord_320001_3876443242(ty0); LOC21 = (LOC22 < LOC23); if (LOC21) goto LA24; LOC25 = (NI64)0; LOC25 = lastord_320004_3876443242(ty0); LOC26 = (NI64)0; LOC26 = lastord_320004_3876443242(b0.t); LOC21 = (LOC25 < LOC26); LA24: ; if (!LOC21) goto LA27; memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = rdcharloc_538227_839829468((&b0)); LOC30 = (NI64)0; LOC30 = lastord_320004_3876443242(ty0); LOC29[1] = intliteral_539270_839829468(LOC30); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_539), LOC29, 2); } LA27: ; } goto LA14; LA17: ; { TY535238 LOC32; NI64 LOC33; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = rdcharloc_538227_839829468((&b0)); LOC32[1] = first0; LOC33 = (NI64)0; LOC33 = lastord_320004_3876443242(ty0); 
LOC32[2] = intliteral_539270_839829468(LOC33); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_540), LOC32, 3); } LA14: ; } goto LA9; LA12: ; { NI64 idx0; idx0 = getordvalue_320129_3876443242(y0); { NIM_BOOL LOC37; NI64 LOC38; NI64 LOC40; LOC37 = (NIM_BOOL)0; LOC38 = (NI64)0; LOC38 = firstord_320001_3876443242(ty0); LOC37 = (idx0 < LOC38); if (LOC37) goto LA39; LOC40 = (NI64)0; LOC40 = lastord_320004_3876443242(ty0); LOC37 = (LOC40 < idx0); LA39: ; if (!LOC37) goto LA41; localerror_196080_155036129((*x0).info, ((Tmsgkind191002) 86), ((NimStringDesc*) &T839829468_490)); } LA41: ; } LA9: ; } LA7: ; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA45; (*d0).s = a0.s; } LA45: ; LOC47 = (Ttype292840*)0; LOC47 = skiptypes_296099_850551059(ty0, IL64(211106240964864)); LOC48 = (Ttype292840*)0; LOC48 = elemtype_320394_3876443242(LOC47); memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = rdloc_538188_839829468((&a0)); LOC49[1] = rdcharloc_538227_839829468((&b0)); LOC49[2] = first0; LOC50 = (Ropeobj178006*)0; LOC50 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_541), LOC49, 3); putintodest_550468_839829468(p0, d0, LOC48, LOC50, a0.s); } N_NIMCALL(void, genopenarrayelem_554169_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) { Tloc292816 a0; Tloc292816 b0; Ttype292840* LOC10; Ttype292840* LOC11; TY532811 LOC12; Ropeobj178006* LOC13; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, x0, (&a0)); initlocexpr_539283_839829468(p0, y0, (&b0)); { TY532811 LOC5; if (!(((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_538188_839829468((&b0)); LOC5[1] = rdloc_538188_839829468((&a0)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_542), LOC5, 2); } LA3: ; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA8; (*d0).s = a0.s; } LA8: ; LOC10 
= (Ttype292840*)0; LOC10 = skiptypes_296099_850551059(a0.t, IL64(211106240964864)); LOC11 = (Ttype292840*)0; LOC11 = elemtype_320394_3876443242(LOC10); memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_538188_839829468((&a0)); LOC12[1] = rdcharloc_538227_839829468((&b0)); LOC13 = (Ropeobj178006*)0; LOC13 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC12, 2); putintodest_550468_839829468(p0, d0, LOC11, LOC13, a0.s); } N_NIMCALL(void, genseqelem_554205_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) { Tloc292816 a0; Tloc292816 b0; Ttype292840* ty0; Ttype292840* LOC27; Ttype292840* LOC28; TY532811 LOC29; Ropeobj178006* LOC30; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, x0, (&a0)); initlocexpr_539283_839829468(p0, y0, (&b0)); ty0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440)); { Ttype292840* LOC5; if (!((6291456 &((NU64)1<<((NU)((*ty0).kind)&63U)))!=0)) goto LA3; LOC5 = (Ttype292840*)0; LOC5 = lastson_295377_850551059(ty0); ty0 = skiptypes_296099_850551059(LOC5, IL64(211106242013440)); } LA3: ; { if (!(((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0)) goto LA8; { TY535238 LOC14; if (!((*ty0).kind == ((Ttypekind292244) 28))) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_538188_839829468((&b0)); LOC14[1] = rdloc_538188_839829468((&a0)); LOC14[2] = lenfield_539305_839829468(p0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_543), LOC14, 3); } goto LA10; LA12: ; { TY535238 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_538188_839829468((&b0)); LOC16[1] = rdloc_538188_839829468((&a0)); LOC16[2] = lenfield_539305_839829468(p0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_544), LOC16, 3); } LA10: ; } LA8: ; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA19; (*d0).s = ((Tstorageloc292812) 3); } LA19: ; { 
Ttype292840* LOC23; TY178507 LOC26; LOC23 = (Ttype292840*)0; LOC23 = skiptypes_296099_850551059(a0.t, IL64(211106240964864)); if (!((6291456 &((NU64)1<<((NU)((*LOC23).kind)&63U)))!=0)) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = a0.r; a0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC26, 1); } LA24: ; LOC27 = (Ttype292840*)0; LOC27 = skiptypes_296099_850551059(a0.t, IL64(211106240964864)); LOC28 = (Ttype292840*)0; LOC28 = elemtype_320394_3876443242(LOC27); memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = rdloc_538188_839829468((&a0)); LOC29[1] = rdcharloc_538227_839829468((&b0)); LOC30 = (Ropeobj178006*)0; LOC30 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC29, 2); putintodest_550468_839829468(p0, d0, LOC28, LOC30, a0.s); } N_NIMCALL(void, gencstringelem_554144_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) { Tloc292816 a0; Tloc292816 b0; Ttype292840* ty0; Ttype292840* LOC5; Ttype292840* LOC6; TY532811 LOC7; Ropeobj178006* LOC8; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, x0, (&a0)); initlocexpr_539283_839829468(p0, y0, (&b0)); ty0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440)); { if (!((*d0).k == ((Tlockind292808) 0))) goto LA3; (*d0).s = a0.s; } LA3: ; LOC5 = (Ttype292840*)0; LOC5 = skiptypes_296099_850551059(ty0, IL64(211106240964864)); LOC6 = (Ttype292840*)0; LOC6 = elemtype_320394_3876443242(LOC5); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_538188_839829468((&a0)); LOC7[1] = rdcharloc_538227_839829468((&b0)); LOC8 = (Ropeobj178006*)0; LOC8 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC7, 2); putintodest_550468_839829468(p0, d0, LOC6, LOC8, a0.s); } N_NIMCALL(void, gentupleelem_553124_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Tloc292816 a0; NI i0; Ropeobj178006* LOC5; Ttype292840* ty0; Ropeobj178006* r0; TY178507 LOC8; 
memset((void*)(&a0), 0, sizeof(a0)); i0 = (NI)0; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); { if (!((*d0).k == ((Tlockind292808) 0))) goto LA3; (*d0).s = a0.s; } LA3: ; LOC5 = (Ropeobj178006*)0; LOC5 = gettypedesc_535673_839829468((*p0).module, a0.t); ty0 = getuniquetype_528640_2036603609(a0.t); r0 = rdloc_538188_839829468((&a0)); switch ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind) { case ((Tnodekind292020) 6) ... ((Tnodekind292020) 15): { i0 = ((NI) ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval)); } break; default: { internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_545)); } break; } memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rope_178401_2381377266(((NI64) (i0))); addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC8, 1); putintodest_550468_839829468(p0, d0, (*ty0).sons->data[i0], r0, a0.s); } N_NIMCALL(void, genbracketexpr_554277_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { Ttype292840* ty0; ty0 = skiptypes_296099_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440)); { Ttype292840* LOC5; if (!((6291456 &((NU64)1<<((NU)((*ty0).kind)&63U)))!=0)) goto LA3; LOC5 = (Ttype292840*)0; LOC5 = lastson_295377_850551059(ty0); ty0 = skiptypes_296099_850551059(LOC5, IL64(211106242013440)); } LA3: ; switch ((*ty0).kind) { case ((Ttypekind292244) 16): case ((Ttypekind292244) 4): { genarrayelem_554093_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind292244) 27): case ((Ttypekind292244) 48): { genopenarrayelem_554169_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind292244) 24): case ((Ttypekind292244) 28): { genseqelem_554205_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind292244) 29): { gencstringelem_554144_839829468(p0, 
(*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind292244) 18): { gentupleelem_553124_839829468(p0, n0, d0); } break; default: { NimStringDesc* LOC12; LOC12 = (NimStringDesc*)0; LOC12 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 21); appendString(LOC12, ((NimStringDesc*) &T839829468_547)); appendString(LOC12, reprEnum((NI)(*ty0).kind, (&NTI292244))); appendChar(LOC12, 41); internalerror_196100_155036129((*n0).info, LOC12); } break; } } N_NIMCALL(void, genderef_543921_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NIM_BOOL enforcederef0) { Tctypekind529007 mt0; { mt0 = maptype_533394_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((393216 &(1U<<((NU)(mt0)&31U)))!=0); if (!(LOC3)) goto LA4; LOC3 = !(enforcederef0); LA4: ; if (!LOC3) goto LA5; expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0); { Ttype292840* LOC9; LOC9 = (Ttype292840*)0; LOC9 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); if (!((*LOC9).kind == ((Ttypekind292244) 22))) goto LA10; (*d0).s = ((Tstorageloc292812) 3); } LA10: ; } goto LA1; LA5: ; { Tloc292816 a0; Ttype292840* typ0; memset((void*)(&a0), 0, sizeof(a0)); typ0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { NIM_BOOL LOC15; NIM_BOOL LOC16; NIM_BOOL LOC17; NIM_BOOL LOC20; Tnode292802* LOC25; Tnode292802* LOC26; LOC15 = (NIM_BOOL)0; LOC16 = (NIM_BOOL)0; LOC17 = (NIM_BOOL)0; LOC17 = ((*typ0).kind == ((Ttypekind292244) 23)); if (!(LOC17)) goto LA18; LOC17 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0)); LA18: ; LOC16 = LOC17; if (!(LOC16)) goto LA19; LOC20 = (NIM_BOOL)0; LOC20 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC20) goto LA21; LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA21: ; LOC16 = LOC20; LA19: ; LOC15 
= LOC16; if (!(LOC15)) goto LA22; LOC15 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 64)); LA22: ; if (!LOC15) goto LA23; LOC25 = (Tnode292802*)0; LOC25 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0)); LOC26 = (Tnode292802*)0; LOC26 = HEX5BHEX5D_293238_850551059(LOC25, ((NI) 0)); initlocexprsingleuse_539289_839829468(p0, LOC26, d0); goto BeforeRet; } goto LA13; LA23: ; { initlocexprsingleuse_539289_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); } LA13: ; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA30; switch ((*typ0).kind) { case ((Ttypekind292244) 22): { (*d0).s = ((Tstorageloc292812) 3); } break; case ((Ttypekind292244) 23): { (*d0).s = ((Tstorageloc292812) 0); { NIM_BOOL LOC36; NIM_BOOL LOC37; NIM_BOOL LOC39; Ropeobj178006* LOC44; LOC36 = (NIM_BOOL)0; LOC37 = (NIM_BOOL)0; LOC37 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0)); if (!(LOC37)) goto LA38; LOC39 = (NIM_BOOL)0; LOC39 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC39) goto LA40; LOC39 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA40: ; LOC37 = LOC39; LA38: ; LOC36 = LOC37; if (!(LOC36)) goto LA41; LOC36 = ((*e0).kind == ((Tnodekind292020) 65)); LA41: ; if (!LOC36) goto LA42; LOC44 = (Ropeobj178006*)0; LOC44 = rdloc_538188_839829468((&a0)); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC44, a0.s); goto BeforeRet; } LA42: ; } break; case ((Ttypekind292244) 21): { (*d0).s = ((Tstorageloc292812) 0); } break; default: { NimStringDesc* LOC47; LOC47 = (NimStringDesc*)0; LOC47 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 9); appendString(LOC47, ((NimStringDesc*) &T839829468_548)); appendString(LOC47, reprEnum((NI)(*typ0).kind, (&NTI292244))); internalerror_196100_155036129((*e0).info, LOC47); } break; } } goto LA28; LA30: ; { NIM_BOOL LOC49; LOC49 = (NIM_BOOL)0; LOC49 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC49) goto LA50; LOC49 = 
(((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA50: ; if (!LOC49) goto LA51; { NIM_BOOL LOC55; NIM_BOOL LOC56; Ropeobj178006* LOC61; LOC55 = (NIM_BOOL)0; LOC56 = (NIM_BOOL)0; LOC56 = ((*typ0).kind == ((Ttypekind292244) 23)); if (!(LOC56)) goto LA57; LOC56 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0)); LA57: ; LOC55 = LOC56; if (!(LOC55)) goto LA58; LOC55 = ((*e0).kind == ((Tnodekind292020) 65)); LA58: ; if (!LOC55) goto LA59; LOC61 = (Ropeobj178006*)0; LOC61 = rdloc_538188_839829468((&a0)); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC61, a0.s); goto BeforeRet; } LA59: ; } goto LA28; LA51: ; LA28: ; { NIM_BOOL LOC64; Ropeobj178006* LOC68; LOC64 = (NIM_BOOL)0; LOC64 = enforcederef0; if (!(LOC64)) goto LA65; LOC64 = (mt0 == ((Tctypekind529007) 18)); LA65: ; if (!LOC64) goto LA66; LOC68 = (Ropeobj178006*)0; LOC68 = rdloc_538188_839829468((&a0)); putintodest_550468_839829468(p0, d0, (*a0.t).sons->data[((NI) 0)], LOC68, a0.s); } goto LA62; LA66: ; { TY178507 LOC70; Ropeobj178006* LOC71; memset((void*)LOC70, 0, sizeof(LOC70)); LOC70[0] = rdloc_538188_839829468((&a0)); LOC71 = (Ropeobj178006*)0; LOC71 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC70, 1); putintodest_550468_839829468(p0, d0, (*e0).typ, LOC71, a0.s); } LA62: ; } LA1: ; }BeforeRet: ; } N_NIMCALL(Ttype292840*, genrecordfieldaux_553096_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tloc292816* a0) { Ttype292840* result0; Ropeobj178006* LOC9; result0 = (Ttype292840*)0; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], a0); { if (!!(((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 3)))) goto LA3; internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_549)); } LA3: ; { if (!((*d0).k == ((Tlockind292808) 0))) goto LA7; (*d0).s = (*a0).s; } LA7: ; LOC9 = (Ropeobj178006*)0; LOC9 = gettypedesc_535673_839829468((*p0).module, (*a0).t); result0 = 
getuniquetype_528640_2036603609((*a0).t); return result0; } N_NIMCALL(void, genrecordfield_553448_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { Tloc292816 a0; Ttype292840* ty0; Ropeobj178006* r0; Tsym292834* f0; memset((void*)(&a0), 0, sizeof(a0)); ty0 = genrecordfieldaux_553096_839829468(p0, e0, d0, (&a0)); r0 = rdloc_538188_839829468((&a0)); f0 = (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; { TY178507 LOC5; if (!((*ty0).kind == ((Ttypekind292244) 18))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_178401_2381377266(((NI64) ((*f0).position))); addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC5, 1); putintodest_550468_839829468(p0, d0, (*f0).typ, r0, a0.s); } goto LA1; LA3: ; { Tsym292834* field0; TY178507 LOC11; field0 = lookupfieldagain_553154_839829468(p0, ty0, f0, &r0); { if (!((*field0).loc.r == NIM_NIL)) goto LA9; internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_550)); } LA9: ; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = (*field0).loc.r; addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_551), LOC11, 1); putintodest_550468_839829468(p0, d0, (*field0).typ, r0, a0.s); } LA1: ; } N_NIMCALL(void, gencheckedrecordfield_554046_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) { { Tloc292816 a0; Ttype292840* ty0; Ropeobj178006* r0; Tsym292834* f0; Tsym292834* field0; TY178507 LOC9; Ropeobj178006* LOC10; if (!(((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0)) goto LA3; memset((void*)(&a0), 0, sizeof(a0)); ty0 = genrecordfieldaux_553096_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0, (&a0)); r0 = rdloc_538188_839829468((&a0)); f0 = (*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; field0 = lookupfieldagain_553154_839829468(p0, ty0, f0, &r0); { if (!((*field0).loc.r == NIM_NIL)) goto LA7; internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_532)); } LA7: ; 
genfieldcheck_553504_839829468(p0, e0, r0, field0, ty0); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = (*field0).loc.r; LOC10 = (Ropeobj178006*)0; LOC10 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_551), LOC9, 1); add_178482_2381377266(&r0, LOC10); putintodest_550468_839829468(p0, d0, (*field0).typ, r0, a0.s); } goto LA1; LA3: ; { genrecordfield_553448_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0); } LA1: ; } N_NIMCALL(NI, startblock_543978_839829468)(Tcproc529021* p0, NimStringDesc* start0, Ropeobj178006** args0, NI args0Len0) { NI result0; result0 = (NI)0; linecg_532707_839829468(p0, ((Tcprocsection529011) 2), start0, args0, args0Len0); (*p0).labels += ((NI) 1); result0 = ((*p0).blocks ? (*p0).blocks->Sup.len : 0); (*p0).blocks = (TY529095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock529019), ((NI) ((NI)(result0 + ((NI) 1))))); (*p0).blocks->data[result0].id = ((NI) ((*p0).labels)); (*p0).blocks->data[result0].nestedtrystmts = ((NI16) (((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0))); (*p0).blocks->data[result0].nestedexceptstmts = ((NI16) ((*p0).inexceptblock)); return result0; } N_NIMCALL(Ropeobj178006*, blockbody_544025_839829468)(Tblock529019* b0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = (*b0).sections[(((Tcprocsection529011) 0))- 0]; { TY178507 LOC5; if (!(((NI16) 0) < (*b0).framelen)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_178401_2381377266(((NI64) ((*b0).framelen))); addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_554), LOC5, 1); } LA3: ; add_178482_2381377266(&result0, (*b0).sections[(((Tcprocsection529011) 1))- 0]); add_178482_2381377266(&result0, (*b0).sections[(((Tcprocsection529011) 2))- 0]); return result0; } N_NIMCALL(void, endblock_544035_839829468)(Tcproc529021* p0, Ropeobj178006* blockend0) { NI topblock0; Ropeobj178006* LOC1; topblock0 = (NI)(((*p0).blocks ? 
(*p0).blocks->Sup.len : 0) - ((NI) 1)); LOC1 = (Ropeobj178006*)0; LOC1 = blockbody_544025_839829468((&(*p0).blocks->data[topblock0])); add_178482_2381377266(&(*p0).blocks->data[(NI)(topblock0 - ((NI) 1))].sections[(((Tcprocsection529011) 2))- 0], LOC1); (*p0).blocks = (TY529095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock529019), ((NI) (topblock0))); line_532690_839829468(p0, ((Tcprocsection529011) 2), blockend0); } N_NIMCALL(void, endblock_544060_839829468)(Tcproc529021* p0) { NI topblock0; Ropeobj178006* blockend0; NI16 framelen0; topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1)); { TY178507 LOC5; if (!!(((*p0).blocks->data[topblock0].label == NIM_NIL))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = (*p0).blocks->data[topblock0].label; blockend0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_552), LOC5, 1); } goto LA1; LA3: ; { TY533289 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); blockend0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_160), LOC7, 0); } LA1: ; framelen0 = (*p0).blocks->data[topblock0].framelen; { TY178507 LOC12; if (!(((NI16) 0) < framelen0)) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rope_178401_2381377266(((NI64) (framelen0))); addf_179205_2381377266(&blockend0, ((NimStringDesc*) &T839829468_553), LOC12, 1); } LA10: ; endblock_544035_839829468(p0, blockend0); } N_NIMCALL(void, genblock_546083_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { NI oldbreakidx_546099_839829468; TY533289 LOC8; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_297441_850551059((*n0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind292808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA6: ; oldbreakidx_546099_839829468 = (*p0).breakidx; memset((void*)LOC8, 0, sizeof(LOC8)); (*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), 
LOC8, 0); { Tsym292834* sym0; if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA11; sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; (*sym0).loc.k = ((Tlockind292808) 10); (*sym0).position = (NI)((*p0).breakidx + ((NI) 1)); } LA11: ; expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], d0); endblock_544060_839829468(p0); (*p0).breakidx = oldbreakidx_546099_839829468; } N_NIMCALL(void, genstmtlistexpr_558402_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { NI length0; length0 = sonslen_295351_850551059(n0); { NI i_558420_839829468; NI HEX3Atmp_558424_839829468; NI res_558427_839829468; i_558420_839829468 = (NI)0; HEX3Atmp_558424_839829468 = (NI)0; HEX3Atmp_558424_839829468 = (NI)(length0 - ((NI) 2)); res_558427_839829468 = ((NI) 0); { while (1) { if (!(res_558427_839829468 <= HEX3Atmp_558424_839829468)) goto LA3; i_558420_839829468 = res_558427_839829468; genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[i_558420_839829468]); res_558427_839829468 += ((NI) 1); } LA3: ; } } { if (!(((NI) 0) < length0)) goto LA6; expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0); } LA6: ; } N_NIMCALL(void, genif_544982_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { Tloc292816 a0; Ropeobj178006* lelse0; Ropeobj178006* lend0; memset((void*)(&a0), 0, sizeof(a0)); lelse0 = (Ropeobj178006*)0; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_297441_850551059((*n0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind292808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA6: ; genlinedir_532823_839829468(p0, n0); lend0 = getlabel_539217_839829468(p0); { NI i_545011_839829468; NI HEX3Atmp_545435_839829468; NI LOC9; NI res_545438_839829468; i_545011_839829468 = (NI)0; HEX3Atmp_545435_839829468 = (NI)0; LOC9 = (NI)0; LOC9 = sonslen_295351_850551059(n0); 
HEX3Atmp_545435_839829468 = (NI)(LOC9 - ((NI) 1)); res_545438_839829468 = ((NI) 0); { while (1) { Tnode292802* it0; if (!(res_545438_839829468 <= HEX3Atmp_545435_839829468)) goto LA11; i_545011_839829468 = res_545438_839829468; { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = ((*d0).k == ((Tlockind292808) 1)); if (!(LOC14)) goto LA15; LOC14 = isemptytype_297441_850551059((*n0).typ); LA15: ; if (!LOC14) goto LA16; (*d0).k = ((Tlockind292808) 0); } LA16: ; it0 = (*n0).kindU.S6.sons->data[i_545011_839829468]; { NI LOC20; TY533289 LOC23; NI LOC24; TY532811 LOC25; LOC20 = (NI)0; LOC20 = len_293081_850551059(it0); if (!(LOC20 == ((NI) 2))) goto LA21; memset((void*)LOC23, 0, sizeof(LOC23)); LOC24 = (NI)0; LOC24 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC23, 0); initlocexprsingleuse_539289_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], (&a0)); lelse0 = getlabel_539217_839829468(p0); (*p0).labels += ((NI) 1); memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = rdloc_538188_839829468((&a0)); LOC25[1] = lelse0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_555), LOC25, 2); { NIM_BOOL LOC28; Ropeobj178006** LOC32; Ropeobj178006** LOC33; LOC28 = (NIM_BOOL)0; LOC28 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC28) goto LA29; LOC28 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA29: ; if (!LOC28) goto LA30; LOC32 = (Ropeobj178006**)0; LOC32 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); add_178487_2381377266(LOC32, ((NimStringDesc*) &T839829468_223)); expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0); LOC33 = (Ropeobj178006**)0; LOC33 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); add_178487_2381377266(LOC33, ((NimStringDesc*) &T839829468_280)); } goto LA26; LA30: ; { expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0); } LA26: ; endblock_544060_839829468(p0); { NI LOC37; TY178507 LOC40; LOC37 = (NI)0; LOC37 = 
sonslen_295351_850551059(n0); if (!(((NI) 1) < LOC37)) goto LA38; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = lend0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC40, 1); } LA38: ; fixlabel_539230_839829468(p0, lelse0); } goto LA18; LA21: ; { NI LOC42; TY533289 LOC45; NI LOC46; LOC42 = (NI)0; LOC42 = len_293081_850551059(it0); if (!(LOC42 == ((NI) 1))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (NI)0; LOC46 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0); expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], d0); endblock_544060_839829468(p0); } goto LA18; LA43: ; { internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_557)); } LA18: ; res_545438_839829468 += ((NI) 1); } LA11: ; } } { NI LOC50; LOC50 = (NI)0; LOC50 = sonslen_295351_850551059(n0); if (!(((NI) 1) < LOC50)) goto LA51; fixlabel_539230_839829468(p0, lend0); } LA51: ; } N_NIMCALL(void, downconv_558581_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], d0); } goto LA1; LA5: ; { Ttype292840* dest0; Tnode292802* arg0; Ttype292840* src0; Tloc292816 a0; Ropeobj178006* r0; NIM_BOOL isref0; Ttype292840* LOC10; dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106247256320)); arg0 = (*n0).kindU.S6.sons->data[((NI) 0)]; { while (1) { if (!((*arg0).kind == ((Tnodekind292020) 66))) goto LA9; arg0 = (*arg0).kindU.S6.sons->data[((NI) 0)]; } LA9: ; } src0 = skiptypes_296099_850551059((*arg0).typ, IL64(211106247256320)); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, arg0, (&a0)); r0 = rdloc_538188_839829468((&a0)); LOC10 = (Ttype292840*)0; LOC10 = 
skiptypes_296099_850551059((*arg0).typ, IL64(211106232576256)); isref0 = ((14680064 &((NU64)1<<((NU)((*LOC10).kind)&63U)))!=0); { if (!isref0) goto LA13; add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_558)); } goto LA11; LA13: ; { add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); } LA11: ; { NI i_558650_839829468; NI HEX3Atmp_558677_839829468; NI LOC17; NI res_558680_839829468; i_558650_839829468 = (NI)0; HEX3Atmp_558677_839829468 = (NI)0; LOC17 = (NI)0; LOC17 = inheritancediff_326252_3876443242(dest0, src0); HEX3Atmp_558677_839829468 = (LOC17 > 0? (LOC17) : -(LOC17)); res_558680_839829468 = ((NI) 2); { while (1) { if (!(res_558680_839829468 <= HEX3Atmp_558677_839829468)) goto LA19; i_558650_839829468 = res_558680_839829468; add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); res_558680_839829468 += ((NI) 1); } LA19: ; } } { if (!isref0) goto LA22; { NIM_BOOL LOC26; Ttype292840* LOC28; TY532811 LOC31; LOC26 = (NIM_BOOL)0; LOC26 = ((*d0).k == ((Tlockind292808) 0)); if (!(LOC26)) goto LA27; LOC28 = (Ttype292840*)0; LOC28 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256)); LOC26 = ((14680064 &((NU64)1<<((NU)((*LOC28).kind)&63U)))!=0); LA27: ; if (!LOC26) goto LA29; gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE); memset((void*)LOC31, 0, sizeof(LOC31)); LOC31[0] = rdloc_538188_839829468((&(*d0))); LOC31[1] = r0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_559), LOC31, 2); } goto LA24; LA29: ; { r0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), r0); putintodest_550468_839829468(p0, d0, (*n0).typ, r0, a0.s); } LA24: ; } goto LA20; LA22: ; { putintodest_550468_839829468(p0, d0, (*n0).typ, r0, a0.s); } LA20: ; } LA1: ; } N_NIMCALL(void, upconv_558431_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { Tloc292816 a0; Ttype292840* dest0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); 
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106247256320)); { NIM_BOOL LOC3; NIM_BOOL LOC5; Ropeobj178006* r0; Ropeobj178006* nilcheck0; Ttype292840* t0; LOC3 = (NIM_BOOL)0; LOC3 = (((*p0).options &(1U<<((NU)(((Toption169009) 1))&31U)))!=0); if (!(LOC3)) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = isobjlackingtypefield_533515_839829468(dest0); LOC3 = !(LOC5); LA4: ; if (!LOC3) goto LA6; r0 = rdloc_538188_839829468((&a0)); nilcheck0 = NIM_NIL; t0 = skiptypes_296099_850551059(a0.t, IL64(211106232576256)); { while (1) { Ttype292840* LOC23; if (!((14680064 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0)) goto LA9; { if (!!(((*t0).kind == ((Ttypekind292244) 23)))) goto LA12; nilcheck0 = r0; } LA12: ; { NIM_BOOL LOC16; NIM_BOOL LOC18; TY178507 LOC22; LOC16 = (NIM_BOOL)0; LOC16 = !(((*t0).kind == ((Ttypekind292244) 23))); if (LOC16) goto LA17; LOC18 = (NIM_BOOL)0; LOC18 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC18) goto LA19; LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA19: ; LOC16 = !(LOC18); LA17: ; if (!LOC16) goto LA20; memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = r0; r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC22, 1); } LA20: ; LOC23 = (Ttype292840*)0; LOC23 = lastson_295377_850551059(t0); t0 = skiptypes_296099_850551059(LOC23, IL64(211106232576256)); } LA9: ; } { NIM_BOOL LOC26; LOC26 = (NIM_BOOL)0; LOC26 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC26) goto LA27; LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA27: ; if (!!(LOC26)) goto LA28; { while (1) { NIM_BOOL LOC32; LOC32 = (NIM_BOOL)0; LOC32 = ((*t0).kind == ((Ttypekind292244) 17)); if (!(LOC32)) goto LA33; LOC32 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL)); LA33: ; if (!LOC32) goto LA31; add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); t0 = skiptypes_296099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360)); } LA31: ; } } LA28: ; { 
TY535238 LOC38; if (!!((nilcheck0 == NIM_NIL))) goto LA36; memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = nilcheck0; LOC38[1] = r0; LOC38[2] = gentypeinfo_535941_839829468((*p0).module, dest0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_560), LOC38, 3); } goto LA34; LA36: ; { TY532811 LOC40; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = r0; LOC40[1] = gentypeinfo_535941_839829468((*p0).module, dest0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_561), LOC40, 2); } LA34: ; } LA6: ; { TY532811 LOC45; Ropeobj178006* LOC46; if (!!(((*(*(*n0).kindU.S6.sons->data[((NI) 0)]).typ).kind == ((Ttypekind292244) 17)))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC45[0] = gettypedesc_535673_839829468((*p0).module, (*n0).typ); LOC45[1] = rdloc_538188_839829468((&a0)); LOC46 = (Ropeobj178006*)0; LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC45, 2); putintodest_550468_839829468(p0, d0, (*n0).typ, LOC46, a0.s); } goto LA41; LA43: ; { TY532811 LOC48; Ropeobj178006* LOC49; memset((void*)LOC48, 0, sizeof(LOC48)); LOC48[0] = gettypedesc_535673_839829468((*p0).module, dest0); LOC48[1] = addrloc_538204_839829468((&a0)); LOC49 = (Ropeobj178006*)0; LOC49 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_429), LOC48, 2); putintodest_550468_839829468(p0, d0, (*n0).typ, LOC49, a0.s); } LA41: ; } N_NIMCALL(void, genrangechck_556591_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* magic0) { Tloc292816 a0; Ttype292840* dest0; memset((void*)(&a0), 0, sizeof(a0)); dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864)); { NIM_BOOL LOC3; Ttype292840* LOC5; TY532811 LOC8; Ropeobj178006* LOC9; LOC3 = (NIM_BOOL)0; LOC3 = !((((*p0).options &(1U<<((NU)(((Toption169009) 3))&31U)))!=0)); if (LOC3) goto LA4; LOC5 = (Ttype292840*)0; LOC5 = skiptypes_296099_850551059(dest0, 1048576); LOC3 = ((IL64(34084860461056) 
&((NU64)1<<((NU)((*LOC5).kind)&63U)))!=0); LA4: ; if (!LOC3) goto LA6; initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = gettypedesc_535673_839829468((*p0).module, dest0); LOC8[1] = rdcharloc_538227_839829468((&a0)); LOC9 = (Ropeobj178006*)0; LOC9 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC8, 2); putintodest_550468_839829468(p0, d0, (*n0).typ, LOC9, a0.s); } goto LA1; LA6: ; { TY536475 LOC11; Ropeobj178006* LOC12; initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = gettypedesc_535673_839829468((*p0).module, dest0); LOC11[1] = rdcharloc_538227_839829468((&a0)); LOC11[2] = genliteral_549476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], dest0); LOC11[3] = genliteral_549476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 2)], dest0); LOC11[4] = rope_178277_2381377266(magic0); LOC12 = (Ropeobj178006*)0; LOC12 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_562), LOC11, 5); putintodest_550468_839829468(p0, d0, dest0, LOC12, a0.s); } LA1: ; } N_NIMCALL(void, convstrtocstr_556643_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { Tloc292816 a0; Ttype292840* LOC1; TY178507 LOC2; Ropeobj178006* LOC3; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC1 = (Ttype292840*)0; LOC1 = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864)); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rdloc_538188_839829468((&a0)); LOC3 = (Ropeobj178006*)0; LOC3 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_485), LOC2, 1); putintodest_550468_839829468(p0, d0, LOC1, LOC3, a0.s); } N_NIMCALL(void, convcstrtostr_556655_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { Tloc292816 a0; Ttype292840* LOC1; TY178507 LOC2; Ropeobj178006* LOC3; memset((void*)(&a0), 0, sizeof(a0)); 
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC1 = (Ttype292840*)0; LOC1 = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864)); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rdloc_538188_839829468((&a0)); LOC3 = (Ropeobj178006*)0; LOC3 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_411), LOC2, 1); putintodest_550468_839829468(p0, d0, LOC1, LOC3, a0.s); gcusage_554439_839829468(n0); } static N_INLINE(NIM_BOOL, isroutine_297324_850551059)(Tsym292834* s0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = ((258048 &(1U<<((NU)((*s0).kind)&31U)))!=0); return result0; } static N_INLINE(NIM_BOOL, isconstclosure_557810_839829468)(Tnode292802* n0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC2; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC2 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)); if (!(LOC2)) goto LA3; LOC2 = isroutine_297324_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym); LA3: ; LOC1 = LOC2; if (!(LOC1)) goto LA4; LOC1 = ((*(*n0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 23)); LA4: ; result0 = LOC1; return result0; } N_NIMCALL(void, genclosure_557836_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { { NIM_BOOL LOC3; Ropeobj178006* tmp0; Ropeobj178006* LOC6; TY535238 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = isconstclosure_557810_839829468(n0); if (!LOC3) goto LA4; (*(*p0).module).labels += ((NI) 1); LOC6 = (Ropeobj178006*)0; LOC6 = rope_178401_2381377266(((NI64) ((*(*p0).module).labels))); tmp0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_566), LOC6); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = gettypedesc_535673_839829468((*p0).module, (*n0).typ); LOC7[1] = tmp0; LOC7[2] = genconstexpr_554849_839829468(p0, n0); addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC7, 3); putintodest_550468_839829468(p0, d0, (*n0).typ, tmp0, 
((Tstorageloc292812) 1)); } goto LA1; LA4: ; { Tloc292816 tmp0; Tloc292816 a0; Tloc292816 b0; TY535238 LOC14; memset((void*)(&tmp0), 0, sizeof(tmp0)); memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&b0)); { Tnode292802* LOC11; LOC11 = (Tnode292802*)0; LOC11 = skipconv_328882_3876443242((*n0).kindU.S6.sons->data[((NI) 0)]); if (!((*LOC11).kind == ((Tnodekind292020) 155))) goto LA12; internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_567)); } LA12: ; gettemp_537032_839829468(p0, (*n0).typ, (&tmp0), NIM_FALSE); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_538188_839829468((&tmp0)); LOC14[1] = rdloc_538188_839829468((&a0)); LOC14[2] = rdloc_538188_839829468((&b0)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_568), LOC14, 3); putlocintodest_539258_839829468(p0, d0, (&tmp0)); } LA1: ; } static N_INLINE(Ropeobj178006*, assignlabel_544020_839829468)(Tblock529019* b0) { Ropeobj178006* result0; Ropeobj178006* LOC1; result0 = (Ropeobj178006*)0; LOC1 = (Ropeobj178006*)0; LOC1 = rope_178401_2381377266(((NI64) ((*b0).id))); unsureAsgnRef((void**) (&(*b0).label), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC1)); result0 = (*b0).label; return result0; } N_NIMCALL(void, gencomputedgoto_545744_839829468)(Tcproc529021* p0, Tnode292802* n0) { NI casepos0; NI arraysize0; NI id0; Ropeobj178006* tmp0; TY178507 LOC27; Ropeobj178006* gotoarray0; TY532811 LOC28; TY178507 LOC33; NI topblock0; Ropeobj178006* oldbody0; Ropeobj178006* tailb0; Ropeobj178006* taila0; Tnode292802* casestmt0; Tloc292816 a_545871_839829468; TY532811 LOC41; { casepos0 = ((NI) -1); arraysize0 = (NI)0; { NI i_545768_839829468; NI HEX3Atmp_545934_839829468; NI LOC2; NI res_545937_839829468; i_545768_839829468 = (NI)0; HEX3Atmp_545934_839829468 = (NI)0; 
LOC2 = (NI)0; LOC2 = len_293081_850551059(n0); HEX3Atmp_545934_839829468 = (LOC2 - 1); res_545937_839829468 = ((NI) 0); { while (1) { Tnode292802* it0; if (!(res_545937_839829468 <= HEX3Atmp_545934_839829468)) goto LA4; i_545768_839829468 = res_545937_839829468; it0 = (*n0).kindU.S6.sons->data[i_545768_839829468]; { NI64 asize0; if (!((*it0).kind == ((Tnodekind292020) 97))) goto LA7; { Tnode292802* LOC11; LOC11 = (Tnode292802*)0; LOC11 = lastson_295364_850551059(it0); if (!!(((*LOC11).kind == ((Tnodekind292020) 85)))) goto LA12; localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_570)); goto BeforeRet; } LA12: ; casepos0 = i_545768_839829468; asize0 = lengthord_320007_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ); { if (!(IL64(10000) < asize0)) goto LA16; localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_571)); goto BeforeRet; } LA16: ; arraysize0 = ((NI) (asize0)); { NI64 LOC20; LOC20 = (NI64)0; LOC20 = firstord_320001_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ); if (!!((LOC20 == IL64(0)))) goto LA21; localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_572)); goto BeforeRet; } LA21: ; } LA7: ; res_545937_839829468 += ((NI) 1); } LA4: ; } } { if (!(casepos0 < ((NI) 0))) goto LA25; localerror_196085_155036129((*n0).info, ((NimStringDesc*) &T839829468_573)); goto BeforeRet; } LA25: ; id0 = (NI)(((NI) ((*p0).labels)) + ((NI) 1)); (*p0).labels += (NI)(arraysize0 + ((NI) 1)); memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = rope_178401_2381377266(((NI64) (id0))); tmp0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_574), LOC27, 1); memset((void*)LOC28, 0, sizeof(LOC28)); LOC28[0] = tmp0; LOC28[1] = rope_178401_2381377266(((NI64) (arraysize0))); gotoarray0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_575), LOC28, 2); { NI i_545819_839829468; NI HEX3Atmp_545942_839829468; NI res_545945_839829468; i_545819_839829468 = (NI)0; HEX3Atmp_545942_839829468 = (NI)0; 
HEX3Atmp_545942_839829468 = (NI)(arraysize0 - ((NI) 1)); res_545945_839829468 = ((NI) 1); { while (1) { TY178507 LOC32; if (!(res_545945_839829468 <= HEX3Atmp_545942_839829468)) goto LA31; i_545819_839829468 = res_545945_839829468; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (id0)) + i_545819_839829468)))); addf_179205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_576), LOC32, 1); res_545945_839829468 += ((NI) 1); } LA31: ; } } memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (id0)) + arraysize0)))); addf_179205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_577), LOC33, 1); line_532690_839829468(p0, ((Tcprocsection529011) 0), gotoarray0); topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1)); oldbody0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]; asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), NIM_NIL); { NI j_545854_839829468; NI HEX3Atmp_545950_839829468; NI HEX3Atmp_545951_839829468; NI LOC35; NI res_545954_839829468; j_545854_839829468 = (NI)0; HEX3Atmp_545950_839829468 = (NI)0; HEX3Atmp_545951_839829468 = (NI)0; HEX3Atmp_545950_839829468 = (NI)(casepos0 + ((NI) 1)); LOC35 = (NI)0; LOC35 = len_293081_850551059(n0); HEX3Atmp_545951_839829468 = (LOC35 - 1); res_545954_839829468 = HEX3Atmp_545950_839829468; { while (1) { if (!(res_545954_839829468 <= HEX3Atmp_545951_839829468)) goto LA37; j_545854_839829468 = res_545954_839829468; genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[j_545854_839829468]); res_545954_839829468 += ((NI) 1); } LA37: ; } } tailb0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]; asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), NIM_NIL); { NI j_545866_839829468; NI HEX3Atmp_545959_839829468; NI res_545962_839829468; j_545866_839829468 = (NI)0; 
HEX3Atmp_545959_839829468 = (NI)0; HEX3Atmp_545959_839829468 = (NI)(casepos0 - ((NI) 1)); res_545962_839829468 = ((NI) 0); { while (1) { if (!(res_545962_839829468 <= HEX3Atmp_545959_839829468)) goto LA40; j_545866_839829468 = res_545962_839829468; genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[j_545866_839829468]); res_545962_839829468 += ((NI) 1); } LA40: ; } } taila0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]; asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), HEX26_178418_2381377266(oldbody0, taila0)); casestmt0 = (*n0).kindU.S6.sons->data[casepos0]; memset((void*)(&a_545871_839829468), 0, sizeof(a_545871_839829468)); initlocexpr_539283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a_545871_839829468)); memset((void*)LOC41, 0, sizeof(LOC41)); LOC41[0] = tmp0; LOC41[1] = rdloc_538188_839829468((&a_545871_839829468)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_578), LOC41, 2); { NI i_545894_839829468; NI HEX3Atmp_545978_839829468; NI LOC43; NI res_545981_839829468; i_545894_839829468 = (NI)0; HEX3Atmp_545978_839829468 = (NI)0; LOC43 = (NI)0; LOC43 = len_293081_850551059(casestmt0); HEX3Atmp_545978_839829468 = (LOC43 - 1); res_545981_839829468 = ((NI) 1); { while (1) { TY533289 LOC46; NI LOC47; Tnode292802* it0; Tnode292802* LOC57; Ropeobj178006** LOC58; Ropeobj178006** LOC59; Tloc292816 a0; TY532811 LOC60; if (!(res_545981_839829468 <= HEX3Atmp_545978_839829468)) goto LA45; i_545894_839829468 = res_545981_839829468; memset((void*)LOC46, 0, sizeof(LOC46)); LOC47 = (NI)0; LOC47 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC46, 0); it0 = (*casestmt0).kindU.S6.sons->data[i_545894_839829468]; { NI j_545910_839829468; NI HEX3Atmp_545970_839829468; NI LOC49; NI res_545973_839829468; j_545910_839829468 = (NI)0; HEX3Atmp_545970_839829468 = (NI)0; LOC49 = (NI)0; LOC49 = len_293081_850551059(it0); 
HEX3Atmp_545970_839829468 = (NI)(LOC49 - ((NI) 2)); res_545973_839829468 = ((NI) 0); { while (1) { NI64 val0; TY178507 LOC56; if (!(res_545973_839829468 <= HEX3Atmp_545970_839829468)) goto LA51; j_545910_839829468 = res_545973_839829468; { if (!((*(*it0).kindU.S6.sons->data[j_545910_839829468]).kind == ((Tnodekind292020) 44))) goto LA54; localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579)); goto BeforeRet; } LA54: ; val0 = getordvalue_320129_3876443242((*it0).kindU.S6.sons->data[j_545910_839829468]); memset((void*)LOC56, 0, sizeof(LOC56)); LOC56[0] = intliteral_539270_839829468((NI64)((NI64)(val0 + ((NI64) (id0))) + IL64(1))); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_580), LOC56, 1); res_545973_839829468 += ((NI) 1); } LA51: ; } } LOC57 = (Tnode292802*)0; LOC57 = lastson_295364_850551059(it0); genstmts_539244_839829468(p0, LOC57); LOC58 = (Ropeobj178006**)0; LOC58 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); add_178482_2381377266(LOC58, tailb0); LOC59 = (Ropeobj178006**)0; LOC59 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); add_178482_2381377266(LOC59, taila0); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC60, 0, sizeof(LOC60)); LOC60[0] = tmp0; LOC60[1] = rdloc_538188_839829468((&a0)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_578), LOC60, 2); endblock_544060_839829468(p0); res_545981_839829468 += ((NI) 1); } LA45: ; } } }BeforeRet: ; } N_NIMCALL(void, genwhilestmt_545985_839829468)(Tcproc529021* p0, Tnode292802* t0) { Tloc292816 a0; NI oldbreakidx_546011_839829468; TY533289 LOC1; Tnode292802* loopbody0; memset((void*)(&a0), 0, sizeof(a0)); (*p0).withinloop += ((NI) 1); genlinedir_532823_839829468(p0, t0); oldbreakidx_546011_839829468 = (*p0).breakidx; memset((void*)LOC1, 0, sizeof(LOC1)); (*p0).breakidx = startblock_543978_839829468(p0, 
((NimStringDesc*) &T839829468_569), LOC1, 0); (*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE; initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); { NIM_BOOL LOC4; Ropeobj178006* label0; TY532811 LOC8; LOC4 = (NIM_BOOL)0; LOC4 = !(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 6))); if (LOC4) goto LA5; LOC4 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval == IL64(0)); LA5: ; if (!LOC4) goto LA6; label0 = assignlabel_544020_839829468((&(*p0).blocks->data[(*p0).breakidx])); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_538188_839829468((&a0)); LOC8[1] = label0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_555), LOC8, 2); } LA6: ; loopbody0 = (*t0).kindU.S6.sons->data[((NI) 1)]; { NIM_BOOL LOC11; LOC11 = (NIM_BOOL)0; LOC11 = stmtscontainpragma_528083_2036603609(loopbody0, ((Tspecialword275003) 182)); if (!(LOC11)) goto LA12; LOC11 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 1))&7U)))!=0); LA12: ; if (!LOC11) goto LA13; { NIM_BOOL LOC17; NI LOC18; LOC17 = (NIM_BOOL)0; LOC18 = (NI)0; LOC18 = len_293081_850551059(loopbody0); LOC17 = (LOC18 == ((NI) 2)); if (!(LOC17)) goto LA19; LOC17 = ((*(*loopbody0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)); LA19: ; if (!LOC17) goto LA20; loopbody0 = (*loopbody0).kindU.S6.sons->data[((NI) 1)]; } LA20: ; gencomputedgoto_545744_839829468(p0, loopbody0); } goto LA9; LA13: ; { genstmts_539244_839829468(p0, loopbody0); } LA9: ; { TY533289 LOC27; if (!(((*p0).options &(1U<<((NU)(((Toption169009) 19))&31U)))!=0)) goto LA25; memset((void*)LOC27, 0, sizeof(LOC27)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_581), LOC27, 0); } LA25: ; endblock_544060_839829468(p0); (*p0).breakidx = oldbreakidx_546011_839829468; (*p0).withinloop -= ((NI) 1); } N_NIMCALL(void, gengotovar_544258_839829468)(Tcproc529021* p0, Tnode292802* 
value0) { { if (!!(((*value0).kind >= ((Tnodekind292020) 5) && (*value0).kind <= ((Tnodekind292020) 15)))) goto LA3; localerror_196085_155036129((*value0).info, ((NimStringDesc*) &T839829468_582)); } goto LA1; LA3: ; { TY178507 LOC6; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rope_178401_2381377266((*value0).kindU.S1.intval); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_583), LOC6, 1); } LA1: ; } N_NIMCALL(void, varindynamiclib_538812_839829468)(Tcgen529027* m0, Tsym292834* sym0) { Tlib292820* lib0; Ropeobj178006* extname0; Ropeobj178006* tmp0; TY535235 LOC1; NimStringDesc* LOC2; TY532811 LOC3; lib0 = (*sym0).annex; extname0 = (*sym0).loc.r; loaddynamiclib_559481_839829468(m0, lib0); (*sym0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8)); tmp0 = mangledynlibproc_538816_839829468(sym0); asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0); (*m0).labels += ((NI) 2); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = tmp0; LOC1[1] = gettypedesc_535673_839829468(m0, (*sym0).typ); LOC1[2] = (*lib0).name; LOC2 = (NimStringDesc*)0; LOC2 = HEX24_178856_2381377266(extname0); LOC1[3] = makecstring_191638_155036129(LOC2); appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_584), LOC1, 4); memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = (*sym0).loc.r; LOC3[1] = gettypedesc_535673_839829468(m0, (*sym0).loc.t); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_585), LOC3, 2); } N_NIMCALL(void, assignglobalvar_538819_839829468)(Tcproc529021* p0, Tsym292834* s0) { { { Ropeobj178006* LOC5; if (!((*s0).loc.k == ((Tlockind292808) 0))) goto LA3; LOC5 = (Ropeobj178006*)0; LOC5 = manglename_533205_839829468(s0); fillloc_532282_839829468((&(*s0).loc), ((Tlockind292808) 3), (*s0).typ, LOC5, ((Tstorageloc292812) 3)); } LA3: ; { Tcgen529027* q0; if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA8; q0 = 
findpendingmodule_532241_839829468((*p0).module, s0); { NIM_BOOL LOC12; NIM_BOOL LOC14; LOC12 = (NIM_BOOL)0; LOC12 = !((q0 == NIM_NIL)); if (!(LOC12)) goto LA13; LOC14 = (NIM_BOOL)0; LOC14 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*s0).Sup.id); LOC12 = !(LOC14); LA13: ; if (!LOC12) goto LA15; varindynamiclib_538812_839829468(q0, s0); } goto LA10; LA15: ; { asgnRefNoCycle((void**) (&(*s0).loc.r), mangledynlibproc_538816_839829468(s0)); } LA10: ; goto BeforeRet; } LA8: ; useheader_532369_839829468((*p0).module, s0); { if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA20; goto BeforeRet; } LA20: ; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA24; declarethreadvar_538676_839829468((*p0).module, s0, (((*s0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0)); } goto LA22; LA24: ; { Ropeobj178006* decl0; Ropeobj178006* td0; decl0 = NIM_NIL; td0 = gettypedesc_535673_839829468((*p0).module, (*s0).loc.t); { TY178507 LOC43; if (!(*s0).constraint == 0) goto LA29; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0)) goto LA33; add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_240)); } LA33: ; add_178482_2381377266(&decl0, td0); { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA37; add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_121)); } LA37: ; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA41; add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_122)); } LA41: ; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = (*s0).loc.r; addf_179205_2381377266(&decl0, ((NimStringDesc*) &T839829468_242), LOC43, 1); } goto LA27; LA29: ; { NimStringDesc* LOC45; TY532811 LOC46; LOC45 = (NimStringDesc*)0; LOC45 = rawNewString((*(*s0).constraint).kindU.S3.strval->Sup.len + 3); appendString(LOC45, (*(*s0).constraint).kindU.S3.strval); appendString(LOC45, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC46, 0, sizeof(LOC46)); 
LOC46[0] = td0; LOC46[1] = (*s0).loc.r; decl0 = HEX25_178905_2381377266(LOC45, LOC46, 2); } LA27: ; add_178482_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 9))- 0], decl0); } LA22: ; { if (!(((NI) 0) < (*p0).withinloop)) goto LA49; resetloc_538350_839829468(p0, (&(*s0).loc)); } LA49: ; { TY535238 LOC55; NimStringDesc* LOC56; NimStringDesc* LOC57; if (!(((*(*(*p0).module).module).options & 163840) == 163840)) goto LA53; memset((void*)LOC55, 0, sizeof(LOC55)); LOC56 = (NimStringDesc*)0; LOC56 = rawNewString((*(*(*s0).owner).name).s->Sup.len + (*(*s0).name).s->Sup.len + 1); appendString(LOC56, (*(*(*s0).owner).name).s); appendChar(LOC56, 46); appendString(LOC56, (*(*s0).name).s); LOC57 = (NimStringDesc*)0; LOC57 = nsuNormalize(LOC56); LOC55[0] = makecstring_191638_155036129(LOC57); LOC55[1] = (*s0).loc.r; LOC55[2] = gentypeinfo_535941_839829468((*p0).module, (*s0).typ); appcg_532632_839829468((*p0).module, &(*(*p0).module).s[(((Tcfilesection529005) 15))- 0], ((NimStringDesc*) &T839829468_586), LOC55, 3); } LA53: ; }BeforeRet: ; } N_NIMCALL(Ropeobj178006*, gentraverseprocforglobal_538032_839829468)(Tcgen529027* m0, Tsym292834* s0) { Ropeobj178006* result0; Ropeobj178006* LOC1; Ttraversalclosure537019 c0; Tcproc529021* p0; Ropeobj178006* sloc0; Ropeobj178006* header0; TY178507 LOC8; Ropeobj178006* generatedproc0; TY535235 LOC9; Ropeobj178006** LOC10; Ropeobj178006** LOC11; Ropeobj178006** LOC12; TY178507 LOC13; result0 = (Ropeobj178006*)0; LOC1 = (Ropeobj178006*)0; LOC1 = gentypeinfo_535941_839829468(m0, (*s0).loc.t); memset((void*)(&c0), 0, sizeof(c0)); p0 = newproc_529206_3723162438(NIM_NIL, m0); sloc0 = (*s0).loc.r; result0 = gettempname_533598_839829468(m0); { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = (((*s0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0); if (!(LOC4)) goto LA5; LOC4 = emulatedthreadvars_532949_839829468(); LA5: ; if (!LOC4) goto LA6; accessthreadlocalvar_532945_839829468(p0, s0); sloc0 = HEX26_178452_2381377266(((NimStringDesc*) 
&T839829468_288), sloc0); } LA6: ; c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_587)); c0.p = p0; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = result0; header0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_588), LOC8, 1); gentraverseproc_537022_839829468((&c0), sloc0, (*s0).loc.t); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = header0; LOC10 = (Ropeobj178006**)0; LOC10 = s_529179_3723162438(p0, ((Tcprocsection529011) 0)); LOC9[1] = (*LOC10); LOC11 = (Ropeobj178006**)0; LOC11 = s_529179_3723162438(p0, ((Tcprocsection529011) 1)); LOC9[2] = (*LOC11); LOC12 = (Ropeobj178006**)0; LOC12 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); LOC9[3] = (*LOC12); generatedproc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_190), LOC9, 4); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = header0; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC13, 1); add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0); return result0; } N_NIMCALL(void, registergcroot_543762_839829468)(Tcproc529021* p0, Tsym292834* v0) { { NIM_BOOL LOC3; Ropeobj178006* prc0; Ropeobj178006** LOC7; TY178507 LOC8; LOC3 = (NIM_BOOL)0; LOC3 = ((240 &(1U<<((NU)(gselectedgc_169133_2607990831)&7U)))!=0); if (!(LOC3)) goto LA4; LOC3 = containsgarbagecollectedref_320117_3876443242((*v0).loc.t); LA4: ; if (!LOC3) goto LA5; prc0 = gentraverseprocforglobal_538032_839829468((*p0).module, v0); LOC7 = (Ropeobj178006**)0; LOC7 = procsec_529194_3723162438((*(*p0).module).initproc, ((Tcprocsection529011) 1)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = prc0; appcg_532632_839829468((*p0).module, LOC7, ((NimStringDesc*) &T839829468_589), LOC8, 1); } LA5: ; } static N_INLINE(NIM_BOOL, isassignedimmediately_543781_839829468)(Tnode292802* n0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { if (!((*n0).kind == ((Tnodekind292020) 1))) goto LA3; result0 = NIM_FALSE; goto BeforeRet; } LA3: ; { NIM_BOOL LOC7; 
LOC7 = (NIM_BOOL)0; LOC7 = isinvalidreturntype_533550_839829468((*n0).typ); if (!LOC7) goto LA8; result0 = NIM_FALSE; goto BeforeRet; } LA8: ; result0 = NIM_TRUE; }BeforeRet: ; return result0; } N_NIMCALL(void, genasgncall_543695_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) { { Ttype292840* LOC3; LOC3 = (Ttype292840*)0; LOC3 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, 2048); if (!((*LOC3).callconv == ((Tcallingconvention292002) 8))) goto LA4; genclosurecall_540452_839829468(p0, le0, ri0, d0); } goto LA1; LA4: ; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)); if (!(LOC7)) goto LA8; LOC7 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA8: ; if (!LOC7) goto LA9; geninfixcall_541929_839829468(p0, le0, ri0, d0); } goto LA1; LA9: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)); if (!(LOC12)) goto LA13; LOC12 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0); LA13: ; if (!LOC12) goto LA14; gennamedparamcall_542616_839829468(p0, ri0, d0); } goto LA1; LA14: ; { genprefixcall_539960_839829468(p0, le0, ri0, d0); } LA1: ; poststmtactions_532942_839829468(p0); } static N_INLINE(void, loadinto_543928_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* a0) { { NIM_BOOL LOC3; NIM_BOOL LOC5; LOC3 = (NIM_BOOL)0; LOC3 = ((*ri0).kind == ((Tnodekind292020) 27) || (*ri0).kind == ((Tnodekind292020) 29) || (*ri0).kind == ((Tnodekind292020) 30) || (*ri0).kind == ((Tnodekind292020) 31) || (*ri0).kind == ((Tnodekind292020) 26) || (*ri0).kind == ((Tnodekind292020) 28) || (*ri0).kind == ((Tnodekind292020) 32)); if (!(LOC3)) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = !(((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3))); if 
(LOC5) goto LA6; LOC5 = ((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).magic == ((Tmagic292524) 0)); LA6: ; LOC3 = LOC5; LA4: ; if (!LOC3) goto LA7; genasgncall_543695_839829468(p0, le0, ri0, a0); } goto LA1; LA7: ; { if (!((*ri0).kind == ((Tnodekind292020) 47) || (*ri0).kind == ((Tnodekind292020) 65))) goto LA10; genderef_543921_839829468(p0, ri0, a0, NIM_TRUE); } goto LA1; LA10: ; { expr_539248_839829468(p0, ri0, a0); } LA1: ; } N_NIMCALL(void, gensinglevar_544276_839829468)(Tcproc529021* p0, Tnode292802* a0) { Tsym292834* v0; Tcproc529021* targetproc0; { v0 = (*(*a0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { if (!!(((1082130432 & (*v0).flags) == 0))) goto LA3; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0)) goto LA7; gengotovar_544258_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 2)]); } LA7: ; goto BeforeRet; } LA3: ; targetproc0 = p0; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 3))&31U)))!=0)) goto LA11; { NIM_BOOL LOC15; NIM_BOOL LOC16; LOC15 = (NIM_BOOL)0; LOC16 = (NIM_BOOL)0; LOC16 = (((*v0).flags & 96) == 32); if (!(LOC16)) goto LA17; LOC16 = ((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1)); LA17: ; LOC15 = LOC16; if (!(LOC15)) goto LA18; LOC15 = !((((*v0).loc.flags & 72) == 0)); LA18: ; if (!LOC15) goto LA19; goto BeforeRet; } LA19: ; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)) goto LA23; targetproc0 = (*(*p0).module).preinitproc; } LA23: ; assignglobalvar_538819_839829468(targetproc0, v0); genobjectinit_538242_839829468((*(*p0).module).preinitproc, ((Tcprocsection529011) 1), (*v0).typ, (&(*v0).loc), NIM_TRUE); { NIM_BOOL LOC27; LOC27 = (NIM_BOOL)0; LOC27 = (((*v0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0); if (!(LOC27)) goto LA28; LOC27 = !((generatedheader_532201_839829468 == NIM_NIL)); LA28: ; if (!LOC27) goto LA29; genvarprototypeaux_544254_839829468(generatedheader_532201_839829468, v0); } LA29: ; registergcroot_543762_839829468(p0, v0); } goto LA9; LA11: 
; { Tnode292802* value0; NIM_BOOL imm0; value0 = (*a0).kindU.S6.sons->data[((NI) 2)]; imm0 = isassignedimmediately_543781_839829468(value0); { NIM_BOOL LOC34; NIM_BOOL LOC35; NIM_BOOL LOC36; NIM_BOOL LOC38; NIM_BOOL LOC42; Ropeobj178006* decl0; Tloc292816 tmp0; LOC34 = (NIM_BOOL)0; LOC35 = (NIM_BOOL)0; LOC36 = (NIM_BOOL)0; LOC36 = imm0; if (!(LOC36)) goto LA37; LOC38 = (NIM_BOOL)0; LOC38 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC38) goto LA39; LOC38 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA39: ; LOC36 = LOC38; LA37: ; LOC35 = LOC36; if (!(LOC35)) goto LA40; LOC35 = ((*p0).splitdecls == ((NI) 0)); LA40: ; LOC34 = LOC35; if (!(LOC34)) goto LA41; LOC42 = (NIM_BOOL)0; LOC42 = containshiddenpointer_320120_3876443242((*v0).typ); LOC34 = !(LOC42); LA41: ; if (!LOC34) goto LA43; genlinedir_532823_839829468(p0, a0); decl0 = localvardecl_538532_839829468(p0, v0); memset((void*)(&tmp0), 0, sizeof(tmp0)); { NIM_BOOL LOC47; NIM_BOOL LOC48; Tnode292802* LOC50; Tnode292802* LOC52; Ropeobj178006* params0; Ttype292840* typ0; TY532811 LOC66; LOC47 = (NIM_BOOL)0; LOC48 = (NIM_BOOL)0; LOC48 = ((*value0).kind == ((Tnodekind292020) 27) || (*value0).kind == ((Tnodekind292020) 29) || (*value0).kind == ((Tnodekind292020) 30) || (*value0).kind == ((Tnodekind292020) 31) || (*value0).kind == ((Tnodekind292020) 26) || (*value0).kind == ((Tnodekind292020) 28) || (*value0).kind == ((Tnodekind292020) 32)); if (!(LOC48)) goto LA49; LOC50 = (Tnode292802*)0; LOC50 = HEX5BHEX5D_293238_850551059(value0, ((NI) 0)); LOC48 = ((*LOC50).kind == ((Tnodekind292020) 3)); LA49: ; LOC47 = LOC48; if (!(LOC47)) goto LA51; LOC52 = (Tnode292802*)0; LOC52 = HEX5BHEX5D_293238_850551059(value0, ((NI) 0)); LOC47 = (((*(*LOC52).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 24))&31U)))!=0); LA51: ; if (!LOC47) goto LA53; params0 = (Ropeobj178006*)0; typ0 = skiptypes_296099_850551059((*(*value0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { 
NI i_544619_839829468; NI HEX3Atmp_544825_839829468; NI LOC56; NI res_544828_839829468; i_544619_839829468 = (NI)0; HEX3Atmp_544825_839829468 = (NI)0; LOC56 = (NI)0; LOC56 = len_293081_850551059(value0); HEX3Atmp_544825_839829468 = (LOC56 - 1); res_544828_839829468 = ((NI) 1); { while (1) { Ropeobj178006* LOC65; if (!(res_544828_839829468 <= HEX3Atmp_544825_839829468)) goto LA58; i_544619_839829468 = res_544828_839829468; { TY533289 LOC63; Ropeobj178006* LOC64; if (!!((params0 == NIM_NIL))) goto LA61; memset((void*)LOC63, 0, sizeof(LOC63)); LOC64 = (Ropeobj178006*)0; LOC64 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC63, 0); add_178482_2381377266(&params0, LOC64); } LA61: ; LOC65 = (Ropeobj178006*)0; LOC65 = genotherarg_539277_839829468(p0, value0, i_544619_839829468, typ0); add_178482_2381377266(&params0, LOC65); res_544828_839829468 += ((NI) 1); } LA58: ; } } memset((void*)LOC66, 0, sizeof(LOC66)); LOC66[0] = decl0; LOC66[1] = params0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_590), LOC66, 2); } goto LA45; LA53: ; { TY532811 LOC68; initlocexprsingleuse_539289_839829468(p0, value0, (&tmp0)); memset((void*)LOC68, 0, sizeof(LOC68)); LOC68[0] = decl0; LOC68[1] = rdloc_538188_839829468((&tmp0)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_591), LOC68, 2); } LA45: ; goto BeforeRet; } LA43: ; assignlocalvar_538614_839829468(p0, v0); initlocalvar_538398_839829468(p0, v0, imm0); } LA9: ; { if (!!(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1)))) goto LA71; genlinedir_532823_839829468(targetproc0, a0); loadinto_543928_839829468(targetproc0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&(*v0).loc)); } LA71: ; }BeforeRet: ; } N_NIMCALL(void, genclosurevar_544832_839829468)(Tcproc529021* p0, Tnode292802* a0) { NIM_BOOL immediateasgn0; immediateasgn0 = !(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 
1))); { Tloc292816 v0; if (!immediateasgn0) goto LA3; memset((void*)(&v0), 0, sizeof(v0)); initlocexpr_539283_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (&v0)); genlinedir_532823_839829468(p0, a0); loadinto_543928_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&v0)); } LA3: ; } N_NIMCALL(void, genvartuple_543794_839829468)(Tcproc529021* p0, Tnode292802* n0) { Tloc292816 tup0; Tloc292816 field0; NI L0; NIM_BOOL uselowering0; Ttype292840* t0; { memset((void*)(&tup0), 0, sizeof(tup0)); memset((void*)(&field0), 0, sizeof(field0)); { if (!!(((*n0).kind == ((Tnodekind292020) 36)))) goto LA3; internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592)); } LA3: ; L0 = sonslen_295351_850551059(n0); uselowering0 = NIM_FALSE; { NI i_543822_839829468; NI HEX3Atmp_543905_839829468; NI res_543908_839829468; i_543822_839829468 = (NI)0; HEX3Atmp_543905_839829468 = (NI)0; HEX3Atmp_543905_839829468 = (NI)(L0 - ((NI) 3)); res_543908_839829468 = ((NI) 0); { while (1) { if (!(res_543908_839829468 <= HEX3Atmp_543905_839829468)) goto LA7; i_543822_839829468 = res_543908_839829468; { Tnode292802* LOC10; LOC10 = (Tnode292802*)0; LOC10 = HEX5BHEX5D_293238_850551059(n0, i_543822_839829468); if (!!(((*LOC10).kind == ((Tnodekind292020) 3)))) goto LA11; uselowering0 = NIM_TRUE; goto LA5; } LA11: ; res_543908_839829468 += ((NI) 1); } LA7: ; } } LA5: ; { Tnode292802* LOC17; if (!uselowering0) goto LA15; LOC17 = (Tnode292802*)0; LOC17 = lowertupleunpacking_433037_2218250499(n0, (*p0).prc); genstmts_539244_839829468(p0, LOC17); goto BeforeRet; } LA15: ; genlinedir_532823_839829468(p0, n0); initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(L0 - ((NI) 1))], (&tup0)); t0 = getuniquetype_528640_2036603609(tup0.t); { NI i_543846_839829468; NI HEX3Atmp_543914_839829468; NI res_543917_839829468; i_543846_839829468 = (NI)0; HEX3Atmp_543914_839829468 = (NI)0; HEX3Atmp_543914_839829468 = (NI)(L0 - ((NI) 3)); 
res_543917_839829468 = ((NI) 0); { while (1) { if (!(res_543917_839829468 <= HEX3Atmp_543914_839829468)) goto LA20; i_543846_839829468 = res_543917_839829468; { Tsym292834* v0; v0 = (*(*n0).kindU.S6.sons->data[i_543846_839829468]).kindU.S4.sym; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)) goto LA24; goto LA21; } LA24: ; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 3))&31U)))!=0)) goto LA28; assignglobalvar_538819_839829468(p0, v0); genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 1), (*v0).typ, (&(*v0).loc), NIM_TRUE); registergcroot_543762_839829468(p0, v0); } goto LA26; LA28: ; { Tnode292802* LOC31; NIM_BOOL LOC32; assignlocalvar_538614_839829468(p0, v0); LOC31 = (Tnode292802*)0; LOC31 = HEX5BHEX5D_293238_850551059(n0, (NI)(L0 - ((NI) 1))); LOC32 = (NIM_BOOL)0; LOC32 = isassignedimmediately_543781_839829468(LOC31); initlocalvar_538398_839829468(p0, v0, LOC32); } LA26: ; initloc_532273_839829468((&field0), ((Tlockind292808) 6), (*t0).sons->data[i_543846_839829468], tup0.s); { TY532811 LOC37; if (!((*t0).kind == ((Ttypekind292244) 18))) goto LA35; memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = rdloc_538188_839829468((&tup0)); LOC37[1] = rope_178401_2381377266(((NI64) (i_543846_839829468))); field0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_185), LOC37, 2); } goto LA33; LA35: ; { TY532811 LOC43; { if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_543846_839829468]).kind == ((Tnodekind292020) 3)))) goto LA41; internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592)); } LA41: ; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = rdloc_538188_839829468((&tup0)); LOC43[1] = manglerecfieldname_534361_839829468((*(*(*t0).n).kindU.S6.sons->data[i_543846_839829468]).kindU.S4.sym, t0); field0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC43, 2); } LA33: ; putlocintodest_539258_839829468(p0, (&(*v0).loc), (&field0)); } LA21: ; res_543917_839829468 += ((NI) 1); } LA20: ; } } }BeforeRet: ; 
/* NOTE(review): this file appears to be machine-generated C emitted by the Nim
 * compiler's C backend (N_NIMCALL / nimcache-style mangled identifiers).
 * Prefer regenerating from the Nim sources over hand-editing; the comments
 * below document only control flow that is visible in this chunk. */
/* Closing brace of the preceding generated function (its head is on the
 * previous source line). */
} 
/* genvarstmt: walks every son of node n0 and generates code for it.  A son of
 * kind 125 is skipped; a son of kind 35 is routed to gensinglevar when its
 * first child is a symbol (kind 3) and to genclosurevar otherwise; all
 * remaining kinds go to genvartuple. */
N_NIMCALL(void, genvarstmt_544854_839829468)(Tcproc529021* p0, Tnode292802* n0) { { NI i_544869_839829468; NI HEX3Atmp_544902_839829468; NI LOC2; NI res_544905_839829468; i_544869_839829468 = (NI)0; HEX3Atmp_544902_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_295351_850551059(n0); HEX3Atmp_544902_839829468 = (NI)(LOC2 - ((NI) 1)); res_544905_839829468 = ((NI) 0); { while (1) { if (!(res_544905_839829468 <= HEX3Atmp_544902_839829468)) goto LA4; i_544869_839829468 = res_544905_839829468; { Tnode292802* a0; a0 = (*n0).kindU.S6.sons->data[i_544869_839829468]; { if (!((*a0).kind == ((Tnodekind292020) 125))) goto LA8; goto LA5; } LA8: ; { if (!((*a0).kind == ((Tnodekind292020) 35))) goto LA12; { if (!((*(*a0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3))) goto LA16; gensinglevar_544276_839829468(p0, a0); } goto LA14; LA16: ; { genclosurevar_544832_839829468(p0, a0); } LA14: ; } goto LA10; LA12: ; { genvartuple_543794_839829468(p0, a0); } LA10: ; } LA5: ; res_544905_839829468 += ((NI) 1); } LA4: ; } } } 
/* emitlazily: returns true when global-options bit 2 is set, or when the
 * module owning symbol s0 (via getmodule) has flag bit 25 set. */
static N_INLINE(NIM_BOOL, emitlazily_532248_839829468)(Tsym292834* s0) { NIM_BOOL result0; NIM_BOOL LOC1; Tsym292834* LOC3; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0); if (LOC1) goto LA2; LOC3 = (Tsym292834*)0; LOC3 = getmodule_299123_2984716966(s0); LOC1 = (((*LOC3).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0); LA2: ; result0 = LOC1; return result0; } 
/* genconststmt: head only -- the body continues on the next source line. */
N_NIMCALL(void, genconststmt_544909_839829468)(Tcproc529021* p0, Tnode292802* t0) { { NI i_544924_839829468; NI HEX3Atmp_544975_839829468; NI LOC2; NI res_544978_839829468; i_544924_839829468 = (NI)0; HEX3Atmp_544975_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_295351_850551059(t0); HEX3Atmp_544975_839829468 = (NI)(LOC2 - ((NI) 1)); res_544978_839829468 = ((NI) 0); { while (1) { if (!(res_544978_839829468 <= HEX3Atmp_544975_839829468)) goto LA4; i_544924_839829468 = res_544978_839829468; {
Tnode292802* it0; Tsym292834* c0; it0 = (*t0).kindU.S6.sons->data[i_544924_839829468]; { if (!((*it0).kind == ((Tnodekind292020) 125))) goto LA8; goto LA5; } LA8: ; { if (!!(((*it0).kind == ((Tnodekind292020) 102)))) goto LA12; internalerror_196100_155036129((*t0).info, ((NimStringDesc*) &T839829468_593)); } LA12: ; c0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { NIM_BOOL LOC16; LOC16 = (NIM_BOOL)0; LOC16 = containscompiletimeonly_328721_3876443242((*c0).typ); if (!LOC16) goto LA17; goto LA5; } goto LA14; LA17: ; { NIM_BOOL LOC20; NIM_BOOL LOC21; NI LOC24; LOC20 = (NIM_BOOL)0; LOC21 = (NIM_BOOL)0; LOC21 = ((17629200 &((NU64)1<<((NU)((*(*c0).typ).kind)&63U)))!=0); if (!(LOC21)) goto LA22; LOC21 = !((((*c0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)); LA22: ; LOC20 = LOC21; if (!(LOC20)) goto LA23; LOC24 = (NI)0; LOC24 = len_293081_850551059((*c0).ast); LOC20 = !((LOC24 == ((NI) 0))); LA23: ; if (!LOC20) goto LA25; { NIM_BOOL LOC29; LOC29 = (NIM_BOOL)0; LOC29 = emitlazily_532248_839829468(c0); if (!!(LOC29)) goto LA30; requestconstimpl_539240_839829468(p0, c0); } LA30: ; } goto LA14; LA25: ; LA14: ; } LA5: ; res_544978_839829468 += ((NI) 1); } LA4: ; } } } N_NIMCALL(void, gencasestringbranch_547100_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, Ropeobj178006* labl0, Ropeobj178006** branches0, NI branches0Len0) { Tloc292816 x0; NI length0; memset((void*)(&x0), 0, sizeof(x0)); length0 = sonslen_295351_850551059(b0); { NI i_547122_839829468; NI HEX3Atmp_547410_839829468; NI res_547413_839829468; i_547122_839829468 = (NI)0; HEX3Atmp_547410_839829468 = (NI)0; HEX3Atmp_547410_839829468 = (NI)(length0 - ((NI) 2)); res_547413_839829468 = ((NI) 0); { while (1) { NI j0; NI64 LOC4; TY535238 LOC5; if (!(res_547413_839829468 <= HEX3Atmp_547410_839829468)) goto LA3; i_547122_839829468 = res_547413_839829468; initlocexpr_539283_839829468(p0, (*b0).kindU.S6.sons->data[i_547122_839829468], (&x0)); LOC4 = (NI64)0; LOC4 = 
/* Tail of gencasestringbranch (head on the previous source line): hashes the
 * branch's string literal, masks the hash into bucket index j0 via
 * (branches0Len0-1), and appends the comparison/goto code to branches0[j0]. */
hashstring_528100_2036603609((*(*b0).kindU.S6.sons->data[i_547122_839829468]).kindU.S3.strval); j0 = ((NI) ((NI64)(LOC4 & ((NI64) ((branches0Len0-1)))))); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_538188_839829468(e0); LOC5[1] = rdloc_538188_839829468((&x0)); LOC5[2] = labl0; appcg_532632_839829468((*p0).module, &branches0[j0], ((NimStringDesc*) &T839829468_595), LOC5, 3); res_547413_839829468 += ((NI) 1); } LA3: ; } } } 
/* exprblock: wraps expr(p0, n0, d0) between startblock and endblock calls. */
N_NIMCALL(void, exprblock_544103_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { TY533289 LOC1; NI LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (NI)0; LOC2 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC1, 0); expr_539248_839829468(p0, n0, d0); endblock_544060_839829468(p0); } 
/* gencasesecondpass: emits labelled branch bodies for sons 1..until0 of the
 * case node and returns an end label; continues on the next source line. */
N_NIMCALL(Ropeobj178006*, gencasesecondpass_546965_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NI labid0, NI until0) { Ropeobj178006* result0; Ropeobj178006* lend0; result0 = (Ropeobj178006*)0; lend0 = getlabel_539217_839829468(p0); { NI i_546984_839829468; NI res_547017_839829468; i_546984_839829468 = (NI)0; res_547017_839829468 = ((NI) 1); { while (1) { TY178507 LOC10; if (!(res_547017_839829468 <= until0)) goto LA3; i_546984_839829468 = res_547017_839829468; { NIM_BOOL LOC6; LOC6 = (NIM_BOOL)0; LOC6 = ((*d0).k == ((Tlockind292808) 1)); if (!(LOC6)) goto LA7; LOC6 = isemptytype_297441_850551059((*t0).typ); LA7: ; if (!LOC6) goto LA8; (*d0).k = ((Tlockind292808) 0); } LA8: ; memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rope_178401_2381377266(((NI64) ((NI)(labid0 + i_546984_839829468)))); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_599), LOC10, 1); { NI length0; TY178507 LOC15; if (!((*(*t0).kindU.S6.sons->data[i_546984_839829468]).kind == ((Tnodekind292020) 85))) goto LA13; length0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i_546984_839829468]); exprblock_544103_839829468(p0, 
(*(*t0).kindU.S6.sons->data[i_546984_839829468]).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = lend0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC15, 1); } goto LA11; LA13: ; { exprblock_544103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_546984_839829468]).kindU.S6.sons->data[((NI) 0)], d0); } LA11: ; res_547017_839829468 += ((NI) 1); } LA3: ; } } result0 = lend0; return result0; } N_NIMCALL(void, gencasegenericbranch_546910_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj178006* labl0) { Tloc292816 x0; Tloc292816 y0; NI length0; memset((void*)(&x0), 0, sizeof(x0)); memset((void*)(&y0), 0, sizeof(y0)); length0 = sonslen_295351_850551059(b0); { NI i_546932_839829468; NI HEX3Atmp_546958_839829468; NI res_546961_839829468; i_546932_839829468 = (NI)0; HEX3Atmp_546958_839829468 = (NI)0; HEX3Atmp_546958_839829468 = (NI)(length0 - ((NI) 2)); res_546961_839829468 = ((NI) 0); { while (1) { if (!(res_546961_839829468 <= HEX3Atmp_546958_839829468)) goto LA3; i_546932_839829468 = res_546961_839829468; { TY535235 LOC8; if (!((*(*b0).kindU.S6.sons->data[i_546932_839829468]).kind == ((Tnodekind292020) 44))) goto LA6; initlocexpr_539283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_546932_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0)); initlocexpr_539283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_546932_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdcharloc_538227_839829468(e0); LOC8[1] = rdcharloc_538227_839829468((&x0)); LOC8[2] = rdcharloc_538227_839829468((&y0)); LOC8[3] = labl0; linecg_532707_839829468(p0, ((Tcprocsection529011) 2), rangeformat0, LOC8, 4); } goto LA4; LA6: ; { TY535238 LOC10; initlocexpr_539283_839829468(p0, (*b0).kindU.S6.sons->data[i_546932_839829468], (&x0)); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = 
rdcharloc_538227_839829468(e0); LOC10[1] = rdcharloc_538227_839829468((&x0)); LOC10[2] = labl0; linecg_532707_839829468(p0, ((Tcprocsection529011) 2), eqformat0, LOC10, 3); } LA4: ; res_546961_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(Ropeobj178006*, genifforcaseuntil_547021_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc292816* a0) { Ropeobj178006* result0; NI labid0; result0 = (Ropeobj178006*)0; labid0 = (*p0).labels; { NI i_547042_839829468; NI res_547083_839829468; i_547042_839829468 = (NI)0; res_547083_839829468 = ((NI) 1); { while (1) { if (!(res_547083_839829468 <= until0)) goto LA3; i_547042_839829468 = res_547083_839829468; (*p0).labels += ((NI) 1); { Ropeobj178006* LOC8; Ropeobj178006* LOC9; if (!((*(*t0).kindU.S6.sons->data[i_547042_839829468]).kind == ((Tnodekind292020) 85))) goto LA6; LOC8 = (Ropeobj178006*)0; LOC8 = rope_178401_2381377266(((NI64) ((*p0).labels))); LOC9 = (Ropeobj178006*)0; LOC9 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC8); gencasegenericbranch_546910_839829468(p0, (*t0).kindU.S6.sons->data[i_547042_839829468], a0, rangeformat0, eqformat0, LOC9); } goto LA4; LA6: ; { TY178507 LOC11; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rope_178401_2381377266(((NI64) ((*p0).labels))); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC11, 1); } LA4: ; res_547083_839829468 += ((NI) 1); } LA3: ; } } { NI LOC14; NI gototarget0; TY178507 LOC17; TY178507 LOC18; LOC14 = (NI)0; LOC14 = len_293081_850551059(t0); if (!(until0 < (NI)(LOC14 - ((NI) 1)))) goto LA15; (*p0).labels += ((NI) 1); gototarget0 = (*p0).labels; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rope_178401_2381377266(((NI64) (gototarget0))); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC17, 1); result0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), until0); 
memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rope_178401_2381377266(((NI64) (gototarget0))); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_599), LOC18, 1); } goto LA12; LA15: ; { result0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), until0); } LA12: ; return result0; } N_NIMCALL(void, gencasegeneric_547087_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0) { Tloc292816 a0; Ropeobj178006* lend0; NI LOC1; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC1 = (NI)0; LOC1 = sonslen_295351_850551059(t0); lend0 = genifforcaseuntil_547021_839829468(p0, t0, d0, rangeformat0, eqformat0, (NI)(LOC1 - ((NI) 1)), (&a0)); fixlabel_539230_839829468(p0, lend0); } N_NIMCALL(void, genstringcase_547417_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) { NI strings0; strings0 = ((NI) 0); { NI i_547435_839829468; NI HEX3Atmp_547550_839829468; NI LOC2; NI res_547553_839829468; i_547435_839829468 = (NI)0; HEX3Atmp_547550_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_295351_850551059(t0); HEX3Atmp_547550_839829468 = (NI)(LOC2 - ((NI) 1)); res_547553_839829468 = ((NI) 1); { while (1) { if (!(res_547553_839829468 <= HEX3Atmp_547550_839829468)) goto LA4; i_547435_839829468 = res_547553_839829468; { NI LOC9; if (!((*(*t0).kindU.S6.sons->data[i_547435_839829468]).kind == ((Tnodekind292020) 85))) goto LA7; LOC9 = (NI)0; LOC9 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i_547435_839829468]); strings0 += (NI)(LOC9 - ((NI) 1)); } LA7: ; res_547553_839829468 += ((NI) 1); } LA4: ; } } { NI bitmask0; NI LOC14; TY191350* branches0; Tloc292816 a0; NI labid0; TY532811 LOC26; TY533289 LOC35; Ropeobj178006* lend0; NI LOC42; if (!(((NI) 8) < strings0)) goto LA12; LOC14 = (NI)0; LOC14 = nextpoweroftwo_101629_1009420244(strings0); bitmask0 = (NI)(LOC14 - ((NI) 1)); branches0 = (TY191350*)0; 
branches0 = (TY191350*) newSeq((&NTI191350), ((NI) ((NI)(bitmask0 + ((NI) 1))))); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); labid0 = (*p0).labels; { NI i_547484_839829468; NI HEX3Atmp_547560_839829468; NI LOC16; NI res_547563_839829468; i_547484_839829468 = (NI)0; HEX3Atmp_547560_839829468 = (NI)0; LOC16 = (NI)0; LOC16 = sonslen_295351_850551059(t0); HEX3Atmp_547560_839829468 = (NI)(LOC16 - ((NI) 1)); res_547563_839829468 = ((NI) 1); { while (1) { if (!(res_547563_839829468 <= HEX3Atmp_547560_839829468)) goto LA18; i_547484_839829468 = res_547563_839829468; (*p0).labels += ((NI) 1); { Ropeobj178006* LOC23; Ropeobj178006* LOC24; if (!((*(*t0).kindU.S6.sons->data[i_547484_839829468]).kind == ((Tnodekind292020) 85))) goto LA21; LOC23 = (Ropeobj178006*)0; LOC23 = rope_178401_2381377266(((NI64) ((*p0).labels))); LOC24 = (Ropeobj178006*)0; LOC24 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC23); gencasestringbranch_547100_839829468(p0, (*t0).kindU.S6.sons->data[i_547484_839829468], (&a0), LOC24, branches0->data, branches0->Sup.len); } goto LA19; LA21: ; { } LA19: ; res_547563_839829468 += ((NI) 1); } LA18: ; } } memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = rdloc_538188_839829468((&a0)); LOC26[1] = rope_178401_2381377266(((NI64) (bitmask0))); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_596), LOC26, 2); { NI j_547518_839829468; NI HEX3Atmp_547568_839829468; NI res_547571_839829468; j_547518_839829468 = (NI)0; HEX3Atmp_547568_839829468 = (NI)0; HEX3Atmp_547568_839829468 = (branches0 ? 
(branches0->Sup.len-1) : -1); res_547571_839829468 = ((NI) 0); { while (1) { if (!(res_547571_839829468 <= HEX3Atmp_547568_839829468)) goto LA29; j_547518_839829468 = res_547571_839829468; { TY532811 LOC34; if (!!((branches0->data[j_547518_839829468] == NIM_NIL))) goto LA32; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = intliteral_539270_839829468(((NI64) (j_547518_839829468))); LOC34[1] = branches0->data[j_547518_839829468]; linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_597), LOC34, 2); } LA32: ; res_547571_839829468 += ((NI) 1); } LA29: ; } } memset((void*)LOC35, 0, sizeof(LOC35)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC35, 0); { NI LOC38; TY178507 LOC41; LOC38 = (NI)0; LOC38 = sonslen_295351_850551059(t0); if (!!(((*(*t0).kindU.S6.sons->data[(NI)(LOC38 - ((NI) 1))]).kind == ((Tnodekind292020) 85)))) goto LA39; memset((void*)LOC41, 0, sizeof(LOC41)); LOC41[0] = rope_178401_2381377266(((NI64) ((*p0).labels))); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC41, 1); } LA39: ; LOC42 = (NI)0; LOC42 = sonslen_295351_850551059(t0); lend0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), (NI)(LOC42 - ((NI) 1))); fixlabel_539230_839829468(p0, lend0); } goto LA10; LA12: ; { gencasegeneric_547087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_490), ((NimStringDesc*) &T839829468_595)); } LA10: ; } N_NIMCALL(void, gengotoforcase_545673_839829468)(Tcproc529021* p0, Tnode292802* casestmt0) { { { NI i_545695_839829468; NI HEX3Atmp_545737_839829468; NI LOC2; NI res_545740_839829468; i_545695_839829468 = (NI)0; HEX3Atmp_545737_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = len_293081_850551059(casestmt0); HEX3Atmp_545737_839829468 = (LOC2 - 1); res_545740_839829468 = ((NI) 1); { while (1) { TY533289 LOC5; NI LOC6; Tnode292802* it0; Tnode292802* LOC16; if (!(res_545740_839829468 <= HEX3Atmp_545737_839829468)) goto LA4; 
i_545695_839829468 = res_545740_839829468; memset((void*)LOC5, 0, sizeof(LOC5)); LOC6 = (NI)0; LOC6 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC5, 0); it0 = (*casestmt0).kindU.S6.sons->data[i_545695_839829468]; { NI j_545711_839829468; NI HEX3Atmp_545730_839829468; NI LOC8; NI res_545733_839829468; j_545711_839829468 = (NI)0; HEX3Atmp_545730_839829468 = (NI)0; LOC8 = (NI)0; LOC8 = len_293081_850551059(it0); HEX3Atmp_545730_839829468 = (NI)(LOC8 - ((NI) 2)); res_545733_839829468 = ((NI) 0); { while (1) { NI64 val0; TY178507 LOC15; if (!(res_545733_839829468 <= HEX3Atmp_545730_839829468)) goto LA10; j_545711_839829468 = res_545733_839829468; { if (!((*(*it0).kindU.S6.sons->data[j_545711_839829468]).kind == ((Tnodekind292020) 44))) goto LA13; localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579)); goto BeforeRet; } LA13: ; val0 = getordvalue_320129_3876443242((*it0).kindU.S6.sons->data[j_545711_839829468]); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = rope_178401_2381377266(val0); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_602), LOC15, 1); res_545733_839829468 += ((NI) 1); } LA10: ; } } LOC16 = (Tnode292802*)0; LOC16 = lastson_295364_850551059(it0); genstmts_539244_839829468(p0, LOC16); endblock_544060_839829468(p0); res_545740_839829468 += ((NI) 1); } LA4: ; } } }BeforeRet: ; } N_NIMCALL(NIM_BOOL, branchhastoobigrange_547576_839829468)(Tnode292802* b0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { NI i_547591_839829468; NI HEX3Atmp_547609_839829468; NI LOC2; NI res_547612_839829468; i_547591_839829468 = (NI)0; HEX3Atmp_547609_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_295351_850551059(b0); HEX3Atmp_547609_839829468 = (NI)(LOC2 - ((NI) 2)); res_547612_839829468 = ((NI) 0); { while (1) { if (!(res_547612_839829468 <= HEX3Atmp_547609_839829468)) goto LA4; i_547591_839829468 = res_547612_839829468; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = 
((*(*b0).kindU.S6.sons->data[i_547591_839829468]).kind == ((Tnodekind292020) 44)); if (!(LOC7)) goto LA8; LOC7 = (IL64(256) < (NI64)((*(*(*b0).kindU.S6.sons->data[i_547591_839829468]).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval - (*(*(*b0).kindU.S6.sons->data[i_547591_839829468]).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval)); LA8: ; if (!LOC7) goto LA9; result0 = NIM_TRUE; goto BeforeRet; } LA9: ; res_547612_839829468 += ((NI) 1); } LA4: ; } } }BeforeRet: ; return result0; } N_NIMCALL(NI, ifswitchsplitpoint_547616_839829468)(Tcproc529021* p0, Tnode292802* n0) { NI result0; result0 = (NI)0; { NI i_547631_839829468; NI HEX3Atmp_547655_839829468; NI LOC2; NI res_547658_839829468; i_547631_839829468 = (NI)0; HEX3Atmp_547655_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = len_293081_850551059(n0); HEX3Atmp_547655_839829468 = (NI)(LOC2 - ((NI) 1)); res_547658_839829468 = ((NI) 1); { while (1) { Tnode292802* branch0; Tnode292802* stmtblock0; if (!(res_547658_839829468 <= HEX3Atmp_547655_839829468)) goto LA4; i_547631_839829468 = res_547658_839829468; branch0 = HEX5BHEX5D_293238_850551059(n0, i_547631_839829468); stmtblock0 = lastson_295364_850551059(branch0); { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = stmtscontainpragma_528083_2036603609(stmtblock0, ((Tspecialword275003) 181)); if (!LOC7) goto LA8; result0 = i_547631_839829468; } goto LA5; LA8: ; { if (!!(((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 0))&7U)))!=0))) goto LA11; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = ((*branch0).kind == ((Tnodekind292020) 85)); if (!(LOC15)) goto LA16; LOC15 = branchhastoobigrange_547576_839829468(branch0); LA16: ; if (!LOC15) goto LA17; result0 = i_547631_839829468; } LA17: ; } goto LA5; LA11: ; LA5: ; res_547658_839829468 += ((NI) 1); } LA4: ; } } return result0; } N_NIMCALL(void, genordinalcase_547725_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { NI splitpoint0; Tloc292816 a0; Ropeobj178006* lend0; splitpoint0 = 
ifswitchsplitpoint_547616_839829468(p0, n0); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); { if (!(((NI) 0) < splitpoint0)) goto LA3; lend0 = genifforcaseuntil_547021_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601), splitpoint0, (&a0)); } goto LA1; LA3: ; { lend0 = NIM_NIL; } LA1: ; { NI LOC8; TY178507 LOC11; NIM_BOOL hasdefault0; TY533289 LOC37; LOC8 = (NI)0; LOC8 = len_293081_850551059(n0); if (!((NI)(splitpoint0 + ((NI) 1)) < LOC8)) goto LA9; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rdcharloc_538227_839829468((&a0)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_603), LOC11, 1); hasdefault0 = NIM_FALSE; { NI i_547758_839829468; NI HEX3Atmp_547817_839829468; NI HEX3Atmp_547818_839829468; NI LOC13; NI res_547821_839829468; i_547758_839829468 = (NI)0; HEX3Atmp_547817_839829468 = (NI)0; HEX3Atmp_547818_839829468 = (NI)0; HEX3Atmp_547817_839829468 = (NI)(splitpoint0 + ((NI) 1)); LOC13 = (NI)0; LOC13 = len_293081_850551059(n0); HEX3Atmp_547818_839829468 = (LOC13 - 1); res_547821_839829468 = HEX3Atmp_547817_839829468; { while (1) { Tnode292802* branch0; Tnode292802* LOC28; TY533289 LOC29; if (!(res_547821_839829468 <= HEX3Atmp_547818_839829468)) goto LA15; i_547758_839829468 = res_547821_839829468; { NIM_BOOL LOC18; LOC18 = (NIM_BOOL)0; LOC18 = ((*d0).k == ((Tlockind292808) 1)); if (!(LOC18)) goto LA19; LOC18 = isemptytype_297441_850551059((*n0).typ); LA19: ; if (!LOC18) goto LA20; (*d0).k = ((Tlockind292808) 0); } LA20: ; branch0 = HEX5BHEX5D_293238_850551059(n0, i_547758_839829468); { if (!((*branch0).kind == ((Tnodekind292020) 85))) goto LA24; gencaserange_537028_839829468(p0, branch0); } goto LA22; LA24: ; { TY533289 LOC27; memset((void*)LOC27, 0, sizeof(LOC27)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_181), LOC27, 0); hasdefault0 = NIM_TRUE; } LA22: ; LOC28 = 
(Tnode292802*)0; LOC28 = lastson_295364_850551059(branch0); exprblock_544103_839829468(p0, LOC28, d0); memset((void*)LOC29, 0, sizeof(LOC29)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_182), LOC29, 0); res_547821_839829468 += ((NI) 1); } LA15: ; } } { NIM_BOOL LOC32; TY533289 LOC36; LOC32 = (NIM_BOOL)0; LOC32 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 3))&7U)))!=0); if (!(LOC32)) goto LA33; LOC32 = !(hasdefault0); LA33: ; if (!LOC32) goto LA34; memset((void*)LOC36, 0, sizeof(LOC36)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_604), LOC36, 0); } LA34: ; memset((void*)LOC37, 0, sizeof(LOC37)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC37, 0); } LA9: ; { if (!!((lend0 == NIM_NIL))) goto LA40; fixlabel_539230_839829468(p0, lend0); } LA40: ; } N_NIMCALL(void, gencase_547827_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) { Ttype292840* LOC8; genlinedir_532823_839829468(p0, t0); { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_297441_850551059((*t0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind292808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA6: ; LOC8 = (Ttype292840*)0; LOC8 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440)); switch ((*LOC8).kind) { case ((Ttypekind292244) 28): { genstringcase_547417_839829468(p0, t0, d0); } break; case ((Ttypekind292244) 36) ... 
((Ttypekind292244) 39): { gencasegeneric_547087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601)); } break; default: { { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)); if (!(LOC14)) goto LA15; LOC14 = (((*(*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0); LA15: ; if (!LOC14) goto LA16; gengotoforcase_545673_839829468(p0, t0); } goto LA12; LA16: ; { genordinalcase_547725_839829468(p0, t0, d0); } LA12: ; } break; } } static N_INLINE(Tnode292802*, pop_318246_1689653243)(Tnodeseq292796** s0) { Tnode292802* result0; NI L0; result0 = (Tnode292802*)0; L0 = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1)); result0 = (*s0)->data[L0]; (*s0) = (Tnodeseq292796*) setLengthSeq(&((*s0))->Sup, sizeof(Tnode292802*), ((NI) (L0))); return result0; } N_NIMCALL(void, blockleaveactions_545442_839829468)(Tcproc529021* p0, NI howmanytrys0, NI howmanyexcepts0) { Tnodeseq292796* stack0; NI alreadypoppedcnt0; stack0 = (Tnodeseq292796*)0; stack0 = (Tnodeseq292796*) newSeq((&NTI292796), ((NI) 0)); alreadypoppedcnt0 = (*p0).inexceptblock; { NI i_545471_839829468; NI res_545596_839829468; i_545471_839829468 = (NI)0; res_545596_839829468 = ((NI) 1); { while (1) { Tnode292802* trystmt0; Tnode292802* finallystmt0; if (!(res_545596_839829468 <= howmanytrys0)) goto LA3; i_545471_839829468 = res_545596_839829468; { NIM_BOOL LOC6; LOC6 = (NIM_BOOL)0; LOC6 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC6) goto LA7; LOC6 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA7: ; if (!!(LOC6)) goto LA8; { if (!(((NI) 0) < alreadypoppedcnt0)) goto LA12; alreadypoppedcnt0 -= ((NI) 1); } goto LA10; LA12: ; { TY533289 LOC15; memset((void*)LOC15, 0, sizeof(LOC15)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC15, 0); } LA10: ; } LA8: ; trystmt0 = 
pop_318246_1689653243((&(*p0).nestedtrystmts)); stack0 = (Tnodeseq292796*) incrSeqV2(&(stack0)->Sup, sizeof(Tnode292802*)); asgnRefNoCycle((void**) (&stack0->data[stack0->Sup.len]), trystmt0); ++stack0->Sup.len; finallystmt0 = lastson_295364_850551059(trystmt0); { if (!((*finallystmt0).kind == ((Tnodekind292020) 107))) goto LA18; genstmts_539244_839829468(p0, (*finallystmt0).kindU.S6.sons->data[((NI) 0)]); } LA18: ; res_545596_839829468 += ((NI) 1); } LA3: ; } } { NI i_545546_839829468; NI HEX3Atmp_545601_839829468; NI res_545604_839829468; i_545546_839829468 = (NI)0; HEX3Atmp_545601_839829468 = (NI)0; HEX3Atmp_545601_839829468 = (NI)(howmanytrys0 - ((NI) 1)); res_545604_839829468 = HEX3Atmp_545601_839829468; { while (1) { if (!(((NI) 0) <= res_545604_839829468)) goto LA22; i_545546_839829468 = res_545604_839829468; (*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*)); asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), stack0->data[i_545546_839829468]); ++(*p0).nestedtrystmts->Sup.len; res_545604_839829468 -= ((NI) 1); } LA22: ; } } { NIM_BOOL LOC25; LOC25 = (NIM_BOOL)0; LOC25 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC25) goto LA26; LOC25 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA26: ; if (!!(LOC25)) goto LA27; { NI i_545587_839829468; NI HEX3Atmp_545610_839829468; NI res_545613_839829468; i_545587_839829468 = (NI)0; HEX3Atmp_545610_839829468 = (NI)0; HEX3Atmp_545610_839829468 = (NI)(howmanyexcepts0 - ((NI) 1)); res_545613_839829468 = HEX3Atmp_545610_839829468; { while (1) { TY533289 LOC32; if (!(((NI) 0) <= res_545613_839829468)) goto LA31; i_545587_839829468 = res_545613_839829468; memset((void*)LOC32, 0, sizeof(LOC32)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC32, 0); res_545613_839829468 -= ((NI) 1); } LA31: ; } } } LA27: ; } N_NIMCALL(void, 
genreturnstmt_545617_839829468)(Tcproc529021* p0, Tnode292802* t0) { TY533289 LOC14; { { if (!(((*t0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0)) goto LA3; goto BeforeRet; } LA3: ; (*p0).beforeretneeded = NIM_TRUE; genlinedir_532823_839829468(p0, t0); { if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA7; genstmts_539244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)]); } LA7: ; blockleaveactions_545442_839829468(p0, ((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0), (*p0).inexceptblock); { Ropeobj178006* safepoint0; TY178507 LOC13; if (!(((NI) 0) < ((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0))) goto LA11; safepoint0 = (*p0).finallysafepoints->data[(NI)(((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0) - ((NI) 1))]; memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = safepoint0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_607), LOC13, 1); } LA11: ; memset((void*)LOC14, 0, sizeof(LOC14)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_608), LOC14, 0); }BeforeRet: ; } N_NIMCALL(void, genbreakstmt_546444_839829468)(Tcproc529021* p0, Tnode292802* t0) { NI idx0; Ropeobj178006* label0; TY178507 LOC16; idx0 = (*p0).breakidx; { Tsym292834* sym0; if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA3; sym0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; idx0 = (NI)((*sym0).position - ((NI) 1)); } goto LA1; LA3: ; { { while (1) { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = (((NI) 0) <= idx0); if (!(LOC8)) goto LA9; LOC8 = !((*p0).blocks->data[idx0].isloop); LA9: ; if (!LOC8) goto LA7; idx0 -= ((NI) 1); } LA7: ; } { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = (idx0 < ((NI) 0)); if (LOC12) goto LA13; LOC12 = !((*p0).blocks->data[idx0].isloop); LA13: ; if (!LOC12) goto LA14; internalerror_196100_155036129((*t0).info, ((NimStringDesc*) &T839829468_609)); } LA14: ; } LA1: 
; label0 = assignlabel_544020_839829468((&(*p0).blocks->data[idx0])); blockleaveactions_545442_839829468(p0, (NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) ((*p0).blocks->data[idx0].nestedtrystmts))), (NI)((*p0).inexceptblock - ((NI) ((*p0).blocks->data[idx0].nestedexceptstmts)))); genlinedir_532823_839829468(p0, t0); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = label0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC16, 1); } N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_549080_839829468)(Tcproc529021* p0, Tnode292802* asgn0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { Tnode292802* le0; if (!(((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0)) goto LA3; le0 = (*asgn0).kindU.S6.sons->data[((NI) 0)]; { Tsym292834* field0; if (!((*le0).kind == ((Tnodekind292020) 46))) goto LA7; field0 = (*(*(*le0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag292184) 18))&31U)))!=0); } goto LA5; LA7: ; { Tsym292834* field0; if (!((*le0).kind == ((Tnodekind292020) 45))) goto LA10; field0 = (*(*le0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag292184) 18))&31U)))!=0); } goto LA5; LA10: ; LA5: ; } LA3: ; return result0; } N_NIMCALL(Ropeobj178006*, discriminatortabledecl_536094_839829468)(Tcgen529027* m0, Ttype292840* objtype0, Tsym292834* d0) { Ropeobj178006* result0; Ropeobj178006* LOC1; Ropeobj178006* tmp0; TY532811 LOC2; NI64 LOC3; result0 = (Ropeobj178006*)0; LOC1 = (Ropeobj178006*)0; LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_130)); tmp0 = discriminatortablename_536057_839829468(m0, objtype0, d0); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = tmp0; LOC3 = (NI64)0; LOC3 = lengthord_320007_3876443242((*d0).typ); LOC2[1] = rope_178401_2381377266((NI64)(LOC3 + IL64(1))); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_203), LOC2, 2); 
return result0; } N_NIMCALL(void, gendiscriminantcheck_549144_839829468)(Tcproc529021* p0, Tloc292816* a0, Tloc292816* tmp0, Ttype292840* objtype0, Tsym292834* field0) { Ttype292840* t0; Ropeobj178006* LOC1; NI64 L0; TY535235 LOC8; t0 = skiptypes_296099_850551059(objtype0, IL64(211106240964864)); LOC1 = (Ropeobj178006*)0; LOC1 = gentypeinfo_535941_839829468((*p0).module, t0); L0 = lengthord_320007_3876443242((*field0).typ); { NIM_BOOL LOC4; TY178507 LOC7; LOC4 = (NIM_BOOL)0; LOC4 = containsorincl_268862_2627731572((&(*(*p0).module).declaredthings), (*field0).Sup.id); if (!!(LOC4)) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = discriminatortabledecl_536094_839829468((*p0).module, t0, field0); appcg_532640_839829468((*p0).module, ((Tcfilesection529005) 9), ((NimStringDesc*) &T839829468_610), LOC7, 1); } LA5: ; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_538188_839829468(a0); LOC8[1] = rdloc_538188_839829468(tmp0); LOC8[2] = discriminatortablename_536057_839829468((*p0).module, t0, field0); LOC8[3] = intliteral_539270_839829468((NI64)(L0 + IL64(1))); linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_611), LOC8, 4); } N_NIMCALL(void, asgnfielddiscriminant_549209_839829468)(Tcproc529021* p0, Tnode292802* e0) { Tloc292816 a0; Tloc292816 tmp0; Tnode292802* dotexpr0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&tmp0), 0, sizeof(tmp0)); dotexpr0 = (*e0).kindU.S6.sons->data[((NI) 0)]; { if (!((*dotexpr0).kind == ((Tnodekind292020) 46))) goto LA3; dotexpr0 = (*dotexpr0).kindU.S6.sons->data[((NI) 0)]; } LA3: ; initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); gettemp_537032_839829468(p0, a0.t, (&tmp0), NIM_FALSE); expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0)); gendiscriminantcheck_549144_839829468(p0, (&a0), (&tmp0), (*(*dotexpr0).kindU.S6.sons->data[((NI) 0)]).typ, (*(*dotexpr0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym); genassignment_539264_839829468(p0, 
(&a0), (&tmp0), 0); } N_NIMCALL(void, genasgn_549239_839829468)(Tcproc529021* p0, Tnode292802* e0, NIM_BOOL fastasgn0) { genlinedir_532823_839829468(p0, e0); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)); if (!(LOC3)) goto LA4; LOC3 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; gengotovar_544258_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)]); } goto LA1; LA5: ; { NIM_BOOL LOC8; Tloc292816 a0; LOC8 = (NIM_BOOL)0; LOC8 = fielddiscriminantcheckneeded_549080_839829468(p0, e0); if (!!(LOC8)) goto LA9; memset((void*)(&a0), 0, sizeof(a0)); { Tnode292802* LOC13; Tnode292802* LOC16; LOC13 = (Tnode292802*)0; LOC13 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0)); if (!((*LOC13).kind == ((Tnodekind292020) 47) || (*LOC13).kind == ((Tnodekind292020) 65))) goto LA14; LOC16 = (Tnode292802*)0; LOC16 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0)); genderef_543921_839829468(p0, LOC16, (&a0), NIM_TRUE); } goto LA11; LA14: ; { initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); } LA11: ; { if (!fastasgn0) goto LA20; a0.flags |= ((NU16)1)<<((((Tlocflag292810) 2))%(sizeof(NU16)*8)); } LA20: ; loadinto_543928_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); } goto LA1; LA9: ; { asgnfielddiscriminant_549209_839829468(p0, e0); } LA1: ; } N_NIMCALL(Ropeobj178006*, genasmoremitstmt_548529_839829468)(Tcproc529021* p0, Tnode292802* t0, NIM_BOOL isasmstmt0) { Ropeobj178006* result0; NimStringDesc* res0; result0 = (Ropeobj178006*)0; res0 = copyString(((NimStringDesc*) &T839829468_490)); { NI i_548547_839829468; NI HEX3Atmp_548644_839829468; NI LOC2; NI res_548647_839829468; i_548547_839829468 = (NI)0; HEX3Atmp_548644_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_295351_850551059(t0); HEX3Atmp_548644_839829468 = (NI)(LOC2 - ((NI) 1)); res_548647_839829468 = ((NI) 0); 
{ while (1) { if (!(res_548647_839829468 <= HEX3Atmp_548644_839829468)) goto LA4; i_548547_839829468 = res_548647_839829468; switch ((*(*t0).kindU.S6.sons->data[i_548547_839829468]).kind) { case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22): { res0 = resizeString(res0, (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S3.strval->Sup.len + 0); appendString(res0, (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S3.strval); } break; case ((Tnodekind292020) 3): { Tsym292834* sym0; sym0 = (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S4.sym; { Tloc292816 a0; Ropeobj178006* LOC11; NimStringDesc* LOC12; if (!((28672 &(1U<<((NU)((*sym0).kind)&31U)))!=0)) goto LA9; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[i_548547_839829468], (&a0)); LOC11 = (Ropeobj178006*)0; LOC11 = rdloc_538188_839829468((&a0)); LOC12 = (NimStringDesc*)0; LOC12 = HEX24_178856_2381377266(LOC11); res0 = resizeString(res0, LOC12->Sup.len + 0); appendString(res0, LOC12); } goto LA7; LA9: ; { Ropeobj178006* LOC16; NimStringDesc* LOC17; if (!((*sym0).kind == ((Tsymkind292435) 7))) goto LA14; LOC16 = (Ropeobj178006*)0; LOC16 = gettypedesc_535673_839829468((*p0).module, (*sym0).typ); LOC17 = (NimStringDesc*)0; LOC17 = HEX24_178856_2381377266(LOC16); res0 = resizeString(res0, LOC17->Sup.len + 0); appendString(res0, LOC17); } goto LA7; LA14: ; { Ropeobj178006* r0; NimStringDesc* LOC23; r0 = (*sym0).loc.r; { if (!(r0 == NIM_NIL)) goto LA21; r0 = manglename_533205_839829468(sym0); asgnRefNoCycle((void**) (&(*sym0).loc.r), r0); } LA21: ; LOC23 = (NimStringDesc*)0; LOC23 = HEX24_178856_2381377266(r0); res0 = resizeString(res0, LOC23->Sup.len + 0); appendString(res0, LOC23); } LA7: ; } break; default: { internalerror_196100_155036129((*(*t0).kindU.S6.sons->data[i_548547_839829468]).info, ((NimStringDesc*) &T839829468_612)); } break; } res_548647_839829468 += ((NI) 1); } LA4: ; } } { NIM_BOOL LOC27; LOC27 = (NIM_BOOL)0; LOC27 = isasmstmt0; if 
(!(LOC27)) goto LA28; LOC27 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 5))&7U)))!=0); LA28: ; if (!LOC27) goto LA29; { NimStringDesc* x_548604_839829468; NI first_548656_839829468; NI last_548658_839829468; x_548604_839829468 = (NimStringDesc*)0; first_548656_839829468 = ((NI) 0); last_548658_839829468 = ((NI) 0); { while (1) { NI j0; { while (1) { if (!!((((NU8)(res0->data[last_548658_839829468])) == ((NU8)(0)) || ((NU8)(res0->data[last_548658_839829468])) == ((NU8)(13)) || ((NU8)(res0->data[last_548658_839829468])) == ((NU8)(10))))) goto LA35; last_548658_839829468 += ((NI) 1); } LA35: ; } x_548604_839829468 = copyStrLast(res0, first_548656_839829468, (NI)(last_548658_839829468 - ((NI) 1))); j0 = ((NI) 0); { while (1) { if (!(((NU8)(x_548604_839829468->data[j0])) == ((NU8)(32)) || ((NU8)(x_548604_839829468->data[j0])) == ((NU8)(9)))) goto LA37; j0 += ((NI) 1); } LA37: ; } { if (!(((NU8)(x_548604_839829468->data[j0])) == ((NU8)(34)) || ((NU8)(x_548604_839829468->data[j0])) == ((NU8)(58)))) goto LA40; add_178487_2381377266(&result0, x_548604_839829468); add_178487_2381377266(&result0, tnl_176644_4151366050); } goto LA38; LA40: ; { if (!!(((NU8)(x_548604_839829468->data[j0]) == (NU8)(0)))) goto LA43; add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_613)); add_178487_2381377266(&result0, x_548604_839829468); add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_614)); } goto LA38; LA43: ; LA38: ; { if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(10))) goto LA47; last_548658_839829468 += ((NI) 1); } goto LA45; LA47: ; { if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(13))) goto LA50; last_548658_839829468 += ((NI) 1); { if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(10))) goto LA54; last_548658_839829468 += ((NI) 1); } LA54: ; } goto LA45; LA50: ; { goto LA32; } LA45: ; first_548656_839829468 = last_548658_839829468; } } LA32: ; } } goto LA25; LA29: ; { res0 = 
resizeString(res0, tnl_176644_4151366050->Sup.len + 0); appendString(res0, tnl_176644_4151366050); result0 = rope_178277_2381377266(res0); } LA25: ; return result0; } N_NIMCALL(void, genasmstmt_548659_839829468)(Tcproc529021* p0, Tnode292802* t0) { Ropeobj178006* s0; genlinedir_532823_839829468(p0, t0); s0 = genasmoremitstmt_548529_839829468(p0, t0, NIM_TRUE); { TY178507 LOC5; if (!((*p0).prc == NIM_NIL)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = s0; addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 7))- 0], Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field17, LOC5, 1); } goto LA1; LA3: ; { TY178507 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = s0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field17, LOC7, 1); } LA1: ; } static N_INLINE(void, gensimpleblock_544095_839829468)(Tcproc529021* p0, Tnode292802* stmts0) { TY533289 LOC1; NI LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (NI)0; LOC2 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC1, 0); genstmts_539244_839829468(p0, stmts0); endblock_544060_839829468(p0); } N_NIMCALL(void, gentrycpp_547866_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) { Ropeobj178006* exc0; TY533289 LOC16; NI LOC17; NI length0; TY178507 LOC18; Ropeobj178006* LOC19; NI i0; NIM_BOOL catchallpresent0; TY533289 LOC78; Tnode292802* LOC79; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_297441_850551059((*t0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind292808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA6: ; genlinedir_532823_839829468(p0, t0); exc0 = gettempname_533598_839829468((*p0).module); { Tsym292834* LOC10; Ropeobj178006* LOC13; LOC10 = (Tsym292834*)0; LOC10 = getcompilerproc_338748_3937434831(((NimStringDesc*) &T839829468_615)); if (!!((LOC10 == NIM_NIL))) 
goto LA11; LOC13 = (Ropeobj178006*)0; LOC13 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615)); } goto LA8; LA11: ; { Ropeobj178006* LOC15; LOC15 = (Ropeobj178006*)0; LOC15 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616)); } LA8: ; (*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*)); asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0); ++(*p0).nestedtrystmts->Sup.len; memset((void*)LOC16, 0, sizeof(LOC16)); LOC17 = (NI)0; LOC17 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_617), LOC16, 0); expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0); length0 = sonslen_295351_850551059(t0); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = exc0; LOC19 = (Ropeobj178006*)0; LOC19 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_618), LOC18, 1); endblock_544035_839829468(p0, LOC19); { TY533289 LOC24; if (!(((*p0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA22; memset((void*)LOC24, 0, sizeof(LOC24)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_619), LOC24, 0); } LA22: ; (*p0).inexceptblock += ((NI) 1); i0 = ((NI) 1); catchallpresent0 = NIM_FALSE; { while (1) { NIM_BOOL LOC27; NI blen0; LOC27 = (NIM_BOOL)0; LOC27 = (i0 < length0); if (!(LOC27)) goto LA28; LOC27 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 87)); LA28: ; if (!LOC27) goto LA26; { NIM_BOOL LOC31; LOC31 = (NIM_BOOL)0; LOC31 = ((*d0).k == ((Tlockind292808) 1)); if (!(LOC31)) goto LA32; LOC31 = isemptytype_297441_850551059((*t0).typ); LA32: ; if (!LOC31) goto LA33; (*d0).k = ((Tlockind292808) 0); } LA33: ; blen0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i0]); { Ropeobj178006** LOC39; TY533289 LOC40; if (!(((NI) 1) < i0)) goto LA37; LOC39 = (Ropeobj178006**)0; LOC39 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); memset((void*)LOC40, 0, 
sizeof(LOC40)); addf_179205_2381377266(LOC39, ((NimStringDesc*) &T839829468_620), LOC40, 0); } LA37: ; { TY533289 LOC45; NI LOC46; TY533289 LOC47; if (!(blen0 == ((NI) 1))) goto LA43; catchallpresent0 = NIM_TRUE; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (NI)0; LOC46 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0); expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0); memset((void*)LOC47, 0, sizeof(LOC47)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC47, 0); endblock_544060_839829468(p0); } goto LA41; LA43: ; { Ropeobj178006* orexpr0; TY178507 LOC57; TY533289 LOC58; NI LOC59; TY533289 LOC60; orexpr0 = NIM_NIL; { NI j_547979_839829468; NI HEX3Atmp_548101_839829468; NI res_548104_839829468; j_547979_839829468 = (NI)0; HEX3Atmp_548101_839829468 = (NI)0; HEX3Atmp_548101_839829468 = (NI)(blen0 - ((NI) 2)); res_548104_839829468 = ((NI) 0); { while (1) { TY532811 LOC56; if (!(res_548104_839829468 <= HEX3Atmp_548101_839829468)) goto LA51; j_547979_839829468 = res_548104_839829468; { if (!!((orexpr0 == NIM_NIL))) goto LA54; add_178487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229)); } LA54: ; memset((void*)LOC56, 0, sizeof(LOC56)); LOC56[0] = exc0; LOC56[1] = gentypeinfo_535941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_547979_839829468]).typ); appcg_532632_839829468((*p0).module, &orexpr0, ((NimStringDesc*) &T839829468_621), LOC56, 2); res_548104_839829468 += ((NI) 1); } LA51: ; } } memset((void*)LOC57, 0, sizeof(LOC57)); LOC57[0] = orexpr0; linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_622), LOC57, 1); memset((void*)LOC58, 0, sizeof(LOC58)); LOC59 = (NI)0; LOC59 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC58, 0); expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0); 
memset((void*)LOC60, 0, sizeof(LOC60)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC60, 0); endblock_544060_839829468(p0); } LA41: ; i0 += ((NI) 1); } LA26: ; } { TY533289 LOC70; NI LOC71; Tnode292802* finallyblock0; TY533289 LOC76; Ropeobj178006* LOC77; if (!!(catchallpresent0)) goto LA63; { TY533289 LOC69; if (!(((NI) 1) < i0)) goto LA67; memset((void*)LOC69, 0, sizeof(LOC69)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_620), LOC69, 0); } LA67: ; memset((void*)LOC70, 0, sizeof(LOC70)); LOC71 = (NI)0; LOC71 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC70, 0); finallyblock0 = lastson_295364_850551059(t0); { if (!((*finallyblock0).kind == ((Tnodekind292020) 107))) goto LA74; genstmts_539244_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]); } LA74: ; memset((void*)LOC76, 0, sizeof(LOC76)); LOC77 = (Ropeobj178006*)0; LOC77 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_623), LOC76, 0); line_532690_839829468(p0, ((Tcprocsection529011) 2), LOC77); endblock_544060_839829468(p0); } LA63: ; memset((void*)LOC78, 0, sizeof(LOC78)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC78, 0); (*p0).inexceptblock -= ((NI) 1); LOC79 = (Tnode292802*)0; LOC79 = pop_318246_1689653243((&(*p0).nestedtrystmts)); { NIM_BOOL LOC82; LOC82 = (NIM_BOOL)0; LOC82 = (i0 < length0); if (!(LOC82)) goto LA83; LOC82 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 107)); LA83: ; if (!LOC82) goto LA84; gensimpleblock_544095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]); } LA84: ; } N_NIMCALL(void, line_532695_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* r0) { Ropeobj178006** LOC1; Ropeobj178006* LOC2; Ropeobj178006* LOC3; LOC1 = (Ropeobj178006**)0; LOC1 = s_529179_3723162438(p0, s0); LOC2 = (Ropeobj178006*)0; LOC2 = rope_178277_2381377266(r0); LOC3 = 
(Ropeobj178006*)0; LOC3 = indentline_532656_839829468(p0, LOC2); add_178482_2381377266(LOC1, LOC3); } static N_INLINE(Ropeobj178006*, pop_178530_1689653243)(TY191350** s0) { Ropeobj178006* result0; NI L0; result0 = (Ropeobj178006*)0; L0 = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1)); result0 = (*s0)->data[L0]; (*s0) = (TY191350*) setLengthSeq(&((*s0))->Sup, sizeof(Ropeobj178006*), ((NI) (L0))); return result0; } N_NIMCALL(void, gentry_548114_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) { NIM_BOOL LOC8; Ropeobj178006* safepoint0; TY178507 LOC17; TY178507 LOC18; TY178507 LOC37; NI LOC38; NI length0; TY533289 LOC39; TY533289 LOC40; NI LOC41; TY533289 LOC42; NI i0; Tnode292802* LOC95; TY178507 LOC103; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_297441_850551059((*t0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind292808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA6: ; LOC8 = (NIM_BOOL)0; LOC8 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_624)); genlinedir_532823_839829468(p0, t0); safepoint0 = gettempname_533598_839829468((*p0).module); { Tsym292834* LOC11; Ropeobj178006* LOC14; LOC11 = (Tsym292834*)0; LOC11 = getcompilerproc_338748_3937434831(((NimStringDesc*) &T839829468_615)); if (!!((LOC11 == NIM_NIL))) goto LA12; LOC14 = (Ropeobj178006*)0; LOC14 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615)); } goto LA9; LA12: ; { Ropeobj178006* LOC16; LOC16 = (Ropeobj178006*)0; LOC16 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616)); } LA9: ; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = safepoint0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_625), LOC17, 1); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = safepoint0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) 
&T839829468_626), LOC18, 1); { NIM_BOOL LOC21; TY178507 LOC24; LOC21 = (NIM_BOOL)0; LOC21 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_627)); if (!LOC21) goto LA22; memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = safepoint0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_628), LOC24, 1); } goto LA19; LA22: ; { NIM_BOOL LOC26; TY178507 LOC29; LOC26 = (NIM_BOOL)0; LOC26 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_629)); if (!LOC26) goto LA27; memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = safepoint0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_630), LOC29, 1); } goto LA19; LA27: ; { NIM_BOOL LOC31; TY178507 LOC34; LOC31 = (NIM_BOOL)0; LOC31 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_631)); if (!LOC31) goto LA32; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = safepoint0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_632), LOC34, 1); } goto LA19; LA32: ; { TY178507 LOC36; memset((void*)LOC36, 0, sizeof(LOC36)); LOC36[0] = safepoint0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_628), LOC36, 1); } LA19: ; memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = safepoint0; LOC38 = (NI)0; LOC38 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_633), LOC37, 1); length0 = sonslen_295351_850551059(t0); (*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*)); asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0); ++(*p0).nestedtrystmts->Sup.len; expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0); memset((void*)LOC39, 0, sizeof(LOC39)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC39, 0); endblock_544060_839829468(p0); memset((void*)LOC40, 0, sizeof(LOC40)); LOC41 = (NI)0; LOC41 = 
startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_634), LOC40, 0); memset((void*)LOC42, 0, sizeof(LOC42)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC42, 0); { TY533289 LOC47; if (!(((*p0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA45; memset((void*)LOC47, 0, sizeof(LOC47)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_619), LOC47, 0); } LA45: ; (*p0).inexceptblock += ((NI) 1); i0 = ((NI) 1); { while (1) { NIM_BOOL LOC50; NI blen0; LOC50 = (NIM_BOOL)0; LOC50 = (i0 < length0); if (!(LOC50)) goto LA51; LOC50 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 87)); LA51: ; if (!LOC50) goto LA49; { NIM_BOOL LOC54; LOC54 = (NIM_BOOL)0; LOC54 = ((*d0).k == ((Tlockind292808) 1)); if (!(LOC54)) goto LA55; LOC54 = isemptytype_297441_850551059((*t0).typ); LA55: ; if (!LOC54) goto LA56; (*d0).k = ((Tlockind292808) 0); } LA56: ; blen0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i0]); { TY533289 LOC67; NI LOC68; TY178507 LOC69; TY533289 LOC70; if (!(blen0 == ((NI) 1))) goto LA60; { TY533289 LOC66; if (!(((NI) 1) < i0)) goto LA64; memset((void*)LOC66, 0, sizeof(LOC66)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_635), LOC66, 0); } LA64: ; memset((void*)LOC67, 0, sizeof(LOC67)); LOC68 = (NI)0; LOC68 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC67, 0); memset((void*)LOC69, 0, sizeof(LOC69)); LOC69[0] = safepoint0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_636), LOC69, 1); expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0); memset((void*)LOC70, 0, sizeof(LOC70)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC70, 0); endblock_544060_839829468(p0); } goto LA58; LA60: ; { Ropeobj178006* orexpr0; TY178507 LOC91; NI LOC92; TY178507 LOC93; 
TY533289 LOC94; orexpr0 = NIM_NIL; { NI j_548247_839829468; NI HEX3Atmp_548521_839829468; NI res_548524_839829468; j_548247_839829468 = (NI)0; HEX3Atmp_548521_839829468 = (NI)0; HEX3Atmp_548521_839829468 = (NI)(blen0 - ((NI) 2)); res_548524_839829468 = ((NI) 0); { while (1) { NimStringDesc* isobjformat0; TY178507 LOC86; if (!(res_548524_839829468 <= HEX3Atmp_548521_839829468)) goto LA74; j_548247_839829468 = res_548524_839829468; { if (!!((orexpr0 == NIM_NIL))) goto LA77; add_178487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229)); } LA77: ; { NIM_BOOL LOC81; LOC81 = (NIM_BOOL)0; LOC81 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC81) goto LA82; LOC81 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA82: ; if (!!(LOC81)) goto LA83; isobjformat0 = copyString(((NimStringDesc*) &T839829468_637)); } goto LA79; LA83: ; { isobjformat0 = copyString(((NimStringDesc*) &T839829468_638)); } LA79: ; memset((void*)LOC86, 0, sizeof(LOC86)); LOC86[0] = gentypeinfo_535941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_548247_839829468]).typ); appcg_532632_839829468((*p0).module, &orexpr0, isobjformat0, LOC86, 1); res_548524_839829468 += ((NI) 1); } LA74: ; } } { if (!(((NI) 1) < i0)) goto LA89; line_532695_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_620)); } LA89: ; memset((void*)LOC91, 0, sizeof(LOC91)); LOC91[0] = orexpr0; LOC92 = (NI)0; LOC92 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_639), LOC91, 1); memset((void*)LOC93, 0, sizeof(LOC93)); LOC93[0] = safepoint0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_636), LOC93, 1); expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0); memset((void*)LOC94, 0, sizeof(LOC94)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC94, 0); endblock_544060_839829468(p0); 
} LA58: ; i0 += ((NI) 1); } LA49: ; } (*p0).inexceptblock -= ((NI) 1); LOC95 = (Tnode292802*)0; LOC95 = pop_318246_1689653243((&(*p0).nestedtrystmts)); endblock_544060_839829468(p0); { NIM_BOOL LOC98; Ropeobj178006* LOC102; LOC98 = (NIM_BOOL)0; LOC98 = (i0 < length0); if (!(LOC98)) goto LA99; LOC98 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 107)); LA99: ; if (!LOC98) goto LA100; (*p0).finallysafepoints = (TY191350*) incrSeqV2(&((*p0).finallysafepoints)->Sup, sizeof(Ropeobj178006*)); asgnRefNoCycle((void**) (&(*p0).finallysafepoints->data[(*p0).finallysafepoints->Sup.len]), safepoint0); ++(*p0).finallysafepoints->Sup.len; gensimpleblock_544095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]); LOC102 = (Ropeobj178006*)0; LOC102 = pop_178530_1689653243((&(*p0).finallysafepoints)); } LA100: ; memset((void*)LOC103, 0, sizeof(LOC103)); LOC103[0] = safepoint0; linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_640), LOC103, 1); } N_NIMCALL(NimStringDesc*, getraisefrmt_546824_839829468)(Tcproc529021* p0) { NimStringDesc* result0; result0 = (NimStringDesc*)0; result0 = copyString(((NimStringDesc*) &T839829468_641)); return result0; } N_NIMCALL(void, genraisestmt_546828_839829468)(Tcproc529021* p0, Tnode292802* t0) { { Tnode292802* finallyblock0; if (!(((NI) 0) < (*p0).inexceptblock)) goto LA3; finallyblock0 = lastson_295364_850551059((*p0).nestedtrystmts->data[(NI)(((*p0).nestedtrystmts ? 
(*p0).nestedtrystmts->Sup.len : 0) - ((NI) 1))]); { if (!((*finallyblock0).kind == ((Tnodekind292020) 107))) goto LA7; gensimpleblock_544095_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]); } LA7: ; } LA3: ; { Tloc292816 a0; Ropeobj178006* e0; Ttype292840* typ0; NimStringDesc* LOC13; TY532811 LOC14; if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA11; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); e0 = rdloc_538188_839829468((&a0)); typ0 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106247256320)); genlinedir_532823_839829468(p0, t0); LOC13 = (NimStringDesc*)0; LOC13 = getraisefrmt_546824_839829468(p0); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = e0; LOC14[1] = makecstring_191638_155036129((*(*(*typ0).sym).name).s); linecg_532707_839829468(p0, ((Tcprocsection529011) 2), LOC13, LOC14, 2); } goto LA9; LA11: ; { genlinedir_532823_839829468(p0, t0); { NIM_BOOL LOC18; NIM_BOOL LOC19; TY533289 LOC24; Ropeobj178006* LOC25; LOC18 = (NIM_BOOL)0; LOC19 = (NIM_BOOL)0; LOC19 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC19) goto LA20; LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA20: ; LOC18 = LOC19; if (!(LOC18)) goto LA21; LOC18 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 31))&63U)))!=0)); LA21: ; if (!LOC18) goto LA22; memset((void*)LOC24, 0, sizeof(LOC24)); LOC25 = (Ropeobj178006*)0; LOC25 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_623), LOC24, 0); line_532690_839829468(p0, ((Tcprocsection529011) 2), LOC25); } goto LA16; LA22: ; { TY533289 LOC27; memset((void*)LOC27, 0, sizeof(LOC27)); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_642), LOC27, 0); } LA16: ; } LA9: ; } N_NIMCALL(void, gentypesection_538184_839829468)(Tcgen529027* m0, Tnode292802* n0) { } 
N_NIMCALL(Tcfilesection529005, determinesection_548819_839829468)(Tnode292802* n0) { Tcfilesection529005 result0; result0 = (Tcfilesection529005)0; result0 = ((Tcfilesection529005) 7); { NIM_BOOL LOC3; NI LOC4; NimStringDesc* sec0; LOC3 = (NIM_BOOL)0; LOC4 = (NI)0; LOC4 = len_293081_850551059(n0); LOC3 = (((NI) 1) <= LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind >= ((Tnodekind292020) 20) && (*(*n0).kindU.S6.sons->data[((NI) 0)]).kind <= ((Tnodekind292020) 22)); LA5: ; if (!LOC3) goto LA6; sec0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S3.strval; { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; LOC10 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_643)); if (!LOC10) goto LA11; result0 = ((Tcfilesection529005) 3); } goto LA8; LA11: ; { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_644)); if (!LOC14) goto LA15; result0 = ((Tcfilesection529005) 9); } goto LA8; LA15: ; { NIM_BOOL LOC18; LOC18 = (NIM_BOOL)0; LOC18 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_645)); if (!LOC18) goto LA19; result0 = ((Tcfilesection529005) 1); } goto LA8; LA19: ; LA8: ; } LA6: ; return result0; } N_NIMCALL(void, genemit_548839_839829468)(Tcproc529021* p0, Tnode292802* t0) { Ropeobj178006* s0; s0 = genasmoremitstmt_548529_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], NIM_FALSE); { Tcfilesection529005 section0; Tnode292802* LOC5; if (!((*p0).prc == NIM_NIL)) goto LA3; LOC5 = (Tnode292802*)0; LOC5 = HEX5BHEX5D_293238_850551059(t0, ((NI) 1)); section0 = determinesection_548819_839829468(LOC5); genclinedir_532813_839829468(&(*(*p0).module).s[(section0)- 0], (*t0).info); add_178482_2381377266(&(*(*p0).module).s[(section0)- 0], s0); } goto LA1; LA3: ; { genlinedir_532823_839829468(p0, t0); line_532690_839829468(p0, ((Tcprocsection529011) 2), s0); } LA1: ; } N_NIMCALL(void, genbreakpoint_548862_839829468)(Tcproc529021* p0, Tnode292802* t0) { NimStringDesc* name0; name0 = (NimStringDesc*)0; { TY535238 
LOC12; NI LOC13; NimStringDesc* LOC14; if (!(((*p0).options &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) goto LA3; { if (!((*t0).kind == ((Tnodekind292020) 34))) goto LA7; name0 = nsuNormalize((*(*t0).kindU.S6.sons->data[((NI) 1)]).kindU.S3.strval); } goto LA5; LA7: ; { NimStringDesc* LOC10; NimStringDesc* LOC11; breakpointid_548860_839829468 += ((NI) 1); LOC10 = (NimStringDesc*)0; LOC11 = (NimStringDesc*)0; LOC11 = nimIntToStr(breakpointid_548860_839829468); LOC10 = rawNewString(LOC11->Sup.len + 2); appendString(LOC10, ((NimStringDesc*) &T839829468_646)); appendString(LOC10, LOC11); name0 = LOC10; } LA5: ; genlinedir_532823_839829468(p0, t0); memset((void*)LOC12, 0, sizeof(LOC12)); LOC13 = (NI)0; LOC13 = tolinenumber_192415_155036129((*t0).info); LOC12[0] = rope_178401_2381377266(((NI64) (LOC13))); LOC14 = (NimStringDesc*)0; LOC14 = tofilename_192257_155036129((*t0).info.fileindex); LOC12[1] = makecstring_191638_155036129(LOC14); LOC12[2] = makecstring_191638_155036129(name0); appcg_532632_839829468((*p0).module, &gbreakpoints_548861_839829468, ((NimStringDesc*) &T839829468_647), LOC12, 3); } LA3: ; } N_NIMCALL(void, genwatchpoint_549016_839829468)(Tcproc529021* p0, Tnode292802* n0) { Tloc292816 a0; Ttype292840* typ0; TY535238 LOC5; NimStringDesc* LOC6; { { if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 17))&31U)))!=0))) goto LA3; goto BeforeRet; } LA3: ; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0)); typ0 = skiptypes_296099_850551059((*(*n0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = addrloc_538204_839829468((&a0)); LOC6 = (NimStringDesc*)0; LOC6 = rendertree_311044_382274130((*n0).kindU.S6.sons->data[((NI) 1)], 0); LOC5[1] = makecstring_191638_155036129(LOC6); LOC5[2] = gentypeinfo_535941_839829468((*p0).module, typ0); linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_648), LOC5, 3); 
}BeforeRet: ; } N_NIMCALL(void, genpragma_549039_839829468)(Tcproc529021* p_549041_839829468, Tnode292802* n0) { { NI i_549054_839829468; NI HEX3Atmp_549073_839829468; NI LOC2; NI res_549076_839829468; i_549054_839829468 = (NI)0; HEX3Atmp_549073_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_295351_850551059(n0); HEX3Atmp_549073_839829468 = (NI)(LOC2 - ((NI) 1)); res_549076_839829468 = ((NI) 0); { while (1) { Tnode292802* it0; Tspecialword275003 LOC5; if (!(res_549076_839829468 <= HEX3Atmp_549073_839829468)) goto LA4; i_549054_839829468 = res_549076_839829468; it0 = (*n0).kindU.S6.sons->data[i_549054_839829468]; LOC5 = (Tspecialword275003)0; LOC5 = whichpragma_318911_2616423590(it0); switch (LOC5) { case ((Tspecialword275003) 191): { genemit_548839_839829468(p_549041_839829468, it0); } break; case ((Tspecialword275003) 131): { genbreakpoint_548862_839829468(p_549041_839829468, it0); } break; case ((Tspecialword275003) 176): { genwatchpoint_549016_839829468(p_549041_839829468, it0); } break; case ((Tspecialword275003) 183): { Tcproc529021* p0; Ropeobj178006** LOC10; p0 = newproc_529206_3723162438(NIM_NIL, (*p_549041_839829468).module); (*p0).options = ((*p0).options & ~ 98304); genstmts_539244_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)]); LOC10 = (Ropeobj178006**)0; LOC10 = s_529179_3723162438(p0, ((Tcprocsection529011) 2)); asgnRefNoCycle((void**) (&(*(*p0).module).injectstmt), (*LOC10)); } break; default: { } break; } res_549076_839829468 += ((NI) 1); } LA4: ; } } } N_NIMCALL(void, genparforstmt_546208_839829468)(Tcproc529021* p0, Tnode292802* t0) { NI oldbreakidx_546411_839829468; Tsym292834* forloopvar0; Tloc292816 rangea0; Tloc292816 rangeb0; Tnode292802* call0; TY535235 LOC1; NimStringDesc* LOC2; TY533289 LOC3; (*p0).withinloop += ((NI) 1); genlinedir_532823_839829468(p0, t0); oldbreakidx_546411_839829468 = (*p0).breakidx; forloopvar0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; memset((void*)(&rangea0), 0, sizeof(rangea0)); 
memset((void*)(&rangeb0), 0, sizeof(rangeb0)); assignlocalvar_538614_839829468(p0, forloopvar0); call0 = (*t0).kindU.S6.sons->data[((NI) 1)]; initlocexpr_539283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 1)], (&rangea0)); initlocexpr_539283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 2)], (&rangeb0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_538188_839829468((&(*forloopvar0).loc)); LOC1[1] = rdloc_538188_839829468((&rangea0)); LOC1[2] = rdloc_538188_839829468((&rangeb0)); LOC2 = (NimStringDesc*)0; LOC2 = getstr_297230_850551059((*call0).kindU.S6.sons->data[((NI) 3)]); LOC1[3] = rope_178277_2381377266(LOC2); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_649), LOC1, 4); memset((void*)LOC3, 0, sizeof(LOC3)); (*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC3, 0); (*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE; genstmts_539244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 2)]); endblock_544060_839829468(p0); (*p0).breakidx = oldbreakidx_546411_839829468; (*p0).withinloop -= ((NI) 1); } N_NIMCALL(void, genstate_544117_839829468)(Tcproc529021* p0, Tnode292802* n0) { NI64 idx0; TY178507 LOC9; { NIM_BOOL LOC3; NI LOC4; NimStringDesc* LOC8; LOC3 = (NIM_BOOL)0; LOC4 = (NI)0; LOC4 = len_293081_850551059(n0); LOC3 = (LOC4 == ((NI) 1)); if (!(LOC3)) goto LA5; LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 6)); LA5: ; if (!!(LOC3)) goto LA6; LOC8 = (NimStringDesc*)0; LOC8 = HEX24_196185_1689653243(T839829468_650); internalerror_196113_155036129(LOC8); } LA6: ; idx0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = rope_178401_2381377266(idx0); linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_652), LOC9, 1); } N_NIMCALL(void, gengotostate_544144_839829468)(Tcproc529021* p0, Tnode292802* n0) { Tloc292816 a0; TY178507 LOC1; TY533289 LOC2; TY533289 LOC7; 
memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_538188_839829468((&a0)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_603), LOC1, 1); (*p0).beforeretneeded = NIM_TRUE; memset((void*)LOC2, 0, sizeof(LOC2)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_653), LOC2, 0); { NI64 i_544214_839829468; NI64 HEX3Atmp_544223_839829468; NI64 res_544226_839829468; i_544214_839829468 = (NI64)0; HEX3Atmp_544223_839829468 = (NI64)0; HEX3Atmp_544223_839829468 = lastord_320004_3876443242((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ); res_544226_839829468 = IL64(0); { while (1) { TY178507 LOC6; if (!(res_544226_839829468 <= HEX3Atmp_544223_839829468)) goto LA5; i_544214_839829468 = res_544226_839829468; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rope_178401_2381377266(i_544214_839829468); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_654), LOC6, 1); res_544226_839829468 += ((NI) 1); } LA5: ; } } memset((void*)LOC7, 0, sizeof(LOC7)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC7, 0); } N_NIMCALL(void, genbreakstate_544229_839829468)(Tcproc529021* p0, Tnode292802* n0) { Tloc292816 a0; memset((void*)(&a0), 0, sizeof(a0)); { TY178507 LOC5; if (!((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 155))) goto LA3; initlocexpr_539283_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_538188_839829468((&a0)); linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_655), LOC5, 1); } goto LA1; LA3: ; { TY178507 LOC7; initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_538188_839829468((&a0)); 
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_656), LOC7, 1); } LA1: ; } N_NIMCALL(void, expr_539248_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) { switch ((*n0).kind) { case ((Tnodekind292020) 3): { Tsym292834* sym0; sym0 = (*n0).kindU.S4.sym; switch ((*sym0).kind) { case ((Tsymkind292435) 13): { { if (!!(((33554448 & (*sym0).flags) == 0))) goto LA5; fillprocloc_539201_839829468(sym0); genprocprototype_539254_839829468((*p0).module, sym0); } goto LA3; LA5: ; { genproc_532951_839829468((*p0).module, sym0); } LA3: ; putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc)); } break; case ((Tsymkind292435) 12): case ((Tsymkind292435) 15): case ((Tsymkind292435) 14): { { NimStringDesc* LOC13; if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)) goto LA11; LOC13 = (NimStringDesc*)0; LOC13 = rawNewString((*(*sym0).name).s->Sup.len + 48); appendString(LOC13, ((NimStringDesc*) &T839829468_270)); appendString(LOC13, (*(*sym0).name).s); localerror_196085_155036129((*n0).info, LOC13); } LA11: ; genproc_532951_839829468((*p0).module, sym0); { NIM_BOOL LOC16; NimStringDesc* LOC20; LOC16 = (NIM_BOOL)0; LOC16 = ((*sym0).loc.r == NIM_NIL); if (LOC16) goto LA17; LOC16 = ((*sym0).loc.t == NIM_NIL); LA17: ; if (!LOC16) goto LA18; LOC20 = (NimStringDesc*)0; LOC20 = rawNewString((*(*sym0).name).s->Sup.len + 20); appendString(LOC20, ((NimStringDesc*) &T839829468_271)); appendString(LOC20, (*(*sym0).name).s); internalerror_196100_155036129((*n0).info, LOC20); } LA18: ; putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc)); } break; case ((Tsymkind292435) 10): { { NIM_BOOL LOC24; Ropeobj178006* LOC27; LOC24 = (NIM_BOOL)0; LOC24 = issimpleconst_532311_839829468((*sym0).typ); if (!LOC24) goto LA25; LOC27 = (Ropeobj178006*)0; LOC27 = genliteral_549476_839829468(p0, (*sym0).ast, (*sym0).typ); putintodest_550468_839829468(p0, d0, (*n0).typ, LOC27, ((Tstorageloc292812) 1)); } goto LA22; LA25: ; { 
gencomplexconst_558249_839829468(p0, sym0, d0); } LA22: ; } break; case ((Tsymkind292435) 19): { Ropeobj178006* LOC30; LOC30 = (Ropeobj178006*)0; LOC30 = rope_178401_2381377266(((NI64) ((*sym0).position))); putintodest_550468_839829468(p0, d0, (*n0).typ, LOC30, ((Tstorageloc292812) 0)); } break; case ((Tsymkind292435) 8): case ((Tsymkind292435) 20): case ((Tsymkind292435) 11): case ((Tsymkind292435) 9): { { if (!!(((4194312 & (*sym0).flags) == 0))) goto LA34; genvarprototype_539236_839829468((*p0).module, sym0); } LA34: ; { NIM_BOOL LOC38; NimStringDesc* LOC42; NimStringDesc* LOC43; LOC38 = (NIM_BOOL)0; LOC38 = ((*sym0).loc.r == NIM_NIL); if (LOC38) goto LA39; LOC38 = ((*sym0).loc.t == NIM_NIL); LA39: ; if (!LOC38) goto LA40; LOC42 = (NimStringDesc*)0; LOC43 = (NimStringDesc*)0; LOC43 = nimIntToStr((*sym0).Sup.id); LOC42 = rawNewString((*(*sym0).name).s->Sup.len + LOC43->Sup.len + 20); appendString(LOC42, ((NimStringDesc*) &T839829468_285)); appendString(LOC42, (*(*sym0).name).s); appendString(LOC42, ((NimStringDesc*) &T839829468_12)); appendString(LOC42, LOC43); internalerror_196100_155036129((*n0).info, LOC42); } LA40: ; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA46; accessthreadlocalvar_532945_839829468(p0, sym0); { NIM_BOOL LOC50; Ropeobj178006* LOC53; LOC50 = (NIM_BOOL)0; LOC50 = emulatedthreadvars_532949_839829468(); if (!LOC50) goto LA51; LOC53 = (Ropeobj178006*)0; LOC53 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_288), (*sym0).loc.r); putintodest_550468_839829468(p0, d0, (*sym0).loc.t, LOC53, ((Tstorageloc292812) 0)); } goto LA48; LA51: ; { putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc)); } LA48: ; } goto LA44; LA46: ; { putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc)); } LA44: ; } break; case ((Tsymkind292435) 5): { { NIM_BOOL LOC59; NimStringDesc* LOC63; NimStringDesc* LOC64; LOC59 = (NIM_BOOL)0; LOC59 = ((*sym0).loc.r == NIM_NIL); if (LOC59) goto LA60; LOC59 = ((*sym0).loc.t == NIM_NIL); LA60: 
; if (!LOC59) goto LA61; LOC63 = (NimStringDesc*)0; LOC64 = (NimStringDesc*)0; LOC64 = nimIntToStr((*sym0).Sup.id); LOC63 = rawNewString((*(*sym0).name).s->Sup.len + LOC64->Sup.len + 21); appendString(LOC63, ((NimStringDesc*) &T839829468_289)); appendString(LOC63, (*(*sym0).name).s); appendString(LOC63, ((NimStringDesc*) &T839829468_12)); appendString(LOC63, LOC64); internalerror_196100_155036129((*n0).info, LOC63); } LA61: ; putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc)); } break; case ((Tsymkind292435) 3): { { NIM_BOOL LOC68; NimStringDesc* LOC72; NimStringDesc* LOC73; LOC68 = (NIM_BOOL)0; LOC68 = ((*sym0).loc.r == NIM_NIL); if (LOC68) goto LA69; LOC68 = ((*sym0).loc.t == NIM_NIL); LA69: ; if (!LOC68) goto LA70; LOC72 = (NimStringDesc*)0; LOC73 = (NimStringDesc*)0; LOC73 = nimIntToStr((*sym0).Sup.id); LOC72 = rawNewString((*(*sym0).name).s->Sup.len + LOC73->Sup.len + 22); appendString(LOC72, ((NimStringDesc*) &T839829468_290)); appendString(LOC72, (*(*sym0).name).s); appendString(LOC72, ((NimStringDesc*) &T839829468_12)); appendString(LOC72, LOC73); internalerror_196100_155036129((*n0).info, LOC72); } LA70: ; putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc)); } break; default: { NimStringDesc* LOC75; LOC75 = (NimStringDesc*)0; LOC75 = rawNewString(reprEnum((NI)(*sym0).kind, (&NTI292435))->Sup.len + 22); appendString(LOC75, ((NimStringDesc*) &T839829468_291)); appendString(LOC75, reprEnum((NI)(*sym0).kind, (&NTI292435))); appendString(LOC75, ((NimStringDesc*) &T839829468_292)); internalerror_196100_155036129((*n0).info, LOC75); } break; } } break; case ((Tnodekind292020) 23): { { NIM_BOOL LOC79; Ropeobj178006* LOC82; LOC79 = (NIM_BOOL)0; LOC79 = isemptytype_297441_850551059((*n0).typ); if (!!(LOC79)) goto LA80; LOC82 = (Ropeobj178006*)0; LOC82 = genliteral_539273_839829468(p0, n0); putintodest_550468_839829468(p0, d0, (*n0).typ, LOC82, ((Tstorageloc292812) 0)); } LA80: ; } break; case ((Tnodekind292020) 20) ... 
((Tnodekind292020) 22): { Ropeobj178006* LOC84; LOC84 = (Ropeobj178006*)0; LOC84 = genliteral_539273_839829468(p0, n0); putdataintodest_550436_839829468(p0, d0, (*n0).typ, LOC84); } break; case ((Tnodekind292020) 6) ... ((Tnodekind292020) 15): case ((Tnodekind292020) 16) ... ((Tnodekind292020) 19): case ((Tnodekind292020) 5): { Ropeobj178006* LOC86; LOC86 = (Ropeobj178006*)0; LOC86 = genliteral_539273_839829468(p0, n0); putintodest_550468_839829468(p0, d0, (*n0).typ, LOC86, ((Tstorageloc292812) 0)); } break; case ((Tnodekind292020) 27): case ((Tnodekind292020) 32): case ((Tnodekind292020) 29): case ((Tnodekind292020) 30): case ((Tnodekind292020) 31): case ((Tnodekind292020) 26): case ((Tnodekind292020) 28): { Tnode292802* op0; genlinedir_532823_839829468(p0, n0); op0 = (*n0).kindU.S6.sons->data[((NI) 0)]; { Tloc292816 a0; if (!(*n0).typ == 0) goto LA90; memset((void*)(&a0), 0, sizeof(a0)); { NIM_BOOL LOC94; LOC94 = (NIM_BOOL)0; LOC94 = ((*op0).kind == ((Tnodekind292020) 3)); if (!(LOC94)) goto LA95; LOC94 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic292524) 0))); LA95: ; if (!LOC94) goto LA96; genmagicexpr_557033_839829468(p0, n0, (&a0), (*(*op0).kindU.S4.sym).magic); } goto LA92; LA96: ; { gencall_543632_839829468(p0, n0, (&a0)); } LA92: ; } goto LA88; LA90: ; { { NIM_BOOL LOC102; LOC102 = (NIM_BOOL)0; LOC102 = ((*op0).kind == ((Tnodekind292020) 3)); if (!(LOC102)) goto LA103; LOC102 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic292524) 0))); LA103: ; if (!LOC102) goto LA104; genmagicexpr_557033_839829468(p0, n0, d0, (*(*op0).kindU.S4.sym).magic); } goto LA100; LA104: ; { gencall_543632_839829468(p0, n0, d0); } LA100: ; } LA88: ; } break; case ((Tnodekind292020) 39): { { NIM_BOOL LOC110; NI LOC112; Ropeobj178006* LOC115; LOC110 = (NIM_BOOL)0; LOC110 = isdeepconstexpr_318566_2616423590(n0); if (!(LOC110)) goto LA111; LOC112 = (NI)0; LOC112 = len_293081_850551059(n0); LOC110 = !((LOC112 == ((NI) 0))); LA111: ; if (!LOC110) goto LA113; LOC115 = (Ropeobj178006*)0; 
LOC115 = gensetnode_549664_839829468(p0, n0); putintodest_550468_839829468(p0, d0, (*n0).typ, LOC115, ((Tstorageloc292812) 0)); } goto LA108; LA113: ; { gensetconstr_557496_839829468(p0, n0, d0); } LA108: ; } break; case ((Tnodekind292020) 41): { { NIM_BOOL LOC120; NI LOC122; LOC120 = (NIM_BOOL)0; LOC120 = isdeepconstexpr_318566_2616423590(n0); if (!(LOC120)) goto LA121; LOC122 = (NI)0; LOC122 = len_293081_850551059(n0); LOC120 = !((LOC122 == ((NI) 0))); LA121: ; if (!LOC120) goto LA123; exprcomplexconst_558684_839829468(p0, n0, d0); } goto LA118; LA123: ; { Ttype292840* LOC126; LOC126 = (Ttype292840*)0; LOC126 = skiptypes_296099_850551059((*n0).typ, IL64(211106242013440)); if (!((*LOC126).kind == ((Ttypekind292244) 24))) goto LA127; genseqconstr_555004_839829468(p0, n0, d0); } goto LA118; LA127: ; { genarrayconstr_558207_839829468(p0, n0, d0); } LA118: ; } break; case ((Tnodekind292020) 37): { { NIM_BOOL LOC133; NI LOC135; LOC133 = (NIM_BOOL)0; LOC133 = isdeepconstexpr_318566_2616423590(n0); if (!(LOC133)) goto LA134; LOC135 = (NI)0; LOC135 = len_293081_850551059(n0); LOC133 = !((LOC135 == ((NI) 0))); LA134: ; if (!LOC133) goto LA136; exprcomplexconst_558684_839829468(p0, n0, d0); } goto LA131; LA136: ; { gentupleconstr_557618_839829468(p0, n0, d0); } LA131: ; } break; case ((Tnodekind292020) 38): { genobjconstr_554903_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 61): { gencast_556538_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 58): case ((Tnodekind292020) 59): case ((Tnodekind292020) 60): { genconv_556633_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 64): case ((Tnodekind292020) 63): { genaddr_553051_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 42): { genbracketexpr_554277_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 47): case ((Tnodekind292020) 65): { genderef_543921_839829468(p0, n0, d0, NIM_FALSE); } break; case ((Tnodekind292020) 45): { genrecordfield_553448_839829468(p0, n0, d0); } break; case 
((Tnodekind292020) 46): { gencheckedrecordfield_554046_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 127): case ((Tnodekind292020) 112): { genblock_546083_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 126): { genstmtlistexpr_558402_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 115): { { NI i_559023_839829468; NI HEX3Atmp_559276_839829468; NI LOC151; NI res_559279_839829468; i_559023_839829468 = (NI)0; HEX3Atmp_559276_839829468 = (NI)0; LOC151 = (NI)0; LOC151 = sonslen_295351_850551059(n0); HEX3Atmp_559276_839829468 = (NI)(LOC151 - ((NI) 1)); res_559279_839829468 = ((NI) 0); { while (1) { if (!(res_559279_839829468 <= HEX3Atmp_559276_839829468)) goto LA153; i_559023_839829468 = res_559279_839829468; genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[i_559023_839829468]); res_559279_839829468 += ((NI) 1); } LA153: ; } } } break; case ((Tnodekind292020) 48): case ((Tnodekind292020) 92): { genif_544982_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 93): { expr_539248_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[((NI) 0)], d0); } break; case ((Tnodekind292020) 66): { downconv_558581_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 67): { upconv_558431_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 68): { genrangechck_556591_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_563)); } break; case ((Tnodekind292020) 69): { genrangechck_556591_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_564)); } break; case ((Tnodekind292020) 70): { genrangechck_556591_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_565)); } break; case ((Tnodekind292020) 71): { convstrtocstr_556643_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 72): { convcstrtostr_556655_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 51): case ((Tnodekind292020) 52): { Tsym292834* sym0; sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; genproc_532951_839829468((*p0).module, sym0); { NIM_BOOL 
LOC166; NimStringDesc* LOC170; LOC166 = (NIM_BOOL)0; LOC166 = ((*sym0).loc.r == NIM_NIL); if (LOC166) goto LA167; LOC166 = ((*sym0).loc.t == NIM_NIL); LA167: ; if (!LOC166) goto LA168; LOC170 = (NimStringDesc*)0; LOC170 = rawNewString((*(*sym0).name).s->Sup.len + 20); appendString(LOC170, ((NimStringDesc*) &T839829468_271)); appendString(LOC170, (*(*sym0).name).s); internalerror_196100_155036129((*n0).info, LOC170); } LA168: ; putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc)); } break; case ((Tnodekind292020) 155): { genclosure_557836_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 1): { } break; case ((Tnodekind292020) 96): { genwhilestmt_545985_839829468(p0, n0); } break; case ((Tnodekind292020) 99): case ((Tnodekind292020) 100): { genvarstmt_544854_839829468(p0, n0); } break; case ((Tnodekind292020) 101): { genconststmt_544909_839829468(p0, n0); } break; case ((Tnodekind292020) 94): { internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_594)); } break; case ((Tnodekind292020) 97): { gencase_547827_839829468(p0, n0, d0); } break; case ((Tnodekind292020) 109): { genreturnstmt_545617_839829468(p0, n0); } break; case ((Tnodekind292020) 110): { genbreakstmt_546444_839829468(p0, n0); } break; case ((Tnodekind292020) 73): { { if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0))) goto LA183; genasgn_549239_839829468(p0, n0, NIM_FALSE); } LA183: ; } break; case ((Tnodekind292020) 74): { { if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0))) goto LA188; genasgn_549239_839829468(p0, n0, !(((*p0).prc == NIM_NIL))); } LA188: ; } break; case ((Tnodekind292020) 114): { { Tloc292816 a0; if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA193; genlinedir_532823_839829468(p0, n0); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); } LA193: ; } break; case ((Tnodekind292020) 89): { genasmstmt_548659_839829468(p0, n0); 
} break; case ((Tnodekind292020) 106): { { NIM_BOOL LOC199; NIM_BOOL LOC200; LOC199 = (NIM_BOOL)0; LOC200 = (NIM_BOOL)0; LOC200 = (gcmd_169132_2607990831 == ((Tcommands169076) 2)); if (LOC200) goto LA201; LOC200 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA201: ; LOC199 = LOC200; if (!(LOC199)) goto LA202; LOC199 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 31))&63U)))!=0)); LA202: ; if (!LOC199) goto LA203; gentrycpp_547866_839829468(p0, n0, d0); } goto LA197; LA203: ; { gentry_548114_839829468(p0, n0, d0); } LA197: ; } break; case ((Tnodekind292020) 108): { genraisestmt_546828_839829468(p0, n0); } break; case ((Tnodekind292020) 98): { gentypesection_538184_839829468((*p0).module, n0); } break; case ((Tnodekind292020) 125): case ((Tnodekind292020) 84): case ((Tnodekind292020) 121): case ((Tnodekind292020) 116): case ((Tnodekind292020) 117): case ((Tnodekind292020) 118): case ((Tnodekind292020) 119): case ((Tnodekind292020) 120): case ((Tnodekind292020) 83): case ((Tnodekind292020) 82): { } break; case ((Tnodekind292020) 90): { genpragma_549039_839829468(p0, n0); } break; case ((Tnodekind292020) 91): { Tnode292802* LOC211; LOC211 = (Tnode292802*)0; LOC211 = lastson_295364_850551059(n0); expr_539248_839829468(p0, LOC211, d0); } break; case ((Tnodekind292020) 79): case ((Tnodekind292020) 80): case ((Tnodekind292020) 81): { { Tsym292834* prc0; if (!((*(*n0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1))) goto LA215; prc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { NIM_BOOL LOC219; Tsym292834* LOC220; LOC219 = (NIM_BOOL)0; LOC220 = (Tsym292834*)0; LOC220 = skipgenericowner_297280_850551059(prc0); LOC219 = ((*LOC220).kind == ((Tsymkind292435) 6)); if (!(LOC219)) goto LA221; LOC219 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)); LA221: ; if (!LOC219) goto LA222; { NIM_BOOL LOC226; NIM_BOOL LOC227; NIM_BOOL LOC228; NIM_BOOL LOC229; Tsym292834* LOC231; 
NIM_BOOL LOC234; LOC226 = (NIM_BOOL)0; LOC227 = (NIM_BOOL)0; LOC228 = (NIM_BOOL)0; LOC229 = (NIM_BOOL)0; LOC229 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0)); if (!(LOC229)) goto LA230; LOC231 = (Tsym292834*)0; LOC231 = getmodule_299123_2984716966(prc0); LOC229 = !((((*LOC231).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0)); LA230: ; LOC228 = LOC229; if (LOC228) goto LA232; LOC228 = ((65600 & (*prc0).flags) == 64); LA232: ; LOC227 = LOC228; if (LOC227) goto LA233; LOC234 = (NIM_BOOL)0; LOC234 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0); if (!(LOC234)) goto LA235; LOC234 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 5))&15U)))!=0); LA235: ; LOC227 = LOC234; LA233: ; LOC226 = LOC227; if (LOC226) goto LA236; LOC226 = ((*prc0).kind == ((Tsymkind292435) 13)); LA236: ; if (!LOC226) goto LA237; { NIM_BOOL LOC241; Tnode292802* LOC242; LOC241 = (NIM_BOOL)0; LOC242 = (Tnode292802*)0; LOC242 = getbody_335226_1724185294(prc0); LOC241 = !(((*LOC242).kind == ((Tnodekind292020) 1))); if (LOC241) goto LA243; LOC241 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0); LA243: ; if (!LOC241) goto LA244; genproc_532951_839829468((*p0).module, prc0); } LA244: ; } LA237: ; } LA222: ; } LA215: ; } break; case ((Tnodekind292020) 95): { genparforstmt_546208_839829468(p0, n0); } break; case ((Tnodekind292020) 157): { genstate_544117_839829468(p0, n0); } break; case ((Tnodekind292020) 156): { gengotostate_544144_839829468(p0, n0); } break; case ((Tnodekind292020) 158): { genbreakstate_544229_839829468(p0, n0); } break; default: { NimStringDesc* LOC251; LOC251 = (NimStringDesc*)0; LOC251 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI292020))->Sup.len + 25); appendString(LOC251, ((NimStringDesc*) &T839829468_291)); appendString(LOC251, reprEnum((NI)(*n0).kind, (&NTI292020))); appendString(LOC251, ((NimStringDesc*) &T839829468_657)); internalerror_196100_155036129((*n0).info, LOC251); } break; } } 
N_NIMCALL(void, genstmts_539244_839829468)(Tcproc529021* p0, Tnode292802* t0) { Tloc292816 a0; memset((void*)(&a0), 0, sizeof(a0)); expr_539248_839829468(p0, t0, (&a0)); { NimStringDesc* LOC5; if (!!(((7 &(1U<<((NU)(a0.k)&15U)))!=0))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = HEX24_196185_1689653243(T839829468_658); internalerror_196113_155036129(LOC5); } LA3: ; } N_NIMCALL(Tnode292802*, myprocess_563402_839829468)(Tpasscontext341002* b0, Tnode292802* n0) { Tnode292802* result0; Tcgen529027* m0; { result0 = (Tnode292802*)0; result0 = n0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (b0 == NIM_NIL); if (LOC3) goto LA4; LOC3 = skipcodegen_341085_2355241294(n0); LA4: ; if (!LOC3) goto LA5; goto BeforeRet; } LA5: ; m0 = ((Tcgen529027*) (b0)); (*(*m0).initproc).options = initprocoptions_562635_839829468(m0); genstmts_539244_839829468((*m0).initproc, n0); }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj178006*, getsomeinitname_561904_839829468)(Tsym292834* m0, NimStringDesc* suffix0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { NimStringDesc* LOC5; if (!((12288 & (*m0).flags) == 0)) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = mangle_528847_2036603609((*(*(*m0).owner).name).s); result0 = rope_178277_2381377266(LOC5); add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_12)); } LA3: ; add_178487_2381377266(&result0, (*(*m0).name).s); add_178487_2381377266(&result0, suffix0); return result0; } N_NIMCALL(Ropeobj178006*, getinitname_562235_839829468)(Tsym292834* m0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = getsomeinitname_561904_839829468(m0, ((NimStringDesc*) &T839829468_659)); return result0; } N_NIMCALL(Ropeobj178006*, getdatinitname_562239_839829468)(Tsym292834* m0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = getsomeinitname_561904_839829468(m0, ((NimStringDesc*) &T839829468_660)); return result0; } N_NIMCALL(void, registermoduletomain_562243_839829468)(Tsym292834* m0) { Ropeobj178006* init0; Ropeobj178006* 
datinit0; TY178507 LOC1; TY178507 LOC2; init0 = getinitname_562235_839829468(m0); datinit0 = getdatinitname_562239_839829468(m0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = init0; addf_179205_2381377266(&mainmodprocs_529148_3723162438, ((NimStringDesc*) &T839829468_661), LOC1, 1); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = datinit0; addf_179205_2381377266(&mainmodprocs_529148_3723162438, ((NimStringDesc*) &T839829468_661), LOC2, 1); { TY178507 LOC7; Ropeobj178006* initcall0; TY178507 LOC8; if (!!((((*m0).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0))) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = datinit0; addf_179205_2381377266(&maindatinit_529151_3723162438, ((NimStringDesc*) &T839829468_662), LOC7, 1); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = init0; initcall0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_662), LOC8, 1); { if (!(((*m0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA11; add_178482_2381377266(&mainmodinit_529149_3723162438, initcall0); } goto LA9; LA11: ; { add_178482_2381377266(&othermodsinit_529150_3723162438, initcall0); } LA9: ; } LA5: ; } N_NIMCALL(Ropeobj178006*, genfilenames_561688_839829468)(Tcgen529027* m0) { Ropeobj178006* result0; Ropeobj178006* LOC1; result0 = (Ropeobj178006*)0; LOC1 = (Ropeobj178006*)0; LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_673)); result0 = NIM_NIL; { NI i_561717_839829468; NI HEX3Atmp_561722_839829468; NI res_561725_839829468; i_561717_839829468 = (NI)0; HEX3Atmp_561722_839829468 = (NI)0; HEX3Atmp_561722_839829468 = ((fileinfos_191629_155036129 ? 
fileinfos_191629_155036129->Sup.len : 0) - 1); res_561725_839829468 = ((NI) 0); { while (1) { TY178507 LOC5; if (!(res_561725_839829468 <= HEX3Atmp_561722_839829468)) goto LA4; i_561717_839829468 = res_561725_839829468; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = makecstring_191638_155036129(fileinfos_191629_155036129->data[i_561717_839829468].projpath); addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_674), LOC5, 1); res_561725_839829468 += ((NI) 1); } LA4: ; } } return result0; } N_NIMCALL(void, genmainproc_561729_839829468)(Tcgen529027* m0) { NimStringDesc* nimmain0; NimStringDesc* othermain0; Ropeobj178006* initstackbottomcall0; TY536475 LOC38; TY535238 LOC47; nimmain0 = (NimStringDesc*)0; othermain0 = (NimStringDesc*)0; { NIM_BOOL LOC3; NIM_BOOL LOC12; LOC3 = (NIM_BOOL)0; LOC3 = (targetos_176629_4151366050 == ((Tsystemos176004) 2)); if (!(LOC3)) goto LA4; LOC3 = !(((gglobaloptions_169130_2607990831 & 1280) == 0)); LA4: ; if (!LOC3) goto LA5; { if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 10))&63U)))!=0)) goto LA9; nimmain0 = copyString(((NimStringDesc*) &T839829468_663)); othermain0 = copyString(((NimStringDesc*) &T839829468_664)); } goto LA7; LA9: ; { nimmain0 = copyString(((NimStringDesc*) &T839829468_665)); othermain0 = copyString(((NimStringDesc*) &T839829468_666)); } LA7: ; LOC12 = (NIM_BOOL)0; LOC12 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_667)); } goto LA1; LA5: ; { if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 8))&63U)))!=0)) goto LA14; nimmain0 = copyString(((NimStringDesc*) &T839829468_665)); othermain0 = copyString(((NimStringDesc*) &T839829468_668)); } goto LA1; LA14: ; { if (!(targetos_176629_4151366050 == ((Tsystemos176004) 24))) goto LA17; nimmain0 = copyString(((NimStringDesc*) &T839829468_669)); othermain0 = copyString(((NimStringDesc*) &T839829468_670)); } goto LA1; LA17: ; { nimmain0 = copyString(((NimStringDesc*) 
&T839829468_669)); othermain0 = copyString(((NimStringDesc*) &T839829468_671)); } LA1: ; { Ropeobj178006* LOC24; if (!!((gbreakpoints_548861_839829468 == NIM_NIL))) goto LA22; LOC24 = (Ropeobj178006*)0; LOC24 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_672)); } LA22: ; { Ropeobj178006* LOC29; if (!((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) goto LA27; LOC29 = (Ropeobj178006*)0; LOC29 = genfilenames_561688_839829468(m0); add_178482_2381377266(&gbreakpoints_548861_839829468, LOC29); } LA27: ; { NIM_BOOL LOC32; LOC32 = (NIM_BOOL)0; LOC32 = (targetos_176629_4151366050 == ((Tsystemos176004) 24)); if (LOC32) goto LA33; LOC32 = (gselectedgc_169133_2607990831 == ((Tgcmode169080) 0)); LA33: ; if (!LOC32) goto LA34; initstackbottomcall0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_490)); } goto LA30; LA34: ; { TY533289 LOC37; memset((void*)LOC37, 0, sizeof(LOC37)); initstackbottomcall0 = ropecg_532407_839829468(m0, ((NimStringDesc*) &T839829468_675), LOC37, 0); } LA30: ; (*m0).labels += ((NI) 1); memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = maindatinit_529151_3723162438; LOC38[1] = gbreakpoints_548861_839829468; LOC38[2] = othermodsinit_529150_3723162438; { NIM_BOOL LOC41; TY533289 LOC45; LOC41 = (NIM_BOOL)0; LOC41 = emulatedthreadvars_532949_839829468(); if (!(LOC41)) goto LA42; LOC41 = !((targetos_176629_4151366050 == ((Tsystemos176004) 24))); LA42: ; if (!LOC41) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC38[3] = ropecg_532407_839829468(m0, ((NimStringDesc*) &T839829468_677), LOC45, 0); } goto LA39; LA43: ; { LOC38[3] = rope_178277_2381377266(((NimStringDesc*) &T839829468_490)); } LA39: ; LOC38[4] = initstackbottomcall0; appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], ((NimStringDesc*) &T839829468_676), LOC38, 5); memset((void*)LOC47, 0, sizeof(LOC47)); LOC47[0] = mainmodinit_529149_3723162438; LOC47[1] = initstackbottomcall0; LOC47[2] = rope_178401_2381377266(((NI64) 
((*m0).labels))); appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], nimmain0, LOC47, 3); { TY533289 LOC52; if (!!(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 20))&63U)))!=0))) goto LA50; memset((void*)LOC52, 0, sizeof(LOC52)); appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], othermain0, LOC52, 0); } LA50: ; } N_NIMCALL(Tnode292802*, myclose_563830_839829468)(Tpasscontext341002* b0, Tnode292802* n0) { Tnode292802* result0; Tcgen529027* m0; { result0 = (Tnode292802*)0; result0 = n0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (b0 == NIM_NIL); if (LOC3) goto LA4; LOC3 = skipcodegen_341085_2355241294(n0); LA4: ; if (!LOC3) goto LA5; goto BeforeRet; } LA5: ; m0 = ((Tcgen529027*) (b0)); { if (!!((n0 == NIM_NIL))) goto LA9; (*(*m0).initproc).options = initprocoptions_562635_839829468(m0); genstmts_539244_839829468((*m0).initproc, n0); } LA9: ; registermoduletomain_562243_839829468((*m0).module); { Tnode292802* disp0; if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA13; (*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 5))%(sizeof(NU8)*8)); disp0 = generatemethoddispatchers_432151_3853300031(); { NI i_563891_839829468; NI HEX3Atmp_563895_839829468; NI LOC16; NI res_563898_839829468; i_563891_839829468 = (NI)0; HEX3Atmp_563895_839829468 = (NI)0; LOC16 = (NI)0; LOC16 = sonslen_295351_850551059(disp0); HEX3Atmp_563895_839829468 = (NI)(LOC16 - ((NI) 1)); res_563898_839829468 = ((NI) 0); { while (1) { if (!(res_563898_839829468 <= HEX3Atmp_563895_839829468)) goto LA18; i_563891_839829468 = res_563898_839829468; genprocaux_560284_839829468(m0, (*(*disp0).kindU.S6.sons->data[i_563891_839829468]).kindU.S4.sym); res_563898_839829468 += ((NI) 1); } LA18: ; } } genmainproc_561729_839829468(m0); } LA13: ; }BeforeRet: ; return result0; } N_NIMCALL(void, finishmodule_563420_839829468)(Tcgen529027* m0) { NI i0; i0 = ((NI) 0); { while (1) { Tsym292834* prc0; if (!(i0 <= 
((*m0).forwardedprocs ? ((*m0).forwardedprocs->Sup.len-1) : -1))) goto LA2; prc0 = (*m0).forwardedprocs->data[i0]; { NimStringDesc* LOC7; if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 4))&31U)))!=0)) goto LA5; LOC7 = (NimStringDesc*)0; LOC7 = rawNewString((*(*prc0).name).s->Sup.len + 17); appendString(LOC7, ((NimStringDesc*) &T839829468_678)); appendString(LOC7, (*(*prc0).name).s); internalerror_196100_155036129((*prc0).info, LOC7); } LA5: ; genprocnoforward_560906_839829468(m0, prc0); i0 += ((NI) 1); } LA2: ; } gforwardedprocscounter_529171_3723162438 -= i0; (*m0).forwardedprocs = (Tsymseq292804*) setLengthSeq(&((*m0).forwardedprocs)->Sup, sizeof(Tsym292834*), ((NI) 0)); } N_NIMCALL(void, geninitcode_562286_839829468)(Tcgen529027* m0) { Ropeobj178006* initname0; Ropeobj178006* prc0; TY178507 LOC1; Ropeobj178006* LOC12; Ropeobj178006* LOC13; Ropeobj178006** LOC14; Ropeobj178006** LOC15; Ropeobj178006** LOC16; Ropeobj178006* LOC17; Ropeobj178006* LOC33; Ropeobj178006** LOC34; Ropeobj178006** LOC35; Ropeobj178006** LOC36; Ropeobj178006* LOC37; Ropeobj178006* LOC38; Ropeobj178006** LOC39; Ropeobj178006** LOC40; Ropeobj178006** LOC41; Ropeobj178006* LOC42; Ropeobj178006* LOC50; TY533289 LOC51; TY178507 LOC52; TY533289 LOC58; initname0 = getinitname_562235_839829468((*m0).module); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = initname0; prc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_679), LOC1, 1); { TY532811 LOC6; if (!(((NI) 0) < (*m0).typenodes)) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = (*m0).typenodesname; LOC6[1] = rope_178401_2381377266(((NI64) ((*m0).typenodes))); appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_680), LOC6, 2); } LA4: ; { TY532811 LOC11; if (!(((NI) 0) < (*m0).nimtypes)) goto LA9; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = (*m0).nimtypesname; LOC11[1] = rope_178401_2381377266(((NI64) ((*m0).nimtypes))); appcg_532632_839829468(m0, 
&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_681), LOC11, 2); } LA9: ; LOC12 = (Ropeobj178006*)0; LOC12 = initgcframe_538435_839829468((*m0).initproc); add_178482_2381377266(&prc0, LOC12); LOC13 = (Ropeobj178006*)0; LOC13 = gensectionstart_530081_2760143328(((Tcprocsection529011) 0)); add_178482_2381377266(&prc0, LOC13); LOC14 = (Ropeobj178006**)0; LOC14 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 0)); add_178482_2381377266(&prc0, (*LOC14)); LOC15 = (Ropeobj178006**)0; LOC15 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 0)); add_178482_2381377266(&prc0, (*LOC15)); LOC16 = (Ropeobj178006**)0; LOC16 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 0)); add_178482_2381377266(&prc0, (*LOC16)); LOC17 = (Ropeobj178006*)0; LOC17 = gensectionend_530116_2760143328(((Tcprocsection529011) 0)); add_178482_2381377266(&prc0, LOC17); { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0); if (!(LOC20)) goto LA21; LOC20 = !((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 2))&7U)))!=0)); LA21: ; if (!LOC20) goto LA22; (*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 2))%(sizeof(NU8)*8)); { Ropeobj178006* procname0; Ropeobj178006* LOC28; Ropeobj178006* LOC29; if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 0))&7U)))!=0))) goto LA26; procname0 = makecstring_191638_155036129((*(*(*m0).module).name).s); LOC28 = (Ropeobj178006*)0; LOC28 = quotedfilename_196818_155036129((*(*m0).module).info); LOC29 = (Ropeobj178006*)0; LOC29 = initframe_560140_839829468((*m0).initproc, procname0, LOC28); add_178482_2381377266(&prc0, LOC29); } goto LA24; LA26: ; { TY533289 LOC31; Ropeobj178006* LOC32; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ropeobj178006*)0; LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_682), LOC31, 0); add_178482_2381377266(&prc0, LOC32); } LA24: ; } LA22: ; LOC33 = (Ropeobj178006*)0; LOC33 = 
gensectionstart_530081_2760143328(((Tcprocsection529011) 1)); add_178482_2381377266(&prc0, LOC33); LOC34 = (Ropeobj178006**)0; LOC34 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 1)); add_178482_2381377266(&prc0, (*LOC34)); LOC35 = (Ropeobj178006**)0; LOC35 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 1)); add_178482_2381377266(&prc0, (*LOC35)); LOC36 = (Ropeobj178006**)0; LOC36 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 1)); add_178482_2381377266(&prc0, (*LOC36)); LOC37 = (Ropeobj178006*)0; LOC37 = gensectionend_530116_2760143328(((Tcprocsection529011) 1)); add_178482_2381377266(&prc0, LOC37); LOC38 = (Ropeobj178006*)0; LOC38 = gensectionstart_530081_2760143328(((Tcprocsection529011) 2)); add_178482_2381377266(&prc0, LOC38); LOC39 = (Ropeobj178006**)0; LOC39 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 2)); add_178482_2381377266(&prc0, (*LOC39)); LOC40 = (Ropeobj178006**)0; LOC40 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 2)); add_178482_2381377266(&prc0, (*LOC40)); LOC41 = (Ropeobj178006**)0; LOC41 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 2)); add_178482_2381377266(&prc0, (*LOC41)); LOC42 = (Ropeobj178006*)0; LOC42 = gensectionend_530116_2760143328(((Tcprocsection529011) 2)); add_178482_2381377266(&prc0, LOC42); { NIM_BOOL LOC45; Ropeobj178006* LOC49; LOC45 = (NIM_BOOL)0; LOC45 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0); if (!(LOC45)) goto LA46; LOC45 = !((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 0))&7U)))!=0)); LA46: ; if (!LOC45) goto LA47; LOC49 = (Ropeobj178006*)0; LOC49 = deinitframe_560150_839829468((*m0).initproc); add_178482_2381377266(&prc0, LOC49); } LA47: ; LOC50 = (Ropeobj178006*)0; LOC50 = deinitgcframe_538441_839829468((*m0).initproc); add_178482_2381377266(&prc0, LOC50); memset((void*)LOC51, 0, sizeof(LOC51)); addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC51, 0); 
memset((void*)LOC52, 0, sizeof(LOC52)); LOC52[0] = getdatinitname_562239_839829468((*m0).module); addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_679), LOC52, 1); { Tcfilesection529005 i_562401_839829468; NI res_562482_839829468; i_562401_839829468 = (Tcfilesection529005)0; res_562482_839829468 = ((NI) 12); { while (1) { Ropeobj178006* LOC56; Ropeobj178006* LOC57; if (!(res_562482_839829468 <= ((NI) 16))) goto LA55; i_562401_839829468 = ((Tcfilesection529005) (res_562482_839829468)); LOC56 = (Ropeobj178006*)0; LOC56 = gensectionstart_530015_2760143328(i_562401_839829468); add_178482_2381377266(&prc0, LOC56); add_178482_2381377266(&prc0, (*m0).s[(i_562401_839829468)- 0]); LOC57 = (Ropeobj178006*)0; LOC57 = gensectionend_530050_2760143328(i_562401_839829468); add_178482_2381377266(&prc0, LOC57); res_562482_839829468 += ((NI) 1); } LA55: ; } } memset((void*)LOC58, 0, sizeof(LOC58)); addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC58, 0); add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 11))- 0], prc0); { NIM_CHAR i_562442_839829468; Ropeobj178006* el_562443_839829468; TY529136 HEX3Atmp_562487_839829468; NIM_CHAR i_562490_839829468; i_562442_839829468 = (NIM_CHAR)0; el_562443_839829468 = (Ropeobj178006*)0; memset((void*)HEX3Atmp_562487_839829468, 0, sizeof(HEX3Atmp_562487_839829468)); memcpy((void*)HEX3Atmp_562487_839829468, (NIM_CONST void*)(*m0).extensionloaders, sizeof(HEX3Atmp_562487_839829468)); i_562490_839829468 = 48; { if (!((NU8)(((NIM_CHAR) (((NU8)(i_562490_839829468))))) <= (NU8)(57))) goto LA62; { while (1) { i_562442_839829468 = i_562490_839829468; el_562443_839829468 = HEX3Atmp_562487_839829468[(((NU8)(i_562490_839829468)))- 48]; { Ropeobj178006* ex0; TY532811 LOC70; if (!!((el_562443_839829468 == NIM_NIL))) goto LA68; memset((void*)LOC70, 0, sizeof(LOC70)); LOC70[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (((NU8)(i_562442_839829468)))) - ((NI) 48))))); LOC70[1] = el_562443_839829468; ex0 = 
HEX25_178905_2381377266(((NimStringDesc*) &T839829468_684), LOC70, 2); add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 11))- 0], ex0); } LA68: ; { if (!((NU8)(57) <= (NU8)(((NIM_CHAR) (((NU8)(i_562490_839829468))))))) goto LA73; goto LA64; } LA73: ; i_562490_839829468 += ((NI) 1); } } LA64: ; } LA62: ; } } N_NIMCALL(void, finishtypedescriptions_535842_839829468)(Tcgen529027* m0) { NI i0; i0 = ((NI) 0); { while (1) { Ropeobj178006* LOC3; if (!(i0 < ((*m0).typestack ? (*m0).typestack->Sup.len : 0))) goto LA2; LOC3 = (Ropeobj178006*)0; LOC3 = gettypedesc_535673_839829468(m0, (*m0).typestack->data[i0]); i0 += ((NI) 1); } LA2: ; } } N_NIMCALL(Ropeobj178006*, getcopyright_561665_839829468)(NimStringDesc* cfile0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; { TY178507 LOC5; if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 4))&63U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_178277_2381377266(((NimStringDesc*) &T839829468_686)); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_685), LOC5, 1); } goto LA1; LA3: ; { TY536475 LOC7; NimStringDesc* LOC8; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rope_178277_2381377266(((NimStringDesc*) &T839829468_686)); LOC7[1] = rope_178277_2381377266(Os_176068_4151366050[(targetos_176629_4151366050)- 1].Field0); LOC7[2] = rope_178277_2381377266(Cpu_176496_4151366050[(targetcpu_176627_4151366050)- 1].Field0); LOC7[3] = rope_178277_2381377266(Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field0); LOC8 = (NimStringDesc*)0; LOC8 = getcompilecfilecmd_274284_2528170400(cfile0, NIM_FALSE); LOC7[4] = rope_178277_2381377266(LOC8); result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_687), LOC7, 5); } LA1: ; return result0; } static N_INLINE(void, addinttypes_561659_839829468)(Ropeobj178006** result0) { NimStringDesc* LOC1; TY178507 LOC2; LOC1 = (NimStringDesc*)0; LOC1 = rawNewString(tnl_176644_4151366050->Sup.len + 22); appendString(LOC1, 
((NimStringDesc*) &T839829468_688)); appendString(LOC1, tnl_176644_4151366050); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rope_178401_2381377266(((NI64) (Cpu_176496_4151366050[(targetcpu_176627_4151366050)- 1].Field1))); addf_179205_2381377266(result0, LOC1, LOC2, 1); } N_NIMCALL(Ropeobj178006*, getfileheader_561683_839829468)(NimStringDesc* cfile0) { Ropeobj178006* result0; result0 = (Ropeobj178006*)0; result0 = getcopyright_561665_839829468(cfile0); addinttypes_561659_839829468(&result0); return result0; } N_NIMCALL(void, generatethreadlocalstorage_538717_839829468)(Tcgen529027* m0) { { NIM_BOOL LOC3; NIM_BOOL LOC5; TY178507 LOC13; LOC3 = (NIM_BOOL)0; LOC3 = !((nimtv_538656_839829468 == NIM_NIL)); if (!(LOC3)) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = (((*m0).flags &(1U<<((NU)(((Codegenflag529025) 1))&7U)))!=0); if (LOC5) goto LA6; LOC5 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0); LA6: ; LOC3 = LOC5; LA4: ; if (!LOC3) goto LA7; { Ttype292840* t_538761_839829468; NI i_538768_839829468; NI L_538770_839829468; t_538761_839829468 = (Ttype292840*)0; i_538768_839829468 = ((NI) 0); L_538770_839829468 = (nimtvdeps_538674_839829468 ? 
nimtvdeps_538674_839829468->Sup.len : 0); { while (1) { Ropeobj178006* LOC12; if (!(i_538768_839829468 < L_538770_839829468)) goto LA11; t_538761_839829468 = nimtvdeps_538674_839829468->data[i_538768_839829468]; LOC12 = (Ropeobj178006*)0; LOC12 = gettypedesc_535673_839829468(m0, t_538761_839829468); i_538768_839829468 += ((NI) 1); } LA11: ; } } memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = nimtv_538656_839829468; addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 4))- 0], ((NimStringDesc*) &T839829468_689), LOC13, 1); } LA7: ; } N_NIMCALL(void, generateheaders_560104_839829468)(Tcgen529027* m0) { NimStringDesc* LOC1; Tstrentry147009* it0; LOC1 = (NimStringDesc*)0; LOC1 = rawNewString(tnl_176644_4151366050->Sup.len + tnl_176644_4151366050->Sup.len + 20); appendString(LOC1, tnl_176644_4151366050); appendString(LOC1, ((NimStringDesc*) &T839829468_690)); appendString(LOC1, tnl_176644_4151366050); add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], LOC1); it0 = ((Tstrentry147009*) ((*m0).headerfiles.head)); { while (1) { if (!!((it0 == NIM_NIL))) goto LA3; { NimStringDesc* LOC8; NimStringDesc* LOC9; Ropeobj178006* LOC10; if (!((NU8)((*it0).data->data[((NI) 0)]) == (NU8)(35))) goto LA6; LOC8 = (NimStringDesc*)0; LOC9 = (NimStringDesc*)0; LOC9 = nsuReplaceChar((*it0).data, 96, 34); LOC8 = rawNewString(LOC9->Sup.len + tnl_176644_4151366050->Sup.len + 0); appendString(LOC8, LOC9); appendString(LOC8, tnl_176644_4151366050); LOC10 = (Ropeobj178006*)0; LOC10 = rope_178277_2381377266(LOC8); add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], LOC10); } goto LA4; LA6: ; { TY178507 LOC14; if (!!((((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(34)) || ((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(60))))) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rope_178277_2381377266((*it0).data); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], ((NimStringDesc*) &T839829468_691), LOC14, 1); } goto LA4; LA12: ; { TY178507 
LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rope_178277_2381377266((*it0).data); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], ((NimStringDesc*) &T839829468_692), LOC16, 1); } LA4: ; it0 = ((Tstrentry147009*) ((*it0).Sup.next)); } LA3: ; } } N_NIMCALL(Ropeobj178006*, genmodule_562491_839829468)(Tcgen529027* m0, NimStringDesc* cfile0) { Ropeobj178006* result0; Ropeobj178006* LOC1; result0 = (Ropeobj178006*)0; result0 = getfileheader_561683_839829468(cfile0); LOC1 = (Ropeobj178006*)0; LOC1 = genmergeinfo_530203_2760143328(m0); add_178482_2381377266(&result0, LOC1); generatethreadlocalstorage_538717_839829468(m0); generateheaders_560104_839829468(m0); { Tcfilesection529005 i_562614_839829468; NI res_562622_839829468; i_562614_839829468 = (Tcfilesection529005)0; res_562622_839829468 = ((NI) 1); { while (1) { Ropeobj178006* LOC5; Ropeobj178006* LOC6; if (!(res_562622_839829468 <= ((NI) 10))) goto LA4; i_562614_839829468 = ((Tcfilesection529005) (res_562622_839829468)); LOC5 = (Ropeobj178006*)0; LOC5 = gensectionstart_530015_2760143328(i_562614_839829468); add_178482_2381377266(&result0, LOC5); add_178482_2381377266(&result0, (*m0).s[(i_562614_839829468)- 0]); LOC6 = (Ropeobj178006*)0; LOC6 = gensectionend_530050_2760143328(i_562614_839829468); add_178482_2381377266(&result0, LOC6); res_562622_839829468 += ((NI) 1); } LA4: ; } } add_178482_2381377266(&result0, (*m0).s[(((Tcfilesection529005) 11))- 0]); return result0; } N_NIMCALL(void, updatecachedmodule_563813_839829468)(Tcgen529027* m0) { NimStringDesc* cfile0; NimStringDesc* cfilenoext0; cfile0 = getcfile_563201_839829468(m0); cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490)); { NIM_BOOL LOC3; Ropeobj178006* code0; LOC3 = (NIM_BOOL)0; LOC3 = mergerequired_530832_2760143328(m0); if (!(LOC3)) goto LA4; LOC3 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)); LA4: ; if (!LOC3) goto LA5; mergefiles_531241_2760143328(cfile0, m0); 
geninitcode_562286_839829468(m0); finishtypedescriptions_535842_839829468(m0); code0 = genmodule_562491_839829468(m0, cfile0); writerope_178836_2381377266(code0, cfile0, NIM_FALSE); addfiletocompile_273863_2528170400(cfile0); } LA5: ; addfiletolink_273872_2528170400(cfilenoext0); } N_NIMCALL(void, generatethreadvarssize_538771_839829468)(Tcgen529027* m0) { { NimStringDesc* externc0; TY178507 LOC12; if (!!((nimtv_538656_839829468 == NIM_NIL))) goto LA3; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = !((gcmd_169132_2607990831 == ((Tcommands169076) 2))); if (!(LOC7)) goto LA8; LOC7 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0); LA8: ; if (!LOC7) goto LA9; externc0 = copyString(((NimStringDesc*) &T839829468_693)); } goto LA5; LA9: ; { externc0 = copyString(((NimStringDesc*) &T839829468_490)); } LA5: ; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rope_178277_2381377266(externc0); addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], ((NimStringDesc*) &T839829468_694), LOC12, 1); } LA3: ; } N_NIMCALL(NIM_BOOL, shouldrecompile_563621_839829468)(Ropeobj178006* code0, NimStringDesc* cfile0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; result0 = NIM_TRUE; { NimStringDesc* objfile0; if (!!(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 1))&63U)))!=0))) goto LA3; objfile0 = toobjfile_273859_2528170400(cfile0); { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = writeropeifnotequal_179511_2381377266(code0, cfile0); if (!LOC7) goto LA8; goto BeforeRet; } LA8: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = nosexistsFile(objfile0); if (!(LOC12)) goto LA13; LOC12 = nosfileNewer(objfile0, cfile0); LA13: ; if (!LOC12) goto LA14; result0 = NIM_FALSE; } LA14: ; } goto LA1; LA3: ; { writerope_178836_2381377266(code0, cfile0, NIM_FALSE); } LA1: ; }BeforeRet: ; return result0; } N_NIMCALL(void, writemodule_563637_839829468)(Tcgen529027* m0, NIM_BOOL pending0) { NimStringDesc* cfile0; NimStringDesc* cfilenoext0; cfile0 = 
getcfile_563201_839829468(m0); cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490)); { NIM_BOOL LOC3; Ropeobj178006* code0; LOC3 = (NIM_BOOL)0; LOC3 = !((*m0).Sup.fromcache); if (LOC3) goto LA4; LOC3 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 1))&63U)))!=0); LA4: ; if (!LOC3) goto LA5; geninitcode_562286_839829468(m0); finishtypedescriptions_535842_839829468(m0); { if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA9; add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], mainmodprocs_529148_3723162438); generatethreadvarssize_538771_839829468(m0); } LA9: ; code0 = genmodule_562491_839829468(m0, cfile0); { NIM_BOOL LOC13; LOC13 = (NIM_BOOL)0; LOC13 = shouldrecompile_563621_839829468(code0, cfile0); if (!LOC13) goto LA14; addfiletocompile_273863_2528170400(cfile0); } LA14: ; } goto LA1; LA5: ; { NIM_BOOL LOC17; NIM_BOOL LOC18; Ropeobj178006* code0; LOC17 = (NIM_BOOL)0; LOC18 = (NIM_BOOL)0; LOC18 = pending0; if (!(LOC18)) goto LA19; LOC18 = mergerequired_530832_2760143328(m0); LA19: ; LOC17 = LOC18; if (!(LOC17)) goto LA20; LOC17 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)); LA20: ; if (!LOC17) goto LA21; mergefiles_531241_2760143328(cfile0, m0); geninitcode_562286_839829468(m0); finishtypedescriptions_535842_839829468(m0); code0 = genmodule_562491_839829468(m0, cfile0); writerope_178836_2381377266(code0, cfile0, NIM_FALSE); addfiletocompile_273863_2528170400(cfile0); } goto LA1; LA21: ; { NimStringDesc* LOC24; NIM_BOOL LOC25; LOC24 = (NimStringDesc*)0; LOC24 = toobjfile_273859_2528170400(cfilenoext0); LOC25 = (NIM_BOOL)0; LOC25 = nosexistsFile(LOC24); if (!!(LOC25)) goto LA26; addfiletocompile_273863_2528170400(cfile0); } goto LA1; LA26: ; LA1: ; addfiletolink_273872_2528170400(cfilenoext0); } N_NIMCALL(void, writeheader_563149_839829468)(Tcgen529027* m0) { Ropeobj178006* result0; Ropeobj178006* guard0; TY178507 LOC1; TY128506 LOC2; TY178507 
LOC3; TY533289 LOC13; TY178507 LOC14; result0 = getcopyright_561665_839829468((*m0).filename); memset((void*)LOC1, 0, sizeof(LOC1)); memset((void*)(&LOC2), 0, sizeof(LOC2)); nossplitFile((*m0).filename, (&LOC2)); LOC1[0] = rope_178277_2381377266(LOC2.Field1); guard0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_695), LOC1, 1); memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = guard0; addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_696), LOC3, 1); addinttypes_561659_839829468(&result0); generateheaders_560104_839829468(m0); generatethreadlocalstorage_538717_839829468(m0); { Tcfilesection529005 i_563171_839829468; NI res_563197_839829468; i_563171_839829468 = (Tcfilesection529005)0; res_563197_839829468 = ((NI) 1); { while (1) { Ropeobj178006* LOC7; Ropeobj178006* LOC8; if (!(res_563197_839829468 <= ((NI) 10))) goto LA6; i_563171_839829468 = ((Tcfilesection529005) (res_563197_839829468)); LOC7 = (Ropeobj178006*)0; LOC7 = gensectionstart_530015_2760143328(i_563171_839829468); add_178482_2381377266(&result0, LOC7); add_178482_2381377266(&result0, (*m0).s[(i_563171_839829468)- 0]); LOC8 = (Ropeobj178006*)0; LOC8 = gensectionend_530050_2760143328(i_563171_839829468); add_178482_2381377266(&result0, LOC8); res_563197_839829468 += ((NI) 1); } LA6: ; } } add_178482_2381377266(&result0, (*m0).s[(((Tcfilesection529005) 11))- 0]); { if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 8))&63U)))!=0)) goto LA11; add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_22)); } LA11: ; memset((void*)LOC13, 0, sizeof(LOC13)); addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_697), LOC13, 0); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = guard0; addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_698), LOC14, 1); writerope_178836_2381377266(result0, (*m0).filename, NIM_FALSE); } N_NIMCALL(void, cgenwritemodules_563902_839829468)(void) { { if (!!((generatedheader_532201_839829468 == NIM_NIL))) goto 
LA3; finishmodule_563420_839829468(generatedheader_532201_839829468); } LA3: ; { while (1) { if (!(((NI) 0) < gforwardedprocscounter_529171_3723162438)) goto LA6; { Tcgen529027* m_563916_839829468; m_563916_839829468 = (Tcgen529027*)0; { NI i_563935_839829468; NI HEX3Atmp_563937_839829468; NI res_563939_839829468; i_563935_839829468 = (NI)0; HEX3Atmp_563937_839829468 = (NI)0; HEX3Atmp_563937_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1); res_563939_839829468 = ((NI) 0); { while (1) { if (!(res_563939_839829468 <= HEX3Atmp_563937_839829468)) goto LA10; i_563935_839829468 = res_563939_839829468; { if (!!((gmodules_529170_3723162438->data[i_563935_839829468] == NIM_NIL))) goto LA13; m_563916_839829468 = gmodules_529170_3723162438->data[i_563935_839829468]; { if (!!((*m_563916_839829468).Sup.fromcache)) goto LA17; finishmodule_563420_839829468(m_563916_839829468); } LA17: ; } LA13: ; res_563939_839829468 += ((NI) 1); } LA10: ; } } } } LA6: ; } { Tcgen529027* m_563917_839829468; m_563917_839829468 = (Tcgen529027*)0; { NI i_563946_839829468; NI HEX3Atmp_563948_839829468; NI res_563950_839829468; i_563946_839829468 = (NI)0; HEX3Atmp_563948_839829468 = (NI)0; HEX3Atmp_563948_839829468 = (gmodules_529170_3723162438 ? 
(gmodules_529170_3723162438->Sup.len-1) : -1); res_563950_839829468 = ((NI) 0); { while (1) { if (!(res_563950_839829468 <= HEX3Atmp_563948_839829468)) goto LA22; i_563946_839829468 = res_563950_839829468; { if (!!((gmodules_529170_3723162438->data[i_563946_839829468] == NIM_NIL))) goto LA25; m_563917_839829468 = gmodules_529170_3723162438->data[i_563946_839829468]; { if (!(*m_563917_839829468).Sup.fromcache) goto LA29; updatecachedmodule_563813_839829468(m_563917_839829468); } goto LA27; LA29: ; { writemodule_563637_839829468(m_563917_839829468, NIM_TRUE); } LA27: ; } LA25: ; res_563950_839829468 += ((NI) 1); } LA22: ; } } } writemapping_274789_2528170400(gmapping_529152_3723162438); { if (!!((generatedheader_532201_839829468 == NIM_NIL))) goto LA34; writeheader_563149_839829468(generatedheader_532201_839829468); } LA34: ; } N_NIMCALL(void, nullify_562833_839829468)(Ropeobj178006** arr0) { { Tcfilesection529005 i_562848_839829468; NI res_562853_839829468; i_562848_839829468 = (Tcfilesection529005)0; res_562853_839829468 = ((NI) 0); { while (1) { if (!(res_562853_839829468 <= ((NI) 17))) goto LA3; i_562848_839829468 = ((Tcfilesection529005) (res_562853_839829468)); unsureAsgnRef((void**) (&arr0[(i_562848_839829468)- 0]), NIM_NIL); res_562853_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(void, nullify_562858_839829468)(Ropeobj178006** arr0) { { NIM_CHAR i_563014_839829468; NI res_563019_839829468; i_563014_839829468 = (NIM_CHAR)0; res_563019_839829468 = ((NI) 48); { while (1) { if (!(res_563019_839829468 <= ((NI) 57))) goto LA3; i_563014_839829468 = ((NIM_CHAR) (res_563019_839829468)); unsureAsgnRef((void**) (&arr0[(((NU8)(i_563014_839829468)))- 48]), NIM_NIL); res_563019_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(void, resetmodule_562763_839829468)(Tcgen529027* m0) { initlinkedlist_147031_3771138726((&(*m0).headerfiles)); initintset_268885_2627731572((&(*m0).declaredprotos)); initidtable_296019_850551059((&(*m0).forwtypecache)); asgnRef((void**) 
(&(*m0).initproc), newproc_529206_3723162438(NIM_NIL, m0)); (*(*m0).initproc).options = initprocoptions_562635_839829468(m0); asgnRef((void**) (&(*m0).preinitproc), newpreinitproc_562625_839829468(m0)); asgnRef((void**) (&(*m0).postinitproc), newpostinitproc_562630_839829468(m0)); initnodetable_296085_850551059((&(*m0).datacache)); if ((*m0).typestack) nimGCunrefNoCycle((*m0).typestack); (*m0).typestack = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0); if ((*m0).forwardedprocs) nimGCunrefNoCycle((*m0).forwardedprocs); (*m0).forwardedprocs = (Tsymseq292804*) newSeqRC1((&NTI292804), 0); asgnRefNoCycle((void**) (&(*m0).typenodesname), gettempname_533598_839829468(m0)); asgnRefNoCycle((void**) (&(*m0).nimtypesname), gettempname_533598_839829468(m0)); { if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA3; (*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 0))%(sizeof(NU8)*8)); } goto LA1; LA3: ; { (*m0).flags &= ~(((NU8)1) << ((((Codegenflag529025) 0)) % (sizeof(NU8)*8))); } LA1: ; nullify_562833_839829468((*m0).s); (*m0).typenodes = ((NI) 0); (*m0).nimtypes = ((NI) 0); nullify_562858_839829468((*m0).extensionloaders); (*m0).Sup.fromcache = NIM_TRUE; } N_NIMCALL(void, resetcgenmodules_563024_839829468)(void) { { Tcgen529027* m_563026_839829468; m_563026_839829468 = (Tcgen529027*)0; { NI i_563031_839829468; NI HEX3Atmp_563033_839829468; NI res_563035_839829468; i_563031_839829468 = (NI)0; HEX3Atmp_563033_839829468 = (NI)0; HEX3Atmp_563033_839829468 = (gmodules_529170_3723162438 ? 
(gmodules_529170_3723162438->Sup.len-1) : -1); res_563035_839829468 = ((NI) 0); { while (1) { if (!(res_563035_839829468 <= HEX3Atmp_563033_839829468)) goto LA4; i_563031_839829468 = res_563035_839829468; { if (!!((gmodules_529170_3723162438->data[i_563031_839829468] == NIM_NIL))) goto LA7; m_563026_839829468 = gmodules_529170_3723162438->data[i_563031_839829468]; resetmodule_562763_839829468(m_563026_839829468); } LA7: ; res_563035_839829468 += ((NI) 1); } LA4: ; } } } } NIM_EXTERNC N_NOINLINE(void, compiler_cgenInit000)(void) { nimRegisterGlobalMarker(T839829468_2); nimRegisterGlobalMarker(T839829468_3); nimRegisterGlobalMarker(T839829468_5); nimRegisterGlobalMarker(T839829468_6); nimRegisterGlobalMarker(T839829468_7); nimRegisterGlobalMarker(T839829468_8); asgnRefNoCycle((void**) (&indent_532655_839829468), rope_178277_2381377266(((NimStringDesc*) &T839829468_4))); if (nimtvdeps_538674_839829468) nimGCunrefNoCycle(nimtvdeps_538674_839829468); nimtvdeps_538674_839829468 = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0); chckNil((void*)(&nimtvdeclared_538675_839829468)); genericReset((void*)(&nimtvdeclared_538675_839829468), (&NTI268030)); initintset_268885_2627731572((&nimtvdeclared_538675_839829468)); breakpointid_548860_839829468 = ((NI) 0); } NIM_EXTERNC N_NOINLINE(void, compiler_cgenDatInit000)(void) { }
divsufsort.c
/*
 * divsufsort.c for libdivsufsort
 * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*- Compiler specifics -*/
#ifdef __clang__
#pragma clang diagnostic ignored "-Wshorten-64-to-32"
#endif

/*- Dependencies -*/
#include "divsufsort_private.h"
#ifdef _OPENMP
# include <omp.h>
#endif

/*- Private Functions -*/

/* Sorts suffixes of type B*.
 *
 * Classifies every suffix of T[0..n-1] as type A, B, or B* (the first suffix
 * of a maximal run of type-B suffixes), counts first/first-two-character
 * occurrences into bucket_A / bucket_B, sorts all B* suffixes (via sssort and
 * trsort — declared in divsufsort_private.h), and distributes them to their
 * final bucket positions inside SA.
 *
 * Returns m, the number of type B* suffixes (0 <= m <= n/2).
 * bucket_A must hold BUCKET_A_SIZE entries and bucket_B BUCKET_B_SIZE entries;
 * both are (re)initialized here, so callers need not zero them. */
static saidx_t
sort_typeBstar(const sauchar_t *T, saidx_t *SA,
               saidx_t *bucket_A, saidx_t *bucket_B,
               saidx_t n) {
  saidx_t *PAb, *ISAb, *buf;
#ifdef _OPENMP
  saidx_t *curbuf;
  saidx_t l;
#endif
  saidx_t i, j, k, t, m, bufsize;
  saint_t c0, c1;
#ifdef _OPENMP
  saint_t d0, d1;
  int tmp;
#endif

  /* Initialize bucket arrays. */
  for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
  for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }

  /* Count the number of occurrences of the first one or two characters of each
     type A, B and B* suffix. Moreover, store the beginning position of all
     type B* suffixes into the array SA. */
  /* Scans T right-to-left; c1 tracks the previous character, so suffix types
     can be decided from local comparisons alone. */
  for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
    /* type A suffix. */
    do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
    if(0 <= i) {
      /* type B* suffix. */
      ++BUCKET_BSTAR(c0, c1);
      SA[--m] = i;
      /* type B suffix. */
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
        ++BUCKET_B(c0, c1);
      }
    }
  }
  m = n - m;
  /* note: A type B* suffix is lexicographically smaller than a type B suffix
     that begins with the same first two characters. */

  /* Calculate the index of start/end point of each bucket. */
  for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
    t = i + BUCKET_A(c0);
    BUCKET_A(c0) = i + j; /* start point */
    i = t + BUCKET_B(c0, c0);
    for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
      j += BUCKET_BSTAR(c0, c1);
      BUCKET_BSTAR(c0, c1) = j; /* end point */
      i += BUCKET_B(c0, c1);
    }
  }

  if(0 < m) {
    /* Sort the type B* suffixes by their first two characters. */
    /* PAb holds the m B* start positions (stored above at the tail of SA);
       ISAb is scratch space for the inverse suffix array of B* substrings. */
    PAb = SA + n - m; ISAb = SA + m;
    for(i = m - 2; 0 <= i; --i) {
      t = PAb[i], c0 = T[t], c1 = T[t + 1];
      SA[--BUCKET_BSTAR(c0, c1)] = i;
    }
    t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
    SA[--BUCKET_BSTAR(c0, c1)] = m - 1;

    /* Sort the type B* substrings using sssort. */
#ifdef _OPENMP
    /* Parallel variant: threads pull two-character buckets from a shared
       cursor (c0, c1, j) under the critical section, then sssort each bucket
       independently using a per-thread slice of the free area of SA. */
    tmp = omp_get_max_threads();
    buf = SA + m, bufsize = (n - (2 * m)) / tmp;
    c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
#pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp)
    {
      tmp = omp_get_thread_num();
      curbuf = buf + tmp * bufsize;
      k = 0;
      for(;;) {
#pragma omp critical(sssort_lock)
        {
          if(0 < (l = j)) {
            d0 = c0, d1 = c1;
            do {
              k = BUCKET_BSTAR(d0, d1);
              if(--d1 <= d0) {
                d1 = ALPHABET_SIZE - 1;
                if(--d0 < 0) { break; }
              }
            } while(((l - k) <= 1) && (0 < (l = k)));
            c0 = d0, c1 = d1, j = k;
          }
        }
        if(l == 0) { break; }
        sssort(T, PAb, SA + k, SA + l,
               curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
      }
    }
#else
    /* Serial variant: walk the two-character buckets from high (c0,c1) to low
       and substring-sort each one with the whole free area of SA as buffer. */
    buf = SA + m, bufsize = n - (2 * m);
    for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
        i = BUCKET_BSTAR(c0, c1);
        if(1 < (j - i)) {
          sssort(T, PAb, SA + i, SA + j,
                 buf, bufsize, 2, n, *(SA + i) == (m - 1));
        }
      }
    }
#endif

    /* Compute ranks of type B* substrings. */
    /* Negative entries (set via ~) mark runs of equal substrings sharing one
       rank; SA[i + 1] temporarily stores the run length. */
    for(i = m - 1; 0 <= i; --i) {
      if(0 <= SA[i]) {
        j = i;
        do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
        SA[i + 1] = i - j;
        if(i <= 0) { break; }
      }
      j = i;
      do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
      ISAb[SA[i]] = j;
    }

    /* Construct the inverse suffix array of type B* suffixes using trsort. */
    trsort(ISAb, SA, m, 1);

    /* Set the sorted order of type B* suffixes. */
    /* Re-scan T right-to-left to recover each B* start position t; a ~t entry
       flags a B* suffix immediately followed by another B* suffix. */
    for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
      if(0 <= i) {
        t = i;
        for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
        SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
      }
    }

    /* Calculate the index of start/end point of each bucket. */
    BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
    for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
      i = BUCKET_A(c0 + 1) - 1;
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
        t = i - BUCKET_B(c0, c1);
        BUCKET_B(c0, c1) = i; /* end point */

        /* Move all type B* suffixes to the correct position. */
        for(i = t, j = BUCKET_BSTAR(c0, c1);
            j <= k;
            --i, --k) { SA[i] = SA[k]; }
      }
      BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
      BUCKET_B(c0, c0) = i; /* end point */
    }
  }

  return m;
}

/* Constructs the suffix array by using the sorted order of type B* suffixes.
 *
 * On entry SA holds the m sorted B* suffixes at their bucket positions (as
 * produced by sort_typeBstar); on exit SA[0..n-1] is the complete suffix
 * array of T.  Negative (~s) entries are used transiently to mark suffixes
 * already placed. */
static void
construct_SA(const sauchar_t *T, saidx_t *SA,
             saidx_t *bucket_A, saidx_t *bucket_B,
             saidx_t n, saidx_t m) {
  saidx_t *i, *j, *k;
  saidx_t s;
  saint_t c0, c1, c2;

  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using the sorted order
       of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          *j = ~s;
          c0 = T[--s];
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else {
          assert(((s == 0) && (T[s] == c1)) || (s < 0));
          *j = ~s;
        }
      }
    }
  }

  /* Construct the suffix array by using the sorted order of type B suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else {
      assert(s < 0);
      *i = ~s;
    }
  }
}

/* Constructs the burrows-wheeler transformed string directly by using the
 * sorted order of type B* suffixes.
 *
 * Same induced-sorting sweep as construct_SA, but SA entries are overwritten
 * with BWT characters (stored as ~c to distinguish them from positions).
 * Returns the primary index (offset of the original string's rotation). */
static saidx_t
construct_BWT(const sauchar_t *T, saidx_t *SA,
              saidx_t *bucket_A, saidx_t *bucket_B,
              saidx_t n, saidx_t m) {
  saidx_t *i, *j, *k, *orig;
  saidx_t s;
  saint_t c0, c1, c2;

  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using the sorted order
       of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          c0 = T[--s];
          *j = ~((saidx_t)c0);
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else if(s != 0) {
          *j = ~s;
#ifndef NDEBUG
        } else {
          assert(T[s] == c1);
#endif
        }
      }
    }
  }

  /* Construct the BWTed string by using the sorted order of type B suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~((saidx_t)T[n - 2]) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      *i = c0;
      if((0 < s) && (T[s - 1] < c0)) { s = ~((saidx_t)T[s - 1]); }
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else if(s != 0) {
      *i = ~s;
    } else {
      orig = i; /* s == 0: this row corresponds to the whole string. */
    }
  }
  return orig - SA;
}

/*---------------------------------------------------------------------------*/

/*- Function -*/

/* Computes the suffix array SA[0..n-1] of T[0..n-1].
 * Returns 0 on success, -1 on invalid arguments, -2 on allocation failure. */
saint_t
divsufsort(const sauchar_t *T, saidx_t *SA, saidx_t n) {
  saidx_t *bucket_A, *bucket_B;
  saidx_t m;
  saint_t err = 0;

  /* Check arguments. */
  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
  else if(n == 0) { return 0; }
  else if(n == 1) { SA[0] = 0; return 0; }
  else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; }

  bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  /* Suffixsort. */
  if((bucket_A != NULL) && (bucket_B != NULL)) {
    m = sort_typeBstar(T, SA, bucket_A, bucket_B, n);
    construct_SA(T, SA, bucket_A, bucket_B, n, m);
  } else {
    err = -2;
  }

  free(bucket_B);
  free(bucket_A);

  return err;
}

/* Computes the Burrows-Wheeler transform of T[0..n-1] into U[0..n-1].
 * A, if non-NULL, is caller-provided workspace of at least n+1 entries;
 * otherwise a temporary array is allocated and freed here.
 * Returns the primary index (>= 1) on success, -1 on invalid arguments,
 * -2 on allocation failure.  U may alias T. */
saidx_t
divbwt(const sauchar_t *T, sauchar_t *U, saidx_t *A, saidx_t n) {
  saidx_t *B;
  saidx_t *bucket_A, *bucket_B;
  saidx_t m, pidx, i;

  /* Check arguments. */
  if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
  else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }

  if((B = A) == NULL) { B = (saidx_t *)malloc((size_t)(n + 1) * sizeof(saidx_t)); }
  bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  /* Burrows-Wheeler Transform. */
  if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
    m = sort_typeBstar(T, B, bucket_A, bucket_B, n);
    pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);

    /* Copy to output string. */
    /* U[0] is the last character; the primary-index row itself is skipped,
       hence the split copy around pidx. */
    U[0] = T[n - 1];
    for(i = 0; i < pidx; ++i) { U[i + 1] = (sauchar_t)B[i]; }
    for(i += 1; i < n; ++i) { U[i] = (sauchar_t)B[i]; }
    pidx += 1;
  } else {
    pidx = -2;
  }

  free(bucket_B);
  free(bucket_A);
  if(A == NULL) { free(B); }

  return pidx;
}

/* Returns the library version string (defined by the build system). */
const char *
divsufsort_version(void) {
  return PROJECT_VERSION_FULL;
}
GB_unop__cos_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__cos_fc64_fc64
// op(A') function:  GB_unop_tran__cos_fc64_fc64

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = ccos (aij)

// The macros below are consumed both by the code in this file and by the
// included template (GB_unop_transpose.c).

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = ccos (x) ;

// casting (identity cast: C and A share the same type here)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ;            \
    Cx [pC] = ccos (z) ;            \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_COS || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = ccos(aij) to each of the anz entries of Ax, writing into Cx.
// Aliasing Cx == Ax is safe: each iteration reads Ax [p] before writing
// Cx [p], and iterations touch disjoint positions.

GrB_Info GB_unop_apply__cos_fc64_fc64
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        GxB_FC64_t z = aij ;
        Cx [p] = ccos (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is done by the included template, driven by the GB_* macros
// defined above (GB_PHASE_2_OF_2 selects the numeric phase of the transpose).

GrB_Info GB_unop_tran__cos_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_binop__bclr_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__bclr_uint64
// A.*B function (eWiseMult):       GB_AemultB__bclr_uint64
// A*D function (colscale):         (none)
// D*A function (rowscale):         (node)
// C+=B function (dense accum):     GB_Cdense_accumB__bclr_uint64
// C+=b function (dense accum):     GB_Cdense_accumb__bclr_uint64
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__bclr_uint64
// C=scalar+B                       GB_bind1st__bclr_uint64
// C=scalar+B'                      GB_bind1st_tran__bclr_uint64
// C=A+scalar                       GB_bind2nd__bclr_uint64
// C=A'+scalar                      GB_bind2nd_tran__bclr_uint64

// NOTE(review): "(node)" above and in the disabled rowscale kernel below
// looks like a generator typo for the "(none)" placeholder used for the
// other disabled kernels; harmless since that code is under #if 0.

// C type:   uint64_t
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_BITCLR (aij, bij, uint64_t, 64)

// The GB_* macros below are consumed by the template files included further
// down (GB_add_template.c, GB_emult_template.c, GB_unop_transpose.c, ...).

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB)  \
    uint64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y)   \
    z = GB_BITCLR (x, y, uint64_t, 64) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_UINT64 || GxB_NO_BCLR_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// BITCLR is none of these, so this kernel is disabled for this operator.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bclr_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bclr_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bclr_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (generator emits a second return after the block)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (node)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__bclr_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__bclr_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__bclr_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t bij = Bx [p] ;
        Cx [p] = GB_BITCLR (x, bij, uint64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__bclr_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        Cx [p] = GB_BITCLR (aij, y, uint64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = Ax [pA] ;                    \
    Cx [pC] = GB_BITCLR (x, aij, uint64_t, 64) ;        \
}

GrB_Info GB_bind1st_tran__bclr_uint64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore the macro for any later use of the templates
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = Ax [pA] ;                    \
    Cx [pC] = GB_BITCLR (aij, y, uint64_t, 64) ;        \
}

GrB_Info GB_bind2nd_tran__bclr_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
integrator.h
#ifndef INTEGRATOR_H #define INTEGRATOR_H #include <omp.h> #include <memory> #include "camera.h" #include "film.h" #include "sampler.h" #include "timer.h" #include "util.h" class Integrator { public: std::shared_ptr<Camera> cam; std::shared_ptr<Sampler> sampler; Integrator(std::shared_ptr<Camera> _cam, std::shared_ptr<Sampler> _sampler) : cam(_cam), sampler(_sampler) {}; virtual void render(const Scene& scene) const = 0; virtual void compute(const Scene& scene) const = 0; }; class NormalRenderer : public Integrator { public: NormalRenderer(std::shared_ptr<Camera> _cam, std::shared_ptr<Sampler> _sampler) : Integrator(_cam, _sampler) {}; void render(const Scene& scene) const { for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float u = (2.0*i - cam->film->width)/cam->film->width; float v = -(2.0*j - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); Hit res; if(scene.intersect(ray, res)) { cam->film->setPixel(i, j, (res.hitNormal + 1.0f)/2.0f); } else { cam->film->setPixel(i, j, RGB(0.0f)); } } } cam->film->ppm_output("output.ppm"); }; void compute(const Scene& scene) const { for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float u = (2.0*i - cam->film->width)/cam->film->width; float v = -(2.0*j - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); Hit res; if(scene.intersect(ray, res)) { cam->film->addSample(i, j, (res.hitNormal + 1.0f)/2.0f); } else { cam->film->addSample(i, j, RGB(0.0f)); } } } }; }; class DepthRenderer : public Integrator { public: DepthRenderer(std::shared_ptr<Camera> _cam, std::shared_ptr<Sampler> _sampler) : Integrator(_cam, _sampler) {}; void render(const Scene& scene) const { for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float u = (2.0*i - cam->film->width)/cam->film->width; float v = -(2.0*j - cam->film->height)/cam->film->height; float w; Ray ray 
= cam->getRay(u, v, w, *sampler); Hit res; if(scene.intersect(ray, res)) { cam->film->setPixel(i, j, res.t); } else { cam->film->setPixel(i, j, RGB(0)); } } } cam->film->ppm_output("output.ppm"); }; void compute(const Scene& scene) const {}; }; class DotRenderer : public Integrator { public: DotRenderer(std::shared_ptr<Camera> _cam, std::shared_ptr<Sampler> _sampler) : Integrator(_cam, _sampler) {}; void render(const Scene& scene) const { for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float u = (2.0*i - cam->film->width)/cam->film->width; float v = -(2.0*j - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); Hit res; if(scene.intersect(ray, res)) { float d = dot(-ray.direction, res.hitNormal); std::cout << d << std::endl; cam->film->setPixel(i, j, w*(std::max(dot(-ray.direction, res.hitNormal), 0.0f))); } else { cam->film->setPixel(i, j, w*RGB(0.0f)); } } } cam->film->ppm_output("output.ppm"); }; void compute(const Scene& scene) const {}; }; class AlbedoRenderer : public Integrator { public: AlbedoRenderer(std::shared_ptr<Camera> _cam, std::shared_ptr<Sampler> _sampler) : Integrator(_cam, _sampler) {}; void render(const Scene& scene) const { for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float u = (2.0*i - cam->film->width)/cam->film->width; float v = -(2.0*j - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); Hit res; if(scene.intersect(ray, res)) { std::shared_ptr<Material> hitMaterial = res.hitPrimitive->material; Vec3 wo = -ray.direction; Vec3 n = res.hitNormal; Vec3 s = res.dpdu; Vec3 t = normalize(cross(s, n)); Vec3 wo_local = worldToLocal(wo, n, s, t); Vec3 wi_local; float brdf_pdf; RGB brdf_f = hitMaterial->sample(wo_local, wi_local, *sampler, brdf_pdf); Vec3 wi = localToWorld(wi_local, n, s, t); cam->film->setPixel(i, j, brdf_f); } else { cam->film->setPixel(i, j, RGB(0.0f)); } } } 
cam->film->ppm_output("output.ppm"); }; void compute(const Scene& scene) const {}; }; class AORenderer : public Integrator { public: AORenderer(std::shared_ptr<Camera> _cam, std::shared_ptr<Sampler> _sampler) : Integrator(_cam, _sampler) {}; void render(const Scene& scene) const { for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float u = (2.0*i - cam->film->width)/cam->film->width; float v = -(2.0*j - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); Hit res; if(scene.intersect(ray, res)) { std::shared_ptr<Material> hitMaterial = res.hitPrimitive->material; Vec3 wo = -ray.direction; Vec3 n = res.hitNormal; Vec3 s = res.dpdu; Vec3 t = normalize(cross(s, n)); Vec3 wo_local = worldToLocal(wo, n, s, t); Vec3 wi_local; float brdf_pdf; int hit_count = 0; for(int k = 0; k < 100; k++) { RGB brdf_f = hitMaterial->sample(wo_local, wi_local, *sampler, brdf_pdf); Vec3 wi = localToWorld(wi, n, s, t); Ray nextRay(res.hitPos, wi); Hit res2; if(scene.intersect(nextRay, res2)) hit_count++; } cam->film->setPixel(i, j, hit_count/100.0f*RGB(1.0f)); } else { cam->film->setPixel(i, j, w*RGB(0.0f)); } } } cam->film->ppm_output("output.ppm"); }; void compute(const Scene& scene) const {}; }; class WireframeRenderer : public Integrator { public: WireframeRenderer(std::shared_ptr<Camera> _cam, std::shared_ptr<Sampler> _sampler) : Integrator(_cam, _sampler) {}; void render(const Scene& scene) const { for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float u = (2.0*i - cam->film->width)/cam->film->width; float v = -(2.0*j - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); Hit res; if(scene.intersect(ray, res)) { RGB col(res.uv.x, res.uv.y, 0.0f); cam->film->setPixel(i, j, w*col); } else { cam->film->setPixel(i, j, w*RGB(0.0f)); } } } cam->film->ppm_output("output.ppm"); }; void compute(const Scene& scene) const {}; }; class 
PathTraceDepthRenderer : public Integrator { public: int maxDepth; PathTraceDepthRenderer(std::shared_ptr<Camera> _cam, std::shared_ptr<Sampler> _sampler, int _maxDepth) : Integrator(_cam, _sampler), maxDepth(_maxDepth) {}; RGB Li(const Ray& ray, const Scene& scene, int depth = 0, float roulette = 1.0f) const { if(depth > maxDepth) return RGB(1.0f); Hit res; RGB col; if(scene.intersect(ray, res)) { //マテリアル const std::shared_ptr<Material> hitMaterial = res.hitPrimitive->material; //BRDFの計算と方向のサンプリング const Vec3 wo = -ray.direction; const Vec3 n = res.hitNormal; const Vec3 s = res.dpdu; const Vec3 t = normalize(cross(s, n)); Vec3 wo_local = worldToLocal(wo, n, s, t); Vec3 wi_local; float brdf_pdf; const RGB brdf_f = hitMaterial->sample(wo_local, wi_local, *sampler, brdf_pdf); Vec3 wi = localToWorld(wi, n, s, t); Ray nextRay(res.hitPos, wi); return Li(nextRay, scene, depth + 1, roulette); } else { return RGB((float)depth/maxDepth); } }; void render(const Scene& scene) const { for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float u = (2.0*i - cam->film->width)/cam->film->width; float v = -(2.0*j - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); RGB col = Li(ray, scene); cam->film->setPixel(i, j, w*col); } } cam->film->ppm_output("output.ppm"); }; void compute(const Scene& scene) const {}; }; class PathTrace : public Integrator { public: int pixelSamples; int maxDepth; PathTrace(std::shared_ptr<Camera> _cam, std::shared_ptr<Sampler> _sampler, int _pixelSamples, int _maxDepth) : Integrator(_cam, _sampler), pixelSamples(_pixelSamples), maxDepth(_maxDepth) {}; RGB Li(const Ray& ray, const Scene& scene, int depth = 0, float roulette = 1.0f) const { //ロシアンルーレット if(depth > 10) { if(sampler->getNext() < 1.0f - roulette) { return RGB(0.0f); } roulette *= 0.9f; } if(depth > maxDepth) return RGB(0.0f); Hit res; RGB col; if(scene.intersect(ray, res)) { //もし光源に当たったら終了 if(res.hitPrimitive->areaLight != 
nullptr) { return res.hitPrimitive->areaLight->Le(res)/roulette; } //マテリアル const std::shared_ptr<Material> hitMaterial = res.hitPrimitive->material; //ローカル座標系の構築 const Vec3 wo = -ray.direction; if(iszero(wo)) { std::cout << res.t << std::endl; } const Vec3 n = res.hitNormal; const Vec3 s = res.dpdu; const Vec3 t = normalize(cross(s, n)); const Vec3 wo_local = worldToLocal(wo, n, s, t); //BRDFの計算と方向のサンプリング Vec3 wi_local; float brdf_pdf = 1.0f; const RGB brdf_f = hitMaterial->sample(wo_local, wi_local, *sampler, brdf_pdf); //もしサンプリングが失敗したら終了 if(iszero(wi_local)) return RGB(0.0f); //サンプリングされた方向をワールド座標系に戻す Vec3 wi = localToWorld(wi_local, n, s, t); //コサイン項 const float cos_term = std::abs(wi_local.y); //係数 RGB k = 1.0f/(roulette*brdf_pdf) * cos_term * brdf_f; if(k.x < 0.0f || k.y < 0.0f || k.z < 0.0f) { std::cout << "minus k detected" << std::endl; std::cout << "wo: " << wo << std::endl; std::cout << "wo_local: " << wo_local << std::endl; std::cout << "n: " << n << std::endl; std::cout << "wi: " << wi << std::endl; std::cout << "wi_local: " << wi_local << std::endl; std::cout << "cos_term: " << cos_term << std::endl; std::cout << "brdf_f:" << brdf_f << std::endl; std::cout << "k: " << k << std::endl; k = abs(k); } if(isnan(k) || isinf(k)) { std::cout << "inf or nan k detected" << std::endl; std::cout << "brdf_pdf: " << brdf_pdf << std::endl; std::cout << "brdf_f: " << brdf_f << std::endl; return RGB(0); } //レンダリング方程式の計算 Ray nextRay(res.hitPos, wi); col += k * Li(nextRay, scene, depth + 1, roulette); } else { col = scene.sky->getSky(ray); } return col; }; void render(const Scene& scene) const { Timer timer; if(!cam->two_eyes) { timer.start(); for(int k = 0; k < pixelSamples; k++) { #pragma omp parallel for schedule(dynamic, 1) for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float rx = sampler->getNext(); float ry = sampler->getNext(); float px = i + rx; float py = j + ry; float u = (2.0*(i + rx) - 
cam->film->width)/cam->film->height; float v = -(2.0*(j + ry) - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); RGB col = Li(ray, scene); cam->film->addSample(i, j, w*col); } } std::cout << progressbar(k, pixelSamples) << " " << percentage(k, pixelSamples) << '\r' << std::flush; } timer.stop("Rendering Finished"); cam->film->divide(pixelSamples); cam->film->gamma_correction(); cam->film->ppm_output("output.ppm"); } else { timer.start(); for(int k = 0; k < pixelSamples; k++) { #pragma omp parallel for schedule(dynamic, 1) for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float rx = sampler->getNext(); float ry = sampler->getNext(); float px = i + rx; float py = j + ry; float u = (2.0*(i + rx) - cam->film->width)/cam->film->width; float v = -(2.0*(j + ry) - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); RGB col = Li(ray, scene); cam->film->addSample(i, j, w*col); } } std::cout << progressbar(k, pixelSamples) << " " << percentage(k, pixelSamples) << '\r' << std::flush; } timer.stop("Rendering Finished"); cam->film->divide(pixelSamples); cam->film->gamma_correction(); cam->film->ppm_output("left.ppm"); timer.start(); cam->film->clear(); for(int k = 0; k < pixelSamples; k++) { #pragma omp parallel for schedule(dynamic, 1) for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float rx = sampler->getNext(); float ry = sampler->getNext(); float px = i + rx; float py = j + ry; float u = (2.0*(i + rx) - cam->film->width)/cam->film->width; float v = -(2.0*(j + ry) - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler, false); RGB col = Li(ray, scene); cam->film->addSample(i, j, w*col); } } std::cout << progressbar(k, pixelSamples) << " " << percentage(k, pixelSamples) << '\r' << std::flush; } timer.stop("Rendering Finished"); cam->film->divide(pixelSamples); 
cam->film->gamma_correction(); cam->film->ppm_output("right.ppm"); } }; void compute(const Scene& scene) const { #pragma omp parallel for schedule(dynamic, 1) for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float rx = sampler->getNext(); float ry = sampler->getNext(); float px = i + rx; float py = j + ry; float u = (2.0*(i + rx) - cam->film->width)/cam->film->height; float v = -(2.0*(j + ry) - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); RGB col = Li(ray, scene); cam->film->addSample(i, j, w*col); } } }; }; class PathTraceExplicit : public Integrator { public: int pixelSamples; int maxDepth; PathTraceExplicit(std::shared_ptr<Camera> _cam, std::shared_ptr<Sampler> _sampler, int _pixelSamples, int _maxDepth) : Integrator(_cam, _sampler), pixelSamples(_pixelSamples), maxDepth(_maxDepth) {}; RGB Li(const Ray& ray, const Scene& scene, Vec3& hit_le, int depth = 0, float roulette = 1.0f) const { //ロシアンルーレット if(depth > 10) { if(sampler->getNext() < 1.0f - roulette) { return RGB(0.0f); } roulette *= 0.9f; } if(depth > maxDepth) return RGB(0.0f); Hit res; RGB col; if(scene.intersect(ray, res)) { //光源に当たった場合 if(res.hitPrimitive->areaLight != nullptr) { //直接光源に当たった場合 if(depth == 0) { hit_le = res.hitPrimitive->areaLight->Le(res); } return RGB(0.0f); } //マテリアル const std::shared_ptr<Material> hitMaterial = res.hitPrimitive->material; //ローカル座標系の構築 const Vec3 wo = -ray.direction; const Vec3 n = res.hitNormal; const Vec3 s = res.dpdu; const Vec3 t = normalize(cross(s, n)); const Vec3 wo_local = worldToLocal(wo, n, s, t); //各光源からの寄与を計算 //DiffuseあるいはGlossyの場合のみに寄与を計算する if(hitMaterial->type == MATERIAL_TYPE::DIFFUSE || hitMaterial->type == MATERIAL_TYPE::GLOSSY) { for(const std::shared_ptr<Light> light : scene.lights) { //光源上で点をサンプリング float light_pdf = 1.0f; Vec3 wi_light; const RGB le = light->sample(res, *sampler, wi_light, light_pdf); const Vec3 wi_light_local = worldToLocal(wi_light, n, s, t); 
//光源に向かうシャドウレイを生成 Ray shadowRay(res.hitPos, wi_light); Hit shadow_res; //AreaLight if(light->type == LIGHT_TYPE::AREA) { //シャドウレイが物体に当たったとき、それがサンプリング生成元の光源だった場合は寄与を蓄積 if(scene.intersect(shadowRay, shadow_res)) { if(shadow_res.hitPrimitive->areaLight == light) { col += hitMaterial->f(wo_local, wi_light_local) * le/light_pdf * std::max(wi_light_local.y, 0.0f); } else { //std::cout << "shadow ray missed" << std::endl; } } } //PointLight else if(light->type == LIGHT_TYPE::POINT) { if(!scene.intersect(shadowRay, shadow_res)) col += hitMaterial->f(wo_local, wi_light_local) * le/light_pdf * std::max(wi_light_local.y, 0.0f); } //DirectionalLight else if(light->type == LIGHT_TYPE::DIRECTIONAL) { if(!scene.intersect(shadowRay, shadow_res)) col += hitMaterial->f(wo_local, wi_light_local) * le/light_pdf * std::max(wi_light_local.y, 0.0f); } } } //BRDFの計算と方向のサンプリング Vec3 wi_local; float brdf_pdf = 1.0f; const RGB brdf_f = hitMaterial->sample(wo_local, wi_local, *sampler, brdf_pdf); //もしサンプリングが失敗したら終了 if(iszero(wi_local)) return RGB(0.0f); //サンプリングされた方向をワールド座標系に戻す Vec3 wi = localToWorld(wi_local, n, s, t); //コサイン項 const float cos_term = std::abs(wi_local.y); //係数 RGB k = 1.0f/(roulette*brdf_pdf) * cos_term * brdf_f; if(k.x < 0.0f || k.y < 0.0f || k.z < 0.0f) { std::cout << "minus k detected" << std::endl; std::cout << "wo: " << wo << std::endl; std::cout << "wo_local: " << wo_local << std::endl; std::cout << "n: " << n << std::endl; std::cout << "wi: " << wi << std::endl; std::cout << "wi_local: " << wi_local << std::endl; std::cout << "cos_term: " << cos_term << std::endl; std::cout << "brdf_f:" << brdf_f << std::endl; std::cout << "k: " << k << std::endl; k = abs(k); } if(isnan(k) || isinf(k)) { std::cout << "inf or nan k detected" << std::endl; return RGB(0); } //レンダリング方程式の計算 Ray nextRay(res.hitPos, wi); col += k * Li(nextRay, scene, hit_le, depth + 1, roulette); } else { col = scene.sky->getSky(ray); } return col; }; void render(const Scene& scene) const { Timer timer; 
if(!cam->two_eyes) { timer.start(); for(int k = 0; k < pixelSamples; k++) { #pragma omp parallel for schedule(dynamic, 1) for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float rx = sampler->getNext(); float ry = sampler->getNext(); float px = i + rx; float py = j + ry; float u = (2.0*(i + rx) - cam->film->width)/cam->film->height; float v = -(2.0*(j + ry) - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); Vec3 hit_le; RGB col = Li(ray, scene, hit_le); if(!nonzero(hit_le)) { cam->film->addSample(i, j, w*col); } else { cam->film->addSample(i, j, w*hit_le); }; } } std::cout << progressbar(k, pixelSamples) << " " << percentage(k, pixelSamples) << '\r' << std::flush; } timer.stop("Rendering Finished"); cam->film->divide(pixelSamples); cam->film->gamma_correction(); cam->film->ppm_output("output.ppm"); } else { timer.start(); for(int k = 0; k < pixelSamples; k++) { #pragma omp parallel for schedule(dynamic, 1) for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float rx = sampler->getNext(); float ry = sampler->getNext(); float px = i + rx; float py = j + ry; float u = (2.0*(i + rx) - cam->film->width)/cam->film->width; float v = -(2.0*(j + ry) - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); Vec3 hit_le; RGB col = Li(ray, scene, hit_le); if(!nonzero(hit_le)) { cam->film->addSample(i, j, w*col); } else { cam->film->addSample(i, j, w*hit_le); } } } std::cout << progressbar(k, pixelSamples) << " " << percentage(k, pixelSamples) << '\r' << std::flush; } timer.stop("Rendering Finished"); cam->film->divide(pixelSamples); cam->film->gamma_correction(); cam->film->ppm_output("left.ppm"); timer.start(); cam->film->clear(); for(int k = 0; k < pixelSamples; k++) { #pragma omp parallel for schedule(dynamic, 1) for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float rx = 
sampler->getNext(); float ry = sampler->getNext(); float px = i + rx; float py = j + ry; float u = (2.0*(i + rx) - cam->film->width)/cam->film->width; float v = -(2.0*(j + ry) - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler, false); Vec3 hit_le; RGB col = Li(ray, scene, hit_le); if(!nonzero(hit_le)) { cam->film->addSample(i, j, w*col); } else { cam->film->addSample(i, j, w*hit_le); } } } std::cout << progressbar(k, pixelSamples) << " " << percentage(k, pixelSamples) << '\r' << std::flush; } timer.stop("Rendering Finished"); cam->film->divide(pixelSamples); cam->film->gamma_correction(); cam->film->ppm_output("right.ppm"); } }; void compute(const Scene& scene) const { #pragma omp parallel for schedule(dynamic, 1) for(int i = 0; i < cam->film->width; i++) { for(int j = 0; j < cam->film->height; j++) { float rx = sampler->getNext(); float ry = sampler->getNext(); float px = i + rx; float py = j + ry; float u = (2.0*(i + rx) - cam->film->width)/cam->film->height; float v = -(2.0*(j + ry) - cam->film->height)/cam->film->height; float w; Ray ray = cam->getRay(u, v, w, *sampler); Vec3 hit_le; RGB col = Li(ray, scene, hit_le); if(!nonzero(hit_le)) { cam->film->addSample(i, j, w*col); } else { cam->film->addSample(i, j, w*hit_le); } } } }; }; #endif
main-single-mailbox-atomics.c
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <shmem.h>
#include <shmemx.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <omp.h>
#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <limits.h>

#define VERBOSE

// Hash selection is disabled; `crc`/`size_type` are expected to come from
// elsewhere in the build when one of these options is re-enabled.
// #ifdef USE_CRC
// #include "crc.h"
// typedef int32_t size_type;
// #elif USE_MURMUR
// #include "MurmurHash3.h"
// typedef uint32_t crc;
// typedef int32_t size_type;
// #elif USE_CITY32
// #include "city.h"
// typedef uint32_t crc;
// typedef int32_t size_type;
// #elif USE_CITY64
// #include "city.h"
// typedef uint64_t crc;
// typedef int64_t size_type;
// #else
// #error No hashing algorithm specific
// #endif

#include "mrg.h"
#include "packed_edge.h"
#include "utilities.h"
#include "generator.h"

// #define QUEUE_SIZE 1572864
#define QUEUE_SIZE 1048576

// #define INCOMING_MAILBOX_SIZE_IN_BYTES 100663296
// Per-PE symmetric receive mailbox size
#define INCOMING_MAILBOX_SIZE_IN_BYTES (200 * 1024 * 1024)

// Number of SHMEM communication contexts created per OpenMP thread
#define CONTEXTS_PER_THREAD 2

/*
 * Header format:
 *
 * sizeof(crc) bytes : header checksum
 * sizeof(size_type) bytes : Length of whole packet in bytes (N)
 * sizeof(crc) bytes : CRC32 body checksum
 * N - sizeof(crc) - sizeof(size_type) - sizeof(crc) bytes : Body
 */

// Number of edges coalesced into one outgoing packet
#define COALESCING 512
#define SEND_HEADER_SIZE (sizeof(crc) + sizeof(size_type) + sizeof(crc))
#define SEND_BUFFER_SIZE (SEND_HEADER_SIZE + COALESCING * sizeof(packed_edge))

#define BITS_PER_BYTE 8
#define BITS_PER_INT (sizeof(unsigned) * BITS_PER_BYTE)
#define BITS_PER_LONGLONG (sizeof(unsigned long long) * BITS_PER_BYTE)

#define MAX_ITERS 10

// Singly-linked free list node holding one pre-allocated send buffer
typedef struct _send_buf {
    unsigned char *buf;
    struct _send_buf *next;
} send_buf;

// Number of packed_edge payload slots in a buffer of the given byte size
#define SEND_BUF_SIZE_TO_NEDGES(my_send_buf_size) (((my_send_buf_size) - SEND_HEADER_SIZE) / sizeof(packed_edge))

// Pop a buffer from the pre-allocated free list and make it the active
// outgoing buffer for my_target_pe (expects send_bufs/send_bufs_size/
// pre_allocated_send_bufs in the enclosing scope).
#define GET_SEND_BUF(my_target_pe) { \
    assert(send_bufs[my_target_pe] == NULL); \
    send_buf *gotten = pre_allocated_send_bufs; \
    assert(gotten); \
    pre_allocated_send_bufs = gotten->next; \
    send_bufs[my_target_pe] = gotten; \
    send_bufs_size[my_target_pe] = SEND_HEADER_SIZE; \
}

// Finalize the active buffer for my_target_pe: record the packet length and
// compute body + header checksums into the header.
#define PREPARE_PACKET(my_target_pe) { \
    assert(send_bufs[my_target_pe]); \
    const unsigned send_buf_size = send_bufs_size[my_target_pe]; \
    assert((send_buf_size - SEND_HEADER_SIZE) % sizeof(packed_edge) == 0); \
    assert(send_buf_size <= SEND_BUFFER_SIZE); \
    const unsigned nedges = SEND_BUF_SIZE_TO_NEDGES(send_buf_size); \
    unsigned char *send_buf = send_bufs[my_target_pe]->buf; \
    /* Save the total size of this packet */ \
    *((size_type *)(send_buf + sizeof(crc))) = send_buf_size; \
    /* Save the CRC of the body of this packet */ \
    *((crc *)(send_buf + sizeof(crc) + sizeof(size_type))) = hash( \
        (const unsigned char *)(send_buf + SEND_HEADER_SIZE), \
        send_buf_size - SEND_HEADER_SIZE); \
    /* Save the CRC of the header of this packet */ \
    *((crc *)send_buf) = hash( \
        (const unsigned char *)(send_buf + sizeof(crc)), \
        SEND_HEADER_SIZE - sizeof(crc)); \
}

// Reserve space in the target PE's mailbox with an atomic fetch-add, then
// push the packet there with a non-blocking put and release the buffer slot.
#define SEND_PACKET(my_target_pe) { \
    PREPARE_PACKET(my_target_pe) \
\
    const int remote_offset = shmem_int_fadd( \
        recv_buf_index, send_bufs_size[my_target_pe], \
        my_target_pe); \
    assert(remote_offset + send_bufs_size[my_target_pe] < INCOMING_MAILBOX_SIZE_IN_BYTES); \
    shmem_char_put_nbi((char *)(recv_buf + remote_offset), \
        (const char *)send_bufs[my_target_pe]->buf, \
        send_bufs_size[my_target_pe], my_target_pe); \
\
    send_bufs[my_target_pe] = NULL; \
    send_bufs_size[my_target_pe] = 0; \
}

// #define VERBOSE
// #define PROFILE

// This PE's rank and the total number of PEs (set in main)
static int pe = -1;
static int npes = -1;

// Debug signal handler: report and abort so a backtrace is produced.
void sig_handler(int signo) {
    fprintf(stderr, "%d: received signal %d %d\n", pe, signo, SIGUSR1);
    raise(SIGABRT);
    assert(0); // should never reach here
}

// Watchdog thread body: sleep for *data seconds, then signal this process.
void *kill_func(void *data) {
    int kill_seconds = *((int *)data);
    int err = sleep(kill_seconds);
    assert(err == 0);
    fprintf(stderr, "hitting pe %d with SUGUSR1\n", pe);
    raise(SIGUSR1);
    return NULL;
}

#ifdef PROFILE
// Profiling counters (only compiled when PROFILE is defined)
unsigned long long hash_time = 0;
unsigned long long hash_calls = 0;
unsigned long long
wasted_hashes = 0;
unsigned long long total_packets_received = 0;
unsigned long long n_packets_wasted = 0;
unsigned long long total_elements_received = 0;
unsigned long long n_elements_wasted = 0;
#ifdef DETAILED_PROFILE
unsigned *wavefront_visited = NULL;
unsigned long long duplicates_in_same_wavefront = 0;
unsigned long long duplicates_in_same_wavefront_total = 0;
#endif
#endif

// Disabled packet-checksum helper, kept for reference.
// static inline crc hash(const unsigned char * const data, const size_t len) {
// #ifdef PROFILE
//     const unsigned long long start_time = current_time_ns();
// #endif
//
//     crc result;
// #ifdef USE_CRC
//     result = crcFast(data, len);
// #elif USE_MURMUR
//     MurmurHash3_x86_32(data, len, 12345, &result);
// #elif USE_CITY32
//     result = CityHash32((const char *)data, len);
// #elif USE_CITY64
//     result = CityHash64((const char *)data, len);
// #else
// #error No hashing algorithm specified
// #endif
//
// #ifdef PROFILE
//     hash_time += (current_time_ns() - start_time);
//     hash_calls++;
// #endif
//
//     return result;
// }

// Pre-generated BFS root vertices used by the benchmark runs.
uint64_t bfs_roots[] = {240425174, 115565041, 66063943, 180487911, 11178951,
    123935973, 231036167, 373595937, 363787030, 85801485, 108275987, 69071368,
    514373733, 251500048, 140103887, 506907254, 39995468, 195903646, 21863341,
    390997409, 470978452, 372755572, 449581394, 461086083, 357027875,
    355651295, 18628407, 427844427, 273604491, 372475785, 427329960,
    465597328, 78313325, 90706091, 457847627, 430362844, 178489195, 374418701,
    7644678, 154891942, 353689376, 56388509, 191747720, 264370699, 20638787,
    421731131, 14127289, 411537113, 397525451, 189929616, 140277533,
    221845716, 135921328, 141538717, 264336150, 267866811, 413698500,
    263044574, 490922152, 81101617, 415841963, 132009584, 67293842,
    148419562};

volatile long long n_local_edges = 0;
volatile long long max_n_local_edges;

// Vertices are block-distributed across PEs; size of each PE's block.
static uint64_t get_vertices_per_pe(uint64_t nvertices) {
    return (nvertices + npes - 1) / npes;
}

// First global vertex id owned by the given PE.
static uint64_t get_starting_vertex_for_pe(int pe, uint64_t nvertices) {
    uint64_t vertices_per_pe =
get_vertices_per_pe(nvertices);
    return pe * vertices_per_pe;
}

// One past the last global vertex id owned by the given PE (clamped).
static uint64_t get_ending_vertex_for_pe(int pe, uint64_t nvertices) {
    uint64_t vertices_per_pe = get_vertices_per_pe(nvertices);
    uint64_t limit = (pe + 1) * vertices_per_pe;
    if (limit > nvertices) limit = nvertices;
    return limit;
}

// PE that owns the given global vertex id under the block distribution.
static inline int get_owner_pe(uint64_t vertex, uint64_t nvertices) {
    uint64_t vertices_per_pe = get_vertices_per_pe(nvertices);
    return vertex / vertices_per_pe;
}

// Set one bit in a 64-bit-word bitmap of `length` words (non-atomic).
static inline void set_visited_longlong(const uint64_t bit_index,
        unsigned long long *vector, const unsigned length) {
    const uint64_t longlong_index = bit_index / (uint64_t)BITS_PER_LONGLONG;
    assert(longlong_index >= 0 && longlong_index < length);
    const uint64_t longlong_bit_index = bit_index % (uint64_t)BITS_PER_LONGLONG;
    const unsigned long long mask = ((unsigned long long)1 << longlong_bit_index);
    vector[longlong_index] |= mask;
}

// Test one bit in a 64-bit-word bitmap; returns 0 or 1.
static inline int is_visited_longlong(const uint64_t bit_index,
        const unsigned long long *vector) {
    const unsigned longlong_index = bit_index / (uint64_t)BITS_PER_LONGLONG;
    const uint64_t longlong_bit_index = bit_index % (uint64_t)BITS_PER_LONGLONG;
    const unsigned long long mask = ((unsigned long long)1 << longlong_bit_index);
    return (((vector[longlong_index] & mask) > 0) ?
1 : 0);
}

// Set one bit in an unsigned-int-word visited bitmap (non-atomic).
static inline void set_visited(const uint64_t global_vertex_id,
        unsigned *visited, const unsigned visited_length) {
    const int word_index = global_vertex_id / BITS_PER_INT;
    assert(word_index < visited_length);
    const int bit_index = global_vertex_id % BITS_PER_INT;
    const int mask = (1 << bit_index);
    // __sync_fetch_and_or(visited + word_index, mask);
    visited[word_index] |= mask;
}

// Test one bit in an unsigned-int-word visited bitmap; returns 0 or 1.
static inline int is_visited(const uint64_t global_vertex_id,
        const unsigned *visited, const size_t visited_length) {
    const unsigned word_index = global_vertex_id / BITS_PER_INT;
    assert(word_index < visited_length);
    const int bit_index = global_vertex_id % BITS_PER_INT;
    const int mask = (1 << bit_index);
    return (((visited[word_index] & mask) > 0) ? 1 : 0);
}

/* Spread the two 64-bit numbers into five nonzero values in the correct
 * range. */
static void make_mrg_seed(uint64_t userseed1, uint64_t userseed2,
        uint_fast32_t* seed) {
    seed[0] = (userseed1 & 0x3FFFFFFF) + 1;
    seed[1] = ((userseed1 >> 30) & 0x3FFFFFFF) + 1;
    seed[2] = (userseed2 & 0x3FFFFFFF) + 1;
    seed[3] = ((userseed2 >> 30) & 0x3FFFFFFF) + 1;
    seed[4] = ((userseed2 >> 60) << 4) + (userseed1 >> 60) + 1;
}

// qsort comparator for uint64_t values in ascending order.
static int compare_uint64_t(const void *a, const void *b) {
    const uint64_t *aa = (const uint64_t *)a;
    const uint64_t *bb = (const uint64_t *)b;
    if (*aa < *bb) {
        return -1;
    } else if (*aa == *bb) {
        return 0;
    } else {
        return 1;
    }
}

// One BFS wavefront expansion over the local vertex partition.  Vertices
// newly reached in the previous iteration (last_marked but not yet marked)
// signal their unvisited neighbors into `marking` and record predecessors in
// `preds`.  Two strategies are used: a top-down push over local vertices, or
// (for iterations 2 and 3, typically the widest wavefronts) a bottom-up pull
// over all global vertices.  Returns the number of newly signalled vertices;
// also reports the time spent in each phase via setting_time/saving_time.
static inline long long traverse_all_verts(const uint64_t nglobalverts,
        unsigned long long * restrict marked,
        unsigned long long * restrict last_marked,
        unsigned long long * restrict marking,
        unsigned long long * restrict actual_marking,
        int * restrict global_vert_to_local_neighbors_offsets,
        int * restrict global_vert_to_local_neighbors,
        const uint64_t local_min_vertex,
        const uint64_t local_max_vertex,
        const unsigned longlong_length,
        unsigned long long * restrict setting_time,
        unsigned long long * restrict saving_time,
        unsigned *local_vertex_offsets,
        uint64_t *neighbors,
        const long long iter, long
*preds) {
    const unsigned long long start_setting = current_time_ns();
    long long count_signals = 0;
    if (iter != 2 && iter != 3) {
        // Top-down: scan local vertices in the current frontier and push to
        // their unvisited neighbors.
        const unsigned nlocalverts = local_max_vertex - local_min_vertex;
        for (unsigned local_vert = 0; local_vert < nlocalverts; local_vert++) {
            uint64_t global_vert = local_min_vertex + (uint64_t)local_vert;
            if (is_visited_longlong(global_vert, last_marked) &&
                    !is_visited_longlong(global_vert, marked)) {
                const unsigned neighbor_start = local_vertex_offsets[local_vert];
                const unsigned neighbor_end = local_vertex_offsets[local_vert + 1];
                for (int j = neighbor_start; j < neighbor_end; j++) {
                    const uint64_t to_explore_global_id = neighbors[j];
                    if (!is_visited_longlong(to_explore_global_id, marked)) {
                        preds[to_explore_global_id] = global_vert;
                        set_visited_longlong(to_explore_global_id, marking, longlong_length);
                        count_signals++;
                    }
                }
            }
        }
    } else {
        // Bottom-up: every unvisited global vertex scans its local neighbors
        // for one that is in the frontier.
#pragma omp parallel for schedule(static) default(none) \
        firstprivate(marked, last_marked, marking, actual_marking, \
                global_vert_to_local_neighbors_offsets, \
                global_vert_to_local_neighbors, preds) \
        reduction(+:count_signals)
        for (size_t longlong_index = 0; longlong_index < longlong_length; longlong_index++) {
            for (int bit_index = 0; bit_index < BITS_PER_LONGLONG; bit_index++) {
                const uint64_t global_vertex = longlong_index * BITS_PER_LONGLONG + bit_index;
                if (global_vertex < nglobalverts &&
                        !is_visited_longlong(global_vertex, marked)) {
                    const int local_neighbors_start = global_vert_to_local_neighbors_offsets[global_vertex];
                    const int local_neighbors_end = global_vert_to_local_neighbors_offsets[global_vertex + 1];
                    for (int i = local_neighbors_start; i < local_neighbors_end; i++) {
                        const uint64_t neighbor_global_vertex = local_min_vertex +
                            global_vert_to_local_neighbors[i];
                        if (is_visited_longlong(neighbor_global_vertex, last_marked) &&
                                !is_visited_longlong(neighbor_global_vertex, marked)) {
                            preds[global_vertex] = neighbor_global_vertex;
                            set_visited_longlong(global_vertex, marking, longlong_length);
                            const
uint64_t longlong_index = global_vertex / (uint64_t)BITS_PER_LONGLONG;
                            count_signals++;
                            break;
                        }
                    }
                }
            }
        } // omp for
    }

    const unsigned long long start_saving = current_time_ns();
    // Fold the previous frontier into the visited bitmap for this PE's own
    // word range.
    const size_t my_min_longlong = local_min_vertex / BITS_PER_LONGLONG;
    const size_t my_max_longlong = (local_max_vertex - 1) / BITS_PER_LONGLONG;
    if (my_min_longlong == my_max_longlong) {
        marked[my_min_longlong] |= last_marked[my_min_longlong];
    } else {
#pragma omp simd
        for (size_t longlong_index = my_min_longlong; longlong_index <= my_max_longlong; longlong_index++) {
            const unsigned long long new_val = last_marked[longlong_index];
            if (new_val) {
                marked[longlong_index] |= new_val;
            }
        }
    }
    const unsigned long long end_saving = current_time_ns();

    *setting_time = start_saving - start_setting;
    *saving_time = end_saving - start_saving;

    return count_signals;
}

// NOTE(review): main continues beyond the end of this chunk.
int main(int argc, char **argv) {
    assert(sizeof(long) == sizeof(uint64_t)); // For preds
#ifdef USE_CRC
    crcInit();
#endif
    if (argc < 4) {
        fprintf(stderr, "usage: %s scale edgefactor num-bfs-roots\n", argv[0]);
        fprintf(stderr, " scale = log_2(# vertices)\n");
        fprintf(stderr, " edgefactor = .5 * (average vertex degree)\n");
        fprintf(stderr, " num-bfs-roots = # of roots to build a tree from "
                "[optional]\n");
        fprintf(stderr, "\n");
        fprintf(stderr, " For scale, the Graph500 benchmark defines the "
                "following presets:\n");
        fprintf(stderr, " toy = 26\n");
        fprintf(stderr, " mini = 29\n");
        fprintf(stderr, " small = 32\n");
        fprintf(stderr, " medium = 36\n");
        fprintf(stderr, " large = 39\n");
        fprintf(stderr, " huge = 42\n");
        fprintf(stderr, " The standard choice for edgefactor is 16\n");
        return 1;
    }

    // Problem size: 2^scale vertices, edgefactor * 2^scale edges
    const uint64_t scale = atoi(argv[1]);
    const uint64_t edgefactor = atoi(argv[2]);
    const uint64_t nglobaledges = (uint64_t)(edgefactor << scale);
    const uint64_t nglobalverts = (uint64_t)(((uint64_t)1) << scale);
    const int num_bfs_roots = atoi(argv[3]);

    // Disabled watchdog wiring (see sig_handler/kill_func above).
    // __sighandler_t serr = signal(SIGUSR1, sig_handler);
    // assert(serr != SIG_ERR);
    // int kill_seconds = 120;
    //
    // pthread_t thread;
    // const int perr = pthread_create(&thread, NULL, kill_func,
    // (void *)&kill_seconds);
    // assert(perr == 0);

    // Initialize OpenSHMEM with full thread support
    int provided;
    shmemx_init_thread(SHMEMX_THREAD_MULTIPLE, &provided);
    assert(provided == SHMEMX_THREAD_MULTIPLE);

    uint64_t i;
    int nthreads, err;
#pragma omp parallel
#pragma omp master
    {
        nthreads = omp_get_num_threads();
    }

#ifndef RUNTIME_SAFETY
    // One SHMEM domain per thread, CONTEXTS_PER_THREAD contexts per domain
    shmemx_domain_t *domains = (shmemx_domain_t *)malloc(
            nthreads * sizeof(*domains));
    shmemx_ctx_t *contexts = (shmemx_ctx_t *)malloc(
            nthreads * CONTEXTS_PER_THREAD * sizeof(*contexts));
    assert(domains && contexts);
    err = shmemx_domain_create(SHMEMX_THREAD_SINGLE, nthreads, domains);
    assert(err == 0);
    for (i = 0; i < nthreads; i++) {
        for (int j = 0; j < CONTEXTS_PER_THREAD; j++) {
            err = shmemx_ctx_create(domains[i],
                    contexts + (i * CONTEXTS_PER_THREAD + j));
            assert(err == 0);
        }
    }
#else
    shmemx_ctx_t *contexts = NULL;
    shmemx_domain_t *domains = NULL;
#endif

    pe = shmem_my_pe();
    npes = shmem_n_pes();

    // Deterministic generator seed shared by all PEs
    uint_fast32_t seed[5];
    uint64_t seed1 = 2, seed2 = 3;
    make_mrg_seed(seed1, seed2, seed);

    // Block-distribute the edge list; the last PE may get fewer edges
    const uint64_t edges_per_pe = (nglobaledges + npes - 1) / npes;
    const uint64_t start_edge_index = pe * edges_per_pe;
    int64_t nedges_this_pe = edges_per_pe;
    if (start_edge_index + nedges_this_pe > nglobaledges) {
        nedges_this_pe = nglobaledges - start_edge_index;
        if (nedges_this_pe < 0) nedges_this_pe = 0;
    }

    if (pe == 0) {
        fprintf(stderr, "%llu: %lu total vertices, %lu total edges, %d PEs, ~%lu edges per "
                "PE, ~%lu vertices per PE\n", current_time_ns(), nglobalverts,
                nglobaledges, npes, edges_per_pe,
                get_vertices_per_pe(nglobalverts));
    }

    /*
     * Use the Graph500 utilities to generate a set of edges distributed across
     * PEs.
     */
#ifdef VERBOSE
    fprintf(stderr, "PE %d malloc-ing %llu bytes (E)\n", shmem_my_pe(),
            nedges_this_pe * sizeof(packed_edge));
#endif
    packed_edge *actual_buf = (packed_edge *)malloc(
            nedges_this_pe * sizeof(packed_edge));
    assert(actual_buf || nedges_this_pe == 0);
    generate_kronecker_range(seed, scale, start_edge_index,
            start_edge_index + nedges_this_pe, actual_buf);

    /*
     * Count the number of edge endpoints in actual_buf that are resident on
     * each PE.
     */
#ifdef VERBOSE
    fprintf(stderr, "PE %d calloc-ing %llu bytes (A)\n", shmem_my_pe(),
            npes * sizeof(long long));
    const unsigned long long start_counting_edges = current_time_ns();
#endif
    long long *count_edges_shared_with_pe = (long long *)calloc(npes,
            sizeof(long long));
    assert(count_edges_shared_with_pe);
    for (i = 0; i < nedges_this_pe; i++) {
        int64_t v0 = get_v0_from_edge(actual_buf + i);
        int64_t v1 = get_v1_from_edge(actual_buf + i);
        int v0_pe = get_owner_pe(v0, nglobalverts);
        int v1_pe = get_owner_pe(v1, nglobalverts);
        count_edges_shared_with_pe[v0_pe] += 1;
        count_edges_shared_with_pe[v1_pe] += 1;
    }

    /*
     * Tell each PE how many edges you have to send it based on vertex
     * ownership.
*/ #ifdef VERBOSE fprintf(stderr, "PE %d malloc-ing %llu bytes (F)\n", shmem_my_pe(), npes * sizeof(long long)); const unsigned long long start_getting_offsets = current_time_ns(); fprintf(stderr, "PE %d time to count edges = %f ms\n", shmem_my_pe(), (double)(start_getting_offsets - start_counting_edges) / 1000000.0); #endif long long *remote_offsets = (long long *)malloc(npes * sizeof(long long)); assert(remote_offsets); for (i = 0; i < npes; i++) { remote_offsets[i] = shmem_longlong_fadd((long long int *)&n_local_edges, count_edges_shared_with_pe[i], i); } free(count_edges_shared_with_pe); shmem_barrier_all(); #ifdef VERBOSE fprintf(stderr, "PE %d shmem_malloc-ing %llu bytes (A)\n", shmem_my_pe(), SHMEM_REDUCE_SYNC_SIZE * sizeof(long)); #endif int *pWrkInt = (int *)shmem_malloc(SHMEM_REDUCE_MIN_WRKDATA_SIZE * sizeof(*pWrkInt)); long long *pWrkLongLong = (long long *)shmem_malloc( SHMEM_REDUCE_MIN_WRKDATA_SIZE * sizeof(*pWrkLongLong)); long *pWrkLong = (long *)shmem_malloc( SHMEM_REDUCE_MIN_WRKDATA_SIZE * sizeof(*pWrkLong)); assert(pWrkInt && pWrkLongLong && pWrkLong); long *pSync = (long *)shmem_malloc(SHMEM_REDUCE_SYNC_SIZE * sizeof(long)); #ifdef VERBOSE fprintf(stderr, "PE %d shmem_malloc-ing %llu bytes (B)\n", shmem_my_pe(), SHMEM_REDUCE_SYNC_SIZE * sizeof(long)); #endif long *pSync2 = (long *)shmem_malloc(SHMEM_REDUCE_SYNC_SIZE * sizeof(long)); assert(pSync && pSync2); for (i = 0; i < SHMEM_REDUCE_SYNC_SIZE; i++) { pSync[i] = SHMEM_SYNC_VALUE; pSync2[i] = SHMEM_SYNC_VALUE; } shmem_longlong_max_to_all((long long int *)&max_n_local_edges, (long long int *)&n_local_edges, 1, 0, 0, npes, pWrkLongLong, pSync); if (pe == 0) { fprintf(stderr, "%llu: Max. 
# local edges = %lld\n", current_time_ns(), max_n_local_edges); } uint64_t local_min_vertex = get_starting_vertex_for_pe(pe, nglobalverts); uint64_t local_max_vertex = get_ending_vertex_for_pe(pe, nglobalverts); uint64_t n_local_vertices; if (local_min_vertex >= local_max_vertex) { n_local_vertices = 0; } else { n_local_vertices = local_max_vertex - local_min_vertex; } /* * Allocate buffers on each PE for storing all edges for which at least one * of the vertices of the edge is handled by this PE. This information will * be provided by other PEs. */ #ifdef VERBOSE fprintf(stderr, "PE %d shmem_malloc-ing %llu bytes (C), nedges_this_pe = " "%ld\n", shmem_my_pe(), max_n_local_edges * sizeof(packed_edge), nedges_this_pe); const unsigned long long start_sending_edges = current_time_ns(); fprintf(stderr, "PE %d time to get offsets = %f ms\n", shmem_my_pe(), (double)(start_sending_edges - start_getting_offsets) / 1000000.0); #endif packed_edge *local_edges = (packed_edge *)shmem_malloc( max_n_local_edges * sizeof(packed_edge)); assert(local_edges); /* * Send out to each PE based on the vertices each owns, all edges that have * a vertex on that node. This means that vertices which have one vertex on * one node and one vertex on another will be sent to two different nodes. 
*/ unsigned max_count = 0; #pragma omp parallel default(none) reduction(max:max_count) \ firstprivate(actual_buf, npes, nedges_this_pe) { unsigned local_max_count = 0; #pragma omp for for (int p = 0; p < npes; p++) { unsigned count = 0; for (int i = 0; i < nedges_this_pe; i++) { int64_t v0 = get_v0_from_edge(actual_buf + i); int64_t v1 = get_v1_from_edge(actual_buf + i); int v0_pe = get_owner_pe(v0, nglobalverts); int v1_pe = get_owner_pe(v1, nglobalverts); if (v0_pe == p) { count++; } if (v1_pe == p) { count++; } } if (count > local_max_count) { local_max_count = count; } } max_count = local_max_count; } packed_edge *tmp_buf = (packed_edge *)malloc( nthreads * max_count * sizeof(packed_edge)); assert(tmp_buf); #pragma omp parallel for default(none) firstprivate(npes, remote_offsets, \ contexts, nedges_this_pe, actual_buf, local_edges, tmp_buf, max_count) for (int p = 0; p < npes; p++) { packed_edge *my_tmp_buf = tmp_buf + (omp_get_thread_num() * max_count); #ifndef RUNTIME_SAFETY const shmemx_ctx_t ctx = contexts[omp_get_thread_num() * CONTEXTS_PER_THREAD]; #endif int count = 0; for (unsigned i = 0; i < nedges_this_pe; i++) { int64_t v0 = get_v0_from_edge(actual_buf + i); int64_t v1 = get_v1_from_edge(actual_buf + i); int v0_pe = get_owner_pe(v0, nglobalverts); int v1_pe = get_owner_pe(v1, nglobalverts); if (v0_pe == p) { memcpy(my_tmp_buf + count, actual_buf + i, sizeof(packed_edge)); count++; } if (v1_pe == p) { memcpy(my_tmp_buf + count, actual_buf + i, sizeof(packed_edge)); count++; } } #ifndef RUNTIME_SAFETY shmemx_ctx_putmem(local_edges + remote_offsets[p], my_tmp_buf, count * sizeof(packed_edge), p, ctx); shmemx_ctx_quiet(ctx); #else shmem_putmem(local_edges + remote_offsets[p], my_tmp_buf, count * sizeof(packed_edge), p); shmem_quiet(); #endif } free(tmp_buf); free(remote_offsets); #ifdef VERBOSE fprintf(stderr, "PE %d done sending edges\n", shmem_my_pe()); #endif shmem_barrier_all(); free(actual_buf); #ifdef VERBOSE fprintf(stderr, "PE %d calloc-ing %llu 
bytes (B)\n", shmem_my_pe(), (n_local_vertices + 1) * sizeof(unsigned)); const unsigned long long start_calcing_offsets = current_time_ns(); fprintf(stderr, "PE %d time to send edges = %f ms\n", shmem_my_pe(), (double)(start_calcing_offsets - start_sending_edges) / 1000000.0); #endif unsigned *local_vertex_offsets = (unsigned *)calloc( (n_local_vertices + 1), sizeof(unsigned)); assert(local_vertex_offsets); /* * Location i in local_vertex_offsets stores the number of endpoints in * local_edges that have locale vertix i as one of the endpoints. Hence, it * is the total number of edge endpoints that are vertix i. */ for (i = 0; i < n_local_edges; i++) { packed_edge *edge = local_edges + i; int64_t v0 = get_v0_from_edge(edge); int64_t v1 = get_v1_from_edge(edge); assert(get_owner_pe(v0, nglobalverts) == pe || get_owner_pe(v1, nglobalverts) == pe); if (get_owner_pe(v0, nglobalverts) == pe) { local_vertex_offsets[v0 - local_min_vertex]++; } if (get_owner_pe(v1, nglobalverts) == pe) { local_vertex_offsets[v1 - local_min_vertex]++; } } #ifdef VERBOSE const unsigned long long done_incrementing = current_time_ns(); fprintf(stderr, "PE %d time to increment = %f ms\n", shmem_my_pe(), (double)(done_incrementing - start_calcing_offsets) / 1000000.0); #endif /* * After this loop, location i in local_vertex_offsets stores a global * offset for vertix i in a local list of all endpoints stored on this PE. * The total number of local endpoints is the number of endpoints on the * locally stored edges that are for a vertix assigned to this PE (where the * locally stored edges are defined to be all edges that have at least one * vertix on this node). The sum of all local endpoints (the value in acc * after this loop) must be >= n_local_edges because each local edge must * have at least one endpoint that is a vertix on this node, but * <= n_local_edges * 2 because each edge can have at most 2 endpoints that * are vertices on this node. 
*/ uint64_t acc = 0; for (i = 0; i < n_local_vertices; i++) { uint64_t new_acc = acc + local_vertex_offsets[i]; local_vertex_offsets[i] = new_acc; // point to the last element acc = new_acc; } local_vertex_offsets[n_local_vertices] = acc; assert(acc >= n_local_edges && acc <= n_local_edges * 2); /* * In neighbors, for each local endpoint discovered above we store the * destination vertex for that endpoint. So, after this loop, given local * vertex i: * * - its global vertex ID would be local_min_vertex + i * - the list of global vertix IDs it is attached to by edges starts at * local_vertex_offsets[i] and ends at local_vertex_offsets[i + 1] */ #ifdef VERBOSE fprintf(stderr, "PE %d malloc-ing %llu bytes (D)\n", shmem_my_pe(), acc * 2 * sizeof(uint64_t)); #endif uint64_t *neighbors = (uint64_t *)malloc(acc * 2 * sizeof(uint64_t)); assert(neighbors); for (i = 0; i < n_local_edges; i++) { packed_edge *edge = local_edges + i; int64_t v0 = get_v0_from_edge(edge); int64_t v1 = get_v1_from_edge(edge); if (get_owner_pe(v0, nglobalverts) == pe) { neighbors[local_vertex_offsets[v0 - local_min_vertex] - 1] = v1; local_vertex_offsets[v0 - local_min_vertex]--; } if (get_owner_pe(v1, nglobalverts) == pe) { neighbors[local_vertex_offsets[v1 - local_min_vertex] - 1] = v0; local_vertex_offsets[v1 - local_min_vertex]--; } } #ifdef VERBOSE const unsigned long long done_neighboring = current_time_ns(); fprintf(stderr, "PE %d time to neighbor = %f ms\n", shmem_my_pe(), (double)(done_neighboring - done_incrementing) / 1000000.0); #endif // Remove duplicate edges in neighbors uint64_t writing_index = 0; for (i = 0; i < n_local_vertices; i++) { const unsigned start = local_vertex_offsets[i]; const unsigned end = local_vertex_offsets[i + 1]; assert(start <= end); local_vertex_offsets[i] = writing_index; qsort(neighbors + start, end - start, sizeof(*neighbors), compare_uint64_t); uint64_t reading_index = start; while (reading_index < end) { unsigned j = reading_index + 1; while (j < end && 
neighbors[j] == neighbors[reading_index]) { j++; } neighbors[writing_index++] = neighbors[reading_index]; reading_index = j; } } local_vertex_offsets[n_local_vertices] = writing_index; #ifdef VERBOSE fprintf(stderr, "PE %d realloc-ing from %llu bytes to %llu bytes with %d " "local vertices\n", shmem_my_pe(), acc * 2 * sizeof(uint64_t), writing_index * sizeof(uint64_t), n_local_vertices); const unsigned long long start_calcing_global_vert_to_local_neighbors_offsets = current_time_ns(); fprintf(stderr, "PE %d time to uniqify = %f ms\n", shmem_my_pe(), (double)(start_calcing_global_vert_to_local_neighbors_offsets - done_neighboring) / 1000000.0); #endif neighbors = (uint64_t *)realloc(neighbors, writing_index * sizeof(uint64_t)); assert(writing_index == 0 || neighbors); int *global_vert_to_local_neighbors_offsets = (int *)malloc( (nglobalverts + 1) * sizeof(*global_vert_to_local_neighbors_offsets)); assert(global_vert_to_local_neighbors_offsets); memset(global_vert_to_local_neighbors_offsets, 0x00, (nglobalverts + 1) * sizeof(*global_vert_to_local_neighbors_offsets)); int *global_vert_to_local_neighbors = (int *)malloc( writing_index * sizeof(*global_vert_to_local_neighbors)); assert(writing_index == 0 || global_vert_to_local_neighbors); for (int i = 0; i < n_local_vertices; i++) { const unsigned neighbors_start = local_vertex_offsets[i]; const unsigned neighbors_end = local_vertex_offsets[i + 1]; for (int j = neighbors_start; j < neighbors_end; j++) { uint64_t neighbor = neighbors[j]; global_vert_to_local_neighbors_offsets[neighbor]++; } } #ifdef VERBOSE fprintf(stderr, "PE %d computed neighbor counts\n", shmem_my_pe()); #endif for (int i = 1; i < nglobalverts; i++) { global_vert_to_local_neighbors_offsets[i] += global_vert_to_local_neighbors_offsets[i - 1]; } global_vert_to_local_neighbors_offsets[nglobalverts] = global_vert_to_local_neighbors_offsets[nglobalverts - 1]; #ifdef VERBOSE fprintf(stderr, "PE %d computed neighbor prefix sums\n", shmem_my_pe()); #endif 
for (int i = 0; i < n_local_vertices; i++) { const unsigned neighbors_start = local_vertex_offsets[i]; const unsigned neighbors_end = local_vertex_offsets[i + 1]; for (int j = neighbors_start; j < neighbors_end; j++) { uint64_t neighbor = neighbors[j]; const int neighbor_offset = global_vert_to_local_neighbors_offsets[neighbor] - 1; assert(neighbor_offset >= 0 && neighbor_offset < writing_index); global_vert_to_local_neighbors[neighbor_offset] = i; global_vert_to_local_neighbors_offsets[neighbor] = neighbor_offset; } } #ifdef VERBOSE fprintf(stderr, "PE %d computed neighbor offsets\n", shmem_my_pe()); const unsigned long long start_double_checking = current_time_ns(); fprintf(stderr, "PE %d time to calc local neighbor offsets = %f ms\n", shmem_my_pe(), (double)(start_double_checking - start_calcing_global_vert_to_local_neighbors_offsets) / 1000000.0); #endif size_t n_local_edges = 0; size_t n_remote_edges = 0; // Just some double checking for (i = 0; i < n_local_vertices; i++) { const unsigned neighbors_start = local_vertex_offsets[i]; const unsigned neighbors_end = local_vertex_offsets[i + 1]; int j; for (j = neighbors_start; j < neighbors_end; j++) { if (neighbors[j] >= nglobalverts) { fprintf(stderr, "Invalid neighbor at i = %llu / %llu, j = %u " "(%u -> %u)\n", i, n_local_vertices, j, neighbors_start, neighbors_end); assert(0); } if (get_owner_pe(neighbors[j], nglobalverts) == pe) { n_local_edges++; } else { n_remote_edges++; } } } fprintf(stderr, "PE %d has %d local vertices. %lu " "local edges, %lu remote edges. 
min vertex = %lu, max_vertex = " "%lu\n", pe, n_local_vertices, n_local_edges, n_remote_edges, local_min_vertex, local_max_vertex); // For debugging, print all vertices // { // int k; // for (k = 0; k < npes; k++) { // if (k == shmem_my_pe()) { // for (i = 0; i < n_local_vertices; i++) { // const unsigned neighbors_start = local_vertex_offsets[i]; // const unsigned neighbors_end = local_vertex_offsets[i + 1]; // fprintf(stderr, "HOWDY %d :", local_min_vertex + i); // int j; // for (j = neighbors_start; j < neighbors_end; j++) { // fprintf(stderr, " %d", neighbors[j]); // } // fprintf(stderr, "\n"); // } // } // shmem_barrier_all(); // } // for (k = 0; k < npes; k++) { // if (k == shmem_my_pe()) { // for (i = 0; i < nglobalverts; i++) { // const int start = global_vert_to_local_neighbors_offsets[i]; // const int end = global_vert_to_local_neighbors_offsets[i + 1]; // fprintf(stderr, "FOO %d %d : %d -> %d\n", shmem_my_pe(), i, start, end); // } // } // shmem_barrier_all(); // } // } shmem_free(local_edges); long long *my_n_signalled = (long long *)shmem_malloc(sizeof(*my_n_signalled)); assert(my_n_signalled); long long *total_n_signalled = (long long *)shmem_malloc(sizeof(*total_n_signalled)); assert(total_n_signalled); const size_t visited_longlongs = ((nglobalverts + BITS_PER_LONGLONG - 1) / BITS_PER_LONGLONG); unsigned long long *marked = (unsigned long long *)shmem_malloc( visited_longlongs * sizeof(long long)); unsigned long long *last_marked = (unsigned long long *)shmem_malloc( visited_longlongs * sizeof(long long)); unsigned long long *marking = (unsigned long long *)shmem_malloc( visited_longlongs * sizeof(long long)); unsigned long long *local_marking = (unsigned long long *)malloc( visited_longlongs * sizeof(long long)); assert(marked && last_marked && marking && local_marking); long *preds = (long *)shmem_malloc(nglobalverts * sizeof(*preds)); assert(preds); const size_t visited_ints = ((nglobalverts + BITS_PER_INT - 1) / BITS_PER_INT); const size_t 
visited_bytes = visited_ints * sizeof(unsigned); int old; unsigned run; for (run = 0; run < num_bfs_roots; run++) { memset(marked, 0x00, visited_longlongs * sizeof(long long)); memset(last_marked, 0x00, visited_longlongs * sizeof(long long)); memset(marking, 0x00, visited_longlongs * sizeof(long long)); memset(local_marking, 0x00, visited_longlongs * sizeof(long long)); memset(preds, 0x00, nglobalverts * sizeof(*preds)); uint64_t root = 0; if (get_owner_pe(root, nglobalverts) == pe) { set_visited_longlong(root, last_marked, visited_longlongs); } const size_t my_min_longlong = local_min_vertex / BITS_PER_LONGLONG; const size_t my_max_longlong = local_max_vertex / BITS_PER_LONGLONG; const size_t min_word_to_send = local_min_vertex / BITS_PER_INT; const size_t max_word_to_send = local_max_vertex / BITS_PER_INT; const size_t words_to_send = max_word_to_send - min_word_to_send - 1; unsigned long long *min_longlong_ptr = ((unsigned long long *)marked) + my_min_longlong; unsigned long long *max_longlong_ptr = ((unsigned long long *)marked) + my_max_longlong; shmem_barrier_all(); const unsigned long long start_bfs = current_time_ns(); int iter = 0; long long last_n_new_nodes = 1; long long delta_new_nodes = 1; long long delta_delta_new_nodes = 0; do { *my_n_signalled = 0; *total_n_signalled = 0; unsigned count_local_atomics = 0; const unsigned long long start_handling = current_time_ns(); unsigned long long setting_time, saving_time; ssize_t min_written_index = my_min_longlong; ssize_t max_written_index = my_max_longlong; if (n_local_vertices > 0) { memset(local_marking, 0x00, visited_longlongs * sizeof(long long)); *my_n_signalled = traverse_all_verts(nglobalverts, marked, last_marked, local_marking, marking, global_vert_to_local_neighbors_offsets, global_vert_to_local_neighbors, local_min_vertex, local_max_vertex, visited_longlongs, &setting_time, &saving_time, local_vertex_offsets, neighbors, iter, preds); } const unsigned long long start_atomics = current_time_ns(); 
unsigned long long start_reduction; #pragma omp parallel reduction(+:count_local_atomics) default(none) \ firstprivate(contexts, npes, pe, local_marking, marking, iter, \ min_longlong_ptr, max_longlong_ptr, last_marked, \ min_written_index, max_written_index, marked, nthreads) shared(start_reduction) { #ifndef RUNTIME_SAFETY int active_ctx_index = 0; shmemx_ctx_t ctx = contexts[omp_get_thread_num() * CONTEXTS_PER_THREAD + active_ctx_index]; #endif unsigned count_thread_atomics = 0; memset(last_marked, 0x00, visited_longlongs * sizeof(*last_marked)); // #pragma omp for simd schedule(static) nowait // for (int i = 0; i < visited_longlongs; i++) { // last_marked[i] = 0; // } #pragma omp for schedule(dynamic,1) nowait for (int p = 0; p < npes; p++) { const int target_pe = (pe + p) % npes; const size_t min_longlong = get_starting_vertex_for_pe(target_pe, nglobalverts) / BITS_PER_LONGLONG; const size_t max_longlong = (get_ending_vertex_for_pe(target_pe, nglobalverts) - 1) / BITS_PER_LONGLONG; for (int l = min_longlong; l <= max_longlong; l++) { const unsigned long long mask = local_marking[l]; if (mask) { #ifndef RUNTIME_SAFETY shmemx_ctx_ulonglong_atomic_or(marking + l, mask, target_pe, ctx); #else shmemx_ulonglong_atomic_or(marking + l, mask, target_pe); #endif count_thread_atomics++; #ifndef RUNTIME_SAFETY if (count_thread_atomics % 1 == 0) { active_ctx_index = (active_ctx_index + 1) % CONTEXTS_PER_THREAD; ctx = contexts[omp_get_thread_num() * CONTEXTS_PER_THREAD + active_ctx_index]; shmemx_ctx_quiet(ctx); } #endif } } } // end omp for count_local_atomics += count_thread_atomics; #pragma omp master start_reduction = current_time_ns(); const ssize_t longlong_to_send = my_max_longlong - my_min_longlong - 1; unsigned long long *min_longlong_ptr = marked + my_min_longlong; unsigned long long *max_longlong_ptr = marked + my_max_longlong; unsigned long long *body_ptr = marked + my_min_longlong + 1; #pragma omp for schedule(dynamic,1) nowait for (int p = 1; p < npes; p++) { 
const int target_pe = (pe + p) % npes; #ifndef RUNTIME_SAFETY shmemx_ctx_ulonglong_atomic_or(min_longlong_ptr, *min_longlong_ptr, target_pe, ctx); shmemx_ctx_ulonglong_atomic_or(max_longlong_ptr, *max_longlong_ptr, target_pe, ctx); if (longlong_to_send > 0) { shmemx_ctx_putmem_nbi(body_ptr, body_ptr, longlong_to_send * sizeof(unsigned long long), target_pe, ctx); } #else shmemx_ulonglong_atomic_or(min_longlong_ptr, *min_longlong_ptr, target_pe); shmemx_ulonglong_atomic_or(max_longlong_ptr, *max_longlong_ptr, target_pe); if (longlong_to_send > 0) { shmem_putmem_nbi(body_ptr, body_ptr, longlong_to_send * sizeof(unsigned long long), target_pe); } #endif } // end omp for } // end omp parallel shmem_longlong_sum_to_all(total_n_signalled, my_n_signalled, 1, 0, 0, npes, pWrkLongLong, pSync); unsigned long long *tmp = marking; marking = last_marked; last_marked = tmp; const unsigned long long start_barrier = current_time_ns(); #ifndef RUNTIME_SAFETY for (int i = 0; i < CONTEXTS_PER_THREAD * nthreads; i++) { shmemx_ctx_quiet(contexts[i]); } #endif shmem_barrier_all(); delta_delta_new_nodes = (*total_n_signalled - last_n_new_nodes) - delta_new_nodes; delta_new_nodes = *total_n_signalled - last_n_new_nodes; last_n_new_nodes = *total_n_signalled; const unsigned long long end_all = current_time_ns(); printf("PE %d, iter %d, handling %f ms (%f ms, %f ms), atomics %f ms, reduction " "%f ms, barrier %f ms, %lld new nodes locally, %lld new nodes " "in total (%lld delta, %lld delta delta), %d atomics, %ld -> %ld\n", pe, iter, (double)(start_atomics - start_handling) / 1000000.0, (double)setting_time / 1000000.0, (double)saving_time / 1000000.0, (double)(start_reduction - start_atomics) / 1000000.0, (double)(start_barrier - start_reduction) / 1000000.0, (double)(end_all - start_barrier) / 1000000.0, *my_n_signalled, *total_n_signalled, delta_new_nodes, delta_delta_new_nodes, count_local_atomics, min_written_index, max_written_index); // printf("PE %d, iter %d, # signals %d\n", pe, 
iter, *my_n_signalled); iter++; } while (*total_n_signalled > 0); const unsigned long long end_bfs = current_time_ns(); // For debugging on small datasets, print results // for (i = 0; i < nglobalverts; i++) { // if (get_owner_pe(i, nglobalverts) == pe) { // printf("Vertex %d : traversed by %d\n", i, // first_traversed_by[i - local_min_vertex]); // } // shmem_barrier_all(); // } // Lightweight validation // int count_not_set = 0; // int count_not_handled = 0; // int count_set = 0; // for (i = 0; i < n_local_vertices; i++) { // const int curr = first_traversed_by[i]; // if (curr == 0) { // count_not_set++; // } else if (curr > 0) { // count_not_handled++; // } else { // count_set++; // } // } // fprintf(stderr, "PE %d, run %d : set %d , not set %d , unhandled %d\n", // shmem_my_pe(), run, count_set, count_not_set, count_not_handled); if (pe == 0) { printf("BFS %d with root=%llu took %f ms, %d iters\n", run, root, (double)(end_bfs - start_bfs) / 1000000.0, iter); } } #ifndef RUNTIME_SAFETY for (i = 0; i < nthreads * CONTEXTS_PER_THREAD; i++) { shmemx_ctx_destroy(contexts[i]); } shmemx_domain_destroy(nthreads, domains); #endif shmem_finalize(); return 0; }
convolution_2x2.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// 2x2 convolution, stride 1, NEON-accelerated.
//
// bottom_blob : input feature maps (w x h x inch)
// top_blob    : output feature maps (outw x outh x outch), pre-sized by caller
// _kernel     : weights laid out as 4 floats (the 2x2 window, row-major) per
//               (output-channel, input-channel) pair — see the
//               `kernel + p*inch*4 + q*4` indexing below
// _bias       : optional per-output-channel bias; may be empty (NULL data)
//
// Each output channel accumulates over all input channels; the input-channel
// loop is unrolled by two so one pass fuses two channels' contributions.
// NOTE(review): the per-row `+= 1` pointer bumps at the end of each `i`
// iteration assume w == outw + 1 (valid 2x2/s1 geometry) — confirm callers
// always pass blobs with that relationship.
static void conv2x2s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // One thread per output channel; channels are independent.
    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        // Seed the whole output channel with the bias; all channel
        // contributions below are accumulated on top of it (`*outptr +=`).
        out.fill(bias0);

        int q = 0;

        // Main loop: consume input channels two at a time.
        for (; q+1<inch; q+=2)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);

            // 4 weights per (p, q) pair: kernel0 for channel q, kernel1 for q+1.
            const float* kernel0 = kernel + p*inch*4 + q*4;
            const float* kernel1 = kernel0 + 4;

            // Two adjacent input rows per channel (2-row window of the 2x2 kernel).
            const float* r00 = img0;
            const float* r01 = img0 + w;

            const float* r10 = img1;
            const float* r11 = img1 + w;

#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(kernel0);
            float32x4_t _k1 = vld1q_f32(kernel1);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;     // vectorized groups of 4 output columns
                int remain = outw & 3;  // scalar leftover columns
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                // Software-pipelined: one vector per row is pre-loaded before
                // the loop, so each row pointer is rewound 16 bytes afterwards.
                // Operands: %1..%4 = r00,r01,r10,r11; %5 = outptr;
                // %12/%13 = the two 4-float kernel vectors (lane-indexed).
                if (nn > 0)
                {
                asm volatile(
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v2.4s}, [%2], #16 \n"
                    "prfm pldl1keep, [%3, #128] \n"
                    "ld1 {v12.4s}, [%3], #16 \n"
                    "prfm pldl1keep, [%4, #128] \n"
                    "ld1 {v14.4s}, [%4], #16 \n"
                    "0: \n"
                    "prfm pldl1keep, [%5, #128] \n"
                    "ld1 {v9.4s}, [%5] \n"
                    "fmul v8.4s, v0.4s, %12.s[0] \n"
                    "fmla v9.4s, v2.4s, %12.s[2] \n"
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v1.4s}, [%1], #16 \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v3.4s}, [%2], #16 \n"
                    "ext v10.16b, v0.16b, v1.16b, #4 \n"
                    "ext v11.16b, v2.16b, v3.16b, #4 \n"
                    "fmla v8.4s, v12.4s, %13.s[0] \n"
                    "fmla v9.4s, v14.4s, %13.s[2] \n"
                    "prfm pldl1keep, [%3, #128] \n"
                    "ld1 {v13.4s}, [%3], #16 \n"
                    "prfm pldl1keep, [%4, #128] \n"
                    "ld1 {v15.4s}, [%4], #16 \n"
                    "fmla v8.4s, v10.4s, %12.s[1] \n"
                    "fmla v9.4s, v11.4s, %12.s[3] \n"
                    "ext v10.16b, v12.16b, v13.16b, #4 \n"
                    "ext v11.16b, v14.16b, v15.16b, #4 \n"
                    "fmla v8.4s, v10.4s, %13.s[1] \n"
                    "fmla v9.4s, v11.4s, %13.s[3] \n"
                    "orr v0.16b, v1.16b, v1.16b \n"
                    "orr v2.16b, v3.16b, v3.16b \n"
                    "fadd v8.4s, v8.4s, v9.4s \n"
                    "orr v12.16b, v13.16b, v13.16b \n"
                    "orr v14.16b, v15.16b, v15.16b \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v8.4s}, [%5], #16 \n"
                    "bne 0b \n"
                    "sub %1, %1, #16 \n"
                    "sub %2, %2, #16 \n"
                    "sub %3, %3, #16 \n"
                    "sub %4, %4, #16 \n"
                    : "=r"(nn), // %0
                      "=r"(r00), // %1
                      "=r"(r01), // %2
                      "=r"(r10), // %3
                      "=r"(r11), // %4
                      "=r"(outptr) // %5
                    : "0"(nn),
                      "1"(r00),
                      "2"(r01),
                      "3"(r10),
                      "4"(r11),
                      "5"(outptr),
                      "w"(_k0), // %12
                      "w"(_k1) // %13
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                // ARMv7 twin of the loop above; same pipelining and the same
                // 16-byte rewind of %1..%4 at the end.
                if (nn > 0)
                {
                asm volatile(
                    "pld [%1, #128] \n"
                    "vld1.f32 {d0-d1}, [%1]! \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d4-d5}, [%2]! \n"
                    "pld [%3, #128] \n"
                    "vld1.f32 {d24-d25}, [%3]! \n"
                    "pld [%4, #128] \n"
                    "vld1.f32 {d28-d29}, [%4]! \n"
                    "0: \n"
                    "pld [%5, #128] \n"
                    "vld1.f32 {d18-d19}, [%5] \n"// q9 = sum
                    "vmul.f32 q8, q0, %e12[0] \n"
                    "vmla.f32 q9, q2, %f12[0] \n"
                    "pld [%1, #128] \n"
                    "vld1.f32 {d2-d3}, [%1]! \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d6-d7}, [%2]! \n"
                    "vext.f32 q10, q0, q1, #1 \n"
                    "vext.f32 q11, q2, q3, #1 \n"
                    "vmla.f32 q8, q12, %e13[0] \n"
                    "vmla.f32 q9, q14, %f13[0] \n"
                    "pld [%3, #128] \n"
                    "vld1.f32 {d26-d27}, [%3]! \n"
                    "pld [%4, #128] \n"
                    "vld1.f32 {d30-d31}, [%4]! \n"
                    "vmla.f32 q8, q10, %e12[1] \n"
                    "vmla.f32 q9, q11, %f12[1] \n"
                    "vext.f32 q10, q12, q13, #1 \n"
                    "vext.f32 q11, q14, q15, #1 \n"
                    "vmla.f32 q8, q10, %e13[1] \n"
                    "vmla.f32 q9, q11, %f13[1] \n"
                    "vorr q0, q1, q1 \n"
                    "vorr q2, q3, q3 \n"
                    "vadd.f32 q8, q8, q9 \n"
                    "vorr q12, q13, q13 \n"
                    "vorr q14, q15, q15 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d16-d17}, [%5]! \n"
                    "bne 0b \n"
                    "sub %1, #16 \n"
                    "sub %2, #16 \n"
                    "sub %3, #16 \n"
                    "sub %4, #16 \n"
                    : "=r"(nn), // %0
                      "=r"(r00), // %1
                      "=r"(r01), // %2
                      "=r"(r10), // %3
                      "=r"(r11), // %4
                      "=r"(outptr) // %5
                    : "0"(nn),
                      "1"(r00),
                      "2"(r01),
                      "3"(r10),
                      "4"(r11),
                      "5"(outptr),
                      "w"(_k0), // %12
                      "w"(_k1) // %13
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar tail: one output column at a time, both input
                // channels of this unrolled pair accumulated together.
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x2_t _r00 = vld1_f32(r00);
                    float32x2_t _r01 = vld1_f32(r01);
                    float32x4_t _r00r1 = vcombine_f32(_r00, _r01);
                    float32x4_t _s0s1 = vmulq_f32(_r00r1, _k0);
                    float32x2_t _r10 = vld1_f32(r10);
                    float32x2_t _r11 = vld1_f32(r11);
                    float32x4_t _r10r1 = vcombine_f32(_r10, _r11);
                    _s0s1 = vmlaq_f32(_s0s1, _r10r1, _k1);
                    // Horizontal add of the four products into one lane.
                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);
                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;
                    sum += r00[0] * kernel0[0];
                    sum += r00[1] * kernel0[1];
                    sum += r01[0] * kernel0[2];
                    sum += r01[1] * kernel0[3];
                    sum += r10[0] * kernel1[0];
                    sum += r10[1] * kernel1[1];
                    sum += r11[0] * kernel1[2];
                    sum += r11[1] * kernel1[3];
                    *outptr += sum;
#endif // __ARM_NEON
                    r00 += 1;
                    r01 += 1;
                    r10 += 1;
                    r11 += 1;
                    outptr++;
                }

                // Step past the last input column to reach the next row start
                // (assumes w == outw + 1 — see NOTE(review) above).
                r00 += 1;
                r01 += 1;
                r10 += 1;
                r11 += 1;
            }
        }

        // Tail: odd remaining input channel, processed alone.
        for (; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*4 + q*4;

            const float* r0 = img0;
            const float* r1 = img0 + w;

#if __ARM_NEON
            // Each of the four 2x2 weights splatted across a vector for the
            // column-parallel asm kernels below.
            float32x4_t _k0 = vdupq_n_f32(kernel0[0]);
            float32x4_t _k1 = vdupq_n_f32(kernel0[1]);
            float32x4_t _k2 = vdupq_n_f32(kernel0[2]);
            float32x4_t _k3 = vdupq_n_f32(kernel0[3]);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                // Operands: %1 = r0 (upper row), %2 = r1 (lower row),
                // %3 = outptr; %8..%11 = the four splatted weights.
                if (nn > 0)
                {
                asm volatile(
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v2.4s}, [%2], #16 \n"
                    "0: \n"
                    "prfm pldl1keep, [%3, #128] \n"
                    "ld1 {v9.4s}, [%3] \n"
                    "fmul v8.4s, v0.4s, %8.4s \n"
                    "fmla v9.4s, v2.4s, %10.4s \n"
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v1.4s}, [%1], #16 \n"
                    "ext v10.16b, v0.16b, v1.16b, #4 \n"
                    "fmla v8.4s, v10.4s, %9.4s \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v3.4s}, [%2], #16 \n"
                    "ext v11.16b, v2.16b, v3.16b, #4 \n"
                    "fmla v9.4s, v11.4s, %11.4s \n"
                    "orr v0.16b, v1.16b, v1.16b \n"
                    "fadd v8.4s, v8.4s, v9.4s \n"
                    "orr v2.16b, v3.16b, v3.16b \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v8.4s}, [%3], #16 \n"
                    "bne 0b \n"
                    "sub %1, %1, #16 \n"
                    "sub %2, %2, #16 \n"
                    : "=r"(nn), // %0
                      "=r"(r0), // %1
                      "=r"(r1), // %2
                      "=r"(outptr) // %3
                    : "0"(nn),
                      "1"(r0),
                      "2"(r1),
                      "3"(outptr),
                      "w"(_k0), // %8
                      "w"(_k1), // %9
                      "w"(_k2), // %10
                      "w"(_k3) // %11
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11"
                );
                }
#else
                // ARMv7 twin of the loop above.
                if (nn > 0)
                {
                asm volatile(
                    "pld [%1, #128] \n"
                    "vld1.f32 {d0-d1}, [%1]! \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d4-d5}, [%2]! \n"
                    "0: \n"
                    "pld [%3, #128] \n"
                    "vld1.f32 {d18-d19}, [%3] \n"// q9 = sum
                    "vmul.f32 q8, q0, %q8 \n"
                    "vmla.f32 q9, q2, %q10 \n"
                    "pld [%1, #128] \n"
                    "vld1.f32 {d2-d3}, [%1]! \n"
                    "vext.f32 q10, q0, q1, #1 \n"
                    "vmla.f32 q8, q10, %q9 \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d6-d7}, [%2]! \n"
                    "vext.f32 q11, q2, q3, #1 \n"
                    "vmla.f32 q9, q11, %q11 \n"
                    "vorr q0, q1, q1 \n"
                    "vadd.f32 q8, q8, q9 \n"
                    "vorr q2, q3, q3 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d16-d17}, [%3]! \n"
                    "bne 0b \n"
                    "sub %1, #16 \n"
                    "sub %2, #16 \n"
                    : "=r"(nn), // %0
                      "=r"(r0), // %1
                      "=r"(r1), // %2
                      "=r"(outptr) // %3
                    : "0"(nn),
                      "1"(r0),
                      "2"(r1),
                      "3"(outptr),
                      "w"(_k0), // %8
                      "w"(_k1), // %9
                      "w"(_k2), // %10
                      "w"(_k3) // %11
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
#if __ARM_NEON
                // Scalar tail uses the packed (non-splatted) weight vector.
                float32x4_t _k0123 = vld1q_f32(kernel0);
#endif
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x2_t _r0 = vld1_f32(r0);
                    float32x2_t _r1 = vld1_f32(r1);
                    float32x4_t _r0r1 = vcombine_f32(_r0, _r1);
                    float32x4_t _s0s1 = vmulq_f32(_r0r1, _k0123);
                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);
                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;
                    sum += r0[0] * kernel0[0];
                    sum += r0[1] * kernel0[1];
                    sum += r1[0] * kernel0[2];
                    sum += r1[1] * kernel0[3];
                    *outptr += sum;
#endif
                    r0 += 1;
                    r1 += 1;
                    outptr++;
                }

                // Advance to the next input row (see NOTE(review) above).
                r0 += 1;
                r1 += 1;
            }
        }
    }
}
race.c
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Deliberate data-race demonstration: many OpenMP threads increment a
 * shared counter without synchronization, so the printed total is
 * usually less than 100000.
 *
 * Fixes relative to the original:
 *  - `counter` was read (incremented) while uninitialized, which is
 *    undefined behavior regardless of the race being demonstrated; it
 *    now starts at 0.
 *  - `#pragma omp parallel` (without `for`) made every thread execute
 *    the ENTIRE loop with a *shared* loop index `i`, racing on the index
 *    itself. `#pragma omp parallel for` divides the iterations among the
 *    threads and privatizes the index, leaving the unsynchronized
 *    `counter++` as the single, intended race.
 */
int main() {
  int i;
  int counter = 0; /* shared; updated without protection on purpose */

#pragma omp parallel for
  for (i = 0; i < 100000; i++)
    counter++; /* racy read-modify-write: lost updates are expected */

  printf("counter=%d\n", counter);
  return 0;
}
GB_unop__lnot_int8_int8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__lnot_int8_int8
// op(A') function: GB_unop_tran__lnot_int8_int8

// C type:  int8_t
// A type:  int8_t
// cast:    int8_t cij = aij
// unaryop: cij = !(aij != 0)

// type of the entries of the input matrix A
#define GB_ATYPE \
    int8_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator (logical NOT: z = 1 if x is zero, else 0)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (int8 -> int8: identity)
#define GB_CAST(z, aij) \
    int8_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = aij ; \
    Cx [pC] = !(z != 0) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise: Cx [p] = !(Ax [p] != 0) for all anz entries, split statically
// across nthreads OpenMP threads. Aliasing Cx == Ax is safe because each
// entry p is read and written independently of every other entry.
GrB_Info GB_unop_apply__lnot_int8_int8
(
    int8_t *Cx,         // Cx and Ax may be aliased
    const int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t aij = Ax [p] ;
        int8_t z = aij ;
        Cx [p] = !(z != 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Phase 2 of the two-phase transpose template: the GB_* macros defined above
// specialize the generic loop in GB_unop_transpose.c for this operator/type.
GrB_Info GB_unop_tran__lnot_int8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mandelbrot-8.c
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// Contributed by Jeremy Zerfas
// Partially based on code by Elam Kolenovic and Sean Stanek

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define LIMIT_SQUARED 4.0
#define MAX_ITERATIONS 50

int main(int argc, char ** argv){
   // The original called atoi(argv[1]) without checking argc, which is
   // undefined behavior when the argument is missing.
   if(argc<2){
      fprintf(stderr, "usage: %s <image_size>\n", argv[0]);
      return 1;
   }

   // Ensure image_Width_And_Height are multiples of 8.
   const intmax_t image_Width_And_Height=(atoi(argv[1])+7)/8*8;

   // The image will be black and white with one bit for each pixel. Bits with
   // a value of zero are white pixels which are the ones that "escape" from
   // the Mandelbrot set. We'll be working on one line at a time and each line
   // will be made up of pixel groups that are eight pixels in size so each
   // pixel group will be one byte. This allows for some more optimizations to
   // be done.
   const intmax_t pixel_Groups_Per_Line=image_Width_And_Height/8;
   uint8_t * const pixels=malloc(image_Width_And_Height*
     image_Width_And_Height/8);
   if(!pixels){                      // was unchecked in the original
      fprintf(stderr, "out of memory\n");
      return 1;
   }

   // Precompute the initial real and imaginary values for each x and y
   // coordinate in the image. (VLAs: fine for benchmark-sized inputs, but
   // NOTE(review): very large sizes could overflow the stack.)
   double initial_r[image_Width_And_Height], initial_i[image_Width_And_Height];
   const double two_Over_Image_Width_And_Height=2.0/image_Width_And_Height;
   #pragma omp parallel for
   for(intmax_t xy=0; xy<image_Width_And_Height; xy++){
      initial_r[xy]=xy*two_Over_Image_Width_And_Height - 1.5;
      initial_i[xy]=xy*two_Over_Image_Width_And_Height - 1.0;
   }

   #pragma omp parallel for schedule(guided)
   for(intmax_t y=0; y<image_Width_And_Height; y++){
      const double prefetched_Initial_i=initial_i[y];
      uint8_t * const line_Pixel_Groups=&pixels[y*pixel_Groups_Per_Line];

      // Tracks whether the previous pixel group had any pixels escape. In
      // the original this was a function-scope `static`, shared (and raced
      // on) by every OpenMP thread. It is only a performance heuristic --
      // both branches below compute identical pixel values -- so making it
      // a per-iteration (hence per-thread) local removes the data race
      // without changing the output; the heuristic simply restarts at
      // "assume pixels escape" on each line.
      uint8_t any_Pixels_Escape=1;

      for(intmax_t x_Major=0; x_Major<image_Width_And_Height; x_Major+=8){
         // pixel_Group_r and pixel_Group_i will store real and imaginary
         // values for each pixel in the current pixel group as we perform
         // iterations. Set their initial values here.
         double pixel_Group_r[8], pixel_Group_i[8];
         const double * const current_Pixel_Group_Initial_r=
           &initial_r[x_Major];
         for(intmax_t x_Minor=0; x_Minor<8; ++x_Minor){
            pixel_Group_r[x_Minor]=current_Pixel_Group_Initial_r[x_Minor];
            pixel_Group_i[x_Minor]=prefetched_Initial_i;
         }

         // If any pixels from the previous pixel group escaped then we are
         // likely outside the Mandelbrot set or near the edge of it so
         // check whether pixels escape during each iteration. If no pixels
         // from the previous pixel group escaped then the pixels for the
         // current pixel group are likely to be in the Mandelbrot set so
         // we'll just perform all iterations and do one final check at the
         // end to see if any of the pixels escaped.
         uint8_t eight_Pixels;
         if(any_Pixels_Escape){
            // Assume all pixels are in the Mandelbrot set initially.
            eight_Pixels=0xff;

            intmax_t iteration=MAX_ITERATIONS;
            do{
               uint8_t current_Pixel_Bitmask=0x80;
               for(intmax_t x_Minor=0; x_Minor<8; x_Minor++){
                  // Only process the pixels that are still in the
                  // Mandelbrot set.
                  if(eight_Pixels & current_Pixel_Bitmask){
                     const double r=pixel_Group_r[x_Minor];
                     const double i=pixel_Group_i[x_Minor];

                     pixel_Group_i[x_Minor]=2.0*r*i + prefetched_Initial_i;
                     pixel_Group_r[x_Minor]=r*r - i*i +
                       current_Pixel_Group_Initial_r[x_Minor];

                     // Clear the bit for the pixel if it escapes from
                     // the Mandelbrot set.
                     if(r*r + i*i>LIMIT_SQUARED)
                        eight_Pixels ^= current_Pixel_Bitmask;
                  }

                  current_Pixel_Bitmask>>=1;
               }
            }while(eight_Pixels && --iteration);
         }else{
            // One more iteration is done further below which is why
            // MAX_ITERATIONS-1 iterations are done here instead of
            // MAX_ITERATIONS.
            for(intmax_t iteration=0; iteration<MAX_ITERATIONS-1;
              iteration++){
               for(intmax_t x_Minor=0; x_Minor<8; x_Minor++){
                  const double r=pixel_Group_r[x_Minor];
                  const double i=pixel_Group_i[x_Minor];

                  pixel_Group_i[x_Minor]=2.0*i*r + prefetched_Initial_i;
                  pixel_Group_r[x_Minor]=r*r - i*i +
                    current_Pixel_Group_Initial_r[x_Minor];
               }
            }

            // Assume all pixels escape initially.
            eight_Pixels=0x00;

            uint8_t current_Pixel_Bitmask=0x80;
            for(intmax_t x_Minor=0; x_Minor<8; x_Minor++){
               const double r=pixel_Group_r[x_Minor];
               const double i=pixel_Group_i[x_Minor];

               // Set the bit for pixels that are still in the Mandelbrot
               // set.
               if(r*r + i*i<=LIMIT_SQUARED)
                  eight_Pixels|=current_Pixel_Bitmask;

               current_Pixel_Bitmask>>=1;
            }
         }

         line_Pixel_Groups[x_Major>>3]=eight_Pixels;
         any_Pixels_Escape=eight_Pixels!=0xff;
      }
   }

   // Output the image to stdout. (%jd, not %ju: image_Width_And_Height is a
   // signed intmax_t, and a mismatched length/sign specifier is UB.)
   printf("P4\n%jd %jd\n", image_Width_And_Height, image_Width_And_Height);
   fwrite(pixels, image_Width_And_Height*image_Width_And_Height/8, 1, stdout);

   free(pixels);

   return 0;
}
main.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "omp.h" #include "functions.h" int main (int argc, char **argv) { int Nthreads = 20; omp_set_num_threads(Nthreads); //seed value for the randomizer double seed = clock(); //this will make your program run differently everytime //double seed = 0; //uncomment this and your program will behave the same everytime it's run srand(seed); //declare storage for an ElGamal cryptosytem unsigned int p, g, h, x; //begin with rank 0 getting user's input unsigned int n; printf("Enter a number of bits: "); fflush(stdout); char status = scanf("%u",&n); //make sure the input makes sense if ((n<9)||(n>31)) {//Updated bounds. 8 is no good (need to encode chars) printf("Unsupported bit size.\n"); return 0; } printf("\n"); //setup an ElGamal cryptosystem setupElGamal(n,&p,&g,&h,&x); int bufferSize = 1024; unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char)); //populate the string with a message strcpy(message, "Hello, this is the message as a string."); printf("Message = \"%s\"\n", message); /* Q1.1 Finish this line */ unsigned int charsPerInt = (n-1) / 8; padString(message, charsPerInt); printf("Padded Message = \"%s\"\n", message); unsigned int Nchars = strlen(message); unsigned int Nints = strlen(message)/charsPerInt; //storage for message as elements of Z_p unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int)); //storage for extra encryption coefficient unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int)); // cast the string into an unsigned int array convertStringToZ(message, Nchars, Zmessage, Nints); //Encrypt the Zmessage with the ElGamal cyrptographic system ElGamalEncrypt(Zmessage,a,Nints,p,g,h); printf("The encrypted text is: "); for (unsigned int i=0;i<Nints;i++) { printf("(%u,%u) ", Zmessage[i], a[i]); } printf("]\n"); //Decrypt the Zmessage with the ElGamal cyrptographic system 
ElGamalDecrypt(Zmessage,a,Nints,p,x); convertZToString(Zmessage, Nints, message, Nchars); printf("Decrypted Message = \"%s\"\n", message); printf("\n"); //Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel printf("Using %d OpenMP threads to find the secret key...\n", Nthreads); /* Q2.3 Parallelize this loop with OpenMP */ double startTime = omp_get_wtime(); unsigned int warningBool = 0; #pragma omp parallel for shared(warningBool) for (unsigned int i=0;i<p-1;i++) { if (warningBool == 0 && modExp(g,i+1,p)==h) { printf("Secret key found! x = %u \n", i+1); warningBool = 1; } } double endTime = omp_get_wtime(); double totalTime = endTime-startTime; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); return 0; }
GB_unop__identity_uint16_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_fp64) // op(A') function: GB (_unop_tran__identity_uint16_fp64) // C type: uint16_t // A type: double // cast: uint16_t cij = GB_cast_to_uint16_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_fp64) ( uint16_t *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, 
// A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
kpoint.c
/* Copyright (C) 2008 Atsushi Togo */ /* All rights reserved. */ /* This file is part of spglib. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include "mathfunc.h" #include "kpoint.h" #include "kgrid.h" #ifdef KPTWARNING #include <stdio.h> #define warning_print(...) fprintf(stderr,__VA_ARGS__) #else #define warning_print(...) 
#endif #define KPT_NUM_BZ_SEARCH_SPACE 125 static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = { { 0, 0, 0}, { 0, 0, 1}, { 0, 0, 2}, { 0, 0, -2}, { 0, 0, -1}, { 0, 1, 0}, { 0, 1, 1}, { 0, 1, 2}, { 0, 1, -2}, { 0, 1, -1}, { 0, 2, 0}, { 0, 2, 1}, { 0, 2, 2}, { 0, 2, -2}, { 0, 2, -1}, { 0, -2, 0}, { 0, -2, 1}, { 0, -2, 2}, { 0, -2, -2}, { 0, -2, -1}, { 0, -1, 0}, { 0, -1, 1}, { 0, -1, 2}, { 0, -1, -2}, { 0, -1, -1}, { 1, 0, 0}, { 1, 0, 1}, { 1, 0, 2}, { 1, 0, -2}, { 1, 0, -1}, { 1, 1, 0}, { 1, 1, 1}, { 1, 1, 2}, { 1, 1, -2}, { 1, 1, -1}, { 1, 2, 0}, { 1, 2, 1}, { 1, 2, 2}, { 1, 2, -2}, { 1, 2, -1}, { 1, -2, 0}, { 1, -2, 1}, { 1, -2, 2}, { 1, -2, -2}, { 1, -2, -1}, { 1, -1, 0}, { 1, -1, 1}, { 1, -1, 2}, { 1, -1, -2}, { 1, -1, -1}, { 2, 0, 0}, { 2, 0, 1}, { 2, 0, 2}, { 2, 0, -2}, { 2, 0, -1}, { 2, 1, 0}, { 2, 1, 1}, { 2, 1, 2}, { 2, 1, -2}, { 2, 1, -1}, { 2, 2, 0}, { 2, 2, 1}, { 2, 2, 2}, { 2, 2, -2}, { 2, 2, -1}, { 2, -2, 0}, { 2, -2, 1}, { 2, -2, 2}, { 2, -2, -2}, { 2, -2, -1}, { 2, -1, 0}, { 2, -1, 1}, { 2, -1, 2}, { 2, -1, -2}, { 2, -1, -1}, {-2, 0, 0}, {-2, 0, 1}, {-2, 0, 2}, {-2, 0, -2}, {-2, 0, -1}, {-2, 1, 0}, {-2, 1, 1}, {-2, 1, 2}, {-2, 1, -2}, {-2, 1, -1}, {-2, 2, 0}, {-2, 2, 1}, {-2, 2, 2}, {-2, 2, -2}, {-2, 2, -1}, {-2, -2, 0}, {-2, -2, 1}, {-2, -2, 2}, {-2, -2, -2}, {-2, -2, -1}, {-2, -1, 0}, {-2, -1, 1}, {-2, -1, 2}, {-2, -1, -2}, {-2, -1, -1}, {-1, 0, 0}, {-1, 0, 1}, {-1, 0, 2}, {-1, 0, -2}, {-1, 0, -1}, {-1, 1, 0}, {-1, 1, 1}, {-1, 1, 2}, {-1, 1, -2}, {-1, 1, -1}, {-1, 2, 0}, {-1, 2, 1}, {-1, 2, 2}, {-1, 2, -2}, {-1, 2, -1}, {-1, -2, 0}, {-1, -2, 1}, {-1, -2, 2}, {-1, -2, -2}, {-1, -2, -1}, {-1, -1, 0}, {-1, -1, 1}, {-1, -1, 2}, {-1, -1, -2}, {-1, -1, -1} }; static MatINT *get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal); static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const size_t num_q, SPGCONST double qpoints[][3]); static size_t 
get_dense_ir_reciprocal_mesh(int grid_address[][3], size_t ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal); static size_t get_dense_ir_reciprocal_mesh_normal(int grid_address[][3], size_t ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal); static size_t get_dense_ir_reciprocal_mesh_distortion(int grid_address[][3], size_t ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal); static size_t get_dense_num_ir(size_t ir_mapping_table[], const int mesh[3]); static size_t relocate_dense_BZ_grid_address(int bz_grid_address[][3], size_t bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]); static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3], const int mesh[3]); static int check_mesh_symmetry(const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal); /* grid_address (e.g. 4x4x4 mesh, unless GRID_ORDER_XYZ is defined) */ /* [[ 0 0 0] */ /* [ 1 0 0] */ /* [ 2 0 0] */ /* [-1 0 0] */ /* [ 0 1 0] */ /* [ 1 1 0] */ /* [ 2 1 0] */ /* [-1 1 0] */ /* .... ] */ /* */ /* Each value of 'map' correspnds to the index of grid_point. 
*/ int kpt_get_irreducible_reciprocal_mesh(int grid_address[][3], int ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { int num_ir; size_t i; size_t *dense_ir_mapping_table; if ((dense_ir_mapping_table = (size_t*)malloc(sizeof(size_t) * mesh[0] * mesh[1] * mesh[2])) == NULL) { warning_print("spglib: Memory of unique_rot could not be allocated."); return 0; } num_ir = kpt_get_dense_irreducible_reciprocal_mesh(grid_address, dense_ir_mapping_table, mesh, is_shift, rot_reciprocal); for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { ir_mapping_table[i] = dense_ir_mapping_table[i]; } free(dense_ir_mapping_table); dense_ir_mapping_table = NULL; return num_ir; } size_t kpt_get_dense_irreducible_reciprocal_mesh(int grid_address[][3], size_t ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { size_t num_ir; num_ir = get_dense_ir_reciprocal_mesh(grid_address, ir_mapping_table, mesh, is_shift, rot_reciprocal); return num_ir; } int kpt_get_stabilized_reciprocal_mesh(int grid_address[][3], int ir_mapping_table[], const int mesh[3], const int is_shift[3], const int is_time_reversal, const MatINT * rotations, const size_t num_q, SPGCONST double qpoints[][3]) { int num_ir; size_t i; size_t *dense_ir_mapping_table; if ((dense_ir_mapping_table = (size_t*)malloc(sizeof(size_t) * mesh[0] * mesh[1] * mesh[2])) == NULL) { warning_print("spglib: Memory of unique_rot could not be allocated."); return 0; } num_ir = kpt_get_dense_stabilized_reciprocal_mesh(grid_address, dense_ir_mapping_table, mesh, is_shift, is_time_reversal, rotations, num_q, qpoints); for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { ir_mapping_table[i] = dense_ir_mapping_table[i]; } free(dense_ir_mapping_table); dense_ir_mapping_table = NULL; return num_ir; } size_t kpt_get_dense_stabilized_reciprocal_mesh(int grid_address[][3], size_t ir_mapping_table[], const int mesh[3], const int is_shift[3], const int is_time_reversal, const 
MatINT * rotations, const size_t num_q, SPGCONST double qpoints[][3]) { size_t num_ir; MatINT *rot_reciprocal, *rot_reciprocal_q; double tolerance; rot_reciprocal = NULL; rot_reciprocal_q = NULL; rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal); tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]); rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal, tolerance, num_q, qpoints); num_ir = get_dense_ir_reciprocal_mesh(grid_address, ir_mapping_table, mesh, is_shift, rot_reciprocal_q); mat_free_MatINT(rot_reciprocal_q); rot_reciprocal_q = NULL; mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return num_ir; } void kpt_get_grid_points_by_rotations(int rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int is_shift[3]) { int i; size_t *dense_rot_grid_points; if ((dense_rot_grid_points = (size_t*)malloc(sizeof(size_t) * rot_reciprocal->size)) == NULL) { warning_print("spglib: Memory of unique_rot could not be allocated."); goto err; } kpt_get_dense_grid_points_by_rotations(dense_rot_grid_points, address_orig, rot_reciprocal, mesh, is_shift); for (i = 0; i < rot_reciprocal->size; i++) { rot_grid_points[i] = dense_rot_grid_points[i]; } free(dense_rot_grid_points); dense_rot_grid_points = NULL; err: ; } void kpt_get_dense_grid_points_by_rotations(size_t rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int is_shift[3]) { int i; int address_double_orig[3], address_double[3]; for (i = 0; i < 3; i++) { address_double_orig[i] = address_orig[i] * 2 + is_shift[i]; } for (i = 0; i < rot_reciprocal->size; i++) { mat_multiply_matrix_vector_i3(address_double, rot_reciprocal->mat[i], address_double_orig); rot_grid_points[i] = kgd_get_dense_grid_point_double_mesh(address_double, mesh); } } void kpt_get_BZ_grid_points_by_rotations(int rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int 
is_shift[3], const int bz_map[]) { int i, num_bz_map; size_t *dense_rot_grid_points, *dense_bz_map; if ((dense_rot_grid_points = (size_t*)malloc(sizeof(size_t) * rot_reciprocal->size)) == NULL) { warning_print("spglib: Memory of unique_rot could not be allocated."); goto err; } num_bz_map = mesh[0] * mesh[1] * mesh[2] * 8; if ((dense_bz_map = (size_t*)malloc(sizeof(size_t) * num_bz_map)) == NULL) { warning_print("spglib: Memory of unique_rot could not be allocated."); free(dense_rot_grid_points); dense_rot_grid_points = NULL; goto err; } for (i = 0; i < num_bz_map; i++) { dense_bz_map[i] = bz_map[i]; } kpt_get_dense_BZ_grid_points_by_rotations(dense_rot_grid_points, address_orig, rot_reciprocal, mesh, is_shift, dense_bz_map); free(dense_bz_map); dense_bz_map = NULL; for (i = 0; i < rot_reciprocal->size; i++) { rot_grid_points[i] = dense_rot_grid_points[i]; } free(dense_rot_grid_points); dense_rot_grid_points = NULL; err: ; } void kpt_get_dense_BZ_grid_points_by_rotations(size_t rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int is_shift[3], const size_t bz_map[]) { int i; int address_double_orig[3], address_double[3], bzmesh[3]; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; address_double_orig[i] = address_orig[i] * 2 + is_shift[i]; } for (i = 0; i < rot_reciprocal->size; i++) { mat_multiply_matrix_vector_i3(address_double, rot_reciprocal->mat[i], address_double_orig); rot_grid_points[i] = bz_map[kgd_get_dense_grid_point_double_mesh(address_double, bzmesh)]; } } int kpt_relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { int i, num_bz_map, num_bzgp; size_t *dense_bz_map; num_bz_map = mesh[0] * mesh[1] * mesh[2] * 8; if ((dense_bz_map = (size_t*)malloc(sizeof(size_t) * num_bz_map)) == NULL) { warning_print("spglib: Memory of unique_rot could not be allocated."); return 0; } num_bzgp = 
kpt_relocate_dense_BZ_grid_address(bz_grid_address, dense_bz_map, grid_address, mesh, rec_lattice, is_shift); for (i = 0; i < num_bz_map; i++) { if (dense_bz_map[i] == num_bz_map) { bz_map[i] = -1; } else { bz_map[i] = dense_bz_map[i]; } } free(dense_bz_map); dense_bz_map = NULL; return num_bzgp; } size_t kpt_relocate_dense_BZ_grid_address(int bz_grid_address[][3], size_t bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { return relocate_dense_BZ_grid_address(bz_grid_address, bz_map, grid_address, mesh, rec_lattice, is_shift); } MatINT *kpt_get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal) { return get_point_group_reciprocal(rotations, is_time_reversal); } MatINT *kpt_get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const size_t num_q, SPGCONST double qpoints[][3]) { return get_point_group_reciprocal_with_q(rot_reciprocal, symprec, num_q, qpoints); } /* Return NULL if failed */ static MatINT *get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal) { int i, j, num_rot; MatINT *rot_reciprocal, *rot_return; int *unique_rot; SPGCONST int inversion[3][3] = { {-1, 0, 0 }, { 0,-1, 0 }, { 0, 0,-1 } }; rot_reciprocal = NULL; rot_return = NULL; unique_rot = NULL; if (is_time_reversal) { if ((rot_reciprocal = mat_alloc_MatINT(rotations->size * 2)) == NULL) { return NULL; } } else { if ((rot_reciprocal = mat_alloc_MatINT(rotations->size)) == NULL) { return NULL; } } if ((unique_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) { warning_print("spglib: Memory of unique_rot could not be allocated."); mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return NULL; } for (i = 0; i < rot_reciprocal->size; i++) { unique_rot[i] = -1; } for (i = 0; i < rotations->size; i++) { mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]); if (is_time_reversal) { 
mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size+i], inversion, rot_reciprocal->mat[i]); } } num_rot = 0; for (i = 0; i < rot_reciprocal->size; i++) { for (j = 0; j < num_rot; j++) { if (mat_check_identity_matrix_i3(rot_reciprocal->mat[unique_rot[j]], rot_reciprocal->mat[i])) { goto escape; } } unique_rot[num_rot] = i; num_rot++; escape: ; } if ((rot_return = mat_alloc_MatINT(num_rot)) != NULL) { for (i = 0; i < num_rot; i++) { mat_copy_matrix_i3(rot_return->mat[i], rot_reciprocal->mat[unique_rot[i]]); } } free(unique_rot); unique_rot = NULL; mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return rot_return; } /* Return NULL if failed */ static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const size_t num_q, SPGCONST double qpoints[][3]) { int i, j, k, l, is_all_ok, num_rot; int *ir_rot; double q_rot[3], diff[3]; MatINT * rot_reciprocal_q; ir_rot = NULL; rot_reciprocal_q = NULL; is_all_ok = 0; num_rot = 0; if ((ir_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) { warning_print("spglib: Memory of ir_rot could not be allocated."); return NULL; } for (i = 0; i < rot_reciprocal->size; i++) { ir_rot[i] = -1; } for (i = 0; i < rot_reciprocal->size; i++) { for (j = 0; j < num_q; j++) { is_all_ok = 0; mat_multiply_matrix_vector_id3(q_rot, rot_reciprocal->mat[i], qpoints[j]); for (k = 0; k < num_q; k++) { for (l = 0; l < 3; l++) { diff[l] = q_rot[l] - qpoints[k][l]; diff[l] -= mat_Nint(diff[l]); } if (mat_Dabs(diff[0]) < symprec && mat_Dabs(diff[1]) < symprec && mat_Dabs(diff[2]) < symprec) { is_all_ok = 1; break; } } if (! 
is_all_ok) { break; } } if (is_all_ok) { ir_rot[num_rot] = i; num_rot++; } } if ((rot_reciprocal_q = mat_alloc_MatINT(num_rot)) != NULL) { for (i = 0; i < num_rot; i++) { mat_copy_matrix_i3(rot_reciprocal_q->mat[i], rot_reciprocal->mat[ir_rot[i]]); } } free(ir_rot); ir_rot = NULL; return rot_reciprocal_q; } static size_t get_dense_ir_reciprocal_mesh(int grid_address[][3], size_t ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { if (check_mesh_symmetry(mesh, is_shift, rot_reciprocal)) { return get_dense_ir_reciprocal_mesh_normal(grid_address, ir_mapping_table, mesh, is_shift, rot_reciprocal); } else { return get_dense_ir_reciprocal_mesh_distortion(grid_address, ir_mapping_table, mesh, is_shift, rot_reciprocal); } } static size_t get_dense_ir_reciprocal_mesh_normal(int grid_address[][3], size_t ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { /* In the following loop, mesh is doubled. */ /* Even and odd mesh numbers correspond to */ /* is_shift[i] are 0 or 1, respectively. */ /* is_shift = [0,0,0] gives Gamma center mesh. */ /* grid: reducible grid points */ /* ir_mapping_table: the mapping from each point to ir-point. 
*/ size_t i, grid_point_rot; int j; int address_double[3], address_double_rot[3]; kgd_get_all_grid_addresses(grid_address, mesh); #pragma omp parallel for private(j, grid_point_rot, address_double, address_double_rot) for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) { kgd_get_grid_address_double_mesh(address_double, grid_address[i], mesh, is_shift); ir_mapping_table[i] = i; for (j = 0; j < rot_reciprocal->size; j++) { mat_multiply_matrix_vector_i3(address_double_rot, rot_reciprocal->mat[j], address_double); grid_point_rot = kgd_get_dense_grid_point_double_mesh(address_double_rot, mesh); if (grid_point_rot < ir_mapping_table[i]) { #ifdef _OPENMP ir_mapping_table[i] = grid_point_rot; #else ir_mapping_table[i] = ir_mapping_table[grid_point_rot]; break; #endif } } } return get_dense_num_ir(ir_mapping_table, mesh); } static size_t get_dense_ir_reciprocal_mesh_distortion(int grid_address[][3], size_t ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { size_t i, grid_point_rot; int j, k, indivisible; int address_double[3], address_double_rot[3], divisor[3]; kgd_get_all_grid_addresses(grid_address, mesh); for (j = 0; j < 3; j++) { divisor[j] = mesh[(j + 1) % 3] * mesh[(j + 2) % 3]; } #pragma omp parallel for private(j, k, grid_point_rot, address_double, address_double_rot) for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) { kgd_get_grid_address_double_mesh(address_double, grid_address[i], mesh, is_shift); for (j = 0; j < 3; j++) { address_double[j] *= divisor[j]; } ir_mapping_table[i] = i; for (j = 0; j < rot_reciprocal->size; j++) { mat_multiply_matrix_vector_i3(address_double_rot, rot_reciprocal->mat[j], address_double); for (k = 0; k < 3; k++) { indivisible = address_double_rot[k] % divisor[k]; if (indivisible) {break;} address_double_rot[k] /= divisor[k]; if ((address_double_rot[k] % 2 != 0 && is_shift[k] == 0) || (address_double_rot[k] % 2 == 0 && is_shift[k] == 1)) { indivisible = 1; break; } } if 
(indivisible) {continue;} grid_point_rot = kgd_get_grid_point_double_mesh(address_double_rot, mesh); if (grid_point_rot < ir_mapping_table[i]) { #ifdef _OPENMP ir_mapping_table[i] = grid_point_rot; #else ir_mapping_table[i] = ir_mapping_table[grid_point_rot]; break; #endif } } } return get_dense_num_ir(ir_mapping_table, mesh); } static size_t get_dense_num_ir(size_t ir_mapping_table[], const int mesh[3]) { size_t i, num_ir; num_ir = 0; #pragma omp parallel for reduction(+:num_ir) for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) { if (ir_mapping_table[i] == i) { num_ir++; } } #ifdef _OPENMP for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) { ir_mapping_table[i] = ir_mapping_table[ir_mapping_table[i]]; } #endif return num_ir; } static size_t relocate_dense_BZ_grid_address(int bz_grid_address[][3], size_t bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { double tolerance, min_distance; double q_vector[3], distance[KPT_NUM_BZ_SEARCH_SPACE]; int bzmesh[3], bz_address_double[3]; size_t i, boundary_num_gp, total_num_gp, bzgp, gp, num_bzmesh; int j, k, min_index; tolerance = get_tolerance_for_BZ_reduction(rec_lattice, mesh); for (j = 0; j < 3; j++) { bzmesh[j] = mesh[j] * 2; } num_bzmesh = bzmesh[0] * bzmesh[1] * (size_t)(bzmesh[2]); for (i = 0; i < num_bzmesh; i++) { bz_map[i] = num_bzmesh; } boundary_num_gp = 0; total_num_gp = mesh[0] * mesh[1] * (size_t)(mesh[2]); /* Multithreading doesn't work for this loop since gp calculated */ /* with boundary_num_gp is unstable to store bz_grid_address. 
*/ for (i = 0; i < total_num_gp; i++) { for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { for (k = 0; k < 3; k++) { q_vector[k] = ((grid_address[i][k] + bz_search_space[j][k] * mesh[k]) * 2 + is_shift[k]) / ((double)mesh[k]) / 2; } mat_multiply_matrix_vector_d3(q_vector, rec_lattice, q_vector); distance[j] = mat_norm_squared_d3(q_vector); } min_distance = distance[0]; min_index = 0; for (j = 1; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance) { min_distance = distance[j]; min_index = j; } } for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance + tolerance) { if (j == min_index) { gp = i; } else { gp = boundary_num_gp + total_num_gp; } for (k = 0; k < 3; k++) { bz_grid_address[gp][k] = grid_address[i][k] + bz_search_space[j][k] * mesh[k]; bz_address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k]; } bzgp = kgd_get_dense_grid_point_double_mesh(bz_address_double, bzmesh); bz_map[bzgp] = gp; if (j != min_index) { boundary_num_gp++; } } } } return boundary_num_gp + total_num_gp; } static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3], const int mesh[3]) { int i, j; double tolerance; double length[3]; for (i = 0; i < 3; i++) { length[i] = 0; for (j = 0; j < 3; j++) { length[i] += rec_lattice[j][i] * rec_lattice[j][i]; } length[i] /= mesh[i] * mesh[i]; } tolerance = length[0]; for (i = 1; i < 3; i++) { if (tolerance < length[i]) { tolerance = length[i]; } } tolerance *= 0.01; return tolerance; } static int check_mesh_symmetry(const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { int i; int eq[3]; eq[0] = 0; /* a=b */ eq[1] = 0; /* b=c */ eq[2] = 0; /* c=a */ for (i = 0; i < rot_reciprocal->size; i++) { if (rot_reciprocal->mat[i][0][0] == 0 && rot_reciprocal->mat[i][1][0] == 1 && rot_reciprocal->mat[i][2][0] == 0) {eq[0] = 1;} if (rot_reciprocal->mat[i][0][0] == 0 && rot_reciprocal->mat[i][1][0] == 0 && rot_reciprocal->mat[i][2][0] == 1) {eq[2] = 1;} if (rot_reciprocal->mat[i][0][1] 
== 0 && rot_reciprocal->mat[i][1][1] == 0 && rot_reciprocal->mat[i][2][1] == 1) {eq[1] = 1;} } return (((eq[0] && mesh[0] == mesh[1] && is_shift[0] == is_shift[1]) || (!eq[0])) && ((eq[1] && mesh[1] == mesh[2] && is_shift[1] == is_shift[2]) || (!eq[1])) && ((eq[2] && mesh[2] == mesh[0] && is_shift[2] == is_shift[0]) || (!eq[2]))); }
dlag2s.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlag2c.c, mixed zc -> ds, Fri Sep 28 17:38:17 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"

/***************************************************************************//**
 *
 * @ingroup plasma_lag2
 *
 *  Converts m-by-n matrix A from double to single (real) precision.
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the matrix A. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= 0.
 *
 * @param[in] pA
 *          The lda-by-n matrix A in double precision.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 * @param[out] pAs
 *          On exit, the ldas-by-n matrix As in single precision.
 *
 * @param[in] ldas
 *          The leading dimension of the array As. ldas >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 *
 *******************************************************************************
 *
 * @sa plasma_omp_dlag2s
 * @sa plasma_slag2d
 *
 ******************************************************************************/
int plasma_dlag2s(int m, int n,
                  double *pA,  int lda,
                  float *pAs, int ldas)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    if (ldas < imax(1, m)) {
        plasma_error("illegal value of ldas");
        return -6;
    }

    // quick return
    if (imin(n, m) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_lag2c(plasma, PlasmaRealDouble, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t As;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        m, n, 0, 0, m, n, &As);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence (result was previously ignored).
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&As);
        return retval;
    }

    // Initialize request (result was previously ignored).
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&As);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_sge2desc(pAs, ldas, As, &sequence, &request);

        // Call tile async function.
        plasma_omp_dlag2s(A, As, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(A, pA, lda, &sequence, &request);
        plasma_omp_sdesc2ge(As, pAs, ldas, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&As);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_lag2
 *
 *  Converts m-by-n matrix A from double to single precision.
 *  Non-blocking tile version of plasma_dlag2s().  May return before the
 *  computation is finished.  Operates on matrices stored by tiles.  All
 *  matrices are passed through descriptors.  All dimensions are taken from
 *  the descriptors.  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *
 * @param[out] As
 *          Descriptor of matrix As.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).  Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.  The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_dlag2s
 * @sa plasma_omp_slag2d
 *
 ******************************************************************************/
void plasma_omp_dlag2s(plasma_desc_t A, plasma_desc_t As,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check sequence and request first: the error paths below pass them to
    // plasma_request_fail(), which dereferences both.  The original code
    // called plasma_request_fail(sequence, ...) with sequence == NULL.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(As) != PlasmaSuccess) {
        plasma_error("invalid As");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0)
        return;

    // Call the parallel function.
    plasma_pdlag2s(A, As, sequence, request);
}
GB_binop__land_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__land_uint16
// A.*B function (eWiseMult):       GB_AemultB__land_uint16
// A*D function (colscale):         GB_AxD__land_uint16
// D*A function (rowscale):         GB_DxB__land_uint16
// C+=B function (dense accum):     GB_Cdense_accumB__land_uint16
// C+=b function (dense accum):     GB_Cdense_accumb__land_uint16
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__land_uint16
// C=scalar+B                       GB_bind1st__land_uint16
// C=scalar+B'                      GB_bind1st_tran__land_uint16
// C=A+scalar                       GB_bind2nd__land_uint16
// C=A'+scalar                      GB_bind2nd_tran__land_uint16

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = ((x != 0) && (y != 0)) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_UINT16 || GxB_NO_LAND_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// LAND is not one of the supported accumulators for this kernel, so the
// ewise3-accum variant is compiled out.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__land_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__land_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__land_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block already returned; kept
    // as generated (do not edit auto-generated files by hand).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__land_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__land_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__land_uint16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__land_uint16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__land_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries absent from the bitmap
        uint16_t bij = Bx [p] ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__land_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // skip entries absent from the bitmap
        uint16_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = Ax [pA] ;                    \
    Cx [pC] = ((x != 0) && (aij != 0)) ;        \
}

GrB_Info GB_bind1st_tran__land_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use of the macro
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = Ax [pA] ;                    \
    Cx [pC] = ((aij != 0) && (y != 0)) ;        \
}

GrB_Info GB_bind2nd_tran__land_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Proyek Akhir Semester.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define DELAY 100000
#include <omp.h>

/*
 * Sistem Identifikasi Zona Daerah (regional zone identification system)
 * Agung Firmansyah (2006577454)
 * Brian Christian Pangaribuan (2006577510)
 * Muhammad Aditya Kurniawan (200577340)
 *
 * Records positive cases per region from symptom codes (1-10) entered per
 * resident.  A resident whose symptom total reaches 25 is positive.  A
 * region where positives / population >= 0.51 is a red zone ("Merah"),
 * otherwise green ("Hijau").  Elapsed time is measured with OpenMP's
 * omp_get_wtime(); it grows with the number of residents and/or regions.
 */

/* Resident record (singly linked list node). */
typedef struct warga {
    int gejala;              /* accumulated symptom score */
    char positif[2];         /* status "+" or "-"; was char[1], so printing
                                it with %s read past the buffer (UB) */
    struct warga *next;      /* next resident in the region */
    char nama[100];          /* was a flexible array member with zero bytes
                                allocated — every name write overflowed the
                                heap block; now fixed-size */
} warga;

/* Region record (singly linked list node). */
typedef struct daerah {
    int Positif;             /* number of positive residents */
    int nWarga;              /* region population */
    char zona[6];            /* "Merah"/"Hijau" + NUL; was char[5], one byte
                                too small for strcpy */
    struct warga *head;      /* first resident */
    struct daerah *next;     /* next region */
    char kota[100];          /* region name */
} daerah;

/* Prototypes (interfaces unchanged). */
void printGejala();                            /* print the symptom menu */
warga *buatinPenduduk();                       /* read one resident */
daerah *buatinDaerah();                        /* read one region */
void printPenduduk(warga *temp, FILE *fptr);   /* print residents of a region */
void printsemua(daerah *temp, FILE *fptr, int waktu); /* full report */

int main(){
    daerah *head = NULL;               /* region list is empty initially */
    daerah *pred = NULL, *temp;        /* region cursor nodes */
    warga *Wpred = NULL, *Wtemp;       /* resident cursor nodes */
    int Daerah, i, j;
    volatile int del;                  /* volatile keeps the delay loop alive */
    int waktu;                         /* whole "days" elapsed */
    double waktu_1, waktu_2;           /* omp_get_wtime() returns double; the
                                          old int variables truncated it */
    FILE *fptr;

    fptr = fopen("adit.txt", "w");     /* report output file */
    if (fptr == NULL) {                /* was unchecked before */
        perror("adit.txt");
        return 1;
    }

    printf(" --------------------------------------------------------------------------------------------------\n");
    printf("| Masukkan jumlah daerah: ");
    if (scanf("%d", &Daerah) != 1) {
        Daerah = 0;                    /* unreadable input -> no regions */
    }

    waktu_1 = omp_get_wtime();         /* start timer */
    /* NOTE: the original "#pragma omp for" here sat outside any parallel
     * region (a no-op), and the loop carries dependencies through
     * pred/head/boundary pointers, so it must run sequentially. */
    for (i = 0; i < Daerah; i++) {
        temp = buatinDaerah();                       /* read region header */
        Wpred = temp->head;
        for (j = 0; j < temp->nWarga; j++) {
            Wtemp = buatinPenduduk();                /* read one resident */
            if (Wtemp->positif[0] == '+')
                temp->Positif += 1;                  /* tally positives */
            if (Wpred == NULL) {
                temp->head = Wtemp;                  /* first resident */
            }
            else {
                Wpred->next = Wtemp;                 /* append */
            }
            Wpred = Wtemp;
        }
        for (del = 0; del < DELAY; del++);           /* delay so time != 0 */
        /* nWarga > 0 guard prevents division by zero for empty regions. */
        if (temp->nWarga > 0 &&
            1.0 * temp->Positif / temp->nWarga >= 0.51) {
            strcpy(temp->zona, "Merah");             /* red zone */
        }
        else
            strcpy(temp->zona, "Hijau");             /* green zone */
        if (pred == NULL)
            head = temp;                             /* first region */
        else
            pred->next = temp;                       /* append region */
        pred = temp;
    }
    waktu_2 = omp_get_wtime();                       /* stop timer */
    waktu = (int)((waktu_2 - waktu_1) / 6);          /* total time in "days" */

    printsemua(head, fptr, waktu);
    fclose(fptr);
    return 0;
}

/* Print the symptom menu once.  The parallel region is kept from the
 * original (course requirement); "master" ensures a single printout. */
void printGejala(){
    #pragma omp parallel num_threads(4)
    {
        #pragma omp master
        {
            printf("| Daftar Gejala dari teringan hingga terberat:\n");
            printf("| 1. Hidung tersumbat\n| 2. Batuk ringan\n| 3. Sakit kepala\n| 4. Asma\n| 5. Diare\n| 6. Demam tinggi\n");
            printf("| 7. Kulit, bibir, dan kuku membiru\n| 8. Kehilangan indera perasa\n| 9. Kehilangan indera penciuman\n| 10. Pneumonia kronis\n");
            printf("| Masukkan -1 untuk berhenti menginput gejala!\n");
            printf("|-------------------------------------------------------------------------------------------\n");
        }
    }
}

/* Read one resident: name plus symptom codes until -1 or invalid input.
 * Returns a heap-allocated node; exits on allocation failure. */
warga *buatinPenduduk(){
    warga *newNode = malloc(sizeof *newNode);
    if (newNode == NULL) {               /* was unchecked before */
        fprintf(stderr, "malloc gagal\n");
        exit(EXIT_FAILURE);
    }
    newNode->next = NULL;
    newNode->gejala = 0;
    printf("|-------------------------------------------------------------------------------------------\n");
    printf("| Nama penduduk : ");
    /* " %99[^\n]" skips pending whitespace (replacing the UB fflush(stdin))
     * and bounds the read to the buffer size. */
    if (scanf(" %99[^\n]", newNode->nama) != 1)
        newNode->nama[0] = '\0';
    printGejala();
    printf("|-------------------------------------------------------------------------------------------\n");
    printf("| Masukkan gejala : \n");
    int Gejala[11] = {0};   /* slots 1..10; was int[10], so Gejala[10] was
                               an out-of-bounds write for symptom 10 */
    int input;
    do {
        printf("| ");
        if (scanf("%d", &input) != 1)
            input = -1;     /* non-numeric input ends the loop instead of
                               spinning forever on the unread token */
        if (input < 11 && input > 0)
            if (Gejala[input] == 0) {   /* count each symptom only once */
                Gejala[input] = 1;
                newNode->gejala += input;
            }
    } while (input != -1 && input > 0 && input < 11);
    if (newNode->gejala >= 25) {        /* threshold for a positive case */
        newNode->positif[0] = '+';
    }
    else
        newNode->positif[0] = '-';
    newNode->positif[1] = '\0';         /* terminate so %s printing is safe */
    return newNode;
}

/* Read one region: name and population.  Returns a heap-allocated node;
 * exits on allocation failure. */
daerah *buatinDaerah(){
    daerah *baru = malloc(sizeof *baru);
    if (baru == NULL) {                  /* was unchecked before */
        fprintf(stderr, "malloc gagal\n");
        exit(EXIT_FAILURE);
    }
    baru->next = NULL;
    baru->head = NULL;
    baru->Positif = 0;
    printf("|-------------------------------------------------------------------------------------------\n|");
    printf("\n| Nama daerah : ");
    /* Bounded read into kota[100]; was scanf("%s", &array) (unbounded and
     * a pointer-type mismatch). */
    if (scanf(" %99s", baru->kota) != 1)
        baru->kota[0] = '\0';
    printf("|-------------------------------------------------------------------------------------------\n");
    printf("| Jumlah penduduk : ");
    if (scanf("%d", &baru->nWarga) != 1 || baru->nWarga < 0)
        baru->nWarga = 0;               /* bad input -> empty region */
    return baru;
}

/* Print a region's residents as "(name [+], name [-])". */
void printPenduduk(warga *temp, FILE *fptr){
    fprintf(fptr, "(");
    while (temp != NULL) {
        if (temp->next != NULL)
            fprintf(fptr, "%s [%s], ", temp->nama, temp->positif); /* comma between entries */
        else
            fprintf(fptr, "%s [%s]", temp->nama, temp->positif);   /* last entry: no comma */
        temp = temp->next;
    }
    fprintf(fptr, ")\n");
}

/* Write the full report: program description, then per-region name, zone,
 * positive count and resident list. */
void printsemua(daerah *temp, FILE *fptr, int waktu){
    /* program description header */
    fprintf(fptr, " ===============================================================================================================\n");
    fprintf(fptr, "| Berikut adalah program yang mendata kasus positif di suatu daerah berdasarkan gejala yang dimasukkan (1 - 10).\n");
    fprintf(fptr, "| Jika gejala yang diinput totalnya lebih besar daripada 25, penduduk dinyatakan positif.\n");
    fprintf(fptr, "| Total penduduk yang positif pada suatu daerah dibagi dengan total penduduknya jika lebih besar dan sama dengan 0.51,\n");
    fprintf(fptr, "| daerah dinyatakan zona merah.\n");
    fprintf(fptr, " ===============================================================================================================\n");
    fprintf(fptr, "| Kasus korona yang berlangsung selama %d hari mempunyai data sebagai berikut\n", waktu);
    /* per-region section */
    while (temp != NULL) {
        fprintf(fptr, " ==============================================================================\n|\n");
        fprintf(fptr, "| Nama Daerah : %s [%s]", temp->kota, temp->zona);
        fprintf(fptr, "\n| Jumlah Positif : %d", temp->Positif);
        fprintf(fptr, "\n| Daftar Warga : ");
        printPenduduk(temp->head, fptr);
        fprintf(fptr, "|\n ==============================================================================\n\n\n");
        temp = temp->next;
    }
}
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 24; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
kernel.h
#ifndef __KERNEL_H__ #define __KERNEL_H__ #include "../sparse_formats/csr.h" namespace CTF{ #ifdef __CUDACC__ #define NBLK 15 #define NTRD 512 template<typename dtype_A, typename dtype_B, typename dtype_C, dtype_C(*f)(dtype_A, dtype_B), void(*g)(dtype_C, dtype_C&)> __global__ void cuda_gemmf(char tA, char tB, int m, int n, int k, dtype_A const * A, dtype_B const * B, dtype_C * C){ int bidx = blockIdx.x; int tidx = threadIdx.x; int lda_A_m = tA == 'N' ? 1 : k; int lda_A_k = tA == 'N' ? m : 1; int lda_B_k = tB == 'N' ? 1 : n; int lda_B_n = tB == 'N' ? k : 1; for (int mi=bidx; mi<m; mi+=NBLK){ for (int ni=tidx; ni<n; ni+=NTRD){ for (int ki=0; ki<k; ki++){ g(f(A[mi*lda_A_m+ki*lda_A_k], B[ki*lda_B_k+ni*lda_B_n]), C[mi +ni*m]); } } } } template<typename dtype_A, typename dtype_B, typename dtype_C, dtype_C(*f)(dtype_A, dtype_B), void(*g)(dtype_C, dtype_C&)> __device__ void cuda_csrmmf(int m, int n, int k, dtype_A const * A, int const * JA, int const * IA, dtype_B const * B, dtype_C * C){ int bidx = blockIdx.x; int tidx = threadIdx.x; for (int col_B=bidx; col_B<n; col_B+=NBLK){ for (int row_A=tidx; row_A<m; row_A+=NTRD){ for (int i_A=IA[row_A]-1; i_A<IA[row_A+1]-1; i_A++){ int col_A = JA[i_A]-1; g(f(A[i_A],B[col_B*k+col_A]),C[col_B*m+row_A]); } } } } template<typename dtype_A, typename dtype_B, typename dtype_C, dtype_C(*f)(dtype_A, dtype_B), void(*g)(dtype_C, dtype_C&)> __device__ void cuda_csrmmf(int m, int n, int k, dtype_A const * A, int const * JA, int const * IA, dtype_B const * B, dtype_C * C){ int bidx = blockIdx.x; int tidx = threadIdx.x; for (int col_B=bidx; col_B<n; col_B+=NBLK){ for (int row_A=tidx; row_A<m; row_A+=NTRD){ for (int i_A=IA[row_A]-1; i_A<IA[row_A+1]-1; i_A++){ int col_A = JA[i_A]-1; g(f(A[i_A],B[col_B*k+col_A]),C[col_B*m+row_A]); } } } } //FIXME there is code replication here with ../sparse_foramts/csr.cxx #define ALIGN 256 template<typename dtype_A, typename dtype_B, typename dtype_C, dtype_C(*f)(dtype_A, dtype_B), void(*g)(dtype_C, dtype_C&)> 
__global__ void offload_csrmm(int m, int n, int k, char const * all_data, dtype_B const * B, dtype_C * C){ int64_t nnz_A = ((int64_t*)all_data)[0]; int offset = 3*sizeof(int64_t); if (offset % ALIGN != 0) offset += ALIGN-(offset%ALIGN); dtype_A const * A = (dtype_A const *)(all_data + offset); offset += nnz_A*sizeof(dtype_A); if (offset % ALIGN != 0) offset += ALIGN-(offset%ALIGN); int const * IA = (int*)(all_data + offset); offset += (m+1)*sizeof(int); if (offset % ALIGN != 0) offset += ALIGN-(offset%ALIGN); int const * JA = (int*)(all_data + offset); cuda_csrmmf<dtype_A,dtype_B,dtype_C,f,g>(m,n,k,A,JA,IA,B,C); } #undef ALIGN #endif template<typename dtype> #ifdef __CUDACC__ __device__ __host__ #endif void default_monoid(dtype a, dtype & b){ b = a+b; } template<typename dtype=double, void(*g)(dtype, dtype&)=default_monoid<dtype> > class Monoid_Kernel : public CTF_int::accumulatable { public: static MPI_Op get_MPI_Op(){ MPI_Op moo; //FIXME: assumes monoid is commutative MPI_Op_create( [](void * a, void * b, int * n, MPI_Datatype*){ for (int i=0; i<*n; i++){ g(((dtype*)a)[i], ((dtype*)b)[i]); } }, 1, &moo); return moo; } Monoid_Kernel(){ this->el_size = sizeof(dtype); } void accum(char const * a, char * b) const { g(((dtype const *)a)[0], ((dtype *)b)[0]); } static void xpy(int n, dtype const * X, int incX, dtype * Y, int incY){ for (int i=0; i<n; i++){ g(X[incX*i],Y[incY*i]); } } /** \brief initialize n objects to zero * \param[in] n number of items * \param[in] arr array containing n items, to be set to zero */ virtual void init_shell(int64_t n, char * arr) const { dtype dummy = dtype(); for (int i=0; i<n; i++){ memcpy(arr+i*el_size,(char*)&dummy,el_size); } } }; template<typename dtype_A, typename dtype_B, typename dtype_C, dtype_C(*f)(dtype_A, dtype_B), void(*g)(dtype_C, dtype_C&)=default_monoid<dtype_C> > class Bivar_Kernel : public Monoid_Kernel<dtype_C, g>, public Bivar_Function<dtype_A, dtype_B, dtype_C> { public: Bivar_Kernel() : Bivar_Function<dtype_A, 
dtype_B, dtype_C>(f) { this->has_kernel = true; #ifdef __CUDACC__ this->has_off_gemm = true; #endif this->el_size = sizeof(dtype_C); } Bivar_Kernel(bool is_comm) : Bivar_Function<dtype_A, dtype_B, dtype_C>(f, is_comm) { this->has_kernel = true; #ifdef __CUDACC__ this->has_off_gemm = true; #endif } static void gemm(char tA, char tB, int m, int n, int k, dtype_A const * A, dtype_B const * B, dtype_C * C){ int lda_A_m = tA == 'N' ? 1 : k; int lda_A_k = tA == 'N' ? m : 1; int lda_B_k = tB == 'N' ? 1 : n; int lda_B_n = tB == 'N' ? k : 1; #ifdef _OPENMP #pragma omp parallel for #endif for (int mi=0; mi<m; mi++){ #ifdef _OPENMP #pragma omp parallel for #endif for (int ni=0; ni<n; ni++){ for (int ki=0; ki<k; ki++){ g(f(A[mi*lda_A_m+ki*lda_A_k], B[ki*lda_B_k+ni*lda_B_n]), C[mi +ni*m]); } } } } static void coomm(int m, int n, int k, dtype_A const * A, int const * rows_A, int const * cols_A, int nnz_A, dtype_B const * B, dtype_C * C){ //TAU_FSTART(default_fcoomm); for (int i=0; i<nnz_A; i++){ int row_A = rows_A[i]-1; int col_A = cols_A[i]-1; for (int col_C=0; col_C<n; col_C++){ g(f(A[i],B[col_C*k+col_A]),C[col_C*m+row_A]); } } //TAU_FSTOP(default_fcoomm); } void ccoomm(int m, int n, int k, char const * A, int const * rows_A, int const * cols_A, int64_t nnz_A, char const * B, char * C) const { coomm(m, n, k, (dtype_A const *)A, rows_A, cols_A, nnz_A, (dtype_B const *)B, (dtype_C *)C); } static void csrmm(int m, int n, int k, dtype_A const * A, int const * JA, int const * IA, int64_t nnz_A, dtype_B const * B, dtype_C * C){ //TAU_FSTART(3type_csrmm); #ifdef _OPENMP #pragma omp parallel for #endif for (int row_A=0; row_A<m; row_A++){ #ifdef _OPENMP #pragma omp parallel for #endif for (int col_B=0; col_B<n; col_B++){ for (int i_A=IA[row_A]-1; i_A<IA[row_A+1]-1; i_A++){ int col_A = JA[i_A]-1; g(f(A[i_A],B[col_B*k+col_A]),C[col_B*m+row_A]); } } } //TAU_FSTOP(3type_csrmm); } void cgemm(char tA, char tB, int m, int n, int k, char const * A, char const * B, char * C) const { gemm(tA, 
tB, m, n, k, (dtype_A const *)A, (dtype_B const *)B, (dtype_C *)C); } // FIXME: below kernels replicate code from src/interface/semiring.h static void csrmultd (int m, int n, int k, dtype_A const * A, int const * JA, int const * IA, int nnz_A, dtype_B const * B, int const * JB, int const * IB, int nnz_B, dtype_C * C){ #ifdef _OPENMP #pragma omp parallel for #endif for (int row_A=0; row_A<m; row_A++){ for (int i_A=IA[row_A]-1; i_A<IA[row_A+1]-1; i_A++){ int row_B = JA[i_A]-1; //=col_A for (int i_B=IB[row_B]-1; i_B<IB[row_B+1]-1; i_B++){ int col_B = JB[i_B]-1; g(f(A[i_A],B[i_B]),C[col_B*m+row_A]); } } } } void csrmultcsr_old (int m, int n, int k, dtype_A const * A, int const * JA, int const * IA, int nnz_A, dtype_B const * B, int const * JB, int const * IB, int nnz_B, char *& C_CSR) const { int * IC = (int*)CTF_int::alloc(sizeof(int)*(m+1)); int * has_col = (int*)CTF_int::alloc(sizeof(int)*n); IC[0] = 1; for (int i=0; i<m; i++){ memset(has_col, 0, sizeof(int)*n); IC[i+1] = IC[i]; CTF_int::CSR_Matrix::compute_has_col(JA, IA, JB, IB, i, has_col); for (int j=0; j<n; j++){ IC[i+1] += has_col[j]; } } CTF_int::CSR_Matrix C(IC[m]-1, m, n, this); dtype_C * vC = (dtype_C*)C.vals(); int * JC = C.JA(); memcpy(C.IA(), IC, sizeof(int)*(m+1)); CTF_int::cdealloc(IC); IC = C.IA(); int64_t * rev_col = (int64_t*)CTF_int::alloc(sizeof(int64_t)*n); for (int i=0; i<m; i++){ memset(has_col, 0, sizeof(int)*n); CTF_int::CSR_Matrix::compute_has_col(JA, IA, JB, IB, i, has_col); int vs = 0; for (int j=0; j<n; j++){ if (has_col[j]){ JC[IC[i]+vs-1] = j+1; rev_col[j] = IC[i]+vs-1; vs++; } } memset(has_col, 0, sizeof(int)*n); for (int j=0; j<IA[i+1]-IA[i]; j++){ int row_B = JA[IA[i]+j-1]-1; int idx_A = IA[i]+j-1; for (int l=0; l<IB[row_B+1]-IB[row_B]; l++){ int idx_B = IB[row_B]+l-1; if (has_col[JB[idx_B]-1]) g(f(A[idx_A],B[idx_B]), vC[rev_col[JB[idx_B]-1]]); else vC[rev_col[JB[idx_B]-1]] = f(A[idx_A],B[idx_B]); has_col[JB[idx_B]-1] = 1; } } } CTF_int::CSR_Matrix C_in(C_CSR); if (C_CSR == NULL || 
C_in.nnz() == 0){ C_CSR = C.all_data; } else { char * ans = CTF_int::CSR_Matrix::csr_add(C_CSR, C.all_data, this); CTF_int::cdealloc(C.all_data); C_CSR = ans; } CTF_int::cdealloc(has_col); CTF_int::cdealloc(rev_col); } void csrmultcsr (int m, int n, int k, dtype_A const * A, // A m by k int const * JA, int const * IA, int nnz_A, dtype_B const * B, // B k by n int const * JB, int const * IB, int nnz_B, char *& C_CSR) const { //int *ic = (int*)Malloc(sizeof(int)*(m+1)); int * IC = (int*)CTF_int::alloc(sizeof(int)*(m+1)); memset(IC, 0, sizeof(int)*(m+1)); #ifdef _OPENMP #pragma omp parallel { #endif int * has_col = (int*)CTF_int::alloc(sizeof(int)*(n+1)); //n is the num of col of B int nnz = 0; #ifdef _OPENMP #pragma omp for schedule(dynamic) // TO DO test other strategies #endif for (int i=0; i<m; i++){ memset(has_col, 0, sizeof(int)*(n+1)); nnz = 0; for (int j=0; j<IA[i+1]-IA[i]; j++){ int row_B = JA[IA[i]+j-1]-1; for (int kk=0; kk<IB[row_B+1]-IB[row_B]; kk++){ int idx_B = IB[row_B]+kk-1; if (has_col[JB[idx_B]] == 0){ nnz++; has_col[JB[idx_B]] = 1; } } IC[i+1]=nnz; } } CTF_int::cdealloc(has_col); #ifdef _OPENMP } // END PARALLEL #endif int ic_prev = 1; for(int i=0;i < m+1; i++){ ic_prev += IC[i]; IC[i] = ic_prev; } CTF_int::CSR_Matrix C(IC[m]-1, m, n, this); dtype_C * vC = (dtype_C*)C.vals(); int * JC = C.JA(); memcpy(C.IA(), IC, sizeof(int)*(m+1)); CTF_int::cdealloc(IC); IC = C.IA(); #ifdef _OPENMP #pragma omp parallel { #endif int ins = 0; int *dcol = (int *) CTF_int::alloc(n*sizeof(int)); dtype_C *acc_data = new dtype_C[n]; #ifdef _OPENMP #pragma omp for #endif for (int i=0; i<m; i++){ memset(dcol, 0, sizeof(int)*(n)); ins = 0; for (int j=0; j<IA[i+1]-IA[i]; j++){ int row_b = JA[IA[i]+j-1]-1; // 1-based int idx_a = IA[i]+j-1; for (int ii = 0; ii < IB[row_b+1]-IB[row_b]; ii++){ int col_b = IB[row_b]+ii-1; int col_c = JB[col_b]-1; // 1-based // dtype_C val = fmul(A[idx_a], B[col_b]); if (dcol[col_c] == 0){ dcol[col_c] = JB[col_b]; acc_data[col_c] 
=f(A[idx_a],B[col_b]); } else { g(f(A[idx_a],B[col_b]), acc_data[col_c]); } } } for(int jj = 0; jj < n; jj++){ if (dcol[jj] != 0){ JC[IC[i]+ins-1] = dcol[jj]; vC[IC[i]+ins-1] = acc_data[jj]; ++ins; } } } CTF_int::cdealloc(dcol); delete [] acc_data; #ifdef _OPENMP } //PRAGMA END #endif CTF_int::CSR_Matrix C_in(C_CSR); if (C_CSR == NULL || C_in.nnz() == 0){ C_CSR = C.all_data; } else { char * ans = CTF_int::CSR_Matrix::csr_add(C_CSR, C.all_data, this); CTF_int::cdealloc(C.all_data); C_CSR = ans; } } void ccsrmultd (int m, int n, int k, char const * A, int const * JA, int const * IA, int nnz_A, char const * B, int const * JB, int const * IB, int nnz_B, char * C, CTF_int::algstrct const * sr_C) const { csrmultd(m,n,k,(dtype_A const *)A,JA,IA,nnz_A,(dtype_B const *)B,JB,IB,nnz_B,(dtype_C *)C); } void ccsrmultcsr (int m, int n, int k, char const * A, int const * JA, int const * IA, int nnz_A, char const * B, int const * JB, int const * IB, int nnz_B, char *& C_CSR, CTF_int::algstrct const * sr_C) const { csrmultcsr(m,n,k,(dtype_A const *)A,JA,IA,nnz_A,(dtype_B const *)B, JB, IB, nnz_B, C_CSR); } void ccsrmm(int m, int n, int k, char const * A, int const * JA, int const * IA, int64_t nnz_A, char const * B, char * C, CTF_int::algstrct const * sr_C) const { csrmm(m,n,k,(dtype_A const *)A,JA,IA,nnz_A,(dtype_B const *)B, (dtype_C *)C); } static void offload_gemm(char tA, char tB, int m, int n, int k, dtype_A const * A, dtype_B const * B, dtype_C * C){ #ifdef __CUDACC__ #ifdef PROFILE_CUGEMM //TAU_FSTART(3type_cugemm); #endif cuda_gemmf<dtype_A,dtype_B,dtype_C,f,g><<<NBLK,NTRD>>>(tA, tB, m, n, k, A, B, C); #ifdef PROFILE_CUGEMM cudaDeviceSynchronize(); //TAU_FSTOP(3type_cugemm); #endif #else assert(0); #endif } void coffload_gemm(char tA, char tB, int m, int n, int k, char const * A, char const * B, char * C) const { offload_gemm(tA, tB, m, n, k, (dtype_A const *)A, (dtype_B const *)B, (dtype_C*)C); } /* void coffload_csrmm(int m, int n, int k, char const * A, int const * JA, 
int const * IA, int64_t nnz_A, char const * B, char * C) const { offload_csrmm(m, n, k, (dtype_A const *)A, JA, IA, nnz_A, (dtype_B const *)B, (dtype_C*)C); }*/ void coffload_csrmm(int m, int n, int k, char const * all_data, char const * B, char * C) const { #ifdef __CUDACC__ #ifdef PROFILE_CUGEMM //TAU_FSTART(3type_cucsrmm); #endif offload_csrmm<dtype_A,dtype_B,dtype_C,f,g><<<NBLK,NTRD>>>(m, n, k, all_data, (dtype_B const *)B, (dtype_C *)C); #ifdef PROFILE_CUGEMM cudaDeviceSynchronize(); //TAU_FSTOP(3type_cucsrmm); #endif #else assert(0); #endif // offload_csrmm(m, n, k, (dtype_A const *)A, JA, IA, nnz_A, (dtype_B const *)B, (dtype_C*)C); } /* static void axpy(int n, dtype_C alpha, dtype_C const * X, int incX, dtype_C * Y int incY){ for (int i=0; i<n; i++){ g(f(alpha,X[incX*i]),Y[incY*i]); } }*/ }; } #endif
error.h
#pragma once #include <vector> #include <unordered_map> #include <algorithm> #include <cmath> #include "_cuda.h" #include "ceilDiv.h" #include "sum.h" using std::vector; using std::unordered_map; using std::abs; using std::max; // ABS-ERROR // --------- template <class T> auto absError(T *x, T *y, int N) { T a = T(); for (int i=0; i<N; i++) a += abs(x[i] - y[i]); return a; } template <class T> auto absError(vector<T>& x, vector<T>& y) { return absError(x.data(), y.data(), x.size()); } template <class K, class T> auto absError(unordered_map<K, T>& x, unordered_map<K, T>& y) { T a = T(); for (auto& p : x) a += abs(p.second - y[p.first]); return a; } // ABS-ERROR-ABS // ------------- template <class T> auto absErrorAbs(T *x, T *y, int N) { T a = T(); for (int i=0; i<N; i++) a += abs(abs(x[i]) - abs(y[i])); return a; } template <class T> auto absErrorAbs(vector<T>& x, vector<T>& y) { return absErrorAbs(x.data(), y.data(), x.size()); } template <class K, class T> auto absErrorAbs(unordered_map<K, T>& x, unordered_map<K, T>& y) { T a = T(); for (auto& p : x) a += abs(abs(p.second) - abs(y[p.first])); return a; } // ABS-ERROR (OMP) // --------------- template <class T> auto absErrorOmp(T *x, T *y, int N) { T a = T(); #pragma omp parallel for reduction (+:a) for (int i=0; i<N; i++) a += abs(x[i] - y[i]); return a; } template <class T> auto absErrorOmp(vector<T>& x, vector<T>& y) { return errorAbsOmp(x.data(), y.data(), x.size()); } // ABS-ERROR (CUDA) // ---------------- template <class T> __device__ T absErrorKernelLoop(T *x, T *y, int N, int i, int DI) { T a = T(); for (; i<N; i+=DI) a += abs(x[i] - y[i]); return a; } template <class T> __global__ void absErrorKernel(T *a, T *x, T *y, int N) { DEFINE(t, b, B, G); __shared__ T cache[BLOCK_DIM]; cache[t] = absErrorKernelLoop(x, y, N, B*b+t, G*B); sumKernelReduce(cache, B, t); if (t == 0) a[b] = cache[0]; } template <class T> auto absErrorCuda(T *x, T *y, int N) { int B = BLOCK_DIM; int G = min(ceilDiv(N, B), GRID_DIM); 
size_t N1 = N * sizeof(T); size_t G1 = G * sizeof(T); T a[GRID_DIM]; T *xD, *yD, *aD; TRY( cudaMalloc(&xD, N1) ); TRY( cudaMalloc(&yD, N1) ); TRY( cudaMalloc(&aD, G1) ); TRY( cudaMemcpy(xD, x, N1, cudaMemcpyHostToDevice) ); TRY( cudaMemcpy(yD, y, N1, cudaMemcpyHostToDevice) ); absErrorKernel<<<G, B>>>(aD, xD, yD, N); TRY( cudaMemcpy(a, aD, G1, cudaMemcpyDeviceToHost) ); TRY( cudaFree(yD) ); TRY( cudaFree(xD) ); TRY( cudaFree(aD) ); return sum(a, G); } template <class T> auto absErrorCuda(vector<T>& x, vector<T>& y) { return absErrorCuda(x.data(), y.data(), x.size()); } // ABS-ERROR-ABS (CUDA) // -------------------- template <class T> __device__ T absErrorAbsKernelLoop(T *x, T *y, int N, int i, int DI) { T a = T(); for (; i<N; i+=DI) a += abs(abs(x[i]) - abs(y[i])); return a; } template <class T> __global__ void absErrorAbsKernel(T *a, T *x, T *y, int N) { DEFINE(t, b, B, G); __shared__ T cache[BLOCK_DIM]; cache[t] = absErrorAbsKernelLoop(x, y, N, B*b+t, G*B); sumKernelReduce(cache, B, t); if (t == 0) a[b] = cache[0]; } template <class T> auto absErrorAbsCuda(T *x, T *y, int N) { int B = BLOCK_DIM; int G = min(ceilDiv(N, B), GRID_DIM); size_t N1 = N * sizeof(T); size_t G1 = G * sizeof(T); T a[GRID_DIM]; T *xD, *yD, *aD; TRY( cudaMalloc(&xD, N1) ); TRY( cudaMalloc(&yD, N1) ); TRY( cudaMalloc(&aD, G1) ); TRY( cudaMemcpy(xD, x, N1, cudaMemcpyHostToDevice) ); TRY( cudaMemcpy(yD, y, N1, cudaMemcpyHostToDevice) ); absErrorAbsKernel<<<G, B>>>(aD, xD, yD, N); TRY( cudaMemcpy(a, aD, G1, cudaMemcpyDeviceToHost) ); TRY( cudaFree(yD) ); TRY( cudaFree(xD) ); TRY( cudaFree(aD) ); return sum(a, G); } template <class T> auto absErrorAbsCuda(vector<T>& x, vector<T>& y) { return absErrorAbsCuda(x.data(), y.data(), x.size()); }
GB_Matrix_wait.c
//------------------------------------------------------------------------------ // GB_Matrix_wait: finish all pending computations on a single matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // CALLS: GB_builder // This function is typically called via the GB_MATRIX_WAIT(A) macro, except // for GB_assign, GB_subassign, and GB_mxm. // The matrix A has zombies and/or pending tuples placed there by // GrB_setElement, GrB_*assign, or GB_mxm. Zombies must now be deleted, and // pending tuples must now be assembled together and added into the matrix. // The indices in A might also be jumbled; if so, they are sorted now. // When the function returns, and all pending tuples and zombies have been // deleted. This is true even the function fails due to lack of memory (in // that case, the matrix is cleared as well). // If A is hypersparse, the time taken is at most O(nnz(A) + t log t), where t // is the number of pending tuples in A, and nnz(A) includes both zombies and // live entries. There is no O(m) or O(n) time component, if A is m-by-n. // If the number of non-empty vectors of A grows too large, then A can be // converted to non-hypersparse. // If A is non-hypersparse, then O(n) is added in the worst case, to prune // zombies and to update the vector pointers for A. // If the method is successful, it does an OpenMP flush just before returning. 
#include "GB_select.h"
#include "GB_add.h"
#include "GB_Pending.h"
#include "GB_build.h"
#include "GB_jappend.h"

// Frees every workspace matrix and clears A itself; used on error paths so
// the caller never sees a half-updated matrix.
#define GB_FREE_ALL                     \
{                                       \
    GB_phbix_free (A) ;                 \
    GB_Matrix_free (&T) ;               \
    GB_Matrix_free (&S) ;               \
    GB_Matrix_free (&A1) ;              \
}

GB_PUBLIC                           // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_Matrix_wait             // finish all pending computations
(
    GrB_Matrix A,                   // matrix with pending computations
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Matrix T = NULL, S = NULL, A1 = NULL ;
    GrB_Info info = GrB_SUCCESS ;
    ASSERT_MATRIX_OK (A, "A to wait", GB_FLIP (GB0)) ;

    if (GB_IS_FULL (A) || GB_IS_BITMAP (A))
    {
        // full and bitmap matrices never have any pending work
        ASSERT (!GB_ZOMBIES (A)) ;
        ASSERT (!GB_JUMBLED (A)) ;
        ASSERT (!GB_PENDING (A)) ;
        // ensure the matrix is written to memory
        #pragma omp flush
        return (GrB_SUCCESS) ;
    }

    // only sparse and hypersparse matrices can have pending work
    ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ;
    ASSERT (GB_ZOMBIES_OK (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (GB_PENDING_OK (A)) ;

    //--------------------------------------------------------------------------
    // get the zombie and pending count, and burble if work needs to be done
    //--------------------------------------------------------------------------

    int64_t nzombies = A->nzombies ;
    int64_t npending = GB_Pending_n (A) ;
    if (nzombies > 0 || npending > 0 || A->jumbled)
    {
        GB_BURBLE_MATRIX (A, "(wait: " GBd " %s, " GBd " pending%s) ",
            nzombies, (nzombies == 1) ? "zombie" : "zombies",
            npending, A->jumbled ? ", jumbled" : "") ;
    }

    //--------------------------------------------------------------------------
    // determine the max # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // assemble the pending tuples into T
    //--------------------------------------------------------------------------

    int64_t tnz = 0 ;
    if (npending > 0)
    {

        //----------------------------------------------------------------------
        // construct a new hypersparse matrix T with just the pending tuples
        //----------------------------------------------------------------------

        // T has the same type as A->type, which can differ from the type of
        // the pending tuples, A->Pending->type.  The Pending->op can be NULL
        // (an implicit SECOND function), or it can be any accum operator.  The
        // z=accum(x,y) operator can have any types, and it does not have to be
        // associative.

        info = GB_builder
        (
            &T,                     // create T
            A->type,                // T->type = A->type
            A->vlen,                // T->vlen = A->vlen
            A->vdim,                // T->vdim = A->vdim
            A->is_csc,              // T->is_csc = A->is_csc
            &(A->Pending->i),       // iwork_handle, becomes T->i on output
            &(A->Pending->j),       // jwork_handle, free on output
            &(A->Pending->x),       // Swork_handle, free on output
            A->Pending->sorted,     // tuples may or may not be sorted
            false,                  // there might be duplicates; look for them
            A->Pending->nmax,       // size of Pending->[ijx] arrays
            true,                   // is_matrix: unused
            NULL, NULL, NULL,       // original I,J,S tuples, not used here
            npending,               // # of tuples
            A->Pending->op,         // dup operator for assembling duplicates
            A->Pending->type->code, // type of Pending->x
            Context
        ) ;

        //----------------------------------------------------------------------
        // free pending tuples
        //----------------------------------------------------------------------

        // The tuples have been converted to T, which is more compact, and
        // duplicates have been removed.  The following work needs to be done
        // even if the builder fails.

        // GB_builder frees A->Pending->j and A->Pending->x.  If successful,
        // A->Pending->i is now T->i.  Otherwise A->Pending->i is freed.  In
        // both cases, A->Pending->i is NULL.
        ASSERT (A->Pending->i == NULL) ;
        ASSERT (A->Pending->j == NULL) ;
        ASSERT (A->Pending->x == NULL) ;

        // free the list of pending tuples
        GB_Pending_free (&(A->Pending)) ;
        ASSERT (!GB_PENDING (A)) ;

        ASSERT_MATRIX_OK (A, "A after moving pending tuples to T", GB0) ;

        //----------------------------------------------------------------------
        // check the status of the builder
        //----------------------------------------------------------------------

        // Finally check the status of the builder.  The pending tuples, must
        // be freed (just above), whether or not the builder is successful.
        if (info != GrB_SUCCESS)
        {
            // out of memory in GB_builder
            GB_FREE_ALL ;
            return (info) ;
        }

        ASSERT_MATRIX_OK (T, "T = hypersparse matrix of pending tuples", GB0) ;
        ASSERT (GB_IS_HYPERSPARSE (T)) ;
        ASSERT (!GB_ZOMBIES (T)) ;
        ASSERT (!GB_JUMBLED (T)) ;
        ASSERT (!GB_PENDING (T)) ;

        tnz = GB_NNZ (T) ;
        ASSERT (tnz > 0) ;
    }

    //--------------------------------------------------------------------------
    // delete zombies
    //--------------------------------------------------------------------------

    // A zombie is an entry A(i,j) in the matrix that as been marked for
    // deletion, but hasn't been deleted yet.  It is marked by "negating"
    // replacing its index i with GB_FLIP(i).

    // TODO: pass tnz to GB_selector, to pad the reallocated A matrix

    if (nzombies > 0)
    {
        // remove all zombies from A
        #ifdef GB_DEBUG
        int64_t anz_orig = GB_NNZ (A) ;
        #endif
        GB_OK (GB_selector (NULL /* A in-place */, GB_NONZOMBIE_opcode, NULL,
            false, A, 0, NULL, Context)) ;
        ASSERT (A->nzombies == (anz_orig - GB_NNZ (A))) ;
        A->nzombies = 0 ;
    }

    ASSERT_MATRIX_OK (A, "A after zombies removed", GB0) ;

    // all the zombies are gone, and pending tuples are now in T
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_PENDING (A)) ;

    //--------------------------------------------------------------------------
    // unjumble the matrix
    //--------------------------------------------------------------------------

    GB_OK (GB_unjumble (A, Context)) ;

    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_JUMBLED (A)) ;
    ASSERT (!GB_PENDING (A)) ;

    //--------------------------------------------------------------------------
    // check for pending tuples
    //--------------------------------------------------------------------------

    if (npending == 0)
    {
        // conform A to its desired sparsity structure and return result
        info = GB_conform (A, Context) ;
        #pragma omp flush
        return (info) ;
    }

    //--------------------------------------------------------------------------
    // check for quick transplant
    //--------------------------------------------------------------------------

    int64_t anz = GB_NNZ (A) ;
    if (anz == 0)
    {
        // A has no entries so just transplant T into A, then free T and
        // conform A to its desired hypersparsity.
        info = GB_transplant_conform (A, A->type, &T, Context) ;
        #pragma omp flush
        return (info) ;
    }

    //--------------------------------------------------------------------------
    // determine the method for A = A+T
    //--------------------------------------------------------------------------

    // If anz > 0, T is hypersparse, even if A is a GrB_Vector

    ASSERT (GB_IS_HYPERSPARSE (T)) ;
    ASSERT (tnz > 0) ;
    ASSERT (T->nvec > 0) ;
    ASSERT (A->nvec > 0) ;

    // tjfirst = first vector in T
    int64_t tjfirst = T->h [0] ;
    int64_t anz0 = 0 ;
    int64_t kA = 0 ;
    int64_t jlast ;

    int64_t *GB_RESTRICT Ap = A->p ;
    int64_t *GB_RESTRICT Ah = A->h ;
    int64_t *GB_RESTRICT Ai = A->i ;
    GB_void *GB_RESTRICT Ax = (GB_void *) A->x ;

    int64_t anvec = A->nvec ;
    int64_t asize = A->type->size ;

    // anz0 = nnz (A0) = nnz (A (:, 0:tjfirst-1)), the region not modified by T
    if (A->h != NULL)
    {
        // find tjfirst in A->h
        int64_t pright = anvec - 1 ;
        bool found ;
        GB_SPLIT_BINARY_SEARCH (tjfirst, A->h, kA, pright, found) ;
        // A->h [0 ... kA-1] excludes vector tjfirst.  The list
        // A->h [kA ... anvec-1] includes tjfirst.
        ASSERT (kA >= 0 && kA <= anvec) ;
        ASSERT (GB_IMPLIES (kA > 0 && kA < anvec, A->h [kA-1] < tjfirst)) ;
        ASSERT (GB_IMPLIES (found, A->h [kA] == tjfirst)) ;
        jlast = (kA > 0) ? A->h [kA-1] : (-1) ;
    }
    else
    {
        kA = tjfirst ;
        jlast = tjfirst - 1 ;
    }

    // anz1 = nnz (A1) = nnz (A (:, kA:end)), the region modified by T
    anz0 = A->p [kA] ;
    int64_t anz1 = anz - anz0 ;
    bool ignore ;

    // A + T will have anz_new entries
    int64_t anz_new = anz + tnz ;       // must have at least this space

    if (2 * anz1 < anz0)
    {

        //----------------------------------------------------------------------
        // append new tuples to A
        //----------------------------------------------------------------------

        // A is growing incrementally.  It splits into two parts: A = [A0 A1].
        // where A0 = A (:, 0:kA-1) and A1 = A (:, kA:end).  The
        // first part (A0 with anz0 = nnz (A0) entries) is not modified.  The
        // second part (A1, with anz1 = nnz (A1) entries) overlaps with T.
        // If anz1 is zero, or small compared to anz0, then it is faster to
        // leave A0 unmodified, and to update just A1.

        // TODO: if A also had zombies, GB_selector could pad A so that
        // A->nzmax = anz + tnz.

        // make sure A has enough space for the new tuples
        if (anz_new > A->nzmax)
        {
            // double the size if not enough space
            GB_OK (GB_ix_resize (A, anz_new, Context)) ;
            Ai = A->i ;
            Ax = (GB_void *) A->x ;
        }

        //----------------------------------------------------------------------
        // T = A1 + T
        //----------------------------------------------------------------------

        if (anz1 > 0)
        {

            //------------------------------------------------------------------
            // extract A1 = A (:, kA:end) as a shallow copy
            //------------------------------------------------------------------

            // A1 = [0, A (:, kA:end)], hypersparse with same dimensions as A
            GB_OK (GB_new (&A1, // hyper, new header
                A->type, A->vlen, A->vdim, GB_Ap_malloc, A->is_csc,
                GxB_HYPERSPARSE, GB_ALWAYS_HYPER, anvec - kA, Context)) ;

            // the A1->i and A1->x content are shallow copies of A(:,kA:end)
            A1->x = (void *) (Ax + asize * anz0) ;
            A1->i = Ai + anz0 ;
            A1->x_shallow = true ;
            A1->i_shallow = true ;
            A1->nzmax = anz1 ;

            // fill the column A1->h and A1->p with A->h and A->p, shifted
            int64_t *GB_RESTRICT A1p = A1->p ;
            int64_t *GB_RESTRICT A1h = A1->h ;
            int64_t a1nvec = 0 ;
            for (int64_t k = kA ; k < anvec ; k++)
            {
                // get A (:,k)
                int64_t pA_start = Ap [k] ;
                int64_t pA_end = Ap [k+1] ;
                if (pA_end > pA_start)
                {
                    // add this column to A1 if A (:,k) is not empty
                    int64_t j = GBH (Ah, k) ;
                    A1p [a1nvec] = pA_start - anz0 ;
                    A1h [a1nvec] = j ;
                    a1nvec++ ;
                }
            }

            // finalize A1
            A1p [a1nvec] = anz1 ;
            A1->nvec = a1nvec ;
            A1->nvec_nonempty = a1nvec ;
            A1->magic = GB_MAGIC ;

            ASSERT_MATRIX_OK (A1, "A1 slice for GB_Matrix_wait", GB0) ;

            //------------------------------------------------------------------
            // S = A1 + T, with no operator or mask
            //------------------------------------------------------------------

            GB_OK (GB_add (&S, A->type, A->is_csc, NULL, 0, 0, &ignore,
                A1, T, NULL, Context)) ;

            ASSERT_MATRIX_OK (S, "S = A1+T", GB0) ;

            // free A1 and T
            GB_Matrix_free (&T) ;
            GB_Matrix_free (&A1) ;

            //------------------------------------------------------------------
            // replace T with S
            //------------------------------------------------------------------

            T = S ;
            S = NULL ;
            tnz = GB_NNZ (T) ;

            //------------------------------------------------------------------
            // remove A1 from the vectors of A, if A is hypersparse
            //------------------------------------------------------------------

            if (A->h != NULL)
            {
                A->nvec = kA ;
            }
        }

        //----------------------------------------------------------------------
        // append T to the end of A0
        //----------------------------------------------------------------------

        const int64_t *GB_RESTRICT Tp = T->p ;
        const int64_t *GB_RESTRICT Th = T->h ;
        const int64_t *GB_RESTRICT Ti = T->i ;
        const GB_void *GB_RESTRICT Tx = (GB_void *) T->x ;
        int64_t tnvec = T->nvec ;

        anz = anz0 ;
        int64_t anz_last = anz ;

        int nthreads = GB_nthreads (tnz, chunk, nthreads_max) ;

        // append the indices and values of T to the end of A
        GB_memcpy (Ai + anz        , Ti, tnz * sizeof (int64_t), nthreads) ;
        GB_memcpy (Ax + anz * asize, Tx, tnz * asize           , nthreads) ;

        // append the vectors of T to the end of A
        for (int64_t k = 0 ; k < tnvec ; k++)
        {
            int64_t j = Th [k] ;
            ASSERT (j >= tjfirst) ;
            anz += (Tp [k+1] - Tp [k]) ;
            GB_OK (GB_jappend (A, j, &jlast, anz, &anz_last, Context)) ;
        }

        GB_jwrapup (A, jlast, anz) ;
        ASSERT (anz == anz_new) ;

        // need to recompute the # of non-empty vectors in GB_conform
        A->nvec_nonempty = -1 ;     // recomputed just below

        ASSERT_MATRIX_OK (A, "A after GB_Matrix_wait:append", GB0) ;

        GB_Matrix_free (&T) ;

        // conform A to its desired sparsity structure
        info = GB_conform (A, Context) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // A = A+T
        //----------------------------------------------------------------------

        // The update is not incremental since most of A is changing.  Just do
        // a single parallel add: S=A+T, free T, and then transplant S back
        // into A.  The nzmax of A is tight, with no room for future
        // incremental growth.

        // FUTURE:: if GB_add could tolerate zombies in A, then the initial
        // prune of zombies can be skipped.

        GB_OK (GB_add (&S, A->type, A->is_csc, NULL, 0, 0, &ignore, A, T,
            NULL, Context)) ;
        GB_Matrix_free (&T) ;
        ASSERT_MATRIX_OK (S, "S after GB_Matrix_wait:add", GB0) ;
        info = GB_transplant_conform (A, A->type, &S, Context) ;
    }

    //--------------------------------------------------------------------------
    // flush the matrix and return result
    //--------------------------------------------------------------------------

    #pragma omp flush
    return (info) ;
}
GB_unop__bnot_uint32_uint32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__bnot_uint32_uint32)
// op(A') function:  GB (_unop_tran__bnot_uint32_uint32)

// C type:   uint32_t
// A type:   uint32_t
// cast:     uint32_t cij = aij
// unaryop:  cij = ~(aij)

// type of the entries of the A matrix
#define GB_ATYPE \
    uint32_t

// type of the entries of the C matrix
#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator: bitwise complement
#define GB_OP(z, x) \
    z = ~(x) ;

// casting (uint32 -> uint32 is a plain copy)
#define GB_CAST(z, aij) \
    uint32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint32_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = aij ;              \
    Cx [pC] = ~(z) ;                \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BNOT || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = ~(Ax [p]) over anz entries, in parallel with OpenMP.
// Each output depends only on the same input position, so Cx and Ax may
// safely alias each other.
GrB_Info GB (_unop_apply__bnot_uint32_uint32)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of positions to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: every position p in [0,anz) holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            uint32_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            uint32_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is textually included from GB_unop_transpose.c; that template
// is specialized by the GB_* macros defined above (GB_ATYPE, GB_CAST_OP, ...).
GrB_Info GB (_unop_tran__bnot_uint32_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
a.35.5.c
/* { dg-do compile } */ void wrong5 (int n) { #pragma omp parallel { #pragma omp critical { work (n, 0); /* incorrect nesting of barrier region in a critical region */ #pragma omp barrier work (n, 1); } } }
boxloop_cuda.h
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Header info for the BoxLoop
 *
 *****************************************************************************/

/*--------------------------------------------------------------------------
 * BoxLoop macros:
 *
 * The BoxLoopNBegin/BoxLoopNEnd macro pairs below are intentionally
 * unbalanced: each Begin opens braces and a lambda that the matching End
 * closes, with the user's loop body written between the two at the call
 * site.  Edit them only in matching pairs.
 *--------------------------------------------------------------------------*/

#ifndef HYPRE_NEWBOXLOOP_HEADER
#define HYPRE_NEWBOXLOOP_HEADER

#include <cuda.h>
#include <cuda_runtime.h>

#ifdef HYPRE_USING_OPENMP
#include <omp.h>
#endif

/* Loop bodies are captured-by-value lambdas callable on host and device,
   so the same body can run under OpenMP (CPU) or CUDA (GPU). */
#define HYPRE_LAMBDA [=] __host__ __device__

/* CUDA thread-block size used by every kernel launch in this header */
#define BLOCKSIZE 512

/* Per-box loop metadata for up to three dimensions: local loop extents,
   strides, start offsets, and box sizes (unused dims get neutral values). */
typedef struct hypre_Boxloop_struct
{
   HYPRE_Int lsize0,lsize1,lsize2;
   HYPRE_Int strides0,strides1,strides2;
   HYPRE_Int bstart0,bstart1,bstart2;
   HYPRE_Int bsize0,bsize1,bsize2;
} hypre_Boxloop;

#if 1
/* fence disabled: the macro expands to nothing */
#define hypre_fence() /*printf("\n hypre_newBoxLoop in %s(%d) function %s\n",__FILE__,__LINE__,__FUNCTION__);*/
#else
/* debug fence: report any pending CUDA error, then synchronize the device */
#define hypre_fence() \
{ \
   cudaError err = cudaGetLastError(); \
   if ( cudaSuccess != err ) \
   { \
      printf("\n ERROR hypre_newBoxLoop: %s in %s(%d) function %s\n",cudaGetErrorString(err),__FILE__,__LINE__,__FUNCTION__); \
      /* HYPRE_Int *p = NULL; *p = 1; */ \
   } \
   hypre_CheckErrorDevice(cudaDeviceSynchronize()); \
}
#endif

/* #define hypre_reduce_policy  cuda_reduce<BLOCKSIZE> */

extern "C++" {

/* One-thread-per-index kernel: thread 'idx' runs loop_body(idx) when idx
   is inside [0, length). */
template <typename LOOP_BODY>
__global__ void
forall_kernel( LOOP_BODY loop_body,
               HYPRE_Int length )
{
   HYPRE_Int idx = blockDim.x * blockIdx.x + threadIdx.x;

   if (idx < length)
   {
      loop_body(idx);
   }
}

/* Dispatch a flat loop of 'length' iterations either to the host (OpenMP
   when enabled) or to the device (CUDA), selected by 'policy'. */
template<typename LOOP_BODY>
void
BoxLoopforall( HYPRE_Int policy,
               HYPRE_Int length,
               LOOP_BODY loop_body )
{
   if (policy == HYPRE_MEMORY_HOST)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for (HYPRE_Int idx = 0; idx < length; idx++)
      {
         loop_body(idx);
      }
   }
   else if (policy == HYPRE_MEMORY_DEVICE)
   {
      /* one thread per index, rounded up to whole blocks */
      HYPRE_Int gridSize = (length + BLOCKSIZE - 1) / BLOCKSIZE;

      const dim3 gDim(gridSize), bDim(BLOCKSIZE);

      HYPRE_CUDA_LAUNCH( forall_kernel, gDim, bDim, loop_body, length );
   }
   else if (policy == 2)
   {
      /* NOTE(review): policy value 2 is deliberately a no-op here; its
         meaning is not visible in this header -- confirm at call sites. */
   }
}

/* Reduction kernel wrapper: each thread receives its global id and the
   total number of launched threads, and strides over the range inside
   ReductionLoop. */
template <typename LOOP_BODY>
__global__ void
reductionforall_kernel( LOOP_BODY ReductionLoop,
                        HYPRE_Int length )
{
   ReductionLoop(blockDim.x*blockIdx.x+threadIdx.x, blockDim.x*gridDim.x, length);
}

/* Launch a reduction loop on the device; the host path is currently empty. */
template<typename LOOP_BODY>
void
ReductionBoxLoopforall( HYPRE_Int policy,
                        HYPRE_Int length,
                        LOOP_BODY ReductionLoop )
{
   if (length <= 0)
   {
      return;
   }

   if (policy == HYPRE_MEMORY_HOST)
   {
      /* NOTE(review): host reduction path intentionally empty in this file */
   }
   else if (policy == HYPRE_MEMORY_DEVICE)
   {
      /* grid is capped at 1024 blocks; excess work is covered by the
         grid-sized stride inside ReductionLoop */
      HYPRE_Int gridSize = (length + BLOCKSIZE - 1) / BLOCKSIZE;
      gridSize = hypre_min(gridSize, 1024);
      /* hypre_printf("length= %d, blocksize = %d, gridsize = %d\n", length, BLOCKSIZE, gridSize); */

      const dim3 gDim(gridSize), bDim(BLOCKSIZE);

      HYPRE_CUDA_LAUNCH( reductionforall_kernel, gDim, bDim, ReductionLoop, length );
   }
}

}

/* Compute the linear data offset hypre__i for box 'k' from local_idx
   (declared by hypre_newBoxLoopDeclare), accumulating the box pitch in
   hypre_boxD'k'. */
#define hypre_BoxLoopIncK(k,box,hypre__i)                                               \
   HYPRE_Int hypre_boxD##k = 1;                                                         \
   HYPRE_Int hypre__i = 0;                                                              \
   hypre__i += (hypre_IndexD(local_idx, 0)*box.strides0 + box.bstart0) * hypre_boxD##k; \
   hypre_boxD##k *= hypre_max(0, box.bsize0 + 1);                                       \
   hypre__i += (hypre_IndexD(local_idx, 1)*box.strides1 + box.bstart1) * hypre_boxD##k; \
   hypre_boxD##k *= hypre_max(0, box.bsize1 + 1);                                       \
   hypre__i += (hypre_IndexD(local_idx, 2)*box.strides2 + box.bstart2) * hypre_boxD##k; \
   hypre_boxD##k *= hypre_max(0, box.bsize2 + 1);

/* hypre__tot = total number of loop iterations (product of loop_size[]) */
#define hypre_newBoxLoopInit(ndim,loop_size)             \
   HYPRE_Int hypre__tot = 1;                             \
   for (HYPRE_Int hypre_d = 0;hypre_d < ndim;hypre_d ++) \
      hypre__tot *= loop_size[hypre_d];

/* same computation, used by the "basic" (box-free) loop variants */
#define hypre_BasicBoxLoopInit(ndim,loop_size)           \
   HYPRE_Int hypre__tot = 1;                             \
   for (HYPRE_Int hypre_d = 0;hypre_d < ndim;hypre_d ++) \
      hypre__tot *= loop_size[hypre_d];                  \

/* Recover the 3-D local index from the flat iteration index 'idx' */
#define hypre_newBoxLoopDeclare(box)                     \
   hypre_Index local_idx;                                \
   HYPRE_Int idx_local = idx;                            \
   hypre_IndexD(local_idx, 0)  = idx_local % box.lsize0; \
   idx_local = idx_local / box.lsize0;                   \
   hypre_IndexD(local_idx, 1)  = idx_local % box.lsize1; \
   idx_local = idx_local / box.lsize1;                   \
   hypre_IndexD(local_idx, 2)  = idx_local % box.lsize2; \

#define hypre_newBoxLoop0Begin(ndim, loop_size)                            \
{                                                                          \
   hypre_newBoxLoopInit(ndim,loop_size);                                   \
   BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
   {

#define hypre_newBoxLoop0End() \
   });                         \
   hypre_fence();              \
}

/* Fill databox'k' from a hypre box (dbox), loop start and stride; dims
   beyond ndim get neutral values so the index math still works. */
#define hypre_BoxLoopDataDeclareK(k,ndim,loop_size,dbox,start,stride) \
   hypre_Boxloop databox##k;                                          \
   databox##k.lsize0   = loop_size[0];                                \
   databox##k.strides0 = stride[0];                                   \
   databox##k.bstart0  = start[0] - dbox->imin[0];                    \
   databox##k.bsize0   = dbox->imax[0]-dbox->imin[0];                 \
   if (ndim > 1)                                                      \
   {                                                                  \
      databox##k.lsize1   = loop_size[1];                             \
      databox##k.strides1 = stride[1];                                \
      databox##k.bstart1  = start[1] - dbox->imin[1];                 \
      databox##k.bsize1   = dbox->imax[1]-dbox->imin[1];              \
   }                                                                  \
   else                                                               \
   {                                                                  \
      databox##k.lsize1   = 1;                                        \
      databox##k.strides1 = 0;                                        \
      databox##k.bstart1  = 0;                                        \
      databox##k.bsize1   = 0;                                        \
   }                                                                  \
   if (ndim == 3)                                                     \
   {                                                                  \
      databox##k.lsize2   = loop_size[2];                             \
      databox##k.strides2 = stride[2];                                \
      databox##k.bstart2  = start[2] - dbox->imin[2];                 \
      databox##k.bsize2   = dbox->imax[2]-dbox->imin[2];              \
   }                                                                  \
   else                                                               \
   {                                                                  \
      databox##k.lsize2   = 1;                                        \
      databox##k.strides2 = 0;                                        \
      databox##k.bstart2  = 0;                                        \
      databox##k.bsize2   = 0;                                        \
   }

#define hypre_newBoxLoop1Begin(ndim, loop_size,                            \
                               dbox1, start1, stride1, i1)                 \
{                                                                          \
   hypre_newBoxLoopInit(ndim,loop_size);                                   \
   hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1);       \
   BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
   {                                                                       \
      hypre_newBoxLoopDeclare(databox1);                                   \
      hypre_BoxLoopIncK(1,databox1,i1);

#define hypre_newBoxLoop1End(i1) \
   });                           \
   hypre_fence();                \
}

#define hypre_newBoxLoop2Begin(ndim, loop_size,                            \
                               dbox1, start1, stride1, i1,                 \
                               dbox2, start2, stride2, i2)                 \
{                                                                          \
   hypre_newBoxLoopInit(ndim,loop_size);                                   \
   hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1);       \
   hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2);       \
   BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
   {                                                                       \
      hypre_newBoxLoopDeclare(databox1);                                   \
      hypre_BoxLoopIncK(1,databox1,i1);                                    \
      hypre_BoxLoopIncK(2,databox2,i2);

#define hypre_newBoxLoop2End(i1, i2) \
   });                               \
   hypre_fence();                    \
}

#define hypre_newBoxLoop3Begin(ndim, loop_size,                            \
                               dbox1, start1, stride1, i1,                 \
                               dbox2, start2, stride2, i2,                 \
                               dbox3, start3, stride3, i3)                 \
{                                                                          \
   hypre_newBoxLoopInit(ndim,loop_size);                                   \
   hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1);       \
   hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2);       \
   hypre_BoxLoopDataDeclareK(3,ndim,loop_size,dbox3,start3,stride3);       \
   BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
   {                                                                       \
      hypre_newBoxLoopDeclare(databox1);                                   \
      hypre_BoxLoopIncK(1,databox1,i1);                                    \
      hypre_BoxLoopIncK(2,databox2,i2);                                    \
      hypre_BoxLoopIncK(3,databox3,i3);

#define hypre_newBoxLoop3End(i1, i2,i3) \
   });                                  \
   hypre_fence();                       \
}

#define hypre_newBoxLoop4Begin(ndim, loop_size,                            \
                               dbox1, start1, stride1, i1,                 \
                               dbox2, start2, stride2, i2,                 \
                               dbox3, start3, stride3, i3,                 \
                               dbox4, start4, stride4, i4)                 \
{                                                                          \
   hypre_newBoxLoopInit(ndim,loop_size);                                   \
   hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1);       \
   hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2);       \
   hypre_BoxLoopDataDeclareK(3,ndim,loop_size,dbox3,start3,stride3);       \
   hypre_BoxLoopDataDeclareK(4,ndim,loop_size,dbox4,start4,stride4);       \
   BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
   {                                                                       \
      hypre_newBoxLoopDeclare(databox1);                                   \
      hypre_BoxLoopIncK(1,databox1,i1);                                    \
      hypre_BoxLoopIncK(2,databox2,i2);                                    \
      hypre_BoxLoopIncK(3,databox3,i3);                                    \
      hypre_BoxLoopIncK(4,databox4,i4);

#define hypre_newBoxLoop4End(i1, i2, i3, i4) \
   });                                       \
   hypre_fence();                            \
}

/* Basic (box-free) variant: no dbox/start, offsets start at zero */
#define zypre_BasicBoxLoopDataDeclareK(k,ndim,loop_size,stride) \
   hypre_Boxloop databox##k;                                    \
   databox##k.lsize0   = loop_size[0];                          \
   databox##k.strides0 = stride[0];                             \
   databox##k.bstart0  = 0;                                     \
   databox##k.bsize0   = 0;                                     \
   if (ndim > 1)                                                \
   {                                                            \
      databox##k.lsize1   = loop_size[1];                       \
      databox##k.strides1 = stride[1];                          \
      databox##k.bstart1  = 0;                                  \
      databox##k.bsize1   = 0;                                  \
   }                                                            \
   else                                                         \
   {                                                            \
      databox##k.lsize1   = 1;                                  \
      databox##k.strides1 = 0;                                  \
      databox##k.bstart1  = 0;                                  \
      databox##k.bsize1   = 0;                                  \
   }                                                            \
   if (ndim == 3)                                               \
   {                                                            \
      databox##k.lsize2   = loop_size[2];                       \
      databox##k.strides2 = stride[2];                          \
      databox##k.bstart2  = 0;                                  \
      databox##k.bsize2   = 0;                                  \
   }                                                            \
   else                                                         \
   {                                                            \
      databox##k.lsize2   = 1;                                  \
      databox##k.strides2 = 0;                                  \
      databox##k.bstart2  = 0;                                  \
      databox##k.bsize2   = 0;                                  \
   }

#define zypre_newBasicBoxLoop1Begin(ndim, loop_size,                       \
                                    stride1, i1)                           \
{                                                                          \
   hypre_BasicBoxLoopInit(ndim,loop_size);                                 \
   zypre_BasicBoxLoopDataDeclareK(1,ndim,loop_size,stride1);               \
   BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
   {                                                                       \
      hypre_newBoxLoopDeclare(databox1);                                   \
      hypre_BoxLoopIncK(1,databox1,i1);                                    \

#define zypre_newBasicBoxLoop2Begin(ndim, loop_size,                       \
                                    stride1, i1,                           \
                                    stride2, i2)                           \
{                                                                          \
   hypre_BasicBoxLoopInit(ndim,loop_size);                                 \
   zypre_BasicBoxLoopDataDeclareK(1,ndim,loop_size,stride1);               \
   zypre_BasicBoxLoopDataDeclareK(2,ndim,loop_size,stride2);               \
   BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
   {                                                                       \
      hypre_newBoxLoopDeclare(databox1);                                   \
      hypre_BoxLoopIncK(1,databox1,i1);                                    \
      hypre_BoxLoopIncK(2,databox2,i2);                                    \

#define hypre_LoopBegin(size,idx)                                     \
{                                                                     \
   BoxLoopforall(hypre_exec_policy,size,HYPRE_LAMBDA (HYPRE_Int idx)  \
   {

#define hypre_LoopEnd() \
   });                  \
   hypre_fence();       \
}

#define hypre_newBoxLoopGetIndex(index)   \
   index[0] = hypre_IndexD(local_idx, 0); \
   index[1] = hypre_IndexD(local_idx, 1); \
   index[2] = hypre_IndexD(local_idx, 2);

/* NOTE(review): hypre_BoxLoopGetIndex maps to zypre_BoxLoopGetIndex (defined
   elsewhere), not to hypre_newBoxLoopGetIndex above -- confirm intended.
   The hypre_BoxLoopNFor aliases refer to hypre_newBoxLoopNFor, which is not
   defined in this header -- presumably provided elsewhere or unused. */
#define hypre_BoxLoopGetIndex    zypre_BoxLoopGetIndex
#define hypre_BoxLoopSetOneBlock() ;
#define hypre_BoxLoopBlock()       0
#define hypre_BoxLoop0Begin      hypre_newBoxLoop0Begin
#define hypre_BoxLoop0For        hypre_newBoxLoop0For
#define hypre_BoxLoop0End        hypre_newBoxLoop0End
#define hypre_BoxLoop1Begin      hypre_newBoxLoop1Begin
#define hypre_BoxLoop1For        hypre_newBoxLoop1For
#define hypre_BoxLoop1End        hypre_newBoxLoop1End
#define hypre_BoxLoop2Begin      hypre_newBoxLoop2Begin
#define hypre_BoxLoop2For        hypre_newBoxLoop2For
#define hypre_BoxLoop2End        hypre_newBoxLoop2End
#define hypre_BoxLoop3Begin      hypre_newBoxLoop3Begin
#define hypre_BoxLoop3For        hypre_newBoxLoop3For
#define hypre_BoxLoop3End        hypre_newBoxLoop3End
#define hypre_BoxLoop4Begin      hypre_newBoxLoop4Begin
#define hypre_BoxLoop4For        hypre_newBoxLoop4For
#define hypre_BoxLoop4End        hypre_newBoxLoop4End

#define hypre_BasicBoxLoop1Begin zypre_newBasicBoxLoop1Begin
#define hypre_BasicBoxLoop2Begin zypre_newBasicBoxLoop2Begin

/* Reduction BoxLoop1*/
#define hypre_BoxLoop1ReductionBegin(ndim, loop_size,                         \
                                     dbox1, start1, stride1, i1,              \
                                     reducesum)                               \
{                                                                             \
   hypre_newBoxLoopInit(ndim,loop_size);                                      \
   hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1);          \
   reducesum.nblocks = hypre_min( (hypre__tot+BLOCKSIZE-1)/BLOCKSIZE, 1024 ); \
   ReductionBoxLoopforall(hypre_exec_policy, hypre__tot,                      \
                          HYPRE_LAMBDA (HYPRE_Int tid, HYPRE_Int nthreads,    \
                                        HYPRE_Int len)                        \
   {                                                                          \
      for (HYPRE_Int idx = tid;                                               \
                     idx < len;                                               \
                     idx += nthreads)                                         \
      {                                                                       \
         hypre_newBoxLoopDeclare(databox1);                                   \
         hypre_BoxLoopIncK(1,databox1,i1);

#define hypre_BoxLoop1ReductionEnd(i1, reducesum) \
      }                                           \
      reducesum.BlockReduce();                    \
   });                                            \
   hypre_fence();                                 \
}

/* Reduction BoxLoop2 */
#define hypre_BoxLoop2ReductionBegin(ndim, loop_size,                         \
                                     dbox1, start1, stride1, i1,              \
                                     dbox2, start2, stride2, i2,              \
                                     reducesum)                               \
{                                                                             \
   hypre_newBoxLoopInit(ndim,loop_size);                                      \
   hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1);          \
   hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2);          \
   reducesum.nblocks = hypre_min( (hypre__tot+BLOCKSIZE-1)/BLOCKSIZE, 1024 ); \
   ReductionBoxLoopforall(hypre_exec_policy, hypre__tot,                      \
                          HYPRE_LAMBDA (HYPRE_Int tid, HYPRE_Int nthreads,    \
                                        HYPRE_Int len)                        \
   {                                                                          \
      for (HYPRE_Int idx = tid;                                               \
                     idx < len;                                               \
                     idx += nthreads)                                         \
      {                                                                       \
         hypre_newBoxLoopDeclare(databox1);                                   \
         hypre_BoxLoopIncK(1,databox1,i1);                                    \
         hypre_BoxLoopIncK(2,databox2,i2);

#define hypre_BoxLoop2ReductionEnd(i1, i2, reducesum) \
      }                                               \
      reducesum.BlockReduce();                        \
   });                                                \
   hypre_fence();                                     \
}

#endif
clean.h
/**************************************************************************** * VCGLib o o * * Visual and Computer Graphics Library o o * * _ O _ * * Copyright(C) 2004 \/)\/ * * Visual Computing Lab /\/| * * ISTI - Italian National Research Council | * * \ * * All rights reserved. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License (http://www.gnu.org/licenses/gpl.txt) * * for more details. * * * ****************************************************************************/ #ifndef __VCGLIB_CLEAN #define __VCGLIB_CLEAN // VCG headers #include <vcg/complex/complex.h> #include <vcg/simplex/face/pos.h> #include <vcg/simplex/face/topology.h> #include <vcg/simplex/edge/topology.h> #include <vcg/complex/algorithms/closest.h> #include <vcg/space/index/grid_static_ptr.h> #include <vcg/space/index/spatial_hashing.h> #include <vcg/complex/algorithms/update/selection.h> #include <vcg/complex/algorithms/update/flag.h> #include <vcg/complex/algorithms/update/normal.h> #include <vcg/complex/algorithms/update/topology.h> #include <vcg/space/triangle3.h> namespace vcg { namespace tri{ template <class ConnectedMeshType> class ConnectedComponentIterator { public: typedef ConnectedMeshType MeshType; typedef typename MeshType::VertexType VertexType; typedef typename MeshType::VertexPointer VertexPointer; typedef typename MeshType::VertexIterator VertexIterator; typedef typename MeshType::ScalarType ScalarType; typedef typename MeshType::FaceType FaceType; typedef typename MeshType::FacePointer FacePointer; typedef typename MeshType::FaceIterator 
FaceIterator; typedef typename MeshType::ConstFaceIterator ConstFaceIterator; typedef typename MeshType::FaceContainer FaceContainer; public: void operator ++() { FacePointer fpt=sf.top(); sf.pop(); for(int j=0;j<3;++j) if( !face::IsBorder(*fpt,j) ) { FacePointer l=fpt->FFp(j); if( !tri::IsMarked(*mp,l) ) { tri::Mark(*mp,l); sf.push(l); } } } void start(MeshType &m, FacePointer p) { tri::RequirePerFaceMark(m); mp=&m; while(!sf.empty()) sf.pop(); UnMarkAll(m); assert(p); assert(!p->IsD()); tri::Mark(m,p); sf.push(p); } bool completed() { return sf.empty(); } FacePointer operator *() { return sf.top(); } private: std::stack<FacePointer> sf; MeshType *mp; }; /// /** \addtogroup trimesh */ /*@{*/ /// Class of static functions to clean//restore meshs. template <class CleanMeshType> class Clean { public: typedef CleanMeshType MeshType; typedef typename MeshType::VertexType VertexType; typedef typename MeshType::VertexPointer VertexPointer; typedef typename MeshType::VertexIterator VertexIterator; typedef typename MeshType::ConstVertexIterator ConstVertexIterator; typedef typename MeshType::EdgeIterator EdgeIterator; typedef typename MeshType::EdgePointer EdgePointer; typedef typename MeshType::CoordType CoordType; typedef typename MeshType::ScalarType ScalarType; typedef typename MeshType::FaceType FaceType; typedef typename MeshType::FacePointer FacePointer; typedef typename MeshType::FaceIterator FaceIterator; typedef typename MeshType::ConstFaceIterator ConstFaceIterator; typedef typename MeshType::FaceContainer FaceContainer; typedef typename vcg::Box3<ScalarType> Box3Type; typedef GridStaticPtr<FaceType, ScalarType > TriMeshGrid; /* classe di confronto per l'algoritmo di eliminazione vertici duplicati*/ class RemoveDuplicateVert_Compare{ public: inline bool operator()(VertexPointer const &a, VertexPointer const &b) { return ((*a).cP() == (*b).cP()) ? 
(a<b): ((*a).cP() < (*b).cP()); } }; /** This function removes all duplicate vertices of the mesh by looking only at their spatial positions. * Note that it does not update any topology relation that could be affected by this like the VT or TT relation. * the reason this function is usually performed BEFORE building any topology information. */ static int RemoveDuplicateVertex( MeshType & m, bool RemoveDegenerateFlag=true) // V1.0 { if(m.vert.size()==0 || m.vn==0) return 0; std::map<VertexPointer, VertexPointer> mp; size_t i,j; VertexIterator vi; int deleted=0; int k=0; size_t num_vert = m.vert.size(); std::vector<VertexPointer> perm(num_vert); for(vi=m.vert.begin(); vi!=m.vert.end(); ++vi, ++k) perm[k] = &(*vi); RemoveDuplicateVert_Compare c_obj; std::sort(perm.begin(),perm.end(),c_obj); j = 0; i = j; mp[perm[i]] = perm[j]; ++i; for(;i!=num_vert;) { if( (! (*perm[i]).IsD()) && (! (*perm[j]).IsD()) && (*perm[i]).P() == (*perm[j]).cP() ) { VertexPointer t = perm[i]; mp[perm[i]] = perm[j]; ++i; Allocator<MeshType>::DeleteVertex(m,*t); deleted++; } else { j = i; ++i; } } for(FaceIterator fi = m.face.begin(); fi!=m.face.end(); ++fi) if( !(*fi).IsD() ) for(k = 0; k < (*fi).VN(); ++k) if( mp.find( (typename MeshType::VertexPointer)(*fi).V(k) ) != mp.end() ) { (*fi).V(k) = &*mp[ (*fi).V(k) ]; } for(EdgeIterator ei = m.edge.begin(); ei!=m.edge.end(); ++ei) if( !(*ei).IsD() ) for(k = 0; k < 2; ++k) if( mp.find( (typename MeshType::VertexPointer)(*ei).V(k) ) != mp.end() ) { (*ei).V(k) = &*mp[ (*ei).V(k) ]; } if(RemoveDegenerateFlag) RemoveDegenerateFace(m); if(RemoveDegenerateFlag && m.en>0) { RemoveDegenerateEdge(m); RemoveDuplicateEdge(m); } return deleted; } class SortedPair { public: SortedPair() {} SortedPair(unsigned int v0, unsigned int v1, EdgePointer _fp) { v[0]=v0;v[1]=v1; fp=_fp; if(v[0]>v[1]) std::swap(v[0],v[1]); } bool operator < (const SortedPair &p) const { return (v[1]!=p.v[1])?(v[1]<p.v[1]): (v[0]<p.v[0]); } bool operator == (const SortedPair &s) const { 
if( (v[0]==s.v[0]) && (v[1]==s.v[1]) ) return true; return false; } unsigned int v[2]; EdgePointer fp; }; class SortedTriple { public: SortedTriple() {} SortedTriple(unsigned int v0, unsigned int v1, unsigned int v2,FacePointer _fp) { v[0]=v0;v[1]=v1;v[2]=v2; fp=_fp; std::sort(v,v+3); } bool operator < (const SortedTriple &p) const { return (v[2]!=p.v[2])?(v[2]<p.v[2]): (v[1]!=p.v[1])?(v[1]<p.v[1]): (v[0]<p.v[0]); } bool operator == (const SortedTriple &s) const { if( (v[0]==s.v[0]) && (v[1]==s.v[1]) && (v[2]==s.v[2]) ) return true; return false; } unsigned int v[3]; FacePointer fp; }; /** This function removes all duplicate faces of the mesh by looking only at their vertex reference. So it should be called after unification of vertices. Note that it does not update any topology relation that could be affected by this like the VT or TT relation. the reason this function is usually performed BEFORE building any topology information. */ static int RemoveDuplicateFace( MeshType & m) // V1.0 { std::vector<SortedTriple> fvec; for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi) if(!(*fi).IsD()) { fvec.push_back(SortedTriple( tri::Index(m,(*fi).V(0)), tri::Index(m,(*fi).V(1)), tri::Index(m,(*fi).V(2)), &*fi)); } assert (size_t(m.fn) == fvec.size()); std::sort(fvec.begin(),fvec.end()); int total=0; for(int i=0;i<int(fvec.size())-1;++i) { if(fvec[i]==fvec[i+1]) { total++; tri::Allocator<MeshType>::DeleteFace(m, *(fvec[i].fp) ); } } return total; } /** This function removes all duplicate faces of the mesh by looking only at their vertex reference. So it should be called after unification of vertices. Note that it does not update any topology relation that could be affected by this like the VT or TT relation. the reason this function is usually performed BEFORE building any topology information. 
*/ static int RemoveDuplicateEdge( MeshType & m) // V1.0 { if (m.en==0) return 0; std::vector<SortedPair> eVec; for(EdgeIterator ei=m.edge.begin();ei!=m.edge.end();++ei) if(!(*ei).IsD()) { eVec.push_back(SortedPair( tri::Index(m,(*ei).V(0)), tri::Index(m,(*ei).V(1)), &*ei)); } assert (size_t(m.en) == eVec.size()); //for(int i=0;i<fvec.size();++i) qDebug("fvec[%i] = (%i %i %i)(%i)",i,fvec[i].v[0],fvec[i].v[1],fvec[i].v[2],tri::Index(m,fvec[i].fp)); std::sort(eVec.begin(),eVec.end()); int total=0; for(int i=0;i<int(eVec.size())-1;++i) { if(eVec[i]==eVec[i+1]) { total++; tri::Allocator<MeshType>::DeleteEdge(m, *(eVec[i].fp) ); //qDebug("deleting face %i (pos in fvec %i)",tri::Index(m,fvec[i].fp) ,i); } } return total; } static int CountUnreferencedVertex( MeshType& m) { return RemoveUnreferencedVertex(m,false); } /** This function removes that are not referenced by any face. The function updates the vn counter. @param m The mesh @return The number of removed vertices */ static int RemoveUnreferencedVertex( MeshType& m, bool DeleteVertexFlag=true) // V1.0 { FaceIterator fi; EdgeIterator ei; VertexIterator vi; int referredBit = VertexType::NewBitFlag(); int j; int deleted = 0; for(vi=m.vert.begin();vi!=m.vert.end();++vi) (*vi).ClearUserBit(referredBit); for(fi=m.face.begin();fi!=m.face.end();++fi) if( !(*fi).IsD() ) for(j=0;j<(*fi).VN();++j) (*fi).V(j)->SetUserBit(referredBit); for(ei=m.edge.begin();ei!=m.edge.end();++ei) if( !(*ei).IsD() ){ (*ei).V(0)->SetUserBit(referredBit); (*ei).V(1)->SetUserBit(referredBit); } for(vi=m.vert.begin();vi!=m.vert.end();++vi) if( (!(*vi).IsD()) && (!(*vi).IsUserBit(referredBit))) { if(DeleteVertexFlag) Allocator<MeshType>::DeleteVertex(m,*vi); ++deleted; } VertexType::DeleteBitFlag(referredBit); return deleted; } /** Degenerate vertices are vertices that have coords with invalid floating point values, All the faces incident on deleted vertices are also deleted */ static int RemoveDegenerateVertex(MeshType& m) { VertexIterator vi; int 
count_vd = 0; for(vi=m.vert.begin(); vi!=m.vert.end();++vi) if(math::IsNAN( (*vi).P()[0]) || math::IsNAN( (*vi).P()[1]) || math::IsNAN( (*vi).P()[2]) ) { count_vd++; Allocator<MeshType>::DeleteVertex(m,*vi); } FaceIterator fi; int count_fd = 0; for(fi=m.face.begin(); fi!=m.face.end();++fi) if(!(*fi).IsD()) if( (*fi).V(0)->IsD() || (*fi).V(1)->IsD() || (*fi).V(2)->IsD() ) { count_fd++; Allocator<MeshType>::DeleteFace(m,*fi); } return count_vd; } /** Degenerate faces are faces that are Topologically degenerate, i.e. have two or more vertex reference that link the same vertex (and not only two vertexes with the same coordinates). All Degenerate faces are zero area faces BUT not all zero area faces are degenerate. We do not take care of topology because when we have degenerate faces the topology calculation functions crash. */ static int RemoveDegenerateFace(MeshType& m) { int count_fd = 0; for(FaceIterator fi=m.face.begin(); fi!=m.face.end();++fi) if(!(*fi).IsD()) { if((*fi).V(0) == (*fi).V(1) || (*fi).V(0) == (*fi).V(2) || (*fi).V(1) == (*fi).V(2) ) { count_fd++; Allocator<MeshType>::DeleteFace(m,*fi); } } return count_fd; } static int RemoveDegenerateEdge(MeshType& m) { int count_ed = 0; for(EdgeIterator ei=m.edge.begin(); ei!=m.edge.end();++ei) if(!(*ei).IsD()) { if((*ei).V(0) == (*ei).V(1) ) { count_ed++; Allocator<MeshType>::DeleteEdge(m,*ei); } } return count_ed; } static int RemoveNonManifoldVertex(MeshType& m) { CountNonManifoldVertexFF(m,true); tri::UpdateSelection<MeshType>::FaceFromVertexLoose(m); int count_removed = 0; FaceIterator fi; for(fi=m.face.begin(); fi!=m.face.end();++fi) if(!(*fi).IsD() && (*fi).IsS()) Allocator<MeshType>::DeleteFace(m,*fi); VertexIterator vi; for(vi=m.vert.begin(); vi!=m.vert.end();++vi) if(!(*vi).IsD() && (*vi).IsS()) { ++count_removed; Allocator<MeshType>::DeleteVertex(m,*vi); } return count_removed; } static int SplitSelectedVertexOnEdgeMesh(MeshType& m) { tri::RequireCompactness(m); 
tri::UpdateFlags<MeshType>::VertexClearV(m); int count_split = 0; for(size_t i=0;i<m.edge.size();++i) { for(int j=0;j<2;++j) { VertexPointer vp = m.edge[i].V(j); if(vp->IsS()) { if(!vp->IsV()) { m.edge[i].V(j) = &*(tri::Allocator<MeshType>::AddVertex(m,vp->P())); ++count_split; } else { vp->SetV(); } } } } return count_split; } static void SelectNonManifoldVertexOnEdgeMesh(MeshType &m) { tri::RequireCompactness(m); tri::UpdateSelection<MeshType>::VertexClear(m); std::vector<int> cnt(m.vn,0); for(size_t i=0;i<m.edge.size();++i) { cnt[tri::Index(m,m.edge[i].V(0))]++; cnt[tri::Index(m,m.edge[i].V(1))]++; } for(size_t i=0;i<m.vert.size();++i) if(cnt[i]>2) m.vert[i].SetS(); } static void SelectCreaseVertexOnEdgeMesh(MeshType &m, ScalarType AngleRadThr) { tri::RequireCompactness(m); tri::RequireVEAdjacency(m); tri::UpdateTopology<MeshType>::VertexEdge(m); for(size_t i=0;i<m.vert.size();++i) { std::vector<VertexPointer> VVStarVec; edge::VVStarVE(&(m.vert[i]),VVStarVec); if(VVStarVec.size()==2) { CoordType v0 = m.vert[i].P() - VVStarVec[0]->P(); CoordType v1 = m.vert[i].P() - VVStarVec[1]->P(); float angle = M_PI-vcg::Angle(v0,v1); if(angle > AngleRadThr) m.vert[i].SetS(); } } } /// Removal of faces that were incident on a non manifold edge. // Given a mesh with FF adjacency // it search for non manifold vertices and duplicate them. // Duplicated vertices are moved apart according to the move threshold param. // that is a percentage of the average vector from the non manifold vertex to the barycenter of the incident faces. 
static int SplitNonManifoldVertex(MeshType& m, ScalarType moveThreshold) { RequireFFAdjacency(m); typedef std::pair<FacePointer,int> FaceInt; // a face and the index of the vertex that we have to change // std::vector<std::pair<VertexPointer, std::vector<FaceInt> > >ToSplitVec; SelectionStack<MeshType> ss(m); ss.push(); CountNonManifoldVertexFF(m,true); UpdateFlags<MeshType>::VertexClearV(m); for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) { for(int i=0;i<3;i++) if((*fi).V(i)->IsS() && !(*fi).V(i)->IsV()) { (*fi).V(i)->SetV(); face::Pos<FaceType> startPos(&*fi,i); face::Pos<FaceType> curPos = startPos; std::set<FaceInt> faceSet; do { faceSet.insert(std::make_pair(curPos.F(),curPos.VInd())); curPos.NextE(); } while (curPos != startPos); ToSplitVec.push_back(make_pair((*fi).V(i),std::vector<FaceInt>())); typename std::set<FaceInt>::const_iterator iii; for(iii=faceSet.begin();iii!=faceSet.end();++iii) ToSplitVec.back().second.push_back(*iii); } } ss.pop(); // Second step actually add new vertices and split them. typename tri::Allocator<MeshType>::template PointerUpdater<VertexPointer> pu; VertexIterator firstVp = tri::Allocator<MeshType>::AddVertices(m,ToSplitVec.size(),pu); for(size_t i =0;i<ToSplitVec.size();++i) { // qDebug("Splitting Vertex %i",ToSplitVec[i].first-&*m.vert.begin()); VertexPointer np=ToSplitVec[i].first; pu.Update(np); firstVp->ImportData(*np); // loop on the face to be changed, and also compute the movement vector; CoordType delta(0,0,0); for(size_t j=0;j<ToSplitVec[i].second.size();++j) { FaceInt ff=ToSplitVec[i].second[j]; ff.first->V(ff.second)=&*firstVp; delta+=Barycenter(*(ff.first))-np->cP(); } delta /= ToSplitVec[i].second.size(); firstVp->P() = firstVp->P() + delta * moveThreshold; firstVp++; } return ToSplitVec.size(); } // Auxiliary function for sorting the non manifold faces according to their area. 
Used in RemoveNonManifoldFace struct CompareAreaFP { bool operator ()(FacePointer const& f1, FacePointer const& f2) const { return DoubleArea(*f1) < DoubleArea(*f2); } }; /// Removal of faces that were incident on a non manifold edge. static int RemoveNonManifoldFace(MeshType& m) { FaceIterator fi; int count_fd = 0; std::vector<FacePointer> ToDelVec; for(fi=m.face.begin(); fi!=m.face.end();++fi) if (!fi->IsD()) { if ((!IsManifold(*fi,0))|| (!IsManifold(*fi,1))|| (!IsManifold(*fi,2))) ToDelVec.push_back(&*fi); } std::sort(ToDelVec.begin(),ToDelVec.end(),CompareAreaFP()); for(size_t i=0;i<ToDelVec.size();++i) { if(!ToDelVec[i]->IsD()) { FaceType &ff= *ToDelVec[i]; if ((!IsManifold(ff,0))|| (!IsManifold(ff,1))|| (!IsManifold(ff,2))) { for(int j=0;j<3;++j) if(!face::IsBorder<FaceType>(ff,j)) vcg::face::FFDetach<FaceType>(ff,j); Allocator<MeshType>::DeleteFace(m,ff); count_fd++; } } } return count_fd; } /* The following functions remove faces that are geometrically "bad" according to edges and area criteria. They remove the faces that are out of a given range of area or edges (e.g. faces too large or too small, or with edges too short or too long) but that could be topologically correct. These functions can optionally take into account only the selected faces. */ template<bool Selected> static int RemoveFaceOutOfRangeAreaSel(MeshType& m, ScalarType MinAreaThr=0, ScalarType MaxAreaThr=(std::numeric_limits<ScalarType>::max)()) { FaceIterator fi; int count_fd = 0; MinAreaThr*=2; MaxAreaThr*=2; for(fi=m.face.begin(); fi!=m.face.end();++fi) if(!(*fi).IsD()) if(!Selected || (*fi).IsS()) { const ScalarType doubleArea=DoubleArea<FaceType>(*fi); if((doubleArea<=MinAreaThr) || (doubleArea>=MaxAreaThr) ) { Allocator<MeshType>::DeleteFace(m,*fi); count_fd++; } } return count_fd; } // alias for the old style. 
Kept for backward compatibility static int RemoveZeroAreaFace(MeshType& m) { return RemoveFaceOutOfRangeArea(m);} // Aliases for the functions that do not look at selection static int RemoveFaceOutOfRangeArea(MeshType& m, ScalarType MinAreaThr=0, ScalarType MaxAreaThr=(std::numeric_limits<ScalarType>::max)()) { return RemoveFaceOutOfRangeAreaSel<false>(m,MinAreaThr,MaxAreaThr); } /** * Is the mesh only composed by quadrilaterals? */ static bool IsBitQuadOnly(const MeshType &m) { typedef typename MeshType::FaceType F; tri::RequirePerFaceFlags(m); for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) { unsigned int tmp = fi->Flags()&(F::FAUX0|F::FAUX1|F::FAUX2); if ( tmp != F::FAUX0 && tmp != F::FAUX1 && tmp != F::FAUX2) return false; } return true; } static bool IsFaceFauxConsistent(MeshType &m) { RequirePerFaceFlags(m); RequireFFAdjacency(m); for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi) if(!(*fi).IsD()) { for(int z=0;z<(*fi).VN();++z) { FacePointer fp = fi->FFp(z); int zp = fi->FFi(z); if(fi->IsF(z) != fp->IsF(zp)) return false; } } return true; } /** * Is the mesh only composed by triangles? (non polygonal faces) */ static bool IsBitTriOnly(const MeshType &m) { tri::RequirePerFaceFlags(m); for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) { if ( !fi->IsD() && fi->IsAnyF() ) return false; } return true; } static bool IsBitPolygonal(const MeshType &m){ return !IsBitTriOnly(m); } /** * Is the mesh only composed by quadrilaterals and triangles? (no pentas, etc) * It assumes that the bits are consistent. In that case there can be only a single faux edge. 
 */
static bool IsBitTriQuadOnly(const MeshType &m)
{
  tri::RequirePerFaceFlags(m);
  typedef typename MeshType::FaceType F;
  for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
  {
    unsigned int tmp = fi->cFlags()&(F::FAUX0|F::FAUX1|F::FAUX2);
    // zero faux edges -> triangle; exactly one -> half of a quad; anything else fails
    if ( tmp!=F::FAUX0 && tmp!=F::FAUX1 && tmp!=F::FAUX2 && tmp!=0 ) return false;
  }
  return true;
}

/**
 * How many quadrilaterals?
 * It assumes that the bits are consistent. In that case we count the tris with a single faux edge and divide by two.
 */
static int CountBitQuads(const MeshType &m)
{
  tri::RequirePerFaceFlags(m);
  typedef typename MeshType::FaceType F;
  int count=0;
  for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
  {
    unsigned int tmp = fi->cFlags()&(F::FAUX0|F::FAUX1|F::FAUX2);
    if ( tmp==F::FAUX0 || tmp==F::FAUX1 || tmp==F::FAUX2) count++;
  }
  // each quad is made of two triangles with one faux edge each
  return count / 2;
}

/**
 * How many triangles? (non polygonal faces)
 */
static int CountBitTris(const MeshType &m)
{
  tri::RequirePerFaceFlags(m);
  int count=0;
  for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
  {
    if (!(fi->IsAnyF())) count++;
  }
  return count;
}

/**
 * How many polygons of any kind? (including triangles)
 * it assumes that there are no faux vertexes (e.g vertices completely surrounded by faux edges)
 */
static int CountBitPolygons(const MeshType &m)
{
  tri::RequirePerFaceFlags(m);
  int count = 0;
  for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
  {
    if (fi->IsF(0)) count++;
    if (fi->IsF(1)) count++;
    if (fi->IsF(2)) count++;
  }
  // each faux edge has been counted twice, once per incident triangle
  return m.fn - count/2;
}

/**
 * The number of polygonal faces is
 * FN - EN_f (each faux edge hides exactly one triangular face or in other words a polygon of n edges has n-3 faux edges.)
 * In the general case where a The number of polygonal faces is
 * FN - EN_f + VN_f
 * where:
 * EN_f is the number of faux edges.
 * VN_f is the number of faux vertices (e.g vertices completely surrounded by faux edges)
 * as a intuitive proof think to a internal vertex that is collapsed onto a border of a polygon:
 * it deletes 2 faces, 1 faux edges and 1 vertex so to keep the balance you have to add back the removed vertex.
 */
static int CountBitLargePolygons(MeshType &m)
{
  tri::RequirePerFaceFlags(m);
  UpdateFlags<MeshType>::VertexSetV(m);
  // First loop Clear all referenced vertices
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if (!fi->IsD())
      for(int i=0;i<3;++i) fi->V(i)->ClearV();

  // Second Loop, count (twice) faux edges and mark all vertices touched by non faux edges
  // (e.g vertexes on the boundary of a polygon)
  int countE = 0;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if (!fi->IsD())
    {
      for(int i=0;i<3;++i)
      {
        if (fi->IsF(i))
          countE++;
        else
        {
          fi->V0(i)->SetV();
          fi->V1(i)->SetV();
        }
      }
    }
  // Third Loop, count the number of referenced vertexes that are completely surrounded by faux edges.
  int countV = 0;
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
    if (!vi->IsD() && !vi->IsV()) countV++;

  return m.fn - countE/2 + countV ;
}

/**
 * Checks that the mesh has consistent per-face faux edges
 * (the ones that merges triangles into larger polygons).
 * A border edge should never be faux, and faux edges should always be
 * reciprocated by another faux edges.
 * It requires FF adjacency.
 */
static bool HasConsistentPerFaceFauxFlag(const MeshType &m)
{
  RequireFFAdjacency(m);
  RequirePerFaceFlags(m);

  for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if(!(*fi).IsD())
      for (int k=0; k<3; k++)
        if( ( fi->IsF(k) != fi->cFFp(k)->IsF(fi->cFFi(k)) ) ||
            ( fi->IsF(k) && face::IsBorder(*fi,k)) )
        {
          return false;
        }
  return true;
}

/**
 * Count the number of non manifold edges in a polylinemesh, e.g. the edges where there are more than 2 incident faces.
* */ static int CountNonManifoldEdgeEE( MeshType & m, bool SelectFlag=false) { assert(m.fn == 0 && m.en >0); // just to be sure we are using an edge mesh... RequireEEAdjacency(m); tri::UpdateTopology<MeshType>::EdgeEdge(m); if(SelectFlag) UpdateSelection<MeshType>::VertexClear(m); int nonManifoldCnt=0; SimpleTempData<typename MeshType::VertContainer, int > TD(m.vert,0); // First Loop, just count how many faces are incident on a vertex and store it in the TemporaryData Counter. EdgeIterator ei; for (ei = m.edge.begin(); ei != m.edge.end(); ++ei) if (!ei->IsD()) { TD[(*ei).V(0)]++; TD[(*ei).V(1)]++; } tri::UpdateFlags<MeshType>::VertexClearV(m); // Second Loop, Check that each vertex have been seen 1 or 2 times. for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi) if (!vi->IsD()) { if( TD[vi] >2 ) { if(SelectFlag) (*vi).SetS(); nonManifoldCnt++; } } return nonManifoldCnt; } /** * Count the number of non manifold edges in a mesh, e.g. the edges where there are more than 2 incident faces. * * Note that this test is not enough to say that a mesh is two manifold, * you have to count also the non manifold vertexes. 
 */
static int CountNonManifoldEdgeFF( MeshType & m, bool SelectFlag=false)
{
  RequireFFAdjacency(m);
  // three temporary per-face user bits, one per edge index, used to visit each
  // non manifold edge ring only once.
  // NOTE(review): these bits are never released with DeleteBitFlag here —
  // verify against FaceType::NewBitFlag's contract whether this leaks bits.
  int nmfBit[3];
  nmfBit[0]= FaceType::NewBitFlag();
  nmfBit[1]= FaceType::NewBitFlag();
  nmfBit[2]= FaceType::NewBitFlag();

  UpdateFlags<MeshType>::FaceClear(m,nmfBit[0]+nmfBit[1]+nmfBit[2]);

  if(SelectFlag){
    UpdateSelection<MeshType>::VertexClear(m);
    UpdateSelection<MeshType>::FaceClear(m);
  }

  int edgeCnt = 0;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if (!fi->IsD())
    {
      for(int i=0;i<3;++i)
        if(!IsManifold(*fi,i))
        {
          // count the edge only if no face of its ring has been visited yet
          if(!(*fi).IsUserBit(nmfBit[i]))
          {
            ++edgeCnt;
            if(SelectFlag)
            {
              (*fi).V0(i)->SetS();
              (*fi).V1(i)->SetS();
            }
            // follow the ring of faces incident on edge i;
            face::Pos<FaceType> nmf(&*fi,i);
            do
            {
              if(SelectFlag) nmf.F()->SetS();
              nmf.F()->SetUserBit(nmfBit[nmf.E()]);
              nmf.NextF();
            }
            while(nmf.f != &*fi);
          }
        }
    }
  }
  return edgeCnt;
}

/** Count (and eventually select) non 2-Manifold vertexes of a mesh
 * e.g. the vertices with a non 2-manif. neighbourhood but that do not belong to not 2-manif edges.
 * typical situation two cones connected by one vertex.
 */
static int CountNonManifoldVertexFF( MeshType & m, bool selectVert = true )
{
  RequireFFAdjacency(m);
  if(selectVert) UpdateSelection<MeshType>::VertexClear(m);

  int nonManifoldCnt=0;
  SimpleTempData<typename MeshType::VertContainer, int > TD(m.vert,0);

  // First Loop, just count how many faces are incident on a vertex and store it in the TemporaryData Counter.
  FaceIterator fi;
  for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
  {
    TD[(*fi).V(0)]++;
    TD[(*fi).V(1)]++;
    TD[(*fi).V(2)]++;
  }

  tri::UpdateFlags<MeshType>::VertexClearV(m);
  // Second Loop.
  // mark out of the game the vertexes that are incident on non manifold edges.
  for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
  {
    for(int i=0;i<3;++i)
      if (!IsManifold(*fi,i))  {
        (*fi).V0(i)->SetV();
        (*fi).V1(i)->SetV();
      }
  }
  // Third Loop, for safe vertexes, check that the number of faces that you can reach starting
  // from it and using FF is the same of the previously counted.
  for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
  {
    for(int i=0;i<3;i++) if(!(*fi).V(i)->IsV()){
      (*fi).V(i)->SetV();
      face::Pos<FaceType> pos(&(*fi),i);
      int starSizeFF = pos.NumberOfIncidentFaces();
      if (starSizeFF != TD[(*fi).V(i)])
      {
        // the FF-reachable star is smaller than the incident-face count:
        // the neighbourhood is made of more than one fan -> non manifold vertex
        if(selectVert) (*fi).V(i)->SetS();
        nonManifoldCnt++;
      }
    }
  }
  return nonManifoldCnt;
}

/// Very simple test of water tightness. No boundary and no non manifold edges.
/// Assume that it is orientable.
/// It could be debated if a closed non orientable surface is watertight or not.
///
/// The rationale of not testing orientability here is that
/// it requires FFAdj while this test do not require any adjacency.
///
static bool IsWaterTight(MeshType & m)
{
  int edgeNum=0,edgeBorderNum=0,edgeNonManifNum=0;
  CountEdgeNum(m, edgeNum, edgeBorderNum,edgeNonManifNum);
  return (edgeBorderNum==0) && (edgeNonManifNum==0);
}

/// Count the total, border (1 incident face) and non manifold (>2 incident
/// faces) edges of the mesh, writing the results in the output parameters.
static void CountEdgeNum( MeshType & m, int &total_e, int &boundary_e, int &non_manif_e )
{
  std::vector< typename tri::UpdateTopology<MeshType>::PEdge > edgeVec;
  tri::UpdateTopology<MeshType>::FillEdgeVector(m,edgeVec,true);
  sort(edgeVec.begin(), edgeVec.end()); // sort them by vertices so that equal edges are adjacent
  total_e=0;
  boundary_e=0;
  non_manif_e=0;

  size_t f_on_cur_edge =1;
  for(size_t i=0;i<edgeVec.size();++i)
  {
    // a run of equal entries ends here: classify the edge by its multiplicity
    if(( (i+1) == edgeVec.size()) || !(edgeVec[i] == edgeVec[i+1]))
    {
      ++total_e;
      if(f_on_cur_edge==1)
        ++boundary_e;
      if(f_on_cur_edge>2)
        ++non_manif_e;
      f_on_cur_edge=1;
    }
    else
    {
      ++f_on_cur_edge;
    }
  } // end for
}

/// Count the boundary loops (holes) of the mesh by walking the border edges.
static int CountHoles( MeshType & m)
{
  int numholev=0;
  FaceIterator fi;
  FaceIterator gi;
  vcg::face::Pos<FaceType> he;
  vcg::face::Pos<FaceType> hei;
  std::vector< std::vector<CoordType> > holes; //indices of vertices
  vcg::tri::UpdateFlags<MeshType>::VertexClearS(m);

  gi=m.face.begin(); fi=gi;

  for(fi=m.face.begin();fi!=m.face.end();fi++)//for all faces do
  {
    for(int j=0;j<3;j++)//for all edges
    {
      if(fi->V(j)->IsS()) continue;

      if(face::IsBorder(*fi,j))//found an unvisited border edge
      {
        he.Set(&(*fi),j,fi->V(j)); //set the face-face iterator to the current face, edge and vertex
        std::vector<CoordType> hole; //start of a new hole
        hole.push_back(fi->P(j)); // including the first vertex
        numholev++;
        he.v->SetS(); //set the current vertex as selected
        he.NextB(); //go to the next boundary edge

        while(fi->V(j) != he.v)//will we do not encounter the first boundary edge.
        {
          CoordType newpoint = he.v->P(); //select its vertex.
          if(he.v->IsS())//check if this vertex was selected already, because then we have an additional hole.
          {
            //cut and paste the additional hole.
            std::vector<CoordType> hole2;
            int index = static_cast<int>(find(hole.begin(),hole.end(),newpoint)
                                         - hole.begin());
            for(unsigned int i=index; i<hole.size(); i++)
              hole2.push_back(hole[i]);

            hole.resize(index);
            if(hole2.size()!=0) //annoying in degenerate cases
              holes.push_back(hole2);
          }
          hole.push_back(newpoint);
          numholev++;
          he.v->SetS(); //set the current vertex as selected
          he.NextB(); //go to the next boundary edge
        }
        holes.push_back(hole);
      }
    }
  }
  return static_cast<int>(holes.size());
}

/*
  Compute the set of connected components of a given mesh
  it fills a vector of pair < int , faceptr > with, for each connecteed component its size and a represnant
*/
static int CountConnectedComponents(MeshType &m)
{
  std::vector< std::pair<int,FacePointer> > CCV;
  return ConnectedComponents(m,CCV);
}

/// Flood-fill the mesh through FF adjacency; for each connected component the
/// output vector gets a (size, representative face) pair.
/// \return the number of connected components.
static int ConnectedComponents(MeshType &m, std::vector< std::pair<int,FacePointer> > &CCV)
{
  tri::RequireFFAdjacency(m);
  CCV.clear();
  tri::UpdateSelection<MeshType>::FaceClear(m);
  std::stack<FacePointer> sf;
  FacePointer fpt=&*(m.face.begin());
  for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi)
  {
    if(!((*fi).IsD()) && !(*fi).IsS())
    {
      // unvisited face: it seeds a new component
      (*fi).SetS();
      CCV.push_back(std::make_pair(0,&*fi));
      sf.push(&*fi);
      while (!sf.empty())
      {
        fpt=sf.top();
        ++CCV.back().first;
        sf.pop();
        for(int j=0;j<3;++j)
        {
          if( !face::IsBorder(*fpt,j) )
          {
            FacePointer l = fpt->FFp(j);
            if( !(*l).IsS() )
            {
              (*l).SetS();
              sf.push(l);
            }
          }
        }
      }
    }
  }
  return int(CCV.size());
}

/// Store in the per-vertex attribute handle \p h the valence (number of
/// incident faces) of each vertex.
static void ComputeValence( MeshType &m, typename MeshType::PerVertexIntHandle &h)
{
  for(VertexIterator vi=m.vert.begin(); vi!= m.vert.end();++vi)
    h[vi]=0;

  for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi)
  {
    if(!((*fi).IsD()))
      for(int j=0;j<fi->VN();j++)
        ++h[tri::Index(m,fi->V(j))];
  }
}

/**
  GENUS.

  A topologically invariant property of a surface defined as
  the largest number of non-intersecting simple closed curves that can be
  drawn on the surface without separating it.

  Roughly speaking, it is the number of holes in a surface.
  The genus g of a closed surface, also called the geometric genus, is related to the
  Euler characteristic by the relation $chi$ by $chi==2-2g$.

  The genus of a connected, orientable surface is an integer representing the maximum
  number of cuttings along closed simple curves without rendering the resultant
  manifold disconnected. It is equal to the number of handles on it.

  For general polyhedra the <em>Euler Formula</em> is:

        V - E + F = 2 - 2G - B

  where V is the number of vertices, F is the number of faces, E is the
  number of edges, G is the genus and B is the number of <em>boundary polygons</em>.

  The above formula is valid for a mesh with one single connected component.
  By considering multiple connected components the formula becomes:

        V - E + F = 2C - 2Gs - B   ->   2Gs = - ( V-E+F +B -2C)

  where C is the number of connected components and Gs is the sum of
  the genus of all connected components.

  Note that in the case of a mesh with boundaries the intuitive meaning of Genus is less intuitive that it could seem.
  A closed sphere, a sphere with one hole (e.g. a disk) and a sphere with two holes (e.g. a tube) all of them have Genus == 0
*/
static int MeshGenus(int nvert,int nedges,int nfaces, int numholes, int numcomponents)
{
  return -((nvert + nfaces - nedges + numholes - 2 * numcomponents) / 2);
}

/// Convenience overload: computes all the counters on the mesh itself and
/// returns its genus.
static int MeshGenus(MeshType &m)
{
  int nvert=m.vn;
  int nfaces=m.fn;
  int boundary_e,total_e,nonmanif_e;
  CountEdgeNum(m,total_e,boundary_e,nonmanif_e);
  int numholes=CountHoles(m);
  int numcomponents=CountConnectedComponents(m);
  int G=MeshGenus(nvert,total_e,nfaces,numholes,numcomponents);
  return G;
}

/**
 * Check if the given mesh is regular, semi-regular or irregular.
 *
 * Each vertex of a \em regular mesh has valence 6 except for border vertices
 * which have valence 4.
 *
 * A \em semi-regular mesh is derived from an irregular one applying
 * 1-to-4 subdivision recursively. (not checked for now)
 *
 * All other meshes are \em irregular.
 */
static void IsRegularMesh(MeshType &m, bool &Regular, bool &Semiregular)
{
  RequireVFAdjacency(m);
  Regular = true;

  VertexIterator vi;

  // for each vertex the number of edges are count
  for (vi = m.vert.begin(); vi != m.vert.end(); ++vi)
  {
    if (!vi->IsD())
    {
      face::Pos<FaceType> he((*vi).VFp(), &*vi);
      face::Pos<FaceType> ht = he;

      int n=0;
      bool border=false;
      do
      {
        ++n;
        ht.NextE();
        if (ht.IsBorder())
          border=true;
      }
      while (ht != he);

      if (border)
        n = n/2;

      // NOTE(review): this condition looks suspicious — as written, a border
      // vertex can never make the mesh irregular whatever its valence.
      // Verify the intended predicate ("interior != 6 OR border != 4").
      if ((n != 6)&&(!border && n != 4))
      {
        Regular = false;
        break;
      }
    }
  }

  if (!Regular)
    Semiregular = false;
  else
  {
    // For now we do not account for semi-regularity
    Semiregular = false;
  }
}

/// True if every FF-adjacent pair of faces traverses the shared edge with
/// opposite orientation.
static bool IsCoherentlyOrientedMesh(MeshType &m)
{
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if (!fi->IsD())
      for(int i=0;i<3;++i)
        if(!face::CheckOrientation(*fi,i))
          return false;

  return true;
}

/// Try to make the mesh coherently oriented by flood-filling it and flipping
/// incoherent faces. On exit: \p Oriented tells whether the mesh was already
/// coherent, \p Orientable whether a coherent orientation exists at all.
static void OrientCoherentlyMesh(MeshType &m, bool &Oriented, bool &Orientable)
{
  RequireFFAdjacency(m);
  assert(&Oriented != &Orientable);
  assert(m.face.back().FFp(0)); // This algorithms require FF topology initialized

  Orientable = true;
  Oriented = true;

  tri::UpdateSelection<MeshType>::FaceClear(m);
  std::stack<FacePointer> faces;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if (!fi->IsD() && !fi->IsS())
    {
      // each face put in the stack is selected (and oriented)
      fi->SetS();
      // New section of code to orient the initial face correctly
      if(fi->N()[2]>0.0)
      {
        face::SwapEdge<FaceType,true>(*fi, 0);
        //fi->N = vcg::Normal<float>(*fi);
        vcg::face::ComputeNormal(*fi);
      }
      // End of new code section.
      faces.push(&(*fi));

      // empty the stack
      while (!faces.empty())
      {
        FacePointer fp = faces.top();
        faces.pop();

        // make consistently oriented the adjacent faces
        for (int j = 0; j < 3; j++)
        {
          //get one of the adjacent face
          FacePointer fpaux = fp->FFp(j);
          int iaux = fp->FFi(j);

          if (!fpaux->IsD() && fpaux != fp && face::IsManifold<FaceType>(*fp, j))
          {
            if (!CheckOrientation(*fpaux, iaux))
            {
              Oriented = false;

              if (!fpaux->IsS())
              {
                face::SwapEdge<FaceType,true>(*fpaux, iaux);
                // New line to update face normal
                //fpaux->N = vcg::Normal<float>(*fpaux);
                face::ComputeNormal(*fpaux);
                // end of new section.
                assert(CheckOrientation(*fpaux, iaux));
              }
              else
              {
                // the face was already fixed with the opposite orientation:
                // the surface is not orientable (e.g. a Moebius strip)
                Orientable = false;
                break;
              }
            }

            // put the oriented face into the stack
            if (!fpaux->IsS())
            {
              fpaux->SetS();
              faces.push(fpaux);
            }
          }
        }
      }
    }

    if (!Orientable) break;
  }
}

/// Flip the orientation of the whole mesh flipping all the faces (by swapping the first two vertices)
static void FlipMesh(MeshType &m, bool selected=false)
{
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if(!(*fi).IsD())
    if(!selected || (*fi).IsS())
    {
      face::SwapEdge<FaceType,false>((*fi), 0);
      if (HasPerWedgeTexCoord(m))
        std::swap((*fi).WT(0),(*fi).WT(1));
    }
}

/// Flip a mesh so that its normals are oriented outside.
/// Just for safety it uses a voting scheme.
/// It assumes that
/// mesh has already has coherent normals.
/// mesh is watertight and single component.
static bool FlipNormalOutside(MeshType &m)
{
  if(m.vert.empty()) return false;

  tri::UpdateNormal<MeshType>::PerVertexAngleWeighted(m);
  tri::UpdateNormal<MeshType>::NormalizePerVertex(m);

  std::vector< VertexPointer > minVertVec;
  std::vector< VertexPointer > maxVertVec;

  // The set of directions to be choosen
  std::vector< CoordType > dirVec;
  dirVec.push_back(CoordType(1,0,0));
  dirVec.push_back(CoordType(0,1,0));
  dirVec.push_back(CoordType(0,0,1));
  dirVec.push_back(CoordType( 1, 1,1));
  dirVec.push_back(CoordType(-1, 1,1));
  dirVec.push_back(CoordType(-1,-1,1));
  dirVec.push_back(CoordType( 1,-1,1));

  for(size_t i=0;i<dirVec.size();++i)
  {
    Normalize(dirVec[i]);
    minVertVec.push_back(&*m.vert.begin());
    maxVertVec.push_back(&*m.vert.begin());
  }
  // find, for each direction, the extremal vertices along that direction
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi) if(!(*vi).IsD())
  {
    for(size_t i=0;i<dirVec.size();++i)
    {
      if( (*vi).cP().dot(dirVec[i]) < minVertVec[i]->P().dot(dirVec[i])) minVertVec[i] = &*vi;
      if( (*vi).cP().dot(dirVec[i]) > maxVertVec[i]->P().dot(dirVec[i])) maxVertVec[i] = &*vi;
    }
  }

  // on a correctly oriented watertight mesh the normal of an extremal vertex
  // must roughly agree with the probing direction; count disagreeing votes
  int voteCount=0;
  ScalarType angleThreshold = cos(math::ToRad(85.0));
  for(size_t i=0;i<dirVec.size();++i)
  {
    //    qDebug("Min vert along (%f %f %f) is %f %f %f",dirVec[i][0],dirVec[i][1],dirVec[i][2],minVertVec[i]->P()[0],minVertVec[i]->P()[1],minVertVec[i]->P()[2]);
    //    qDebug("Max vert along (%f %f %f) is %f %f %f",dirVec[i][0],dirVec[i][1],dirVec[i][2],maxVertVec[i]->P()[0],maxVertVec[i]->P()[1],maxVertVec[i]->P()[2]);
    if(minVertVec[i]->N().dot(dirVec[i]) > angleThreshold ) voteCount++;
    if(maxVertVec[i]->N().dot(dirVec[i]) < -angleThreshold ) voteCount++;
  }
  //  qDebug("votecount = %i",voteCount);
  if(voteCount < int(dirVec.size())/2) return false;
  FlipMesh(m);
  return true;
}

// Search and remove small single triangle folds
// - a face has normal opposite to all other faces
// - choose the edge that brings to the face f1 containing the vertex opposite to that edge.
static int RemoveFaceFoldByFlip(MeshType &m, float normalThresholdDeg=175, bool repeat=true)
{
  RequireFFAdjacency(m);
  RequirePerVertexMark(m);
  //Counters for logging and convergence
  int count, total = 0;

  do {
    tri::UpdateTopology<MeshType>::FaceFace(m);
    tri::UnMarkAll(m);
    count = 0;

    ScalarType NormalThrRad = math::ToRad(normalThresholdDeg);
    ScalarType eps = 0.0001; // this epsilon value is in absolute value. It is a distance from edge in baricentric coords.

    //detection stage
    for(FaceIterator fi=m.face.begin();fi!= m.face.end();++fi ) if(!(*fi).IsV())
    {
      Point3<ScalarType> NN = vcg::TriangleNormal((*fi)).Normalize();
      // the face is a fold iff its normal opposes all three neighbours
      if( vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(0)).Normalize()) > NormalThrRad &&
          vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(1)).Normalize()) > NormalThrRad &&
          vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(2)).Normalize()) > NormalThrRad )
      {
        (*fi).SetS();
        //(*fi).C()=Color4b(Color4b::Red);
        // now search the best edge to flip
        for(int i=0;i<3;i++)
        {
          Point3<ScalarType> &p=(*fi).P2(i);
          Point3<ScalarType> L;
          bool ret = vcg::InterpolationParameters((*(*fi).FFp(i)),TriangleNormal(*(*fi).FFp(i)),p,L);
          // flip only if the opposite vertex projects strictly inside the neighbour
          if(ret && L[0]>eps && L[1]>eps && L[2]>eps)
          {
            (*fi).FFp(i)->SetS();
            (*fi).FFp(i)->SetV();
            //(*fi).FFp(i)->C()=Color4b(Color4b::Green);
            if(face::CheckFlipEdge<FaceType>( *fi, i ))
            {
              face::FlipEdge<FaceType>( *fi, i );
              ++count; ++total;
            }
          }
        }
      }
    }

    // tri::UpdateNormal<MeshType>::PerFace(m);
  }
  while( repeat && count );
  return total;
}

/// Remove T-vertices (a vertex lying almost exactly on the interior of the
/// longest edge of a nearby face) by flipping that edge, when the flip
/// improves the minimum face quality.
static int RemoveTVertexByFlip(MeshType &m, float threshold=40, bool repeat=true)
{
  RequireFFAdjacency(m);
  RequirePerVertexMark(m);
  //Counters for logging and convergence
  int count, total = 0;

  do {
    tri::UpdateTopology<MeshType>::FaceFace(m);
    tri::UnMarkAll(m);
    count = 0;

    //detection stage
    for(unsigned int index = 0 ; index < m.face.size(); ++index )
    {
      FacePointer f = &(m.face[index]);
      float sides[3];
      CoordType dummy;

      sides[0] = Distance(f->P(0), f->P(1));
      sides[1] = Distance(f->P(1), f->P(2));
      sides[2] = Distance(f->P(2), f->P(0));
      // Find largest triangle side
      int i = std::find(sides, sides+3, std::max( std::max(sides[0],sides[1]), sides[2])) - (sides);
      if( tri::IsMarked(m,f->V2(i) )) continue;

      // T-vertex test: the opposite vertex is much closer to edge i than the edge length
      if( PSDist(f->P2(i),f->P(i),f->P1(i),dummy)*threshold <= sides[i] )
      {
        tri::Mark(m,f->V2(i));
        if(face::CheckFlipEdge<FaceType>( *f, i ))
        {
          // Check if EdgeFlipping improves quality
          FacePointer g = f->FFp(i); int k = f->FFi(i);
          Triangle3<ScalarType> t1(f->P(i), f->P1(i), f->P2(i)), t2(g->P(k), g->P1(k), g->P2(k)),
              t3(f->P(i), g->P2(k), f->P2(i)), t4(g->P(k), f->P2(i), g->P2(k));

          if ( std::min( QualityFace(t1), QualityFace(t2) ) < std::min( QualityFace(t3), QualityFace(t4) ))
          {
            face::FlipEdge<FaceType>( *f, i );
            ++count; ++total;
          }
        }
      }
    }

    // tri::UpdateNormal<MeshType>::PerFace(m);
  }
  while( repeat && count );
  return total;
}

/// Remove T-vertices by snapping the offending vertex onto the nearest
/// endpoint of the long edge and then merging duplicated vertices.
static int RemoveTVertexByCollapse(MeshType &m, float threshold=40, bool repeat=true)
{
  RequirePerVertexMark(m);

  //Counters for logging and convergence
  int count, total = 0;

  do {
    tri::UnMarkAll(m);
    count = 0;

    //detection stage
    for(unsigned int index = 0 ; index < m.face.size(); ++index )
    {
      FacePointer f = &(m.face[index]);
      float sides[3];
      CoordType dummy;

      sides[0] = Distance(f->P(0), f->P(1));
      sides[1] = Distance(f->P(1), f->P(2));
      sides[2] = Distance(f->P(2), f->P(0));
      int i = std::find(sides, sides+3, std::max( std::max(sides[0],sides[1]), sides[2])) - (sides);
      if( tri::IsMarked(m,f->V2(i) )) continue;

      if( PSDist(f->P2(i),f->P(i),f->P1(i),dummy)*threshold <= sides[i] )
      {
        tri::Mark(m,f->V2(i));

        // snap to whichever edge endpoint is closer to the projection point
        int j = Distance(dummy,f->P(i))<Distance(dummy,f->P1(i))?i:(i+1)%3;
        f->P2(i) = f->P(j);
        tri::Mark(m,f->V(j));
        ++count; ++total;
      }
    }

    tri::Clean<MeshType>::RemoveDuplicateVertex(m);
    tri::Allocator<MeshType>::CompactFaceVector(m);
    tri::Allocator<MeshType>::CompactVertexVector(m);
  }
  while( repeat && count );

  return total;
}

/// Fill \p ret with the pairs of self-intersecting faces of the mesh, using a
/// uniform grid to prune the candidate pairs. \return true if any was found.
static bool SelfIntersections(MeshType &m, std::vector<FaceType*> &ret)
{
  RequirePerFaceMark(m);
  ret.clear();
  int referredBit = FaceType::NewBitFlag();
  tri::UpdateFlags<MeshType>::FaceClear(m,referredBit);

  TriMeshGrid gM;
  gM.Set(m.face.begin(),m.face.end());

  for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi) if(!(*fi).IsD())
  {
    // marking fi avoids testing the same unordered pair twice
    (*fi).SetUserBit(referredBit);
    Box3< ScalarType> bbox;
    (*fi).GetBBox(bbox);
    std::vector<FaceType*> inBox;
    vcg::tri::GetInBoxFace(m, gM, bbox,inBox);
    bool Intersected=false;
    typename std::vector<FaceType*>::iterator fib;
    for(fib=inBox.begin();fib!=inBox.end();++fib)
    {
      if(!(*fib)->IsUserBit(referredBit) && (*fib != &*fi) )
        if(Clean<MeshType>::TestFaceFaceIntersection(&*fi,*fib)){
          ret.push_back(*fib);
          if(!Intersected) {
            ret.push_back(&*fi);
            Intersected=true;
          }
        }
    }
    inBox.clear();
  }

  FaceType::DeleteBitFlag(referredBit);
  return (ret.size()>0);
}

/**
      This function simply test that the vn and fn counters be consistent with the size of the containers and the number of deleted simplexes.
      */
static bool IsSizeConsistent(MeshType &m)
{
  int DeletedVertNum=0;
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
    if((*vi).IsD()) DeletedVertNum++;

  int DeletedEdgeNum=0;
  for (EdgeIterator ei = m.edge.begin(); ei != m.edge.end(); ++ei)
    if((*ei).IsD()) DeletedEdgeNum++;

  int DeletedFaceNum=0;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if((*fi).IsD()) DeletedFaceNum++;

  if(size_t(m.vn+DeletedVertNum) != m.vert.size()) return false;
  if(size_t(m.en+DeletedEdgeNum) != m.edge.size()) return false;
  if(size_t(m.fn+DeletedFaceNum) != m.face.size()) return false;

  return true;
}

/**
      This function simply test that all the faces have a consistent face-face topology relation.
      useful for checking that a topology modifying algorithm does not mess something.
      */
static bool IsFFAdjacencyConsistent(MeshType &m)
{
  RequireFFAdjacency(m);

  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if(!(*fi).IsD())
    {
      for(int i=0;i<3;++i)
        if(!FFCorrectness(*fi, i)) return false;
    }
  return true;
}

/**
      This function simply test that a mesh has some reasonable tex coord.
 */
static bool HasConsistentPerWedgeTexCoord(MeshType &m)
{
  tri::RequirePerFaceWedgeTexCoord(m);

  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if(!(*fi).IsD())
    {
      FaceType &f=(*fi);
      if( ! ( (f.WT(0).N() == f.WT(1).N()) && (f.WT(0).N() == (*fi).WT(2).N()) )  )
        return false; // all the vertices must have the same index.

      if((*fi).WT(0).N() <0) return false; // no undefined texture should be allowed
    }
  return true;
}

/**
  Simple check that there are no face with all collapsed tex coords.
  NOTE(review): despite the "Has..." name this returns false when such a face
  is found and true otherwise — callers should verify the intended convention.
  */
static bool HasZeroTexCoordFace(MeshType &m)
{
  tri::RequirePerFaceWedgeTexCoord(m);

  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if(!(*fi).IsD())
    {
      if( (*fi).WT(0).P() == (*fi).WT(1).P() && (*fi).WT(0).P() == (*fi).WT(2).P() ) return false;
    }
  return true;
}

/**
        This function test if two triangular faces of a mesh intersect.
        It assumes that the faces (as storage) are different (e.g different address)
        If the two faces are different but coincident (same set of vertexes) return true.
        if the faces share an edge no test is done.
        if the faces share only a vertex, the opposite edge is tested against the face
  */
static bool TestFaceFaceIntersection(FaceType *f0,FaceType *f1)
{
  assert(f0!=f1);
  int sv = face::CountSharedVertex(f0,f1);
  if(sv==3) return true;
  if(sv==0) return (vcg::IntersectionTriangleTriangle<FaceType>((*f0),(*f1)));
  //  if the faces share only a vertex, the opposite edge (as a segment) is tested against the face
  //  to avoid degenerate cases where the two triangles have the opposite edge on a common plane
  //  we offset the segment to test toward the shared vertex
  if(sv==1)
  {
    int i0,i1; ScalarType a,b;
    face::FindSharedVertex(f0,f1,i0,i1);
    CoordType shP = f0->V(i0)->P()*0.5;
    if(vcg::IntersectionSegmentTriangle(Segment3<ScalarType>((*f0).V1(i0)->P()*0.5+shP,(*f0).V2(i0)->P()*0.5+shP), *f1, a, b) )
    {
      // a,b are the param coords of the intersection point of the segment.
      if(a+b>=1 || a<=EPSIL || b<=EPSIL ) return false;
      return true;
    }
    if(vcg::IntersectionSegmentTriangle(Segment3<ScalarType>((*f1).V1(i1)->P()*0.5+shP,(*f1).V2(i1)->P()*0.5+shP), *f0, a, b) )
    {
      // a,b are the param coords of the intersection point of the segment.
      if(a+b>=1 || a<=EPSIL || b<=EPSIL ) return false;
      return true;
    }
  }
  return false;
}

/**
      This function merge all the vertices that are closer than the given radius
*/
static int MergeCloseVertex(MeshType &m, const ScalarType radius)
{
  int mergedCnt=0;
  mergedCnt = ClusterVertex(m,radius);
  RemoveDuplicateVertex(m,true);
  return mergedCnt;
}

/// Collapse onto a single representative position all the vertices closer than
/// \p radius to an unvisited seed vertex, using a spatial hash for the queries.
/// \return the number of vertices moved (they become duplicates to be removed).
static int ClusterVertex(MeshType &m, const ScalarType radius)
{
  if(m.vn==0) return 0;
  // some spatial indexing structure does not work well with deleted vertices...
  tri::Allocator<MeshType>::CompactVertexVector(m);
  typedef vcg::SpatialHashTable<VertexType, ScalarType> SampleSHT;
  SampleSHT sht;
  tri::EmptyTMark<MeshType> markerFunctor;
  std::vector<VertexType*> closests;
  int mergedCnt=0;
  sht.Set(m.vert.begin(), m.vert.end());
  UpdateFlags<MeshType>::VertexClearV(m);
  for(VertexIterator viv = m.vert.begin(); viv!= m.vert.end(); ++viv)
    if(!(*viv).IsD() && !(*viv).IsV())
    {
      (*viv).SetV();
      Point3<ScalarType> p = viv->cP();
      Box3<ScalarType> bb(p-Point3<ScalarType>(radius,radius,radius),p+Point3<ScalarType>(radius,radius,radius));
      GridGetInBox(sht, markerFunctor, bb, closests);
      // qDebug("Vertex %i has %i closest", &*viv - &*m.vert.begin(),closests.size());
      for(size_t i=0; i<closests.size(); ++i)
      {
        ScalarType dist = Distance(p,closests[i]->cP());
        if(dist < radius && !closests[i]->IsV())
        {
          //          printf("%f %f \n",dist,radius);
          mergedCnt++;
          closests[i]->SetV();
          closests[i]->P()=p;   // snap the neighbour onto the seed position
        }
      }
    }
  return mergedCnt;
}

/// Remove the connected components whose face count is below \p maxCCSize.
/// \return (total components, deleted components).
static std::pair<int,int>  RemoveSmallConnectedComponentsSize(MeshType &m, int maxCCSize)
{
  std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
  int TotalCC=ConnectedComponents(m, CCV);
  int DeletedCC=0;

  ConnectedComponentIterator<MeshType> ci;
  for(unsigned int i=0;i<CCV.size();++i)
  {
    std::vector<typename MeshType::FacePointer> FPV;
    if(CCV[i].first<maxCCSize)
    {
      DeletedCC++;
      for(ci.start(m,CCV[i].second);!ci.completed();++ci)
        FPV.push_back(*ci);

      typename std::vector<typename MeshType::FacePointer>::iterator fpvi;
      for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi)
        Allocator<MeshType>::DeleteFace(m,(**fpvi));
    }
  }
  return std::make_pair(TotalCC,DeletedCC);
}

/// Remove the connected components smaller than a given diameter
// it returns a pair with the number of connected components and the number of deleted ones.
static std::pair<int,int> RemoveSmallConnectedComponentsDiameter(MeshType &m, ScalarType maxDiameter)
{
  std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
  int TotalCC=ConnectedComponents(m, CCV);
  int DeletedCC=0;
  tri::ConnectedComponentIterator<MeshType> ci;
  for(unsigned int i=0;i<CCV.size();++i)
  {
    Box3<ScalarType> bb;
    std::vector<typename MeshType::FacePointer> FPV;
    // collect the component's faces while accumulating its bounding box
    for(ci.start(m,CCV[i].second);!ci.completed();++ci)
    {
      FPV.push_back(*ci);
      bb.Add((*ci)->P(0));
      bb.Add((*ci)->P(1));
      bb.Add((*ci)->P(2));
    }
    if(bb.Diag()<maxDiameter)
    {
      DeletedCC++;
      typename std::vector<typename MeshType::FacePointer>::iterator fpvi;
      for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi)
        tri::Allocator<MeshType>::DeleteFace(m,(**fpvi));
    }
  }
  return std::make_pair(TotalCC,DeletedCC);
}

/// Remove the connected components greater than a given diameter
// it returns a pair with the number of connected components and the number of deleted ones.
static std::pair<int,int> RemoveHugeConnectedComponentsDiameter(MeshType &m, ScalarType minDiameter) { std::vector< std::pair<int, typename MeshType::FacePointer> > CCV; int TotalCC=ConnectedComponents(m, CCV); int DeletedCC=0; tri::ConnectedComponentIterator<MeshType> ci; for(unsigned int i=0;i<CCV.size();++i) { Box3f bb; std::vector<typename MeshType::FacePointer> FPV; for(ci.start(m,CCV[i].second);!ci.completed();++ci) { FPV.push_back(*ci); bb.Add((*ci)->P(0)); bb.Add((*ci)->P(1)); bb.Add((*ci)->P(2)); } if(bb.Diag()>minDiameter) { DeletedCC++; typename std::vector<typename MeshType::FacePointer>::iterator fpvi; for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi) tri::Allocator<MeshType>::DeleteFace(m,(**fpvi)); } } return std::make_pair(TotalCC,DeletedCC); } /** Select the folded faces using an angle threshold on the face normal. The face is selected if the dot product between the face normal and the normal of the plane fitted using the vertices of the one ring faces is below the cosThreshold. The cosThreshold requires a negative cosine value (a positive value is clamp to zero). 
*/ static void SelectFoldedFaceFromOneRingFaces(MeshType &m, ScalarType cosThreshold) { tri::RequireVFAdjacency(m); tri::RequirePerFaceNormal(m); tri::RequirePerVertexNormal(m); vcg::tri::UpdateSelection<MeshType>::FaceClear(m); vcg::tri::UpdateNormal<MeshType>::PerFaceNormalized(m); vcg::tri::UpdateNormal<MeshType>::PerVertexNormalized(m); vcg::tri::UpdateTopology<MeshType>::VertexFace(m); if (cosThreshold > 0) cosThreshold = 0; #pragma omp parallel for schedule(dynamic, 10) for (int i = 0; i < m.face.size(); i++) { std::vector<typename MeshType::VertexPointer> nearVertex; std::vector<typename MeshType::CoordType> point; typename MeshType::FacePointer f = &m.face[i]; for (int j = 0; j < 3; j++) { std::vector<typename MeshType::VertexPointer> temp; vcg::face::VVStarVF<typename MeshType::FaceType>(f->V(j), temp); typename std::vector<typename MeshType::VertexPointer>::iterator iter = temp.begin(); for (; iter != temp.end(); iter++) { if ((*iter) != f->V1(j) && (*iter) != f->V2(j)) { nearVertex.push_back((*iter)); point.push_back((*iter)->P()); } } nearVertex.push_back(f->V(j)); point.push_back(f->P(j)); } if (point.size() > 3) { vcg::Plane3<typename MeshType::ScalarType> plane; vcg::FitPlaneToPointSet(point, plane); float avgDot = 0; for (int j = 0; j < nearVertex.size(); j++) avgDot += plane.Direction().dot(nearVertex[j]->N()); avgDot /= nearVertex.size(); typename MeshType::VertexType::NormalType normal; if (avgDot < 0) normal = -plane.Direction(); else normal = plane.Direction(); if (normal.dot(f->N()) < cosThreshold) f->SetS(); } } } }; // end class /*@}*/ } //End Namespace Tri } // End Namespace vcg #endif
GB_unaryop__minv_int16_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): any change here belongs in the code generator, not this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__minv_int16_uint64
// op(A') function: GB_tran__minv_int16_uint64

// C type:  int16_t
// A type:  uint64_t
// cast:    int16_t cij = (int16_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 16)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (signed integer multiplicative inverse, 16-bit result)
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 16) ;

// casting (the uint64_t entry is cast to int16_t BEFORE the operator runs)
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_int16_uint64
(
    int16_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Embarrassingly parallel elementwise apply over the anz entries.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_int16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is textually included; the GB_* macros above
    // configure it for this type/operator combination.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 16; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
gimple-pretty-print.c
/* Modula-3: modified */ /* Pretty formatting of GIMPLE statements and expressions. Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc. Contributed by Aldy Hernandez <aldyh@redhat.com> and Diego Novillo <dnovillo@google.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "diagnostic.h" #include "tree-pretty-print.h" #include "gimple-pretty-print.h" #include "hashtab.h" #include "tree-flow.h" #include "tree-pass.h" #include "gimple.h" #include "value-prof.h" EXTERN_C_START #define INDENT(SPACE) \ do { int i; for (i = 0; i < SPACE; i++) pp_space (buffer); } while (0) static pretty_printer buffer; static bool initialized = false; #define GIMPLE_NIY do_niy (buffer,gs) /* Try to print on BUFFER a default message for the unrecognized gimple statement GS. */ static void do_niy (pretty_printer *buffer, gimple gs) { pp_printf (buffer, "<<< Unknown GIMPLE statement: %s >>>\n", gimple_code_name[(int) gimple_code (gs)]); } /* Initialize the pretty printer on FILE if needed. */ static void maybe_init_pretty_print (FILE *file) { if (!initialized) { pp_construct (&buffer, NULL, 0); pp_needs_newline (&buffer) = true; initialized = true; } buffer.buffer->stream = file; } /* Emit a newline and SPC indentantion spaces to BUFFER. 
*/ static void newline_and_indent (pretty_printer *buffer, int spc) { pp_newline (buffer); INDENT (spc); } /* Print the GIMPLE statement GS on stderr. */ DEBUG_FUNCTION void debug_gimple_stmt (gimple gs) { print_gimple_stmt (stderr, gs, 0, TDF_VOPS|TDF_MEMSYMS); fprintf (stderr, "\n"); } /* Dump GIMPLE statement G to FILE using SPC indentantion spaces and FLAGS as in dump_gimple_stmt. */ void print_gimple_stmt (FILE *file, gimple g, int spc, int flags) { maybe_init_pretty_print (file); dump_gimple_stmt (&buffer, g, spc, flags); pp_flush (&buffer); } /* Dump GIMPLE statement G to FILE using SPC indentantion spaces and FLAGS as in dump_gimple_stmt. Print only the right-hand side of the statement. */ void print_gimple_expr (FILE *file, gimple g, int spc, int flags) { flags |= TDF_RHS_ONLY; maybe_init_pretty_print (file); dump_gimple_stmt (&buffer, g, spc, flags); } /* Print the GIMPLE sequence SEQ on BUFFER using SPC indentantion spaces and FLAGS as in dump_gimple_stmt. */ static void dump_gimple_seq (pretty_printer *buffer, gimple_seq seq, int spc, int flags) { gimple_stmt_iterator i; for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i)) { gimple gs = gsi_stmt (i); INDENT (spc); dump_gimple_stmt (buffer, gs, spc, flags); if (!gsi_one_before_end_p (i)) pp_newline (buffer); } } /* Dump GIMPLE sequence SEQ to FILE using SPC indentantion spaces and FLAGS as in dump_gimple_stmt. */ void print_gimple_seq (FILE *file, gimple_seq seq, int spc, int flags) { maybe_init_pretty_print (file); dump_gimple_seq (&buffer, seq, spc, flags); pp_flush (&buffer); } /* Print the GIMPLE sequence SEQ on stderr. */ DEBUG_FUNCTION void debug_gimple_seq (gimple_seq seq) { print_gimple_seq (stderr, seq, 0, TDF_VOPS|TDF_MEMSYMS); } /* A simple helper to pretty-print some of the gimple tuples in the printf style. 
The format modifiers are preceeded by '%' and are: 'G' - outputs a string corresponding to the code of the given gimple, 'S' - outputs a gimple_seq with indent of spc + 2, 'T' - outputs the tree t, 'd' - outputs an int as a decimal, 's' - outputs a string, 'n' - outputs a newline, '+' - increases indent by 2 then outputs a newline, '-' - decreases indent by 2 then outputs a newline. */ static void dump_gimple_fmt (pretty_printer *buffer, int spc, int flags, const char *fmt, ...) { va_list args; const char *c; const char *tmp; va_start (args, fmt); for (c = fmt; *c; c++) { if (*c == '%') { gimple_seq seq; tree t; gimple g; switch (*++c) { case 'G': g = va_arg (args, gimple); tmp = gimple_code_name[gimple_code (g)]; pp_string (buffer, tmp); break; case 'S': seq = va_arg (args, gimple_seq); pp_newline (buffer); dump_gimple_seq (buffer, seq, spc + 2, flags); newline_and_indent (buffer, spc); break; case 'T': t = va_arg (args, tree); if (t == NULL_TREE) pp_string (buffer, "NULL"); else dump_generic_node (buffer, t, spc, flags, false); break; case 'd': pp_decimal_int (buffer, va_arg (args, int)); break; case 's': pp_string (buffer, va_arg (args, char *)); break; case 'n': newline_and_indent (buffer, spc); break; case '+': spc += 2; newline_and_indent (buffer, spc); break; case '-': spc -= 2; newline_and_indent (buffer, spc); break; default: gcc_unreachable (); } } else pp_character (buffer, *c); } va_end (args); } /* Helper for dump_gimple_assign. Print the unary RHS of the assignment GS. BUFFER, SPC and FLAGS are as in dump_gimple_stmt. 
*/ static void dump_unary_rhs (pretty_printer *buffer, gimple gs, int spc, int flags) { enum tree_code rhs_code = gimple_assign_rhs_code (gs); tree lhs = gimple_assign_lhs (gs); tree rhs = gimple_assign_rhs1 (gs); switch (rhs_code) { case VIEW_CONVERT_EXPR: case ASSERT_EXPR: dump_generic_node (buffer, rhs, spc, flags, false); break; case FIXED_CONVERT_EXPR: case ADDR_SPACE_CONVERT_EXPR: case FIX_TRUNC_EXPR: case FLOAT_EXPR: CASE_CONVERT: pp_character (buffer, '('); dump_generic_node (buffer, TREE_TYPE (lhs), spc, flags, false); pp_string (buffer, ") "); if (op_prio (rhs) < op_code_prio (rhs_code)) { pp_character (buffer, '('); dump_generic_node (buffer, rhs, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, rhs, spc, flags, false); break; case PAREN_EXPR: pp_string (buffer, "(("); dump_generic_node (buffer, rhs, spc, flags, false); pp_string (buffer, "))"); break; case ABS_EXPR: pp_string (buffer, "ABS_EXPR <"); dump_generic_node (buffer, rhs, spc, flags, false); pp_character (buffer, '>'); break; default: if (TREE_CODE_CLASS (rhs_code) == tcc_declaration || TREE_CODE_CLASS (rhs_code) == tcc_constant || TREE_CODE_CLASS (rhs_code) == tcc_reference || rhs_code == SSA_NAME || rhs_code == ADDR_EXPR || rhs_code == CONSTRUCTOR) { dump_generic_node (buffer, rhs, spc, flags, false); break; } else if (rhs_code == BIT_NOT_EXPR) pp_character (buffer, '~'); else if (rhs_code == TRUTH_NOT_EXPR) pp_character (buffer, '!'); else if (rhs_code == NEGATE_EXPR) pp_character (buffer, '-'); else { pp_character (buffer, '['); pp_string (buffer, tree_code_name [rhs_code]); pp_string (buffer, "] "); } if (op_prio (rhs) < op_code_prio (rhs_code)) { pp_character (buffer, '('); dump_generic_node (buffer, rhs, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, rhs, spc, flags, false); break; } } /* Helper for dump_gimple_assign. Print the binary RHS of the assignment GS. BUFFER, SPC and FLAGS are as in dump_gimple_stmt. 
*/ static void dump_binary_rhs (pretty_printer *buffer, gimple gs, int spc, int flags) { const char *p; enum tree_code code = gimple_assign_rhs_code (gs); switch (code) { case COMPLEX_EXPR: case MIN_EXPR: case MAX_EXPR: case VEC_WIDEN_MULT_HI_EXPR: case VEC_WIDEN_MULT_LO_EXPR: case VEC_PACK_TRUNC_EXPR: case VEC_PACK_SAT_EXPR: case VEC_PACK_FIX_TRUNC_EXPR: case VEC_EXTRACT_EVEN_EXPR: case VEC_EXTRACT_ODD_EXPR: case VEC_INTERLEAVE_HIGH_EXPR: case VEC_INTERLEAVE_LOW_EXPR: for (p = tree_code_name [(int) code]; *p; p++) pp_character (buffer, TOUPPER (*p)); pp_string (buffer, " <"); dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false); pp_character (buffer, '>'); break; default: if (op_prio (gimple_assign_rhs1 (gs)) <= op_code_prio (code)) { pp_character (buffer, '('); dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false); pp_space (buffer); pp_string (buffer, op_symbol_code (gimple_assign_rhs_code (gs))); pp_space (buffer); if (op_prio (gimple_assign_rhs2 (gs)) <= op_code_prio (code)) { pp_character (buffer, '('); dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false); } } /* Helper for dump_gimple_assign. Print the ternary RHS of the assignment GS. BUFFER, SPC and FLAGS are as in dump_gimple_stmt. 
*/ static void dump_ternary_rhs (pretty_printer *buffer, gimple gs, int spc, int flags) { const char *p; enum tree_code code = gimple_assign_rhs_code (gs); switch (code) { case WIDEN_MULT_PLUS_EXPR: case WIDEN_MULT_MINUS_EXPR: for (p = tree_code_name [(int) code]; *p; p++) pp_character (buffer, TOUPPER (*p)); pp_string (buffer, " <"); dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false); pp_character (buffer, '>'); break; case FMA_EXPR: dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false); pp_string (buffer, " * "); dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false); pp_string (buffer, " + "); dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false); break; default: gcc_unreachable (); } } /* Dump the gimple assignment GS. BUFFER, SPC and FLAGS are as in dump_gimple_stmt. 
*/ static void dump_gimple_assign (pretty_printer *buffer, gimple gs, int spc, int flags) { if (flags & TDF_RAW) { tree last; if (gimple_num_ops (gs) == 2) last = NULL_TREE; else if (gimple_num_ops (gs) == 3) last = gimple_assign_rhs2 (gs); else gcc_unreachable (); dump_gimple_fmt (buffer, spc, flags, "%G <%s, %T, %T, %T>", gs, tree_code_name[gimple_assign_rhs_code (gs)], gimple_assign_lhs (gs), gimple_assign_rhs1 (gs), last); } else { if (!(flags & TDF_RHS_ONLY)) { dump_generic_node (buffer, gimple_assign_lhs (gs), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); if (gimple_assign_nontemporal_move_p (gs)) pp_string (buffer, "{nt}"); if (gimple_has_volatile_ops (gs)) pp_string (buffer, "{v}"); pp_space (buffer); } if (gimple_num_ops (gs) == 2) dump_unary_rhs (buffer, gs, spc, flags); else if (gimple_num_ops (gs) == 3) dump_binary_rhs (buffer, gs, spc, flags); else if (gimple_num_ops (gs) == 4) dump_ternary_rhs (buffer, gs, spc, flags); else gcc_unreachable (); if (!(flags & TDF_RHS_ONLY)) pp_semicolon(buffer); } } /* Dump the return statement GS. BUFFER, SPC and FLAGS are as in dump_gimple_stmt. */ static void dump_gimple_return (pretty_printer *buffer, gimple gs, int spc, int flags) { tree t; t = gimple_return_retval (gs); if (flags & TDF_RAW) dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs, t); else { pp_string (buffer, "return"); if (t) { pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); } pp_semicolon (buffer); } } /* Dump the call arguments for a gimple call. BUFFER, FLAGS are as in dump_gimple_call. 
*/ static void dump_gimple_call_args (pretty_printer *buffer, gimple gs, int flags) { size_t i; for (i = 0; i < gimple_call_num_args (gs); i++) { dump_generic_node (buffer, gimple_call_arg (gs, i), 0, flags, false); if (i < gimple_call_num_args (gs) - 1) pp_string (buffer, ", "); } if (gimple_call_va_arg_pack_p (gs)) { if (gimple_call_num_args (gs) > 0) { pp_character (buffer, ','); pp_space (buffer); } pp_string (buffer, "__builtin_va_arg_pack ()"); } } /* Dump the points-to solution *PT to BUFFER. */ static void pp_points_to_solution (pretty_printer *buffer, struct pt_solution *pt) { if (pt->anything) { pp_string (buffer, "anything "); return; } if (pt->nonlocal) pp_string (buffer, "nonlocal "); if (pt->escaped) pp_string (buffer, "escaped "); if (pt->ipa_escaped) pp_string (buffer, "unit-escaped "); if (pt->null) pp_string (buffer, "null "); if (pt->vars && !bitmap_empty_p (pt->vars)) { bitmap_iterator bi; unsigned i; pp_string (buffer, "{ "); EXECUTE_IF_SET_IN_BITMAP (pt->vars, 0, i, bi) { tree var = referenced_var_lookup (cfun, i); if (var) { dump_generic_node (buffer, var, 0, dump_flags, false); if (DECL_PT_UID (var) != DECL_UID (var)) { pp_string (buffer, "ptD."); pp_decimal_int (buffer, DECL_PT_UID (var)); } } else { pp_string (buffer, "D."); pp_decimal_int (buffer, i); } pp_character (buffer, ' '); } pp_character (buffer, '}'); if (pt->vars_contains_global) pp_string (buffer, " (glob)"); if (pt->vars_contains_restrict) pp_string (buffer, " (restr)"); } } /* Dump the call statement GS. BUFFER, SPC and FLAGS are as in dump_gimple_stmt. 
*/ static void dump_gimple_call (pretty_printer *buffer, gimple gs, int spc, int flags) { tree lhs = gimple_call_lhs (gs); if (flags & TDF_ALIAS) { struct pt_solution *pt; pt = gimple_call_use_set (gs); if (!pt_solution_empty_p (pt)) { pp_string (buffer, "# USE = "); pp_points_to_solution (buffer, pt); newline_and_indent (buffer, spc); } pt = gimple_call_clobber_set (gs); if (!pt_solution_empty_p (pt)) { pp_string (buffer, "# CLB = "); pp_points_to_solution (buffer, pt); newline_and_indent (buffer, spc); } } if (flags & TDF_RAW) { dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T", gs, gimple_call_fn (gs), lhs); if (gimple_call_num_args (gs) > 0) { pp_string (buffer, ", "); dump_gimple_call_args (buffer, gs, flags); } pp_character (buffer, '>'); } else { if (lhs && !(flags & TDF_RHS_ONLY)) { dump_generic_node (buffer, lhs, spc, flags, false); pp_string (buffer, " ="); if (gimple_has_volatile_ops (gs)) pp_string (buffer, "{v}"); pp_space (buffer); } print_call_name (buffer, gimple_call_fn (gs), flags); pp_string (buffer, " ("); dump_gimple_call_args (buffer, gs, flags); pp_character (buffer, ')'); if (!(flags & TDF_RHS_ONLY)) pp_semicolon (buffer); } if (gimple_call_chain (gs)) { pp_string (buffer, " [static-chain: "); dump_generic_node (buffer, gimple_call_chain (gs), spc, flags, false); pp_character (buffer, ']'); } if (gimple_call_return_slot_opt_p (gs)) pp_string (buffer, " [return slot optimization]"); if (gimple_call_tail_p (gs)) pp_string (buffer, " [tail call]"); } /* Dump the switch statement GS. BUFFER, SPC and FLAGS are as in dump_gimple_stmt. 
*/ static void dump_gimple_switch (pretty_printer *buffer, gimple gs, int spc, int flags) { unsigned int i; GIMPLE_CHECK (gs, GIMPLE_SWITCH); if (flags & TDF_RAW) dump_gimple_fmt (buffer, spc, flags, "%G <%T, ", gs, gimple_switch_index (gs)); else { pp_string (buffer, "switch ("); dump_generic_node (buffer, gimple_switch_index (gs), spc, flags, true); pp_string (buffer, ") <"); } for (i = 0; i < gimple_switch_num_labels (gs); i++) { tree case_label = gimple_switch_label (gs, i); if (case_label == NULL_TREE) continue; dump_generic_node (buffer, case_label, spc, flags, false); pp_character (buffer, ' '); dump_generic_node (buffer, CASE_LABEL (case_label), spc, flags, false); if (i < gimple_switch_num_labels (gs) - 1) pp_string (buffer, ", "); } pp_character (buffer, '>'); } /* Dump the gimple conditional GS. BUFFER, SPC and FLAGS are as in dump_gimple_stmt. */ static void dump_gimple_cond (pretty_printer *buffer, gimple gs, int spc, int flags) { if (flags & TDF_RAW) dump_gimple_fmt (buffer, spc, flags, "%G <%s, %T, %T, %T, %T>", gs, tree_code_name [gimple_cond_code (gs)], gimple_cond_lhs (gs), gimple_cond_rhs (gs), gimple_cond_true_label (gs), gimple_cond_false_label (gs)); else { if (!(flags & TDF_RHS_ONLY)) pp_string (buffer, "if ("); dump_generic_node (buffer, gimple_cond_lhs (gs), spc, flags, false); pp_space (buffer); pp_string (buffer, op_symbol_code (gimple_cond_code (gs))); pp_space (buffer); dump_generic_node (buffer, gimple_cond_rhs (gs), spc, flags, false); if (!(flags & TDF_RHS_ONLY)) { pp_character (buffer, ')'); if (gimple_cond_true_label (gs)) { pp_string (buffer, " goto "); dump_generic_node (buffer, gimple_cond_true_label (gs), spc, flags, false); pp_semicolon (buffer); } if (gimple_cond_false_label (gs)) { pp_string (buffer, " else goto "); dump_generic_node (buffer, gimple_cond_false_label (gs), spc, flags, false); pp_semicolon (buffer); } } } } /* Dump a GIMPLE_LABEL tuple on the pretty_printer BUFFER, SPC spaces of indent. 
FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). */ static void dump_gimple_label (pretty_printer *buffer, gimple gs, int spc, int flags) { tree label = gimple_label_label (gs); if (flags & TDF_RAW) dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs, label); else { dump_generic_node (buffer, label, spc, flags, false); pp_character (buffer, ':'); } if (DECL_NONLOCAL (label)) pp_string (buffer, " [non-local]"); if ((flags & TDF_EH) && EH_LANDING_PAD_NR (label)) pp_printf (buffer, " [LP %d]", EH_LANDING_PAD_NR (label)); } /* Dump a GIMPLE_GOTO tuple on the pretty_printer BUFFER, SPC spaces of indent. FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). */ static void dump_gimple_goto (pretty_printer *buffer, gimple gs, int spc, int flags) { tree label = gimple_goto_dest (gs); if (flags & TDF_RAW) dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs, label); else dump_gimple_fmt (buffer, spc, flags, "goto %T;", label); } /* Dump a GIMPLE_BIND tuple on the pretty_printer BUFFER, SPC spaces of indent. FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). */ static void dump_gimple_bind (pretty_printer *buffer, gimple gs, int spc, int flags) { if (flags & TDF_RAW) dump_gimple_fmt (buffer, spc, flags, "%G <", gs); else pp_character (buffer, '{'); if (!(flags & TDF_SLIM)) { tree var; for (var = gimple_bind_vars (gs); var; var = DECL_CHAIN (var)) { newline_and_indent (buffer, 2); print_declaration (buffer, var, spc, flags); } if (gimple_bind_vars (gs)) pp_newline (buffer); } pp_newline (buffer); dump_gimple_seq (buffer, gimple_bind_body (gs), spc + 2, flags); newline_and_indent (buffer, spc); if (flags & TDF_RAW) pp_character (buffer, '>'); else pp_character (buffer, '}'); } /* Dump a GIMPLE_TRY tuple on the pretty_printer BUFFER, SPC spaces of indent. FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). 
*/ static void dump_gimple_try (pretty_printer *buffer, gimple gs, int spc, int flags) { if (flags & TDF_RAW) { const char *type; if (gimple_try_kind (gs) == GIMPLE_TRY_CATCH) type = "GIMPLE_TRY_CATCH"; else if (gimple_try_kind (gs) == GIMPLE_TRY_FINALLY) type = "GIMPLE_TRY_FINALLY"; else type = "UNKNOWN GIMPLE_TRY"; dump_gimple_fmt (buffer, spc, flags, "%G <%s,%+EVAL <%S>%nCLEANUP <%S>%->", gs, type, gimple_try_eval (gs), gimple_try_cleanup (gs)); } else { pp_string (buffer, "try"); newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); pp_newline (buffer); dump_gimple_seq (buffer, gimple_try_eval (gs), spc + 4, flags); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); if (gimple_try_kind (gs) == GIMPLE_TRY_CATCH) { newline_and_indent (buffer, spc); pp_string (buffer, "catch"); newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); } else if (gimple_try_kind (gs) == GIMPLE_TRY_FINALLY) { newline_and_indent (buffer, spc); pp_string (buffer, "finally"); newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); } else pp_string (buffer, " <UNKNOWN GIMPLE_TRY> {"); pp_newline (buffer); dump_gimple_seq (buffer, gimple_try_cleanup (gs), spc + 4, flags); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } } /* Dump a GIMPLE_CATCH tuple on the pretty_printer BUFFER, SPC spaces of indent. FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). */ static void dump_gimple_catch (pretty_printer *buffer, gimple gs, int spc, int flags) { if (flags & TDF_RAW) dump_gimple_fmt (buffer, spc, flags, "%G <%T, %+CATCH <%S>%->", gs, gimple_catch_types (gs), gimple_catch_handler (gs)); else dump_gimple_fmt (buffer, spc, flags, "catch (%T)%+{%S}", gimple_catch_types (gs), gimple_catch_handler (gs)); } /* Dump a GIMPLE_EH_FILTER tuple on the pretty_printer BUFFER, SPC spaces of indent. FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). 
*/ static void dump_gimple_eh_filter (pretty_printer *buffer, gimple gs, int spc, int flags) { if (flags & TDF_RAW) dump_gimple_fmt (buffer, spc, flags, "%G <%T, %+FAILURE <%S>%->", gs, gimple_eh_filter_types (gs), gimple_eh_filter_failure (gs)); else dump_gimple_fmt (buffer, spc, flags, "<<<eh_filter (%T)>>>%+{%+%S%-}", gimple_eh_filter_types (gs), gimple_eh_filter_failure (gs)); } /* Dump a GIMPLE_EH_MUST_NOT_THROW tuple. */ static void dump_gimple_eh_must_not_throw (pretty_printer *buffer, gimple gs, int spc, int flags) { if (flags & TDF_RAW) dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs, gimple_eh_must_not_throw_fndecl (gs)); else dump_gimple_fmt (buffer, spc, flags, "<<<eh_must_not_throw (%T)>>>", gimple_eh_must_not_throw_fndecl (gs)); } /* Dump a GIMPLE_RESX tuple on the pretty_printer BUFFER, SPC spaces of indent. FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). */ static void dump_gimple_resx (pretty_printer *buffer, gimple gs, int spc, int flags) { if (flags & TDF_RAW) dump_gimple_fmt (buffer, spc, flags, "%G <%d>", gs, gimple_resx_region (gs)); else dump_gimple_fmt (buffer, spc, flags, "resx %d", gimple_resx_region (gs)); } /* Dump a GIMPLE_EH_DISPATCH tuple on the pretty_printer BUFFER. */ static void dump_gimple_eh_dispatch (pretty_printer *buffer, gimple gs, int spc, int flags) { if (flags & TDF_RAW) dump_gimple_fmt (buffer, spc, flags, "%G <%d>", gs, gimple_eh_dispatch_region (gs)); else dump_gimple_fmt (buffer, spc, flags, "eh_dispatch %d", gimple_eh_dispatch_region (gs)); } /* Dump a GIMPLE_DEBUG tuple on the pretty_printer BUFFER, SPC spaces of indent. FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). 
*/

static void
dump_gimple_debug (pretty_printer *buffer, gimple gs, int spc, int flags)
{
  /* Only the BIND subcode exists at this point; anything else is a
     corrupted statement.  */
  switch (gs->gsbase.subcode)
    {
    case GIMPLE_DEBUG_BIND:
      if (flags & TDF_RAW)
        dump_gimple_fmt (buffer, spc, flags, "%G BIND <%T, %T>", gs,
                         gimple_debug_bind_get_var (gs),
                         gimple_debug_bind_get_value (gs));
      else
        dump_gimple_fmt (buffer, spc, flags, "# DEBUG %T => %T",
                         gimple_debug_bind_get_var (gs),
                         gimple_debug_bind_get_value (gs));
      break;

    default:
      gcc_unreachable ();
    }
}


/* Dump a GIMPLE_OMP_FOR tuple on the pretty_printer BUFFER.  One
   "for (...)" header is printed per collapsed loop level.  */

static void
dump_gimple_omp_for (pretty_printer *buffer, gimple gs, int spc, int flags)
{
  size_t i;

  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
                       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_for_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >,");
      for (i = 0; i < gimple_omp_for_collapse (gs); i++)
        dump_gimple_fmt (buffer, spc, flags,
                         "%+%T, %T, %T, %s, %T,%n",
                         gimple_omp_for_index (gs, i),
                         gimple_omp_for_initial (gs, i),
                         gimple_omp_for_final (gs, i),
                         tree_code_name[gimple_omp_for_cond (gs, i)],
                         gimple_omp_for_incr (gs, i));
      dump_gimple_fmt (buffer, spc, flags, "PRE_BODY <%S>%->",
                       gimple_omp_for_pre_body (gs));
    }
  else
    {
      pp_string (buffer, "#pragma omp for");
      dump_omp_clauses (buffer, gimple_omp_for_clauses (gs), spc, flags);
      for (i = 0; i < gimple_omp_for_collapse (gs); i++)
        {
          /* Indent each inner collapsed level two further columns.  */
          if (i)
            spc += 2;
          newline_and_indent (buffer, spc);
          pp_string (buffer, "for (");
          dump_generic_node (buffer, gimple_omp_for_index (gs, i), spc,
                             flags, false);
          pp_string (buffer, " = ");
          dump_generic_node (buffer, gimple_omp_for_initial (gs, i), spc,
                             flags, false);
          pp_string (buffer, "; ");

          dump_generic_node (buffer, gimple_omp_for_index (gs, i), spc,
                             flags, false);
          pp_space (buffer);
          switch (gimple_omp_for_cond (gs, i))
            {
            case LT_EXPR:
              pp_character (buffer, '<');
              break;
            case GT_EXPR:
              pp_character (buffer, '>');
              break;
            case LE_EXPR:
              pp_string (buffer, "<=");
              break;
            case GE_EXPR:
              pp_string (buffer, ">=");
              break;
            default:
              gcc_unreachable ();
            }
          pp_space (buffer);
          dump_generic_node (buffer, gimple_omp_for_final (gs, i), spc,
                             flags, false);
          pp_string (buffer, "; ");

          dump_generic_node (buffer, gimple_omp_for_index (gs, i), spc,
                             flags, false);
          pp_string (buffer, " = ");
          dump_generic_node (buffer, gimple_omp_for_incr (gs, i), spc,
                             flags, false);
          pp_character (buffer, ')');
        }

      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
        {
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '{');
          pp_newline (buffer);
          dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '}');
        }
    }
}


/* Dump a GIMPLE_OMP_CONTINUE tuple on the pretty_printer BUFFER.  */

static void
dump_gimple_omp_continue (pretty_printer *buffer, gimple gs, int spc,
                          int flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T>", gs,
                       gimple_omp_continue_control_def (gs),
                       gimple_omp_continue_control_use (gs));
    }
  else
    {
      pp_string (buffer, "#pragma omp continue (");
      dump_generic_node (buffer, gimple_omp_continue_control_def (gs),
                         spc, flags, false);
      pp_character (buffer, ',');
      pp_space (buffer);
      dump_generic_node (buffer, gimple_omp_continue_control_use (gs),
                         spc, flags, false);
      pp_character (buffer, ')');
    }
}


/* Dump a GIMPLE_OMP_SINGLE tuple on the pretty_printer BUFFER.
*/

static void
dump_gimple_omp_single (pretty_printer *buffer, gimple gs, int spc, int flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
                       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_single_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >");
    }
  else
    {
      pp_string (buffer, "#pragma omp single");
      dump_omp_clauses (buffer, gimple_omp_single_clauses (gs), spc, flags);
      /* Braces are only emitted for a non-empty body.  */
      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
        {
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '{');
          pp_newline (buffer);
          dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '}');
        }
    }
}


/* Dump a GIMPLE_OMP_SECTIONS tuple on the pretty_printer BUFFER.  */

static void
dump_gimple_omp_sections (pretty_printer *buffer, gimple gs, int spc,
                          int flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
                       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_sections_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >");
    }
  else
    {
      pp_string (buffer, "#pragma omp sections");
      /* The control variable is shown in angle brackets when present.  */
      if (gimple_omp_sections_control (gs))
        {
          pp_string (buffer, " <");
          dump_generic_node (buffer, gimple_omp_sections_control (gs), spc,
                             flags, false);
          pp_character (buffer, '>');
        }
      dump_omp_clauses (buffer, gimple_omp_sections_clauses (gs), spc, flags);
      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
        {
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '{');
          pp_newline (buffer);
          dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '}');
        }
    }
}


/* Dump a GIMPLE_OMP_{MASTER,ORDERED,SECTION} tuple on the pretty_printer
   BUFFER.
*/

static void
dump_gimple_omp_block (pretty_printer *buffer, gimple gs, int spc, int flags)
{
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S> >", gs,
                     gimple_omp_body (gs));
  else
    {
      /* Select the pragma keyword from the statement code; this helper
         is shared by three OMP statement kinds.  */
      switch (gimple_code (gs))
        {
        case GIMPLE_OMP_MASTER:
          pp_string (buffer, "#pragma omp master");
          break;
        case GIMPLE_OMP_ORDERED:
          pp_string (buffer, "#pragma omp ordered");
          break;
        case GIMPLE_OMP_SECTION:
          pp_string (buffer, "#pragma omp section");
          break;
        default:
          gcc_unreachable ();
        }
      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
        {
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '{');
          pp_newline (buffer);
          dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '}');
        }
    }
}


/* Dump a GIMPLE_OMP_CRITICAL tuple on the pretty_printer BUFFER.  */

static void
dump_gimple_omp_critical (pretty_printer *buffer, gimple gs, int spc,
                          int flags)
{
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S> >", gs,
                     gimple_omp_body (gs));
  else
    {
      pp_string (buffer, "#pragma omp critical");
      /* An unnamed critical section prints no parenthesized name.  */
      if (gimple_omp_critical_name (gs))
        {
          pp_string (buffer, " (");
          dump_generic_node (buffer, gimple_omp_critical_name (gs), spc,
                             flags, false);
          pp_character (buffer, ')');
        }
      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
        {
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '{');
          pp_newline (buffer);
          dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '}');
        }
    }
}


/* Dump a GIMPLE_OMP_RETURN tuple on the pretty_printer BUFFER.
*/

static void
dump_gimple_omp_return (pretty_printer *buffer, gimple gs, int spc, int flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <nowait=%d>", gs,
                       (int) gimple_omp_return_nowait_p (gs));
    }
  else
    {
      pp_string (buffer, "#pragma omp return");
      if (gimple_omp_return_nowait_p (gs))
        pp_string (buffer, "(nowait)");
    }
}


/* Dump a GIMPLE_ASM tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   tree-pass.h).  Operand lists are printed in the usual extended-asm
   order: outputs, inputs, clobbers, goto labels.  */

static void
dump_gimple_asm (pretty_printer *buffer, gimple gs, int spc, int flags)
{
  unsigned int i, n, f, fields;

  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+STRING <%n%s%n>", gs,
                       gimple_asm_string (gs));

      n = gimple_asm_noutputs (gs);
      if (n)
        {
          newline_and_indent (buffer, spc + 2);
          pp_string (buffer, "OUTPUT: ");
          for (i = 0; i < n; i++)
            {
              dump_generic_node (buffer, gimple_asm_output_op (gs, i),
                                 spc, flags, false);
              if (i < n - 1)
                pp_string (buffer, ", ");
            }
        }

      n = gimple_asm_ninputs (gs);
      if (n)
        {
          newline_and_indent (buffer, spc + 2);
          pp_string (buffer, "INPUT: ");
          for (i = 0; i < n; i++)
            {
              dump_generic_node (buffer, gimple_asm_input_op (gs, i),
                                 spc, flags, false);
              if (i < n - 1)
                pp_string (buffer, ", ");
            }
        }

      n = gimple_asm_nclobbers (gs);
      if (n)
        {
          newline_and_indent (buffer, spc + 2);
          pp_string (buffer, "CLOBBER: ");
          for (i = 0; i < n; i++)
            {
              dump_generic_node (buffer, gimple_asm_clobber_op (gs, i),
                                 spc, flags, false);
              if (i < n - 1)
                pp_string (buffer, ", ");
            }
        }

      n = gimple_asm_nlabels (gs);
      if (n)
        {
          newline_and_indent (buffer, spc + 2);
          pp_string (buffer, "LABEL: ");
          for (i = 0; i < n; i++)
            {
              dump_generic_node (buffer, gimple_asm_label_op (gs, i),
                                 spc, flags, false);
              if (i < n - 1)
                pp_string (buffer, ", ");
            }
        }

      newline_and_indent (buffer, spc);
      pp_character (buffer, '>');
    }
  else
    {
      pp_string (buffer, "__asm__");
      if (gimple_asm_volatile_p (gs))
        pp_string (buffer, " __volatile__");
      if (gimple_asm_nlabels (gs))
        pp_string (buffer, " goto");
      pp_string (buffer, "(\"");
      pp_string (buffer, gimple_asm_string (gs));
      pp_string (buffer, "\"");

      /* FIELDS is the index one past the last non-empty operand list,
         so that trailing empty " : " groups are not printed but
         intermediate empty ones are.  */
      if (gimple_asm_nlabels (gs))
        fields = 4;
      else if (gimple_asm_nclobbers (gs))
        fields = 3;
      else if (gimple_asm_ninputs (gs))
        fields = 2;
      else if (gimple_asm_noutputs (gs))
        fields = 1;
      else
        fields = 0;

      for (f = 0; f < fields; ++f)
        {
          pp_string (buffer, " : ");

          switch (f)
            {
            case 0:
              n = gimple_asm_noutputs (gs);
              for (i = 0; i < n; i++)
                {
                  dump_generic_node (buffer, gimple_asm_output_op (gs, i),
                                     spc, flags, false);
                  if (i < n - 1)
                    pp_string (buffer, ", ");
                }
              break;

            case 1:
              n = gimple_asm_ninputs (gs);
              for (i = 0; i < n; i++)
                {
                  dump_generic_node (buffer, gimple_asm_input_op (gs, i),
                                     spc, flags, false);
                  if (i < n - 1)
                    pp_string (buffer, ", ");
                }
              break;

            case 2:
              n = gimple_asm_nclobbers (gs);
              for (i = 0; i < n; i++)
                {
                  dump_generic_node (buffer, gimple_asm_clobber_op (gs, i),
                                     spc, flags, false);
                  if (i < n - 1)
                    pp_string (buffer, ", ");
                }
              break;

            case 3:
              n = gimple_asm_nlabels (gs);
              for (i = 0; i < n; i++)
                {
                  dump_generic_node (buffer, gimple_asm_label_op (gs, i),
                                     spc, flags, false);
                  if (i < n - 1)
                    pp_string (buffer, ", ");
                }
              break;

            default:
              gcc_unreachable ();
            }
        }

      pp_string (buffer, ");");
    }
}


/* Dump a PHI node PHI.  BUFFER, SPC and FLAGS are as in dump_gimple_stmt.
*/

static void
dump_gimple_phi (pretty_printer *buffer, gimple phi, int spc, int flags)
{
  size_t i;
  tree lhs = gimple_phi_result (phi);

  /* With TDF_ALIAS, pointer results are prefixed with their
     points-to solution and, if non-default, their alignment info.  */
  if (flags & TDF_ALIAS
      && POINTER_TYPE_P (TREE_TYPE (lhs))
      && SSA_NAME_PTR_INFO (lhs))
    {
      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
      pp_string (buffer, "PT = ");
      pp_points_to_solution (buffer, &pi->pt);
      newline_and_indent (buffer, spc);
      if (pi->align != 1)
        pp_printf (buffer, "# ALIGN = %u, MISALIGN = %u",
                   pi->align, pi->misalign);
      /* NOTE(review): when pi->align == 1 this produces two consecutive
         newline_and_indent calls, i.e. a blank line before the "# "
         prefix — possibly an intended separator, worth confirming
         against the GIMPLE_ASSIGN path in dump_gimple_stmt, which
         braces the second newline into the conditional.  */
      newline_and_indent (buffer, spc);
      pp_string (buffer, "# ");
    }

  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%T, ", phi,
                     gimple_phi_result (phi));
  else
    {
      dump_generic_node (buffer, lhs, spc, flags, false);
      pp_string (buffer, " = PHI <");
    }
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      if ((flags & TDF_LINENO) && gimple_phi_arg_has_location (phi, i))
        {
          expanded_location xloc;

          xloc = expand_location (gimple_phi_arg_location (phi, i));
          pp_character (buffer, '[');
          if (xloc.file)
            {
              pp_string (buffer, xloc.file);
              pp_string (buffer, " : ");
            }
          pp_decimal_int (buffer, xloc.line);
          pp_string (buffer, ":");
          pp_decimal_int (buffer, xloc.column);
          pp_string (buffer, "] ");
        }
      dump_generic_node (buffer, gimple_phi_arg_def (phi, i), spc, flags,
                         false);
      /* Each argument is tagged with the index of its incoming edge's
         source block.  */
      pp_character (buffer, '(');
      pp_decimal_int (buffer, gimple_phi_arg_edge (phi, i)->src->index);
      pp_character (buffer, ')');
      if (i < gimple_phi_num_args (phi) - 1)
        pp_string (buffer, ", ");
    }
  pp_character (buffer, '>');
}


/* Dump a GIMPLE_OMP_PARALLEL tuple on the pretty_printer BUFFER, SPC spaces
   of indent.  FLAGS specifies details to show in the dump (see TDF_* in
   tree-pass.h).
*/

static void
dump_gimple_omp_parallel (pretty_printer *buffer, gimple gs, int spc,
                          int flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
                       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_parallel_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >, %T, %T%n>",
                       gimple_omp_parallel_child_fn (gs),
                       gimple_omp_parallel_data_arg (gs));
    }
  else
    {
      gimple_seq body;
      pp_string (buffer, "#pragma omp parallel");
      dump_omp_clauses (buffer, gimple_omp_parallel_clauses (gs), spc, flags);
      /* After omp expansion the child function and its data argument
         are shown; "???" stands for a missing data argument.  */
      if (gimple_omp_parallel_child_fn (gs))
        {
          pp_string (buffer, " [child fn: ");
          dump_generic_node (buffer, gimple_omp_parallel_child_fn (gs),
                             spc, flags, false);
          pp_string (buffer, " (");
          if (gimple_omp_parallel_data_arg (gs))
            dump_generic_node (buffer, gimple_omp_parallel_data_arg (gs),
                               spc, flags, false);
          else
            pp_string (buffer, "???");
          pp_string (buffer, ")]");
        }
      body = gimple_omp_body (gs);
      /* A body that is not a GIMPLE_BIND gets explicit braces; a bind
         prints its own.  */
      if (body && gimple_code (gimple_seq_first_stmt (body)) != GIMPLE_BIND)
        {
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '{');
          pp_newline (buffer);
          dump_gimple_seq (buffer, body, spc + 4, flags);
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '}');
        }
      else if (body)
        {
          pp_newline (buffer);
          dump_gimple_seq (buffer, body, spc + 2, flags);
        }
    }
}


/* Dump a GIMPLE_OMP_TASK tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   tree-pass.h).
*/

static void
dump_gimple_omp_task (pretty_printer *buffer, gimple gs, int spc, int flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
                       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_task_clauses (gs), spc, flags);
      /* The trailing operands of a GIMPLE_OMP_TASK are the child
         function, data argument, copy function, and the size and
         alignment of the task's argument block.  The original code
         passed gimple_omp_task_arg_size twice by copy-paste mistake,
         so ARG_ALIGN never appeared in raw dumps.  */
      dump_gimple_fmt (buffer, spc, flags, " >, %T, %T, %T, %T, %T%n>",
                       gimple_omp_task_child_fn (gs),
                       gimple_omp_task_data_arg (gs),
                       gimple_omp_task_copy_fn (gs),
                       gimple_omp_task_arg_size (gs),
                       gimple_omp_task_arg_align (gs));
    }
  else
    {
      gimple_seq body;
      pp_string (buffer, "#pragma omp task");
      dump_omp_clauses (buffer, gimple_omp_task_clauses (gs), spc, flags);
      /* After omp expansion the child function and its data argument
         are shown; "???" stands for a missing data argument.  */
      if (gimple_omp_task_child_fn (gs))
        {
          pp_string (buffer, " [child fn: ");
          dump_generic_node (buffer, gimple_omp_task_child_fn (gs),
                             spc, flags, false);
          pp_string (buffer, " (");
          if (gimple_omp_task_data_arg (gs))
            dump_generic_node (buffer, gimple_omp_task_data_arg (gs),
                               spc, flags, false);
          else
            pp_string (buffer, "???");
          pp_string (buffer, ")]");
        }
      body = gimple_omp_body (gs);
      /* A body that is not a GIMPLE_BIND gets explicit braces; a bind
         prints its own.  */
      if (body && gimple_code (gimple_seq_first_stmt (body)) != GIMPLE_BIND)
        {
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '{');
          pp_newline (buffer);
          dump_gimple_seq (buffer, body, spc + 4, flags);
          newline_and_indent (buffer, spc + 2);
          pp_character (buffer, '}');
        }
      else if (body)
        {
          pp_newline (buffer);
          dump_gimple_seq (buffer, body, spc + 2, flags);
        }
    }
}


/* Dump a GIMPLE_OMP_ATOMIC_LOAD tuple on the pretty_printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see TDF_*
   in tree-pass.h).
*/

static void
dump_gimple_omp_atomic_load (pretty_printer *buffer, gimple gs, int spc,
                             int flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T>", gs,
                       gimple_omp_atomic_load_lhs (gs),
                       gimple_omp_atomic_load_rhs (gs));
    }
  else
    {
      pp_string (buffer, "#pragma omp atomic_load");
      newline_and_indent (buffer, spc + 2);
      dump_generic_node (buffer, gimple_omp_atomic_load_lhs (gs),
                         spc, flags, false);
      pp_space (buffer);
      pp_character (buffer, '=');
      pp_space (buffer);
      /* The RHS is an address; show the load as "lhs = *rhs".  */
      pp_character (buffer, '*');
      dump_generic_node (buffer, gimple_omp_atomic_load_rhs (gs),
                         spc, flags, false);
    }
}


/* Dump a GIMPLE_OMP_ATOMIC_STORE tuple on the pretty_printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see TDF_*
   in tree-pass.h).  */

static void
dump_gimple_omp_atomic_store (pretty_printer *buffer, gimple gs, int spc,
                              int flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs,
                       gimple_omp_atomic_store_val (gs));
    }
  else
    {
      pp_string (buffer, "#pragma omp atomic_store (");
      dump_generic_node (buffer, gimple_omp_atomic_store_val (gs),
                         spc, flags, false);
      pp_character (buffer, ')');
    }
}


/* Dump all the memory operands for statement GS.  BUFFER, SPC and FLAGS
   are as in dump_gimple_stmt.
*/

static void
dump_gimple_mem_ops (pretty_printer *buffer, gimple gs, int spc, int flags)
{
  tree vdef = gimple_vdef (gs);
  tree vuse = gimple_vuse (gs);

  /* Nothing to print unless SSA operands are up to date and the
     statement actually touches memory.  */
  if (!ssa_operands_active () || !gimple_references_memory_p (gs))
    return;

  if (vdef != NULL_TREE)
    {
      pp_string (buffer, "# ");
      dump_generic_node (buffer, vdef, spc + 2, flags, false);
      pp_string (buffer, " = VDEF <");
      dump_generic_node (buffer, vuse, spc + 2, flags, false);
      pp_character (buffer, '>');
      newline_and_indent (buffer, spc);
    }
  else if (vuse != NULL_TREE)
    {
      pp_string (buffer, "# VUSE <");
      dump_generic_node (buffer, vuse, spc + 2, flags, false);
      pp_character (buffer, '>');
      newline_and_indent (buffer, spc);
    }
}


/* Dump the gimple statement GS on the pretty printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see
   TDF_* in tree-pass.h).  This is the central dispatcher: optional
   prefixes (address, location, EH region, virtual operands, alias
   info) are printed first, then the statement is routed to its
   per-code dumper.  */

void
dump_gimple_stmt (pretty_printer *buffer, gimple gs, int spc, int flags)
{
  if (!gs)
    return;

  if (flags & TDF_STMTADDR)
    pp_printf (buffer, "<&%p> ", (void *) gs);

  if ((flags & TDF_LINENO) && gimple_has_location (gs))
    {
      expanded_location xloc = expand_location (gimple_location (gs));
      pp_character (buffer, '[');
      if (xloc.file)
        {
          pp_string (buffer, xloc.file);
          pp_string (buffer, " : ");
        }
      pp_decimal_int (buffer, xloc.line);
      pp_string (buffer, ":");
      pp_decimal_int (buffer, xloc.column);
      pp_string (buffer, "] ");
    }

  if (flags & TDF_EH)
    {
      /* Positive numbers are landing pads, negative ones
         must-not-throw regions.  */
      int lp_nr = lookup_stmt_eh_lp (gs);
      if (lp_nr > 0)
        pp_printf (buffer, "[LP %d] ", lp_nr);
      else if (lp_nr < 0)
        pp_printf (buffer, "[MNT %d] ", -lp_nr);
    }

  if ((flags & (TDF_VOPS|TDF_MEMSYMS))
      && gimple_has_mem_ops (gs))
    dump_gimple_mem_ops (buffer, gs, spc, flags);

  if ((flags & TDF_ALIAS)
      && gimple_has_lhs (gs))
    {
      tree lhs = gimple_get_lhs (gs);
      if (TREE_CODE (lhs) == SSA_NAME
          && POINTER_TYPE_P (TREE_TYPE (lhs))
          && SSA_NAME_PTR_INFO (lhs))
        {
          struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
          pp_string (buffer, "# PT = ");
          pp_points_to_solution (buffer, &pi->pt);
          newline_and_indent (buffer, spc);
          if (pi->align != 1)
            {
              pp_printf (buffer, "# ALIGN = %u, MISALIGN = %u",
                         pi->align, pi->misalign);
              newline_and_indent (buffer, spc);
            }
        }
    }

  switch (gimple_code (gs))
    {
    case GIMPLE_ASM:
      dump_gimple_asm (buffer, gs, spc, flags);
      break;

    case GIMPLE_ASSIGN:
      dump_gimple_assign (buffer, gs, spc, flags);
      break;

    case GIMPLE_BIND:
      dump_gimple_bind (buffer, gs, spc, flags);
      break;

    case GIMPLE_CALL:
      dump_gimple_call (buffer, gs, spc, flags);
      break;

    case GIMPLE_COND:
      dump_gimple_cond (buffer, gs, spc, flags);
      break;

    case GIMPLE_LABEL:
      dump_gimple_label (buffer, gs, spc, flags);
      break;

    case GIMPLE_GOTO:
      dump_gimple_goto (buffer, gs, spc, flags);
      break;

    case GIMPLE_NOP:
      pp_string (buffer, "GIMPLE_NOP");
      break;

    case GIMPLE_RETURN:
      dump_gimple_return (buffer, gs, spc, flags);
      break;

    case GIMPLE_SWITCH:
      dump_gimple_switch (buffer, gs, spc, flags);
      break;

    case GIMPLE_TRY:
      dump_gimple_try (buffer, gs, spc, flags);
      break;

    case GIMPLE_PHI:
      dump_gimple_phi (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_PARALLEL:
      dump_gimple_omp_parallel (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_TASK:
      dump_gimple_omp_task (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_ATOMIC_LOAD:
      dump_gimple_omp_atomic_load (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_ATOMIC_STORE:
      dump_gimple_omp_atomic_store (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_FOR:
      dump_gimple_omp_for (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_CONTINUE:
      dump_gimple_omp_continue (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_SINGLE:
      dump_gimple_omp_single (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_RETURN:
      dump_gimple_omp_return (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_SECTIONS:
      dump_gimple_omp_sections (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_SECTIONS_SWITCH:
      pp_string (buffer, "GIMPLE_SECTIONS_SWITCH");
      break;

    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_SECTION:
      dump_gimple_omp_block (buffer, gs, spc, flags);
      break;

    case GIMPLE_OMP_CRITICAL:
      dump_gimple_omp_critical (buffer, gs, spc, flags);
      break;

    case GIMPLE_CATCH:
      dump_gimple_catch (buffer, gs, spc, flags);
      break;

    case GIMPLE_EH_FILTER:
      dump_gimple_eh_filter (buffer, gs, spc, flags);
      break;

    case GIMPLE_EH_MUST_NOT_THROW:
      dump_gimple_eh_must_not_throw (buffer, gs, spc, flags);
      break;

    case GIMPLE_RESX:
      dump_gimple_resx (buffer, gs, spc, flags);
      break;

    case GIMPLE_EH_DISPATCH:
      dump_gimple_eh_dispatch (buffer, gs, spc, flags);
      break;

    case GIMPLE_DEBUG:
      dump_gimple_debug (buffer, gs, spc, flags);
      break;

    case GIMPLE_PREDICT:
      pp_string (buffer, "// predicted ");
      if (gimple_predict_outcome (gs))
        pp_string (buffer, "likely by ");
      else
        pp_string (buffer, "unlikely by ");
      pp_string (buffer, predictor_name (gimple_predict_predictor (gs)));
      pp_string (buffer, " predictor.");
      break;

    default:
      GIMPLE_NIY;
    }

  /* If we're building a diagnostic, the formatted text will be
     written into BUFFER's stream by the caller; otherwise, write
     it now.  */
  if (!(flags & TDF_DIAGNOSTIC))
    pp_write_text_to_stream (buffer);
}


/* Dumps header of basic block BB to buffer BUFFER indented by INDENT
   spaces and details described by flags.
*/

static void
dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags)
{
  edge e;
  gimple stmt;
  edge_iterator ei;

  if (flags & TDF_BLOCKS)
    {
      INDENT (indent);
      pp_string (buffer, "# BLOCK ");
      pp_decimal_int (buffer, bb->index);
      if (bb->frequency)
        {
          pp_string (buffer, " freq:");
          pp_decimal_int (buffer, bb->frequency);
        }
      if (bb->count)
        {
          pp_string (buffer, " count:");
          pp_widest_integer (buffer, bb->count);
        }

      if (flags & TDF_LINENO)
        {
          gimple_stmt_iterator gsi;

          /* Report the line of the first real (non-debug) statement
             that carries a location.  */
          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            if (!is_gimple_debug (gsi_stmt (gsi))
                && get_lineno (gsi_stmt (gsi)) != UNKNOWN_LOCATION)
              {
                pp_string (buffer, ", starting at line ");
                pp_decimal_int (buffer, get_lineno (gsi_stmt (gsi)));
                break;
              }

          if (bb->discriminator)
            {
              pp_string (buffer, ", discriminator ");
              pp_decimal_int (buffer, bb->discriminator);
            }
        }
      newline_and_indent (buffer, indent);

      pp_string (buffer, "# PRED:");
      pp_write_text_to_stream (buffer);
      FOR_EACH_EDGE (e, ei, bb->preds)
        if (flags & TDF_SLIM)
          {
            pp_character (buffer, ' ');
            if (e->src == ENTRY_BLOCK_PTR)
              pp_string (buffer, "ENTRY");
            else
              pp_decimal_int (buffer, e->src->index);
          }
        else
          dump_edge_info (buffer->buffer->stream, e, 0);
      pp_newline (buffer);
    }
  else
    {
      /* Without TDF_BLOCKS, only print a "<bb N>:" label when the
         block does not already start with a user label.  */
      stmt = first_stmt (bb);
      if (!stmt || gimple_code (stmt) != GIMPLE_LABEL)
        {
          INDENT (indent - 2);
          pp_string (buffer, "<bb ");
          pp_decimal_int (buffer, bb->index);
          pp_string (buffer, ">:");
          pp_newline (buffer);
        }
    }
  pp_write_text_to_stream (buffer);
  if (cfun)
    check_bb_profile (bb, buffer->buffer->stream);
}


/* Dumps end of basic block BB to buffer BUFFER indented by INDENT
   spaces.
*/

static void
dump_bb_end (pretty_printer *buffer, basic_block bb, int indent, int flags)
{
  edge e;
  edge_iterator ei;

  INDENT (indent);
  pp_string (buffer, "# SUCC:");
  pp_write_text_to_stream (buffer);
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (flags & TDF_SLIM)
      {
        pp_character (buffer, ' ');
        if (e->dest == EXIT_BLOCK_PTR)
          pp_string (buffer, "EXIT");
        else
          pp_decimal_int (buffer, e->dest->index);
      }
    else
      dump_edge_info (buffer->buffer->stream, e, 1);
  pp_newline (buffer);
}


/* Dump PHI nodes of basic block BB to BUFFER with details described
   by FLAGS and indented by INDENT spaces.  */

static void
dump_phi_nodes (pretty_printer *buffer, basic_block bb, int indent, int flags)
{
  gimple_stmt_iterator i;

  for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
    {
      gimple phi = gsi_stmt (i);
      /* Virtual PHIs are only shown when TDF_VOPS is requested.  */
      if (is_gimple_reg (gimple_phi_result (phi)) || (flags & TDF_VOPS))
        {
          INDENT (indent);
          pp_string (buffer, "# ");
          dump_gimple_phi (buffer, phi, indent, flags);
          pp_newline (buffer);
        }
    }
}


/* Dump jump to basic block BB that is represented implicitly in the cfg
   to BUFFER.  */

static void
pp_cfg_jump (pretty_printer *buffer, basic_block bb)
{
  gimple stmt;

  stmt = first_stmt (bb);
  pp_string (buffer, "goto <bb ");
  pp_decimal_int (buffer, bb->index);
  pp_character (buffer, '>');
  /* If the target starts with a label, show it parenthesized.  */
  if (stmt && gimple_code (stmt) == GIMPLE_LABEL)
    {
      pp_string (buffer, " (");
      dump_generic_node (buffer, gimple_label_label (stmt), 0, 0, false);
      pp_character (buffer, ')');
      pp_semicolon (buffer);
    }
  else
    pp_semicolon (buffer);
}


/* Dump edges represented implicitly in basic block BB to BUFFER, indented
   by INDENT spaces, with details given by FLAGS.  */

static void
dump_implicit_edges (pretty_printer *buffer, basic_block bb, int indent,
                     int flags)
{
  edge e;
  gimple stmt;

  stmt = last_stmt (bb);
  if (stmt && gimple_code (stmt) == GIMPLE_COND)
    {
      edge true_edge, false_edge;

      /* When we are emitting the code or changing CFG, it is possible that
         the edges are not yet created.  When we are using debug_bb in such
         a situation, we do not want it to crash.  */
      if (EDGE_COUNT (bb->succs) != 2)
        return;
      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      INDENT (indent + 2);
      pp_cfg_jump (buffer, true_edge->dest);
      newline_and_indent (buffer, indent);
      pp_string (buffer, "else");
      newline_and_indent (buffer, indent + 2);
      pp_cfg_jump (buffer, false_edge->dest);
      pp_newline (buffer);
      return;
    }

  /* If there is a fallthru edge, we may need to add an artificial
     goto to the dump.  */
  e = find_fallthru_edge (bb->succs);
  if (e && e->dest != bb->next_bb)
    {
      INDENT (indent);

      if ((flags & TDF_LINENO)
          && e->goto_locus != UNKNOWN_LOCATION
          )
        {
          expanded_location goto_xloc;
          goto_xloc = expand_location (e->goto_locus);
          pp_character (buffer, '[');
          if (goto_xloc.file)
            {
              pp_string (buffer, goto_xloc.file);
              pp_string (buffer, " : ");
            }
          pp_decimal_int (buffer, goto_xloc.line);
          pp_string (buffer, " : ");
          pp_decimal_int (buffer, goto_xloc.column);
          pp_string (buffer, "] ");
        }

      pp_cfg_jump (buffer, e->dest);
      pp_newline (buffer);
    }
}


/* Dumps basic block BB to buffer BUFFER with details described by FLAGS and
   indented by INDENT spaces.  */

static void
gimple_dump_bb_buff (pretty_printer *buffer, basic_block bb, int indent,
                     int flags)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  int label_indent = indent - 2;

  if (label_indent < 0)
    label_indent = 0;

  dump_bb_header (buffer, bb, indent, flags);
  dump_phi_nodes (buffer, bb, indent, flags);

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      int curr_indent;

      stmt = gsi_stmt (gsi);

      /* Labels are dedented two columns relative to the statements.  */
      curr_indent = gimple_code (stmt) == GIMPLE_LABEL ? label_indent : indent;

      INDENT (curr_indent);
      dump_gimple_stmt (buffer, stmt, curr_indent, flags);
      pp_newline (buffer);
      dump_histograms_for_stmt (cfun, buffer->buffer->stream, stmt);
    }

  dump_implicit_edges (buffer, bb, indent, flags);

  if (flags & TDF_BLOCKS)
    dump_bb_end (buffer, bb, indent, flags);
}


/* Dumps basic block BB to FILE with details described by FLAGS and
   indented by INDENT spaces.  */

void
gimple_dump_bb (basic_block bb, FILE *file, int indent, int flags)
{
  /* NOTE(review): "buffer" here is presumably a file-static
     pretty_printer set up by maybe_init_pretty_print — its definition
     is outside this excerpt; confirm before refactoring.  */
  maybe_init_pretty_print (file);
  gimple_dump_bb_buff (&buffer, bb, indent, flags);
  pp_flush (&buffer);
}

EXTERN_C_END
MinimumImageFilter.h
/* * MIT License * * Copyright (c) 2018-2019 Benjamin Köhler * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once #ifndef BK_MINIMUMIMAGEFILTER_H #define BK_MINIMUMIMAGEFILTER_H #include <algorithm> #include <cassert> #include <initializer_list> #include <numeric> #include <type_traits> #include <vector> #include <bkDataset/lib/bkDataset_export.h> namespace bk { class BKDATASET_EXPORT MinimumImageFilter { //==================================================================================================== //===== DEFINITIONS //==================================================================================================== using self_type = MinimumImageFilter; //==================================================================================================== //===== MEMBERS //==================================================================================================== std::vector<unsigned int> _kernel_size; //==================================================================================================== //===== CONSTRUCTORS & DESTRUCTOR //==================================================================================================== public: /// @{ -------------------------------------------------- CTOR MinimumImageFilter(); MinimumImageFilter(const self_type& other); MinimumImageFilter(self_type&& other) noexcept; MinimumImageFilter(unsigned int nDims, unsigned int size); /// @} /// @{ -------------------------------------------------- DTOR ~MinimumImageFilter(); /// @} //==================================================================================================== //===== GETTER //==================================================================================================== /// @{ -------------------------------------------------- GET KERNEL SIZE [[nodiscard]] const std::vector<unsigned int>& kernel_size() const; /// @} //==================================================================================================== //===== SETTER 
//==================================================================================================== /// @{ -------------------------------------------------- OPERATOR = [[maybe_unused]] auto operator=(const self_type& other) -> self_type&; [[maybe_unused]] auto operator=(self_type&& other) noexcept -> self_type&; /// @} /// @{ -------------------------------------------------- SET KERNEL SIZE template<typename T> void set_kernel_size(std::initializer_list<T> ilist) { _kernel_size.assign(ilist); } template<typename Iter> void set_kernel_size(Iter first, Iter last) { _kernel_size.assign(first, last); } void set_kernel_size(unsigned int nDims, unsigned int size); /// @} //==================================================================================================== //===== FUNCTIONS //==================================================================================================== /// @{ -------------------------------------------------- APPLY template<typename TImage> [[nodiscard]] TImage apply(const TImage& img) const { assert(!_kernel_size.empty() && "call set_kernel_size() first"); using value_type = typename TImage::value_type; TImage res; res.set_size(img.size()); #pragma omp parallel for for (unsigned int i = 0; i < img.num_values(); ++i) { std::vector<value_type> values = img.values_of_neighborhood(i, _kernel_size); if (!values.empty()) { std::sort(values.begin(), values.end()); res[i] = values.front(); } else { res[i] = img[i]; } } return res; } /// @} }; // class MinimumImageFilter } // namespace bk #endif //BK_MINIMUMIMAGEFILTER_H
GB_binop__pair_int64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pair_int64)
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pair_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__pair_int64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pair_int64)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   int64_t
// A type:   int64_t
// B,b type: int64_t

// BinaryOp: cij = 1

// NOTE: this instantiation is for the PAIR operator, which ignores both of
// its inputs and always yields 1.  That is why GB_GETA/GB_GETB expand to a
// bare ';' and why most kernel variants are generated as "(none)" stubs
// inside "#if 0" (they would be pointless for PAIR).

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA] -- intentionally empty: PAIR never reads A
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// bij = Bx [pB] -- intentionally empty: PAIR never reads B
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: PAIR ignores x and y and always produces 1
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time kernel-selection controls from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_INT64 || GxB_NO_PAIR_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pair_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual loop body is generated by this template using the
    // GB_* macros defined above
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the template block above already returned); kept as
    // generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B by entries/vectors; freed by
    // GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        // PAIR: inputs ignored, the empty GETB/cast expand to ';'
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        // PAIR: inputs ignored, the empty GETA/cast expand to ';'
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    ;                               \
    ;                               \
    Cx [pC] = 1 ;                   \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    ;                               \
    ;                               \
    Cx [pC] = 1 ;                   \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
GB_unaryop__ainv_int16_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int16_uint64
// op(A') function:  GB_tran__ainv_int16_uint64

// C type:   int16_t
// A type:   uint64_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = -aij

// NOTE: AINV is additive inverse; each uint64_t entry is first narrowed to
// int16_t (GB_CASTING) and then negated (GB_OP).

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_int16_uint64
(
    int16_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // elementwise, embarrassingly parallel over the anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_int16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop is generated from this template via the GB_* macros
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
unpk_complex.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "grb2.h"
#include "wgrib2.h"
#include "fnlist.h"

// 2009 public domain wesley ebisuzaki
//
// note: assumption that the grib file will use 25 bits or less for storing data
//       (limit of bitstream unpacking routines)
// note: assumption that all data can be stored as integers and have a value < INT_MAX
//
// Decodes a GRIB2 field packed with complex packing (DRS template 5.2) or
// complex packing with spatial differencing (5.3) into the float array data[].
// INT_MAX is used internally in udata[] as the "missing value" sentinel and is
// mapped to UNDEFINED on output.

// DEBUG enables the fprintf(stderr, ...) diagnostics below
#define DEBUG

int unpk_complex(unsigned char **sec, float *data, unsigned int ndata) {

    unsigned int i, j, n;
    int k, nbits, ref_group_length;
    unsigned char *p, *d, *mask_pointer;
    double ref_val,factor_10, factor_2, factor;
    float missing1, missing2;
    int n_sub_missing;
    int pack, offset;
    unsigned clocation;
    unsigned int ngroups, ref_group_width, nbit_group_width, len_last, npnts;
    int nbits_group_len, group_length_factor;
    int *group_refs, *group_widths, *group_lengths, *group_offset, *udata;
    unsigned int *group_clocation, *group_location;
    int m1, m2, mask, last, penultimate;
    int extra_vals[2];
    int min_val;
    int ctable_5_4, ctable_5_6, bitmap_flag, extra_octets;

    extra_vals[0] = extra_vals[1] = 0;
    // only handle DRS templates 5.2 (complex) and 5.3 (complex + spatial diff)
    pack = code_table_5_0(sec);
    if (pack != 2 && pack != 3) return 0;

    // decode scaling parameters from section 5:
    // value = (ref_val + packed * 2^E) * 10^-D, folded into ref_val and factor
    p = sec[5];
    ref_val = ieee2flt(p+11);
    factor_2 = Int_Power(2.0, int2(p+15));
    factor_10 = Int_Power(10.0, -int2(p+17));
    ref_val *= factor_10;
    factor = factor_2 * factor_10;
    nbits = p[19];
    ngroups = uint4(p+31);
    bitmap_flag = code_table_6_0(sec);
    ctable_5_6 = code_table_5_6(sec);

    // spatial differencing order: 1st (5.6=1) or 2nd (5.6=2) only
    if (pack == 3 && (ctable_5_6 != 1 && ctable_5_6 != 2))
	fatal_error_i("unsupported: code table 5.6=%d", ctable_5_6);

    extra_octets = (pack == 2) ? 0 : sec[5][48];

    // degenerate case: no groups -> field is constant (or constant + bitmap)
    if (ngroups == 0) {
	if (bitmap_flag == 255) {
	    for (i = 0; i < ndata; i++) data[i] = ref_val;
	    return 0;
	}
	if (bitmap_flag == 0 || bitmap_flag == 254) {
	    mask_pointer = sec[6] + 6;
	    mask = 0;
	    for (i = 0; i < ndata; i++) {
		if ((i & 7) == 0) mask = *mask_pointer++;
		data[i] = (mask & 128) ? ref_val : UNDEFINED;
		mask <<= 1;
	    }
	    return 0;
	}
	fatal_error("unknown bitmap", "");
    }

    // group metadata from section 5
    ctable_5_4 = code_table_5_4(sec);
    ref_group_width = p[35];
    nbit_group_width = p[36];
    ref_group_length = uint4(p+37);
    group_length_factor = p[41];
    len_last = uint4(p+42);
    nbits_group_len = p[46];
#ifdef DEBUG
    fprintf(stderr,"ctable 5.4 %d ref_group_width %u nbit_group_width %u ref_group_length %u group_length_factor %d\n", ctable_5_4,
	ref_group_width, nbit_group_width, ref_group_length, group_length_factor);
    fprintf(stderr,"len_last %u nbit_group_len %u\n", len_last, nbits_group_len);
#endif
    npnts =  GB2_Sec5_nval(sec); 	// number of defined points
    n_sub_missing = sub_missing_values(sec, &missing1, &missing2);

    // allocate group widths and group lengths
    // (all arrays are ngroups long; udata holds the npnts decoded integers)
    group_refs = (int *) malloc(sizeof (unsigned int) * (size_t) ngroups);
    group_widths = (int *) malloc(sizeof (unsigned int) * (size_t) ngroups);
    group_lengths = (int *) malloc(sizeof (unsigned int) * (size_t) ngroups);
    group_location = (unsigned int *) malloc(sizeof (unsigned int) * (size_t) ngroups);
    group_clocation = (unsigned int *) malloc(sizeof (unsigned int) * (size_t) ngroups);
    group_offset = (int *) malloc(sizeof (unsigned int) * (size_t) ngroups);
    udata = (int *) malloc(sizeof (unsigned int) * (size_t) npnts);
    if (group_refs == NULL || group_widths == NULL || group_lengths ==
		NULL || group_location == NULL || group_clocation == NULL ||
		group_offset == NULL || udata == NULL)
		fatal_error("unpk_complex: memory allocation","");
    // NOTE(review): fatal_error presumably terminates the process, so the
    // allocations above are not freed on this path -- confirm against wgrib2.

    // read any extra values (spatial-differencing initial value(s) + min_val)
    d = sec[7]+5;
    min_val = 0;
    if (extra_octets) {
	extra_vals[0] = uint_n(d,extra_octets);
	d += extra_octets;
	if (ctable_5_6 == 2) {
	    extra_vals[1] = uint_n(d,extra_octets);
	    d += extra_octets;
	}
	min_val = int_n(d,extra_octets);
	d += extra_octets;
    }

    if (ctable_5_4 != 1) fatal_error_i("internal decode does not support code table 5.4=%d",
	ctable_5_4);

    // the three bitstreams (refs, widths, lengths) are independent, so they
    // are read concurrently; ordering of later stages is enforced by the
    // implicit barriers at the end of each sections/single construct
#pragma omp parallel
{
    #pragma omp sections
    {

        #pragma omp section
        {
            // read the group reference values
	    rd_bitstream(d, 0, group_refs, nbits, ngroups);
        }

        #pragma omp section
        {
            // read the group widths
	    unsigned int i;
	    rd_bitstream(d+(nbits*ngroups+7)/8,0,group_widths,nbit_group_width,ngroups);
	    for (i = 0; i < ngroups; i++) group_widths[i] += ref_group_width;
        }

	#pragma omp section
        {
            // read the group lengths
	    unsigned int i;
	    if (ctable_5_4 == 1) {
		rd_bitstream(d+(nbits*ngroups+7)/8+(ngroups*nbit_group_width+7)/8,
	            0,group_lengths, nbits_group_len, ngroups-1);
		for (i = 0; i < ngroups-1; i++) {
		    group_lengths[i] = group_lengths[i] * group_length_factor + ref_group_length;
		}
		group_lengths[ngroups-1] = len_last;
	    }
	}
    }

    #pragma omp single
    {
	// advance d past the three metadata bitstreams
	d += (nbits*ngroups + 7)/8 +
             (ngroups * nbit_group_width + 7) / 8 +
             (ngroups * nbits_group_len + 7) / 8;

        // do a check for number of grid points and size
	clocation = offset = n = j = 0;
    }

    #pragma omp sections
    {
        #pragma omp section
        {
	    // prefix sums: where each group starts in udata (j) and the
	    // total number of packed bits (n)
	    unsigned int i;
	    for (i = 0; i < ngroups; i++) {
		group_location[i] = j;
		j += group_lengths[i];
		n += group_lengths[i]*group_widths[i];
	    }
        }
        #pragma omp section
        {
	    // byte location of each group's packed data
	    unsigned int i;
	    for (i = 0; i < ngroups; i++) {
	        group_clocation[i] = clocation;
	        clocation = clocation + group_lengths[i]*(group_widths[i]/8) +
	                (group_lengths[i]/8)*(group_widths[i] % 8);
	    }
        }
        #pragma omp section
        {
	    // residual bit offset of each group
	    unsigned int i;
	    for (i = 0; i < ngroups; i++) {
	        group_offset[i] = offset;
	        offset += (group_lengths[i] % 8)*(group_widths[i] % 8);
	    }
	}
    }
}

    if (j != npnts) fatal_error_u("bad complex packing: n points %u",j);
    if (d + (n+7)/8 - sec[7] != GB2_Sec7_size(sec))
        fatal_error("complex unpacking size mismatch old test","");

    if (d + clocation + (offset + 7)/8 - sec[7] != GB2_Sec7_size(sec)) fatal_error("complex unpacking size mismatch","");

    // unpack each group's packed values into udata (one group per iteration)
#pragma omp parallel for private(i) schedule(static)
    for (i = 0; i < ngroups; i++) {
	group_clocation[i] += (group_offset[i] / 8);
	group_offset[i] = (group_offset[i] % 8);

	rd_bitstream(d + group_clocation[i], group_offset[i], udata+group_location[i],
		group_widths[i], group_lengths[i]);
    }

    // handle substitute, missing values and reference value
    // (a group value equal to the all-ones pattern m1 -- and m2 for two
    // substitutes -- marks a missing point; otherwise add the group reference)
    if (n_sub_missing == 0) {
#pragma omp parallel for private(i,k,j)
	for (i = 0; i < ngroups; i++) {
	    j = group_location[i];
	    for (k = 0; k < group_lengths[i]; k++) {
		udata[j++] += group_refs[i];
	    }
	}
    }
    else if (n_sub_missing == 1) {
#pragma omp parallel for private(i,m1,k,j)
	for (i = 0; i < ngroups; i++) {
	    j = group_location[i];
	    if (group_widths[i] == 0) {
	        m1 = (1 << nbits) - 1;
		if (m1 == group_refs[i]) {
		    for (k = 0; k < group_lengths[i]; k++) udata[j++] = INT_MAX;
		}
		else {
		    for (k = 0; k < group_lengths[i]; k++) udata[j++] += group_refs[i];
		}
	    }
	    else {
	        m1 = (1 << group_widths[i]) - 1;
	        for (k = 0; k < group_lengths[i]; k++) {
		    if (udata[j] == m1) udata[j] = INT_MAX;
		    else udata[j] += group_refs[i];
		    j++;
		}
	    }
	}
    }
    else if (n_sub_missing == 2) {
#pragma omp parallel for private(i,j,k,m1,m2)
	for (i = 0; i < ngroups; i++) {
	    j = group_location[i];
	    if (group_widths[i] == 0) {
	        m1 = (1 << nbits) - 1;
	        m2 = m1 - 1;
	        if (m1 == group_refs[i] || m2 == group_refs[i]) {
		    for (k = 0; k < group_lengths[i]; k++) udata[j++] = INT_MAX;
		}
		else {
		    for (k = 0; k < group_lengths[i]; k++) udata[j++] += group_refs[i];
		}
	    }
	    else {
	        m1 = (1 << group_widths[i]) - 1;
		m2 = m1 - 1;
	        for (k = 0; k < group_lengths[i]; k++) {
		    if (udata[j] == m1 || udata[j] == m2) udata[j] = INT_MAX;
		    else udata[j] += group_refs[i];
		    j++;
		}
	    }
	}
    }

    // post processing
    // undo spatial differencing: restore 1st-order (5.6=1) or 2nd-order
    // (5.6=2) differences, skipping missing (INT_MAX) points
    if (pack == 3) {
	if (ctable_5_6 == 1) {
	    last = extra_vals[0];
	    i = 0;
	    while (i < npnts) {
		if (udata[i] == INT_MAX) i++;
		else {
		    udata[i++] = extra_vals[0];
		    break;
		}
	    }
	    while (i < npnts) {
		if (udata[i] == INT_MAX) i++;
		else {
		    udata[i] += last + min_val;
		    last = udata[i++];
		}
	    }
	}
	else if (ctable_5_6 == 2) {
	    penultimate = extra_vals[0];
	    last = extra_vals[1];

	    i = 0;
	    while (i < npnts) {
		if (udata[i] == INT_MAX) i++;
		else {
		    udata[i++] = extra_vals[0];
		    break;
		}
	    }
	    while (i < npnts) {
		if (udata[i] == INT_MAX) i++;
		else {
		    udata[i++] = extra_vals[1];
		    break;
		}
	    }
	    for (; i < npnts; i++) {
		if (udata[i] != INT_MAX) {
		    udata[i] = udata[i] + min_val + last + last - penultimate;
		    penultimate = last;
		    last = udata[i];
		}
	    }
	}
	else fatal_error_i("Unsupported: code table 5.6=%d", ctable_5_6);
    }

    // convert to float
    if (bitmap_flag == 255) {
#pragma omp parallel for schedule(static) private(i)
	for (i = 0; i < ndata; i++) {
	    data[i] = (udata[i] == INT_MAX) ? UNDEFINED :
		ref_val + udata[i] * factor;
	}
    }
    else if (bitmap_flag == 0 || bitmap_flag == 254) {
	// walk the section-6 bitmap; n indexes the defined points in udata
	n = 0;
	mask = 0;
	mask_pointer = sec[6] + 6;
	for (i = 0; i < ndata; i++) {
	    if ((i & 7) == 0) mask = *mask_pointer++;
	    if (mask & 128) {
		if (udata[n] == INT_MAX) data[i] = UNDEFINED;
		else data[i] = ref_val + udata[n] * factor;
		n++;
	    }
	    else data[i] = UNDEFINED;
	    mask <<= 1;
	}
    }
    else fatal_error_i("unknown bitmap: %d", bitmap_flag);

    free(group_refs);
    free(group_widths);
    free(group_lengths);
    free(group_location);
    free(group_clocation);
    free(group_offset);
    free(udata);
    return 0;
}
GB_binop__iseq_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__iseq_fp32
// A.*B function (eWiseMult):       GB_AemultB__iseq_fp32
// A*D function (colscale):         GB_AxD__iseq_fp32
// D*A function (rowscale):         GB_DxB__iseq_fp32
// C+=B function (dense accum):     GB_Cdense_accumB__iseq_fp32
// C+=b function (dense accum):     GB_Cdense_accumb__iseq_fp32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__iseq_fp32
// C=scalar+B                       GB_bind1st__iseq_fp32
// C=scalar+B'                      GB_bind1st_tran__iseq_fp32
// C=A+scalar                       GB_bind2nd__iseq_fp32
// C=A'+scalar                      GB_bind2nd_tran__iseq_fp32

// C type:   float
// A type:   float
// B,b type: float

// BinaryOp: cij = (aij == bij)

// NOTE: ISEQ is the "is equal" operator; unlike boolean EQ its result type is
// the operand type (float), so cij is 1.0 or 0.0.

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x == y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time kernel-selection controls from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_ISEQ || GxB_NO_FP32 || GxB_NO_ISEQ_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__iseq_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual loop body is generated by this template using the
    // GB_* macros defined above
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__iseq_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__iseq_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the template block above already returned); kept as
    // generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__iseq_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__iseq_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// frees the ek_slice workspaces declared in GB_AaddB/GB_AemultB below
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__iseq_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__iseq_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__iseq_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        float bij = Bx [p] ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__iseq_fp32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        float aij = Ax [p] ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    float aij = Ax [pA] ;           \
    Cx [pC] = (x == aij) ;          \
}

GrB_Info GB_bind1st_tran__iseq_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    float aij = Ax [pA] ;           \
    Cx [pC] = (aij == y) ;          \
}

GrB_Info GB_bind2nd_tran__iseq_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
radmin_fmt_plug.c
/* RAdmin v2.x cracker patch for JtR. Hacked together during
 * May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * Input Format => user:$radmin2$hash */

/* Standard JtR plugin stanza: the same file is included three times by the
 * build, once to emit the extern, once to register the format, and once for
 * the implementation proper. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_radmin;
#elif FMT_REGISTERS_H
john_register_one(&fmt_radmin);
#else

#include "md5.h"
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
// Tuned on core i7 quad HT
//   1   7445K
//  16  12155K
//  32  12470K  ** this was chosen.
//  64  12608k
// 128  12508k
#define OMP_SCALE  32
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "RAdmin"
#define FORMAT_NAME             "v2.x"
#define ALGORITHM_NAME          "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
// RAdmin v2 hashes the password in a fixed-size buffer; 99 chars + NUL = 100
// bytes, which is exactly what crypt_all feeds to MD5 (see below).
#define PLAINTEXT_LENGTH        99
#define CIPHERTEXT_LENGTH       32
#define BINARY_SIZE             16
#define SALT_SIZE               0
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      64
#define BINARY_ALIGN            4
#define SALT_ALIGN              1

static struct fmt_tests radmin_tests[] = {
	{"$radmin2$B137F09CF92F465CABCA06AB1B283C1F", "lastwolf"},
	{"$radmin2$14e897b1a9354f875df51047bb1a0765", "podebradka"},
	{"$radmin2$02ba5e187e2589be6f80da0046aa7e3c", "12345678"},
	{"$radmin2$b4e13c7149ebde51e510959f30319ac7", "firebaLL"},
	{"$radmin2$3d2c8cae4621edf8abb081408569482b", "yamaha12345"},
	{"$radmin2$60cb8e411b02c10ecc3c98e29e830de8", "xplicit"},
	{NULL}
};

// Per-candidate plaintext buffers (zero-padded, see radmin_set_key) and the
// corresponding MD5 outputs (only the first BINARY_SIZE bytes are used).
static char (*saved_key)[PLAINTEXT_LENGTH+1];
static ARCH_WORD_32 (*crypt_out)[8];

// Allocate key/hash arrays; under OpenMP, scale the batch size by
// threads * OMP_SCALE (tuning table above).
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

// Return non-zero iff q consists solely of hex digits up to its NUL.
static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

// Accept "$radmin2$" followed by exactly 32 hex digits.
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;

	if (strncmp(ciphertext, "$radmin2$", 9))
		return 0;
	p = ciphertext + 9;
	if (strlen(p) != CIPHERTEXT_LENGTH)
		return 0;
	if (!ishex(p))
		return 0;
	return 1;
}

// Decode the 32 hex digits after the last '$' into a 16-byte binary hash.
// Returns a pointer into static storage (standard JtR get_binary contract).
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;	// forces alignment of c
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

// Hash-table bucket selectors over the first word of the computed digest,
// at increasing resolution (standard JtR get_hash_* family).
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

// Compute MD5 for each queued candidate.  Note: the ENTIRE 100-byte buffer
// is hashed (sizeof, not strlen) — RAdmin v2 hashes the password zero-padded
// to the full buffer, which is why radmin_set_key zero-fills the tail.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		MD5_CTX ctx;

		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[index], sizeof(saved_key[index]));
		MD5_Final((unsigned char *)crypt_out[index], &ctx);
	}
	return count;
}

// Quick scan: compare only the first 32 bits of each digest; cmp_exact does
// the full-width comparison on the survivors.
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (*(ARCH_WORD_32 *)binary == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return *(ARCH_WORD_32 *)binary == crypt_out[index][0];
}

// Full BINARY_SIZE comparison against the decoded ciphertext.
static int cmp_exact(char *source, int index)
{
	void *binary = get_binary(source);
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static void radmin_set_key(char *key, int index)
{
	// this code assures that both saved_key[index] gets null-terminated (without buffer overflow)
	char *cp = &saved_key[index][strnzcpyn(saved_key[index], key, PLAINTEXT_LENGTH + 1)+1];
	// and is null padded up to 100 bytes. We simply clean up prior buffer, up to element 99, but that element will never be written to
	while (*cp)
		*cp++ = 0;
}

static char *get_key(int index)
{
	// assured null teminated string.  Just return it.
	return saved_key[index];
}

// Format descriptor wired into JtR's format registry (see stanza at top).
struct fmt_main fmt_radmin = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		radmin_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		fmt_default_set_salt,
		radmin_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
build_search_model.c
// Build a search model: read sequences and candidate iHMMs from an input HDF5
// model, train a PST background model, calibrate every finite HMM, score the
// training sequences against each model, and write the best model (plus a
// bias model) to the output file.
// NOTE(review): <omp.h> is included unconditionally but the parallel region
// below is guarded by HAVE_OPENMP — confirm the build always defines OpenMP.
#include <omp.h>
#include <getopt.h>

#include "tldevel.h"
#include "tlmisc.h"
#include "tlrng.h"
#include "tlseqbuffer.h"

#include "randomkit.h"

#include "sequence_struct.h"
#include "sequence_alloc.h"
#include "sequence_io.h"
#include "pst_build.h"

#include "model_struct.h"
#include "model_io.h"
#include "model_alloc.h"

#include "bias_model.h"

#include "thread_data.h"
#include "hmm_conversion.h"

#include "finite_hmm_stats.h"
#include "finite_hmm_alloc.h"
#include "finite_hmm_io.h"
#include "finite_hmm_score.h"

#include "run_score.h"

// Command-line options and RNG state shared across the run.
struct parameters{
        char* in_model;          // input HDF5 model file (--in)
        char* out_model;         // output search-model file (--out)
        char* seq_db;            // reference sequence database (--seqdb)
        char* cmd_line;          // reconstructed command line (for logging)
        unsigned long seed;      // 0 means "seed from system randomness"
        rk_state rndstate;       // randomkit state (used by worker threads)
        struct rng_state* rng;   // tlrng state (used by PST training)
        int num_threads;
};

// getopt_long ids for options without a short form
#define OPT_SEQDB 1
#define OPT_SEED 2

static int run_bsm(struct parameters* param);

static int calibrate_all(struct model_bag* mb,struct seqer_thread_data** td);
static void* do_calibrate_per_model(void* threadarg);
//static int find_best_model(struct model_bag*mb, struct seq_buffer* sb, int* best);
static int find_best_model(struct model_bag*mb, struct tl_seq_buffer* sb, int* best);
static int print_help(char **argv);
static void free_param(struct parameters* param);

// Parse options, validate input/output paths, seed the RNGs and hand off to
// run_bsm.  RUN()/ERROR_MSG() jump to the ERROR label on failure.
int main(int argc, char *argv[])
{
        struct parameters* param = NULL;
        int c;

        //print_program_header(argv, "Build HDPHMM model(s).");

        MMALLOC(param, sizeof(struct parameters));
        param->in_model = NULL;
        param->out_model = NULL;
        param->seq_db = NULL;
        param->cmd_line = NULL;
        param->seed = 0;
        param->num_threads = 8;
        param->rng = NULL;

        while (1){
                static struct option long_options[] ={
                        {"in",required_argument,0,'i'},
                        {"out",required_argument,0,'o'},
                        {"seqdb",required_argument,0,OPT_SEQDB},
                        {"seed",required_argument,0,OPT_SEED},
                        {"nthreads",required_argument,0,'t'},
                        {"help",0,0,'h'},
                        {0, 0, 0, 0}
                };
                int option_index = 0;
                c = getopt_long_only (argc, argv,"i:o:t:h",long_options, &option_index);

                if (c == -1){
                        break;
                }
                switch(c) {
                case OPT_SEQDB:
                        param->seq_db = optarg;
                        break;
                case OPT_SEED:
                        // NOTE(review): atoi silently yields 0 on bad input,
                        // which falls back to random seeding below — consider
                        // strtoul with validation.
                        param->seed = atoi(optarg);
                        break;
                case 'i':
                        param->in_model = optarg;
                        break;
                case 'o':
                        param->out_model = optarg;
                        break;
                case 't':
                        // NOTE(review): atoi, unvalidated — 0 or negative
                        // thread counts are not rejected here.
                        param->num_threads = atoi(optarg);
                        break;
                case 'h':
                        RUN(print_help(argv));
                        MFREE(param);
                        exit(EXIT_SUCCESS);
                        break;
                default:
                        ERROR_MSG("not recognized");
                        break;
                }
        }

        LOG_MSG("Starting run");

        if(!param->in_model){
                RUN(print_help(argv));
                ERROR_MSG("No input file! use --in <model.h5>");
        }else{
                if(!my_file_exists(param->in_model)){
                        ERROR_MSG("File %s does not exist.", param->in_model);
                }
        }

        if(!param->out_model){
                RUN(print_help(argv));
                ERROR_MSG("No output file! use --out <searchmodel.h5>");
        }else{
                // refuse to clobber an existing output file
                if(my_file_exists(param->out_model)){
                        ERROR_MSG("File %s already exists.", param->out_model);
                }
        }

        if(!param->seq_db){
                RUN(print_help(argv));
                ERROR_MSG("No seqDB use --seqdb <blah.fa>");
        }else{
                if(!my_file_exists(param->seq_db)){
                        RUN(print_help(argv));
                        ERROR_MSG("The file <%s> does not exist.",param->seq_db);
                }
        }

        // Seed both RNG streams identically from --seed, or from system
        // randomness when no seed was given (non-reproducible run).
        if(param->seed){
                RUNP(param->rng = init_rng(param->seed));
                rk_seed(param->seed, &param->rndstate);
        }else{
                RUNP(param->rng = init_rng(0));
                rk_randomseed(&param->rndstate);
        }

        RUN(make_cmd_line(&param->cmd_line,argc,argv));

        RUN(run_bsm(param));

        free_param(param);
        return EXIT_SUCCESS;
ERROR:
        free_param(param);
        return EXIT_FAILURE;
}

// Full pipeline: load sequences, train PST, read + convert + calibrate all
// models, score sequences, pick the best model, write bias + search HMMs.
// NOTE(review): the ERROR path frees none of sb/model_bag/td/bias — a leak on
// failure (process exits right after, so likely tolerated).
int run_bsm(struct parameters* param)
{
        struct model_bag* model_bag = NULL;
        struct tl_seq_buffer* sb = NULL;
        struct seqer_thread_data** td = NULL;
        struct fhmm* bias = NULL;
        double* s = NULL;
        int i;
        int best;
        /* read sequences from in model */
        RUNP(sb = get_sequences_from_hdf5_model(param->in_model, IHMM_SEQ_READ_ONLY_SEQ));

        //RUN(convert_ihmm_seq_buf_into_tl_seq_buf(s, &sb));
        /*LOG_MSG("%d",sb->L);
        for(i = 0; i < sb->num_seq;i++){
                LOG_MSG("%s",sb->sequences[i]->name);
        }*/
        /* train PST */
        RUN(create_pst_model(param->rng,sb, NULL, param->seq_db, param->out_model,0.00001, 0.01, 20.0));
        //sb = NULL;
        /* read all models */
        RUNP(model_bag = read_model_bag_hdf5(param->in_model ));
        RUN(create_seqer_thread_data(&td,param->num_threads, 1024 , 128, &param->rndstate));
        /* convert to fhmmm */
        RUN(convert_ihmm_to_fhmm_models(model_bag));
        /* calibrate */
        RUN(calibrate_all(model_bag, td));

        /* WARNING NEED TO ADD STORAGE FOR SCORES !!!! */
        //RUN(add_multi_model_label_and_u(s, model_bag->num_models));
        // Attach a per-sequence score array (one slot per model) via the
        // generic data pointer; run_score_sequences fills it in.
        for(i = 0; i < sb->num_seq;i++){
                s = NULL;
                MMALLOC(s, sizeof(double) * model_bag->num_models);
                sb->sequences[i]->data = s;
        }

        /* score all training sequences */
        RUN(run_score_sequences( model_bag->finite_models,sb, td, model_bag->num_models, FHMM_SCORE_P_LODD));

        /* assign best */
        RUN(find_best_model(model_bag, sb, &best));
        LOG_MSG("Best model: %d",best);
        //write
        RUN(build_bias_model(model_bag->finite_models[best], &bias));
        RUN(write_biashmm(param->out_model, bias));

        RUN(write_searchfhmm(param->out_model, model_bag->finite_models[best]));

        for(i = 0; i < sb->num_seq;i++){
                MFREE(sb->sequences[i]->data);
                sb->sequences[i]->data = NULL;
        }

        free_tl_seq_buffer(sb);
        free_model_bag(model_bag);
        free_seqer_thread_data(td);
        free_fhmm(bias);
        return OK;
ERROR:
        return FAIL;
}

// Sum each model's per-sequence scores and report the model with the lowest
// total in *best.
// NOTE(review): min starts at 1.0; if every total is >= 1.0, *best stays -1
// and the caller would index finite_models[-1] — confirm scores are always
// negative log-odds sums here.
int find_best_model(struct model_bag*mb, struct tl_seq_buffer* sb, int* best)
{
        double* total_e = NULL;
        double* s;
        int i,j;
        double min;

        RUN(galloc(&total_e, mb->num_models));
        for(j = 0; j < mb->num_models;j++){
                total_e[j] = 0.0;
        }

        for(i= 0 ;i < sb->num_seq;i++){
                s = sb->sequences[i]->data;
                for(j = 0; j < mb->num_models;j++){
                        total_e[j] += s[j];
                        //fprintf(stdout,"%f %f ", s->sequences[i]->score_arr[j], esl_exp_logsurv(s->sequences[i]->score_arr[j], mb->finite_models[j]->tau,mb->finite_models[j]->lambda));
                }
                //fprintf(stdout,"\n");
        }
        j = -1;
        min = 1.0;
        for(i = 0; i < mb->num_models;i++){
                LOG_MSG(" Model %d: %d states: %f", i, mb->finite_models[i]->K, total_e[i]);
                if(total_e[i] < min){
                        min = total_e[i];
                        j =i;
                }
        }
        gfree(total_e);
        *best = j;
        return OK;
ERROR:
        return FAIL;
}

/* calibrate all models */
// Calibrate models in rounds of up to num_threads at a time: each round hands
// one model index to each thread-data slot, then runs do_calibrate_per_model
// over the slots (in parallel when OpenMP is available).
int calibrate_all(struct model_bag* mb,struct seqer_thread_data** td)
{
        int i,j,c;
        int num_threads = td[0]->num_threads;
        int run;
        ASSERT(mb != NULL, "No models");

        c = 0;   // next model index to assign
        for(run = 0; run < mb->num_models;run+= num_threads){
                j = 0;   // number of slots actually filled this round
                for(i = 0; i < num_threads;i++){
                        td[i]->thread_ID = i;
                        td[i]->model_ID = c;
                        td[i]->fhmm = mb->finite_models;
                        //td[i]->sb = sb;
                        //LOG_MSG("Cal %d",c);
                        j++;
                        c++;
                        if(c == mb->num_models){
                                break;   // last (partial) round
                        }
                }
#ifdef HAVE_OPENMP
                omp_set_num_threads( MACRO_MIN(num_threads,j));
#pragma omp parallel shared(td) private(i)
                {
#pragma omp for schedule(dynamic) nowait
#endif
                        for(i = 0; i < j;i++){
                                do_calibrate_per_model(td[i]);
                        }
#ifdef HAVE_OPENMP
                }
#endif
        }
        return OK;
ERROR:
        return FAIL;
}

// Worker: calibrate the single model named by threadarg->model_ID, using a
// fresh random draw from the thread's randomkit state.  pthread-style
// signature (void* in/out) so it can also be used as a thread entry point.
void* do_calibrate_per_model(void* threadarg)
{
        struct seqer_thread_data *data;
        data = (struct seqer_thread_data *) threadarg;

        int r;
        r = rk_random(&data->rndstate);

        fhmm_calibrate(data->fhmm[data->model_ID], data->fmat, r);
        //LOG_MSG("Model %d: %f %f", data->model_ID, data->fhmm[data->model_ID]->lambda,data->fhmm[data->model_ID]->tau);
        return NULL;
}

// Print usage to stdout.  Returns OK/FAIL (tlfilename allocates tmp).
int print_help(char **argv)
{
        const char usage[] = " -i <ihmm model> -out <search model>";
        char* tmp = NULL;

        RUN(tlfilename(argv[0], &tmp));
        fprintf(stdout,"\nUsage: %s [-options] %s\n\n",tmp,usage);
        fprintf(stdout,"Options:\n\n");

        fprintf(stdout,"%*s%-*s: %s %s\n",3,"",MESSAGE_MARGIN-3,"--seqdb","Reference database." ,"[8]"  );
        fprintf(stdout,"%*s%-*s: %s %s\n",3,"",MESSAGE_MARGIN-3,"--nthreads","Number of threads." ,"[8]"  );
        fprintf(stdout,"%*s%-*s: %s %s\n",3,"",MESSAGE_MARGIN-3,"--seed","Seed" ,"[NA]"  );
        MFREE(tmp);
        return OK;
ERROR:
        MFREE(tmp);
        return FAIL;
}

// Release everything owned by param (safe on partially-initialized param;
// in_model/out_model/seq_db point into argv and are not freed).
void free_param(struct parameters* param)
{
        if(param){
                if(param->cmd_line){
                        gfree(param->cmd_line);
                }
                if(param->rng){
                        free_rng(param->rng);
                }
                MFREE(param);
        }
}