source
stringlengths
3
92
c
stringlengths
26
2.25M
IdentifyLeafHyperarcsWorklet.h
//============================================================================ // Copyright (c) Kitware, Inc. // All rights reserved. // See LICENSE.txt for details. // // This software is distributed WITHOUT ANY WARRANTY; without even // the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the above copyright notice for more information. //============================================================================ // Copyright (c) 2018, The Regents of the University of California, through // Lawrence Berkeley National Laboratory (subject to receipt of any required approvals // from the U.S. Dept. of Energy). All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // (1) Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // (2) Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // (3) Neither the name of the University of California, Lawrence Berkeley National // Laboratory, U.S. Dept. of Energy nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED // OF THE POSSIBILITY OF SUCH DAMAGE. // //============================================================================= // // This code is an extension of the algorithm presented in the paper: // Parallel Peak Pruning for Scalable SMP Contour Tree Computation. // Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. // Proceedings of the IEEE Symposium on Large Data Analysis and Visualization // (LDAV), October 2016, Baltimore, Maryland. // // The PPP2 algorithm and software were jointly developed by // Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and // Oliver Ruebel (LBNL) //============================================================================== #ifndef vtk_m_worklet_contourtree_distributed_tree_grafter_identify_leaf_hyperarcs_worklet_h #define vtk_m_worklet_contourtree_distributed_tree_grafter_identify_leaf_hyperarcs_worklet_h #include <vtkm/worklet/WorkletMapField.h> #include <vtkm/worklet/contourtree_augmented/Types.h> namespace vtkm { namespace worklet { namespace contourtree_distributed { namespace tree_grafter { /// \brief Worklet implementing the TreeGrafter.IdentifyLeafHyperarcs function /// /// At this stage, we have: /// i. hierarchicalRegularID set for any supernode stored at all in the parent /// ii. hierarchicalSuperID set for any supernode that is a supernode in the parent /// iii. hierarchicalHyperParent set for any attachment point /// iv. supernodeType set to indicate what type of supernode /// v. 
/// up/dn neighbours set for all supernodes
///
/// at the end of the chain collapse, the up/down neighbours define the start & end of the hyperarc
/// one end may be a leaf, in which case we can transfer the hyperarc
/// note that because we are grafting, we have a guarantee that they can't both be leaves
/// we therefore:
/// a. for leaves, determine whether up or down hyperarc, create hyperarc
/// b. for regular vertices pointing to a leaf hyperarc, set superarc / hyperparent
/// c. for other vertices, ignore
class IdentifyLeafHyperarcsWorklet : public vtkm::worklet::WorkletMapField
{
public:
  // One invocation per active superarc (an EdgePair of supernode IDs).
  // The supernodeType / neighbour / output arrays are indexed by supernode ID,
  // not by input index, hence the WholeArray bindings.
  using ControlSignature = void(
    FieldIn
      activeSuperarcs, // input iteration index. loop to one less than ContourTree->Supernodes.GetNumberOfValues()
    WholeArrayIn supernodeType,               // input
    WholeArrayIn upNeighbour,                 // input
    WholeArrayIn downNeighbour,               // input
    WholeArrayOut hierarchicalHyperparent,    //output
    WholeArrayOut hierarchicalHyperarcPortal, // output
    WholeArrayOut whenTransferredPortal       // output
  );
  using ExecutionSignature = void(_1, _2, _3, _4, _5, _6, _7);
  using InputDomain = _1;

  // Default Constructor
  // numTransferIterations: index of the current transfer iteration; it is OR-ed
  // with the IS_HYPERNODE / IS_SUPERNODE flag bits when recording in
  // whenTransferred at which iteration a supernode was transferred.
  VTKM_EXEC_CONT
  IdentifyLeafHyperarcsWorklet(const vtkm::Id& numTransferIterations)
    : NumTransferIterations(numTransferIterations)
  {
  }

  template <typename InFieldPortalType, typename OutFieldPortalType>
  VTKM_EXEC void operator()(const vtkm::worklet::contourtree_augmented::EdgePair& activeSuperarc,
                            const InFieldPortalType supernodeTypePortal,
                            const InFieldPortalType upNeighbourPortal,
                            const InFieldPortalType downNeighbourPortal,
                            const OutFieldPortalType& hierarchicalHyperparentPortal,
                            const OutFieldPortalType& hierarchicalHyperarcPortal,
                            const OutFieldPortalType& whenTransferredPortal) const
  { // operator ()
    // per active superarc
    // retrieve the supernode IDs for the two ends
    vtkm::Id low = activeSuperarc.first;
    vtkm::Id high = activeSuperarc.second;

    // test whether the top end is an upper leaf
    switch (supernodeTypePortal.Get(high))
    { // switch on upper end
      case (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_UPPER_LEAF:
      { // upper end is a leaf
        // in lower leaf rounds, never recognise these
        // an upper leaf starts its own hyperarc: it is its own hyperparent, and
        // the hyperarc points downwards to its down-neighbour (no IS_ASCENDING bit).
        hierarchicalHyperparentPortal.Set(high, high);
        hierarchicalHyperarcPortal.Set(
          high, vtkm::worklet::contourtree_augmented::MaskedIndex(downNeighbourPortal.Get(high)));
        whenTransferredPortal.Set(
          high, this->NumTransferIterations | vtkm::worklet::contourtree_augmented::IS_HYPERNODE);
        break;
      } // upper end is a leaf
      case (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_REGULAR:
      { // upper end is regular
        // notice that this is redundant, so will be set from both arcs
        // this is parallel safe, because it sets the same value anyway
        // testing would be more complex
        // find the up & down neighbours
        vtkm::Id upNbr =
          vtkm::worklet::contourtree_augmented::MaskedIndex(upNeighbourPortal.Get(high));
        vtkm::Id downNbr =
          vtkm::worklet::contourtree_augmented::MaskedIndex(downNeighbourPortal.Get(high));
        // test the up neighbour first for leaf-hood
        // but only if the corresponding flag is true
        if (supernodeTypePortal.Get(upNbr) ==
            (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_UPPER_LEAF)
        { // up neighbour is an upper leaf
          // adopt the leaf as hyperparent; mark as an ordinary supernode transfer
          hierarchicalHyperparentPortal.Set(high, upNbr);
          whenTransferredPortal.Set(high,
                                    this->NumTransferIterations |
                                      (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_SUPERNODE);
        } // up neighbour is an upper leaf
        // then the down neighbour (cannot both be true)
        else if (supernodeTypePortal.Get(downNbr) ==
                 (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_LOWER_LEAF)
        { // down neighbour is a lower leaf
          hierarchicalHyperparentPortal.Set(high, downNbr);
          whenTransferredPortal.Set(high,
                                    this->NumTransferIterations |
                                      (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_SUPERNODE);
        } // down neighbour is a lower leaf
        break;
      } // case: upper end is regular
      // all other cases do nothing
      case (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_SADDLE:
      case (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_ATTACHMENT:
      case (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_LOWER_LEAF:
      default:
        break;
    } // switch on upper end

    // test whether the bottom end is a lower leaf
    switch (supernodeTypePortal.Get(low))
    { // switch on lower end
      case (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_LOWER_LEAF:
      { // lower end is a leaf
        // mirror of the upper-leaf case, except the hyperarc points upwards,
        // so the target gets the IS_ASCENDING flag OR-ed in.
        hierarchicalHyperparentPortal.Set(low, low);
        hierarchicalHyperarcPortal.Set(
          low,
          vtkm::worklet::contourtree_augmented::MaskedIndex(upNeighbourPortal.Get(low)) |
            (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_ASCENDING);
        whenTransferredPortal.Set(low,
                                  this->NumTransferIterations |
                                    (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_HYPERNODE);
        break;
      } // lower end is a leaf
      case (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_REGULAR:
      { // lower end is regular
        // notice that this is redundant, so will be set from both arcs
        // this is parallel safe, because it sets the same value anyway
        // testing would be more complex
        // find the up & down neighbours
        vtkm::Id upNbr =
          vtkm::worklet::contourtree_augmented::MaskedIndex(upNeighbourPortal.Get(low));
        vtkm::Id downNbr =
          vtkm::worklet::contourtree_augmented::MaskedIndex(downNeighbourPortal.Get(low));
        // test the up neighbour first for leaf-hood
        if (supernodeTypePortal.Get(upNbr) ==
            (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_UPPER_LEAF)
        { // up neighbour is an upper leaf
          hierarchicalHyperparentPortal.Set(low, upNbr);
          whenTransferredPortal.Set(low,
                                    this->NumTransferIterations |
                                      (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_SUPERNODE);
        } // up neighbour is an upper leaf
        // then the down neighbour (cannot both be true)
        else if (supernodeTypePortal.Get(downNbr) ==
                 (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_LOWER_LEAF)
        { // down neighbour is a lower leaf
          hierarchicalHyperparentPortal.Set(low, downNbr);
          whenTransferredPortal.Set(low,
                                    this->NumTransferIterations |
                                      (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_SUPERNODE);
        } // down neighbour is a lower leaf
        break;
      } // lower end is regular
      // all other cases do nothing
      case (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_SADDLE:
      case (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_ATTACHMENT:
      case (vtkm::Id)vtkm::worklet::contourtree_augmented::IS_UPPER_LEAF:
      default:
        break;
    } // switch on lower end

    // In serial this worklet implements the following operation
    /*
    #pragma omp parallel for
    for (indexType activeSuper = 0; activeSuper < activeSuperarcs.size(); activeSuper++)
      { // per active superarc
      // retrieve the supernode IDs for the two ends
      indexType low = activeSuperarcs[activeSuper].low;
      indexType high = activeSuperarcs[activeSuper].high;

      // test whether the top end is an upper leaf
      switch (supernodeType[high])
        { // switch on upper end
        case IS_UPPER_LEAF:
          { // upper end is a leaf
          // in lower leaf rounds, never recognise these
          hierarchicalHyperparent[high] = high;
          hierarchicalHyperarc[high] = maskedIndex(downNeighbour[high]);
          whenTransferred[high] = nTransferIterations | IS_HYPERNODE;
          break;
          } // upper end is a leaf
        case IS_REGULAR:
          { // upper end is regular
          // notice that this is redundant, so will be set from both arcs
          // this is parallel safe, because it sets the same value anyway
          // testing would be more complex
          // find the up & down neighbours
          indexType upNbr = maskedIndex(upNeighbour[high]);
          indexType downNbr = maskedIndex(downNeighbour[high]);
          // test the up neighbour first for leaf-hood
          // but only if the corresponding flag is true
          if (supernodeType[upNbr] == IS_UPPER_LEAF)
            { // up neighbour is an upper leaf
            hierarchicalHyperparent[high] = upNbr;
            whenTransferred[high] = nTransferIterations | IS_SUPERNODE;
            } // up neighbour is an upper leaf
          // then the down neighbour (cannot both be true)
          else if (supernodeType[downNbr] == IS_LOWER_LEAF)
            { // down neighbour is a lower leaf
            hierarchicalHyperparent[high] = downNbr;
            whenTransferred[high] = nTransferIterations | IS_SUPERNODE;
            } // down neighbour is a lower leaf
          break;
          } // upper end is regular
        // all other cases do nothing
        case IS_SADDLE:
        case IS_ATTACHMENT:
        case IS_LOWER_LEAF:
        default:
          break;
        } // switch on upper end

      // test whether the bottom end is a lower leaf
      switch (supernodeType[low])
        { // switch on lower end
        case IS_LOWER_LEAF:
          { // lower end is a leaf
          hierarchicalHyperparent[low] = low;
          hierarchicalHyperarc[low] = maskedIndex(upNeighbour[low]) | IS_ASCENDING;
          whenTransferred[low] = nTransferIterations | IS_HYPERNODE;
          break;
          } // lower end is a leaf
        case IS_REGULAR:
          { // lower end is regular
          // notice that this is redundant, so will be set from both arcs
          // this is parallel safe, because it sets the same value anyway
          // testing would be more complex
          // find the up & down neighbours
          indexType upNbr = maskedIndex(upNeighbour[low]);
          indexType downNbr = maskedIndex(downNeighbour[low]);
          // test the up neighbour first for leaf-hood
          if (supernodeType[upNbr] == IS_UPPER_LEAF)
            { // up neighbour is an upper leaf
            hierarchicalHyperparent[low] = upNbr;
            whenTransferred[low] = nTransferIterations | IS_SUPERNODE;
            } // up neighbour is an upper leaf
          // then the down neighbour (cannot both be true)
          else if (supernodeType[downNbr] == IS_LOWER_LEAF)
            { // down neighbour is a lower leaf
            hierarchicalHyperparent[low] = downNbr;
            whenTransferred[low] = nTransferIterations | IS_SUPERNODE;
            } // down neighbour is a lower leaf
          break;
          } // lower end is regular
        // all other cases do nothing
        case IS_SADDLE:
        case IS_ATTACHMENT:
        case IS_UPPER_LEAF:
        default:
          break;
        } // switch on lower end
      } // per active superarc
    */
  } // operator ()

private:
  // Iteration counter OR-ed into whenTransferred together with the node-kind flag.
  vtkm::Id NumTransferIterations;

}; // IdentifyLeafHyperarcsWorklet

} // namespace tree_grafter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm

#endif
3.norace3.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N][N]; #pragma omp parallel for for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) for (int k = 1; k < N; k++) A[i][j][k] = A[i][j][k - 1]; } // CHECK: Region is Data Race Free. // END
zlauum.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> s d c * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_lauum * * Computes the product U * U^H or L^H * L, where the triangular * factor U or L is stored in the upper or lower triangular part of * the array A. * * If uplo = 'U' or 'u' then the upper triangle of the result is stored, * overwriting the factor U in A. * If uplo = 'L' or 'l' then the lower triangle of the result is stored, * overwriting the factor L in A. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] n * The order of the matrix A. n >= 0. * * @param[in,out] pA * On entry, the triangular factor U or L. * On exit, if UPLO = 'U', the upper triangle of A is * overwritten with the upper triangle of the product U * U^H; * if UPLO = 'L', the lower triangle of A is overwritten with * the lower triangle of the product L^H * L. * The diagonal is assumed to be real with no imaginary part. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,n). * ******************************************************************************* * * @retval PlasmaSuccess successful exit. * @retval < 0 if -i, the i-th argument had an illegal value. 
* ******************************************************************************* * * @sa plasma_clauum * @sa plasma_dlauum * @sa plasma_slauum * ******************************************************************************/ int plasma_zlauum(plasma_enum_t uplo, int n, plasma_complex64_t *pA, int lda) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if (uplo != PlasmaUpper && uplo != PlasmaLower) { plasma_error("illegal value of uplo"); return -1; } if (n < 0) { plasma_error("illegal value of n"); return -2; } if (lda < imax(1, n)) { plasma_error("illegal value of lda"); return -4; } // quick return if (imax(n, 0) == 0) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_lauum(plasma, PlasmaComplexDouble, n); // Set tiling parameters. int nb = plasma->nb; // Create tile matrix. plasma_desc_t A; int retval; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, n, n, 0, 0, n, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_zge2desc(pA, lda, A, &sequence, &request); // Call the tile async function. plasma_omp_zlauum(uplo, A, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request); } // implicit synchronization // Free matrix A in tile layout. plasma_desc_destroy(&A); // Return status. 
int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_lauum * Computes the product U * U^H or L^H * L, where the * triangular factor U or L is stored in the upper or lower triangular part of * the array A. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * * @param[in] A * Descriptor of matrix A. * * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_zlauum * @sa plasma_omp_zlauum * @sa plasma_omp_dlauum * @sa plasma_omp_clauum * @sa plasma_omp_slauum * ******************************************************************************/ void plasma_omp_zlauum(plasma_enum_t uplo, plasma_desc_t A, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. 
if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (A.n == 0) return; // Call the parallel function. plasma_pzlauum(uplo, A, sequence, request); }
flowinfo_metadata.c
/*
 * Copyright 2014-2017 Nippon Telegraph and Telephone Corporation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 *      @file   flowinfo_metadata.c
 *      @brief  Optimized flow database for dataplane, for metadata
 *
 *      Two-level structure: the "metadata_mask" flowinfo keeps one child
 *      flowinfo per distinct mask value (linear array `next`), and each
 *      child ("metadata" flowinfo) hashes the masked metadata value to a
 *      per-value eth_type flowinfo.
 */

#include <stdlib.h>

#include "openflow.h"
#include "lagopus/flowdb.h"
#include "pktbuf.h"
#include "packet.h"
#include "lagopus/flowinfo.h"

// Strip the OXM has-mask bit (bit 0) to get the field type.
#define OXM_FIELD_TYPE(field) ((field) >> 1)

// Width of the OpenFlow metadata field in bits (not referenced in this file).
#define METADATA_BITLEN (64)

static lagopus_result_t
add_flow_metadata_mask(struct flowinfo *, struct flow *);
static lagopus_result_t
del_flow_metadata_mask(struct flowinfo *, struct flow *);
static struct flow *
match_flow_metadata_mask(struct flowinfo *, struct lagopus_packet *, int32_t *);
static struct flow *
find_flow_metadata_mask(struct flowinfo *, struct flow *);
static void
destroy_flowinfo_metadata_mask(struct flowinfo *);

static lagopus_result_t
add_flow_metadata(struct flowinfo *, struct flow *);
static lagopus_result_t
del_flow_metadata(struct flowinfo *, struct flow *);
static struct flow *
match_flow_metadata(struct flowinfo *, struct lagopus_packet *, int32_t *);
static struct flow *
find_flow_metadata(struct flowinfo *, struct flow *);
static void
destroy_flowinfo_metadata(struct flowinfo *);

/*
 * Extract the metadata value and mask from a flow's match list.
 * A masked OXM entry (field == (type << 1) + 1, i.e. has-mask bit set) carries
 * value then mask in oxm_value[0..7] and oxm_value[8..15]; an unmasked entry
 * gets an all-ones mask.  Returns LAGOPUS_RESULT_NOT_FOUND when the list has
 * no metadata match (loop ran off the end leaving match == NULL).
 */
static lagopus_result_t
get_match_metadata(const struct match_list *match_list,
                   uint64_t *metadata,
                   uint64_t *mask) {
  const struct match *match;

  TAILQ_FOREACH(match, match_list, entry) {
    if (match->oxm_field == (OFPXMT_OFB_METADATA << 1) + 1) {
      /* masked entry: value followed by mask */
      OS_MEMCPY(metadata, match->oxm_value, sizeof(*metadata));
      OS_MEMCPY(mask, &match->oxm_value[8], sizeof(*mask));
      break;
    }
    if (OXM_FIELD_TYPE(match->oxm_field) == OFPXMT_OFB_METADATA) {
      /* unmasked entry: full-width mask */
      OS_MEMCPY(metadata, match->oxm_value, sizeof(*metadata));
      *mask = 0xffffffffffffffff;
      break;
    }
  }
  if (match == NULL) {
    return LAGOPUS_RESULT_NOT_FOUND;
  }
  return LAGOPUS_RESULT_OK;
}

/*
 * Allocate the per-mask dispatch flowinfo.  `next` starts as a 1-byte
 * allocation so later realloc() calls are unconditional; `misc` handles flows
 * without a metadata match.  Returns NULL on allocation failure.
 */
struct flowinfo *
new_flowinfo_metadata_mask(void) {
  struct flowinfo *self;

  self = calloc(1, sizeof(struct flowinfo));
  if (self != NULL) {
    self->nflow = 0;
    self->nnext = 0;
    self->next = malloc(1);
    self->misc = new_flowinfo_eth_type();
    self->add_func = add_flow_metadata_mask;
    self->del_func = del_flow_metadata_mask;
    self->match_func = match_flow_metadata_mask;
    self->find_func = find_flow_metadata_mask;
    self->destroy_func = destroy_flowinfo_metadata_mask;
  }
  return self;
}

/* Destroy every per-mask child, then the dispatch node itself. */
/* NOTE(review): self->misc is not destroyed here — confirm ownership. */
static void
destroy_flowinfo_metadata_mask(struct flowinfo *self) {
  struct flowinfo *flowinfo;
  unsigned int i;

  for (i = 0; i < self->nnext; i++) {
    flowinfo = self->next[i];
    flowinfo->destroy_func(flowinfo);
  }
  free(self->next);
  free(self);
}

/* Hashmap value destructor: forwards to the flowinfo's own destroy hook. */
static void
freeup_flowinfo(void *val) {
  struct flowinfo *flowinfo;

  flowinfo = val;
  flowinfo->destroy_func(flowinfo);
}

/*
 * Allocate a per-mask flowinfo that hashes the masked metadata value to a
 * per-value eth_type flowinfo.  Returns NULL on allocation failure.
 */
struct flowinfo *
new_flowinfo_metadata(void) {
  struct flowinfo *self;

  self = calloc(1, sizeof(struct flowinfo));
  if (self != NULL) {
    lagopus_hashmap_create(&self->hashmap,
                           LAGOPUS_HASHMAP_TYPE_ONE_WORD,
                           freeup_flowinfo);
    /* misc is not used */
    self->add_func = add_flow_metadata;
    self->del_func = del_flow_metadata;
    self->match_func = match_flow_metadata;
    self->find_func = find_flow_metadata;
    self->destroy_func = destroy_flowinfo_metadata;
  }
  return self;
}

/* Destroy the hashmap (true => run freeup_flowinfo on each value) and self. */
static void
destroy_flowinfo_metadata(struct flowinfo *self) {
  lagopus_hashmap_destroy(&self->hashmap, true);
  free(self);
}

/*
 * Add a flow under its mask's child flowinfo, creating the child on first use;
 * flows without a metadata match go to `misc`.  Bumps nflow on success.
 * NOTE(review): new_flowinfo_metadata() and realloc() results are not checked
 * for NULL — allocation failure dereferences NULL; confirm allocator policy.
 */
static lagopus_result_t
add_flow_metadata_mask(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint64_t metadata, mask;
  lagopus_result_t rv;
  unsigned int i;

  rv = get_match_metadata(&flow->match_list, &metadata, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    /* linear scan for an existing child with this mask */
    rv = LAGOPUS_RESULT_NOT_FOUND;
    for (i = 0; i < self->nnext; i++) {
      if (self->next[i]->userdata == mask) {
        flowinfo = self->next[i];
        rv = LAGOPUS_RESULT_OK;
        break;
      }
    }
    if (rv == LAGOPUS_RESULT_NOT_FOUND) {
      /* new node. */
      flowinfo = new_flowinfo_metadata();
      flowinfo->userdata = mask;
      self->next = realloc(self->next,
                           (unsigned long)(self->nnext + 1)
                           * sizeof(struct flowinfo *));
      self->next[self->nnext] = flowinfo;
      self->nnext++;
    }
    rv = flowinfo->add_func(flowinfo, flow);
  } else {
    rv = self->misc->add_func(self->misc, flow);
  }
  if (rv == LAGOPUS_RESULT_OK) {
    self->nflow++;
  }
  return rv;
}

/*
 * Delete a flow from its mask's child; destroy and compact away a child that
 * becomes empty.  Decrements nflow on success.
 * sizeof(struct flowinfo **) below equals pointer size, so the memmove count
 * is correct, though sizeof(struct flowinfo *) was presumably intended.
 */
static lagopus_result_t
del_flow_metadata_mask(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint64_t metadata, mask;
  lagopus_result_t rv;
  unsigned int i;

  rv = get_match_metadata(&flow->match_list, &metadata, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = LAGOPUS_RESULT_NOT_FOUND;
    for (i = 0; i < self->nnext; i++) {
      if (self->next[i]->userdata == mask) {
        flowinfo = self->next[i];
        rv = LAGOPUS_RESULT_OK;
        break;
      }
    }
    if (rv == LAGOPUS_RESULT_NOT_FOUND) {
      return LAGOPUS_RESULT_NOT_FOUND;
    }
    rv = flowinfo->del_func(flowinfo, flow);
    if (flowinfo->nflow == 0) {
      /* last flow for this mask: drop the child and close the array gap */
      flowinfo->destroy_func(flowinfo);
      self->nnext--;
      memmove(&self->next[i], &self->next[i + 1],
              (self->nnext - i) * sizeof(struct flowinfo **));
    }
  } else {
    rv = self->misc->del_func(self->misc, flow);
  }
  if (rv == LAGOPUS_RESULT_OK) {
    self->nflow--;
  }
  return rv;
}

/*
 * Match a packet: query every per-mask child, keep the highest-priority hit,
 * then consult `misc`.
 * NOTE(review): a non-NULL `misc` hit replaces the masked result without
 * comparing priorities — confirm this precedence is intended.
 * NOTE(review): `flow[self->nnext]` is a zero-length VLA when nnext == 0,
 * which is undefined behavior in C — confirm callers or guard.
 */
static struct flow *
match_flow_metadata_mask(struct flowinfo *self,
                         struct lagopus_packet *pkt,
                         int32_t *pri) {
  struct flowinfo *flowinfo;
  struct flow *flow[self->nnext], *matched, *alt_flow;
  /* sentinel with priority 0 so any real hit wins the comparison */
  struct flow mismatched = {
    .priority = 0,
    .flags = 0,
    .idle_timeout = 0,
    .hard_timeout = 0,
    .match_list = {NULL, NULL},
    .instruction_list = {NULL, NULL},
    .field_bits = 0
  };
  unsigned int i;

  matched = &mismatched;
  //#pragma omp parallel for
  for (i = 0; i < self->nnext; i++) {
    flowinfo = self->next[i];
    flow[i] = flowinfo->match_func(flowinfo, pkt, pri);
  }
  for (i = 0; i < self->nnext; i++) {
    if (flow[i] != NULL && flow[i]->priority > matched->priority) {
      matched = flow[i];
    }
  }
  alt_flow = self->misc->match_func(self->misc, pkt, pri);
  if (alt_flow != NULL) {
    matched = alt_flow;
  }
  if (matched == &mismatched) {
    matched = NULL;
  }
  return matched;
}

/*
 * Exact lookup of a flow entry: route by mask to the right child (or `misc`
 * when the flow has no metadata match) and delegate.  NULL if no child holds
 * the mask.
 */
static struct flow *
find_flow_metadata_mask(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint64_t metadata, mask;
  lagopus_result_t rv;
  unsigned int i;

  rv = get_match_metadata(&flow->match_list, &metadata, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = LAGOPUS_RESULT_NOT_FOUND;
    for (i = 0; i < self->nnext; i++) {
      if (self->next[i]->userdata == mask) {
        flowinfo = self->next[i];
        rv = LAGOPUS_RESULT_OK;
        break;
      }
    }
    if (rv == LAGOPUS_RESULT_NOT_FOUND) {
      return NULL;
    }
  } else {
    flowinfo = self->misc;
  }
  return flowinfo->find_func(flowinfo, flow);
}

/*
 * Add a flow keyed by its (unmasked) metadata value, creating the per-value
 * eth_type flowinfo on first use.  Flows without a metadata match cannot reach
 * this level (the parent routed them to misc), hence no else branch.
 * NOTE(review): new_flowinfo_eth_type() result is not NULL-checked, and the
 * new child leaks if lagopus_hashmap_add_no_lock() fails — confirm.
 */
static lagopus_result_t
add_flow_metadata(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint64_t metadata, mask;
  lagopus_result_t rv;

  rv = get_match_metadata(&flow->match_list, &metadata, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = lagopus_hashmap_find_no_lock(&self->hashmap,
                                      (void *)metadata,
                                      (void *)&flowinfo);
    if (rv != LAGOPUS_RESULT_OK) {
      void *val;

      flowinfo = new_flowinfo_eth_type();
      val = flowinfo;
      rv = lagopus_hashmap_add_no_lock(&self->hashmap,
                                       (void *)metadata,
                                       (void *)&val,
                                       false);
      if (rv != LAGOPUS_RESULT_OK) {
        goto out;
      }
    }
    rv = flowinfo->add_func(flowinfo, flow);
    if (rv == LAGOPUS_RESULT_OK) {
      self->nflow++;
    }
  }
out:
  return rv;
}

/*
 * Delete a flow from the per-value child.  An emptied child is left in the
 * hashmap (freed only at destroy time, via freeup_flowinfo).
 */
static lagopus_result_t
del_flow_metadata(struct flowinfo *self, struct flow *flow) {
  uint64_t metadata, mask;
  lagopus_result_t rv;

  rv = get_match_metadata(&flow->match_list, &metadata, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    struct flowinfo *flowinfo;

    rv = lagopus_hashmap_find_no_lock(&self->hashmap,
                                      (void *)metadata,
                                      (void *)&flowinfo);
    if (rv == LAGOPUS_RESULT_OK) {
      rv = flowinfo->del_func(flowinfo, flow);
    }
    if (rv == LAGOPUS_RESULT_OK) {
      self->nflow--;
    }
  }
  return rv;
}

/*
 * Match a packet at the per-mask level: apply this node's mask (stored in
 * userdata) to the packet's metadata, then hash-look-up the per-value child.
 */
static struct flow *
match_flow_metadata(struct flowinfo *self,
                    struct lagopus_packet *pkt,
                    int32_t *pri) {
  struct flowinfo *flowinfo;
  uint64_t metadata;
  struct flow *flow;
  lagopus_result_t rv;

  flow = NULL;
  metadata = (pkt->oob_data.metadata & self->userdata);
  rv = lagopus_hashmap_find_no_lock(&self->hashmap,
                                    (void *)metadata,
                                    (void *)&flowinfo);
  if (rv == LAGOPUS_RESULT_OK) {
    flow = flowinfo->match_func(flowinfo, pkt, pri);
  }
  return flow;
}

/*
 * Exact lookup at the per-value level; falls back to `misc` when the flow has
 * no metadata match (misc is documented as unused at this level — the parent
 * normally routes such flows away; NOTE(review): confirm misc is non-NULL
 * if this path can be reached).
 */
static struct flow *
find_flow_metadata(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint64_t metadata, mask;
  lagopus_result_t rv;

  rv = get_match_metadata(&flow->match_list, &metadata, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = lagopus_hashmap_find_no_lock(&self->hashmap,
                                      (void *)metadata,
                                      (void *)&flowinfo);
    if (rv != LAGOPUS_RESULT_OK) {
      return NULL;
    }
  } else {
    flowinfo = self->misc;
  }
  return flowinfo->find_func(flowinfo, flow);
}
unit_cell.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file unit_cell.h * * \brief Contains definition and partial implementation of sirius::Unit_cell class. */ #ifndef __UNIT_CELL_H__ #define __UNIT_CELL_H__ #include <algorithm> #include "descriptors.h" #include "atom_type.h" #include "atom_symmetry_class.h" #include "atom.h" #include "mpi_grid.hpp" #include "unit_cell_symmetry.h" #include "simulation_parameters.h" #include "json.hpp" namespace sirius { using json = nlohmann::json; /// Representation of a unit cell. class Unit_cell { private: /// Basic parameters of the simulation. 
Simulation_parameters const& parameters_; /// Mapping between atom type label and an ordered internal id in the range [0, \f$ N_{types} \f$). std::map<std::string, int> atom_type_id_map_; /// List of atom types. std::vector<Atom_type> atom_types_; /// List of atom classes. std::vector<Atom_symmetry_class> atom_symmetry_classes_; /// List of atoms. std::vector<Atom> atoms_; /// Split index of atoms. splindex<block> spl_num_atoms_; /// Global index of atom by index of PAW atom. std::vector<int> paw_atom_index_; /// Split index of PAW atoms. splindex<block> spl_num_paw_atoms_; /// Split index of atom symmetry classes. splindex<block> spl_num_atom_symmetry_classes_; /// Bravais lattice vectors in column order. /** The following convention is used to transform fractional coordinates to Cartesian: * \f[ * \vec v_{C} = {\bf L} \vec v_{f} * \f] */ matrix3d<double> lattice_vectors_; /// Inverse matrix of Bravais lattice vectors. /** This matrix is used to find fractional coordinates by Cartesian coordinates: * \f[ * \vec v_{f} = {\bf L}^{-1} \vec v_{C} * \f] */ matrix3d<double> inverse_lattice_vectors_; /// Reciprocal lattice vectors in column order. /** The following convention is used: * \f[ * \vec a_{i} \vec b_{j} = 2 \pi \delta_{ij} * \f] * or in matrix notation * \f[ * {\bf A} {\bf B}^{T} = 2 \pi {\bf I} * \f] */ matrix3d<double> reciprocal_lattice_vectors_; /// Volume \f$ \Omega \f$ of the unit cell. Volume of Brillouin zone is then \f$ (2\Pi)^3 / \Omega \f$. double omega_{0}; /// Total volume of the muffin-tin spheres. double volume_mt_{0}; /// Volume of the interstitial region. double volume_it_{0}; /// Total nuclear charge. int total_nuclear_charge_{0}; /// Total number of core electrons. double num_core_electrons_{0}; /// Total number of valence electrons. double num_valence_electrons_{0}; /// Total number of electrons. double num_electrons_{0}; /// List of equivalent atoms, provided externally. 
std::vector<int> equivalent_atoms_; /// Maximum number of muffin-tin points among all atom types. int max_num_mt_points_{0}; /// Total number of MT basis functions. int mt_basis_size_{0}; /// Maximum number of MT basis functions among all atoms. int max_mt_basis_size_{0}; /// Maximum number of MT radial basis functions among all atoms. int max_mt_radial_basis_size_{0}; /// Total number of augmented wave basis functions in the muffin-tins. /** This is equal to the total number of matching coefficients for each plane-wave. */ int mt_aw_basis_size_{0}; /// Total number of local orbital basis functions. int mt_lo_basis_size_{0}; /// Maximum AW basis size among all atoms. int max_mt_aw_basis_size_{0}; /// Maximum local orbital basis size among all atoms. int max_mt_lo_basis_size_{0}; /// List of nearest neighbours for each atom. std::vector<std::vector<nearest_neighbour_descriptor>> nearest_neighbours_; /// Minimum muffin-tin radius. double min_mt_radius_{0}; /// Maximum muffin-tin radius. double max_mt_radius_{0}; /// Maximum orbital quantum number of radial functions between all atom types. int lmax_{-1}; Communicator_bundle comm_bundle_atoms_; std::unique_ptr<Unit_cell_symmetry> symmetry_; Communicator const& comm_; /// Automatically determine new muffin-tin radii as a half distance between neighbor atoms. /** In order to guarantee a unique solution muffin-tin radii are dermined as a half distance * bethween nearest atoms. Initial values of the muffin-tin radii are ignored. 
*/ std::vector<double> find_mt_radii(); /// Check if MT spheres overlap inline bool check_mt_overlap(int& ia__, int& ja__); inline int next_atom_type_id(std::string label__) { /* check if the label was already added */ if (atom_type_id_map_.count(label__) != 0) { std::stringstream s; s << "atom type with label " << label__ << " is already in list"; TERMINATE(s); } /* take text id */ atom_type_id_map_[label__] = static_cast<int>(atom_types_.size()); return atom_type_id_map_[label__]; } public: Unit_cell(Simulation_parameters const& parameters__, Communicator const& comm__) : parameters_(parameters__) , comm_(comm__) { } /// Initialize the unit cell data /** Several things must be done during this phase: * 1. Compute number of electrons * 2. Compute MT basis function indices * 3. [if needed] Scale MT radii * 4. Check MT overlap * 5. Create radial grid for each atom type * 6. Find symmetry and assign symmetry class to each atom * 7. Create split indices for atoms and atom classes */ inline void initialize(); /// Add new atom type to the list of atom types and read necessary data from the .json file inline void add_atom_type(const std::string label, const std::string file_name = "") { if (atoms_.size()) { TERMINATE("Can't add new atom type if atoms are already added"); } int id = next_atom_type_id(label); atom_types_.push_back(std::move(Atom_type(parameters_, id, label, file_name))); } /// Add new atom to the list of atom types. 
inline void add_atom(const std::string label, vector3d<double> position, vector3d<double> vector_field) { if (atom_type_id_map_.count(label) == 0) { std::stringstream s; s << "atom type with label " << label << " is not found"; TERMINATE(s); } if (atom_id_by_position(position) >= 0) { std::stringstream s; s << "atom with the same position is already in list" << std::endl << " position : " << position[0] << " " << position[1] << " " << position[2]; TERMINATE(s); } atoms_.push_back(std::move(Atom(atom_type(label), position, vector_field))); atom_type(label).add_atom_id(static_cast<int>(atoms_.size()) - 1); } /// Add new atom without vector field to the list of atom types. inline void add_atom(const std::string label, vector3d<double> position) { add_atom(label, position, {0, 0, 0}); } /// Add PAW atoms. inline void init_paw() { for (int ia = 0; ia < num_atoms(); ia++) { if (atom(ia).type().is_paw()) { paw_atom_index_.push_back(ia); } } spl_num_paw_atoms_ = splindex<block>(num_paw_atoms(), comm_.size(), comm_.rank()); } /// Return number of PAW atoms. inline int num_paw_atoms() const { return static_cast<int>(paw_atom_index_.size()); } /// Get split index of PAW atoms. inline splindex<block> spl_num_paw_atoms() const { return spl_num_paw_atoms_; } inline int spl_num_paw_atoms(int idx__) const { return spl_num_paw_atoms_[idx__]; } inline int paw_atom_index(int ipaw__) const { return paw_atom_index_[ipaw__]; } /// Print basic info. inline void print_info(int verbosity_); inline unit_cell_parameters_descriptor unit_cell_parameters(); /// Get crystal symmetries and equivalent atoms. /** Makes a call to spglib providing the basic unit cell information: lattice vectors and atomic types * and positions. Gets back symmetry operations and a table of equivalent atoms. The table of equivalent * atoms is then used to make a list of atom symmetry classes and related data. */ inline void get_symmetry(); /// Write structure to CIF file. 
inline void write_cif(); /// Write structure to JSON file. inline json serialize(); /// Set matrix of lattice vectors. /** Initializes lattice vectors, inverse lattice vector matrix, reciprocal lattice vectors and the * unit cell volume. */ inline void set_lattice_vectors(matrix3d<double> lattice_vectors__) { lattice_vectors_ = lattice_vectors__; inverse_lattice_vectors_ = inverse(lattice_vectors_); omega_ = std::abs(lattice_vectors_.det()); reciprocal_lattice_vectors_ = transpose(inverse(lattice_vectors_)) * twopi; } /// Set lattice vectors. inline void set_lattice_vectors(vector3d<double> a0__, vector3d<double> a1__, vector3d<double> a2__) { matrix3d<double> lv; for (int x : {0, 1, 2}) { lv(x, 0) = a0__[x]; lv(x, 1) = a1__[x]; lv(x, 2) = a2__[x]; } set_lattice_vectors(lv); } /// Find the cluster of nearest neighbours around each atom inline void find_nearest_neighbours(double cluster_radius); inline bool is_point_in_mt(vector3d<double> vc, int& ja, int& jr, double& dr, double tp[2]) const; inline void generate_radial_functions(); inline void generate_radial_integrals(); inline std::string chemical_formula(); inline int atom_id_by_position(vector3d<double> position__) { for (int ia = 0; ia < num_atoms(); ia++) { auto vd = atom(ia).position() - position__; if (vd.length() < 1e-10) { return ia; } } return -1; } template <typename T> inline vector3d<double> get_cartesian_coordinates(vector3d<T> a__) const { return lattice_vectors_ * a__; } inline vector3d<double> get_fractional_coordinates(vector3d<double> a) const { return inverse_lattice_vectors_ * a; } /// Unit cell volume. inline double omega() const { return omega_; } /// Number of atom types. inline int num_atom_types() const { assert(atom_types_.size() == atom_type_id_map_.size()); return static_cast<int>(atom_types_.size()); } /// Return atom type instance by id. 
inline Atom_type& atom_type(int id__) { assert(id__ >= 0 && id__ < (int)atom_types_.size()); return atom_types_[id__]; } /// Return const atom type instance by id. inline Atom_type const& atom_type(int id__) const { assert(id__ >= 0 && id__ < (int)atom_types_.size()); return atom_types_[id__]; } /// Return atom type instance by label. inline Atom_type& atom_type(std::string const label__) { if (!atom_type_id_map_.count(label__)) { std::stringstream s; s << "atom type " << label__ << " is not found"; TERMINATE(s); } int id = atom_type_id_map_.at(label__); return atom_type(id); } /// Return const atom type instance by label. inline Atom_type const& atom_type(std::string const label__) const { if (!atom_type_id_map_.count(label__)) { std::stringstream s; s << "atom type " << label__ << " is not found"; TERMINATE(s); } int id = atom_type_id_map_.at(label__); return atom_type(id); } /// Number of atom symmetry classes. inline int num_atom_symmetry_classes() const { return static_cast<int>(atom_symmetry_classes_.size()); } /// Return const symmetry class instance by class id. inline Atom_symmetry_class const& atom_symmetry_class(int id__) const { return atom_symmetry_classes_[id__]; } /// Return symmetry class instance by class id. inline Atom_symmetry_class& atom_symmetry_class(int id__) { return atom_symmetry_classes_[id__]; } /// Number of atoms in the unit cell. inline int num_atoms() const { return static_cast<int>(atoms_.size()); } /// Return const atom instance by id. inline Atom const& atom(int id__) const { assert(id__ >= 0 && id__ < (int)atoms_.size()); return atoms_[id__]; } /// Return atom instance by id. inline Atom& atom(int id__) { assert(id__ >= 0 && id__ < (int)atoms_.size()); return atoms_[id__]; } inline int total_nuclear_charge() const { return total_nuclear_charge_; } /// Total number of electrons (core + valence). inline double num_electrons() const { return num_electrons_; } /// Number of valence electrons. 
inline double num_valence_electrons() const { return num_valence_electrons_; } /// Number of core electrons. inline double num_core_electrons() const { return num_core_electrons_; } /// Maximum number of muffin-tin points among all atom types. inline int max_num_mt_points() const { return max_num_mt_points_; } /// Total number of the augmented wave basis functions over all atoms. inline int mt_aw_basis_size() const { return mt_aw_basis_size_; } /// Total number of local orbital basis functions over all atoms. inline int mt_lo_basis_size() const { return mt_lo_basis_size_; } /// Total number of the muffin-tin basis functions. /** Total number of MT basis functions equals to the sum of the total number of augmented wave * basis functions and the total number of local orbital basis functions among all atoms. It controls * the size of the muffin-tin part of the first-variational states and second-variational wave functions. */ inline int mt_basis_size() const { return mt_basis_size_; } /// Maximum number of basis functions among all atom types. inline int max_mt_basis_size() const { return max_mt_basis_size_; } /// Maximum number of radial functions among all atom types. inline int max_mt_radial_basis_size() const { return max_mt_radial_basis_size_; } /// Minimum muffin-tin radius. inline double min_mt_radius() const { return min_mt_radius_; } /// Maximum muffin-tin radius. inline double max_mt_radius() const { return max_mt_radius_; } /// Maximum number of AW basis functions among all atom types. 
inline int max_mt_aw_basis_size() const { return max_mt_aw_basis_size_; } inline int max_mt_lo_basis_size() const { return max_mt_lo_basis_size_; } void set_equivalent_atoms(int const* equivalent_atoms__) { equivalent_atoms_.resize(num_atoms()); memcpy(&equivalent_atoms_[0], equivalent_atoms__, num_atoms() * sizeof(int)); } inline splindex<block> const& spl_num_atoms() const { return spl_num_atoms_; } inline int spl_num_atoms(int i) const { return static_cast<int>(spl_num_atoms_[i]); } inline splindex<block> const& spl_num_atom_symmetry_classes() const { return spl_num_atom_symmetry_classes_; } inline int spl_num_atom_symmetry_classes(int i) const { return static_cast<int>(spl_num_atom_symmetry_classes_[i]); } inline double volume_mt() const { return volume_mt_; } inline double volume_it() const { return volume_it_; } inline int lmax() const { return lmax_; } inline int num_nearest_neighbours(int ia) const { return static_cast<int>(nearest_neighbours_[ia].size()); } inline nearest_neighbour_descriptor const& nearest_neighbour(int i, int ia) const { return nearest_neighbours_[ia][i]; } inline Unit_cell_symmetry const& symmetry() const { return (*symmetry_); } inline matrix3d<double> const& lattice_vectors() const { return lattice_vectors_; } inline matrix3d<double> const& inverse_lattice_vectors() const { return inverse_lattice_vectors_; } inline matrix3d<double> const& reciprocal_lattice_vectors() const { return reciprocal_lattice_vectors_; } /// Return a single lattice vector. 
inline vector3d<double> lattice_vector(int idx__) const { return vector3d<double>(lattice_vectors_(0, idx__), lattice_vectors_(1, idx__), lattice_vectors_(2, idx__)); } inline void import(Unit_cell_input const& inp__) { if (inp__.exist_) { /* first, load all types */ for (int iat = 0; iat < (int)inp__.labels_.size(); iat++) { auto label = inp__.labels_[iat]; auto fname = inp__.atom_files_.at(label); add_atom_type(label, fname); } /* then load atoms */ for (int iat = 0; iat < (int)inp__.labels_.size(); iat++) { auto label = inp__.labels_[iat]; auto fname = inp__.atom_files_.at(label); for (size_t ia = 0; ia < inp__.coordinates_[iat].size(); ia++) { auto v = inp__.coordinates_[iat][ia]; vector3d<double> p(v[0], v[1], v[2]); vector3d<double> f(v[3], v[4], v[5]); add_atom(label, p, f); } } set_lattice_vectors(inp__.a0_, inp__.a1_, inp__.a2_); } } Simulation_parameters const& parameters() const { return parameters_; } Communicator const& comm() const { return comm_; } }; inline void Unit_cell::initialize() { PROFILE("sirius::Unit_cell::initialize"); /* split number of atom between all MPI ranks */ spl_num_atoms_ = splindex<block>(num_atoms(), comm_.size(), comm_.rank()); /* initialize atom types */ int offs_lo{0}; for (int iat = 0; iat < num_atom_types(); iat++) { atom_type(iat).init(offs_lo); max_num_mt_points_ = std::max(max_num_mt_points_, atom_type(iat).num_mt_points()); max_mt_basis_size_ = std::max(max_mt_basis_size_, atom_type(iat).mt_basis_size()); max_mt_radial_basis_size_ = std::max(max_mt_radial_basis_size_, atom_type(iat).mt_radial_basis_size()); max_mt_aw_basis_size_ = std::max(max_mt_aw_basis_size_, atom_type(iat).mt_aw_basis_size()); max_mt_lo_basis_size_ = std::max(max_mt_lo_basis_size_, atom_type(iat).mt_lo_basis_size()); lmax_ = std::max(lmax_, atom_type(iat).indexr().lmax()); offs_lo += atom_type(iat).mt_lo_basis_size(); } /* find the charges */ for (int i = 0; i < num_atoms(); i++) { total_nuclear_charge_ += atom(i).zn(); num_core_electrons_ += 
atom(i).type().num_core_electrons(); num_valence_electrons_ += atom(i).type().num_valence_electrons(); } num_electrons_ = num_core_electrons_ + num_valence_electrons_; /* initialize atoms */ for (int ia = 0; ia < num_atoms(); ia++) { atom(ia).init(mt_aw_basis_size_, mt_lo_basis_size_, mt_basis_size_); mt_aw_basis_size_ += atom(ia).mt_aw_basis_size(); mt_lo_basis_size_ += atom(ia).mt_lo_basis_size(); mt_basis_size_ += atom(ia).mt_basis_size(); } assert(mt_basis_size_ == mt_aw_basis_size_ + mt_lo_basis_size_); auto v0 = lattice_vector(0); auto v1 = lattice_vector(1); auto v2 = lattice_vector(2); double r = std::max(std::max(v0.length(), std::max(v1.length(), v2.length())), parameters_.parameters_input().nn_radius_); find_nearest_neighbours(r); if (parameters_.full_potential()) { /* find new MT radii and initialize radial grid */ if (parameters_.auto_rmt()) { std::vector<double> Rmt = find_mt_radii(); for (int iat = 0; iat < num_atom_types(); iat++) { //atom_type(iat).set_mt_radius(Rmt[iat]); double r0 = atom_type(iat).radial_grid().first(); atom_type(iat).set_radial_grid(radial_grid_t::exponential_grid, atom_type(iat).num_mt_points(), r0, Rmt[iat]); } } int ia, ja; if (check_mt_overlap(ia, ja)) { std::stringstream s; s << "overlaping muffin-tin spheres for atoms " << ia << "(" << atom(ia).type().symbol() << ")" << " and " << ja << "(" << atom(ja).type().symbol() << ")" << std::endl << " radius of atom " << ia << " : " << atom(ia).mt_radius() << std::endl << " radius of atom " << ja << " : " << atom(ja).mt_radius() << std::endl << " distance : " << nearest_neighbours_[ia][1].distance << " " << nearest_neighbours_[ja][1].distance; TERMINATE(s); } min_mt_radius_ = 1e100; max_mt_radius_ = 0; for (int i = 0; i < num_atom_types(); i++) { min_mt_radius_ = std::min(min_mt_radius_, atom_type(i).mt_radius()); max_mt_radius_ = std::max(max_mt_radius_, atom_type(i).mt_radius()); } } if (parameters_.use_symmetry()) { get_symmetry(); } spl_num_atom_symmetry_classes_ = 
splindex<block>(num_atom_symmetry_classes(), comm_.size(), comm_.rank()); volume_mt_ = 0.0; if (parameters_.full_potential()) { for (int ia = 0; ia < num_atoms(); ia++) { volume_mt_ += fourpi * std::pow(atom(ia).mt_radius(), 3) / 3.0; } } volume_it_ = omega() - volume_mt_; init_paw(); //== write_cif(); //== if (comm().rank() == 0) { //== std::ofstream ofs(std::string("unit_cell.json"), std::ofstream::out | std::ofstream::trunc); //== ofs << serialize().dump(4); //== } } inline void Unit_cell::get_symmetry() { PROFILE("sirius::Unit_cell::get_symmetry"); if (num_atoms() == 0) { return; } if (atom_symmetry_classes_.size() != 0) { atom_symmetry_classes_.clear(); for (int ia = 0; ia < num_atoms(); ia++) { atom(ia).set_symmetry_class(nullptr); } } if (symmetry_ != nullptr) { TERMINATE("Symmetry() object is already allocated"); } mdarray<double, 2> positions(3, num_atoms()); mdarray<double, 2> spins(3, num_atoms()); std::vector<int> types(num_atoms()); for (int ia = 0; ia < num_atoms(); ia++) { auto vp = atom(ia).position(); auto vf = atom(ia).vector_field(); for (int x : {0, 1, 2}) { positions(x, ia) = vp[x]; spins(x, ia) = vf[x]; } types[ia] = atom(ia).type_id(); } symmetry_ = std::unique_ptr<Unit_cell_symmetry>( new Unit_cell_symmetry(lattice_vectors_, num_atoms(), positions, spins, types, parameters_.spglib_tolerance())); int atom_class_id{-1}; std::vector<int> asc(num_atoms(), -1); for (int i = 0; i < num_atoms(); i++) { /* if symmetry class is not assigned to this atom */ if (asc[i] == -1) { /* take next id */ atom_class_id++; atom_symmetry_classes_.push_back(std::move(Atom_symmetry_class(atom_class_id, atoms_[i].type()))); /* scan all atoms */ for (int j = 0; j < num_atoms(); j++) { bool is_equal = (equivalent_atoms_.size()) ? 
(equivalent_atoms_[j] == equivalent_atoms_[i]) : (symmetry_->atom_symmetry_class(j) == symmetry_->atom_symmetry_class(i)); /* assign new class id for all equivalent atoms */ if (is_equal) { asc[j] = atom_class_id; atom_symmetry_classes_.back().add_atom_id(j); } } } } for (auto& e : atom_symmetry_classes_) { for (int i = 0; i < e.num_atoms(); i++) { int ia = e.atom_id(i); atoms_[ia].set_symmetry_class(&e); } } assert(num_atom_symmetry_classes() != 0); } inline std::vector<double> Unit_cell::find_mt_radii() { if (nearest_neighbours_.size() == 0) { TERMINATE("array of nearest neighbours is empty"); } std::vector<double> Rmt(num_atom_types(), 1e10); if (parameters_.auto_rmt() == 1) { for (int ia = 0; ia < num_atoms(); ia++) { int id1 = atom(ia).type_id(); if (nearest_neighbours_[ia].size() > 1) { int ja = nearest_neighbours_[ia][1].atom_id; int id2 = atom(ja).type_id(); /* don't allow spheres to touch: take a smaller value than half a distance */ double R = std::min(parameters_.rmt_max(), 0.95 * nearest_neighbours_[ia][1].distance / 2); /* take minimal R for the given atom type */ Rmt[id1] = std::min(R, Rmt[id1]); Rmt[id2] = std::min(R, Rmt[id2]); } else { Rmt[id1] = parameters_.rmt_max(); } } } if (parameters_.auto_rmt() == 2) { std::vector<double> scale(num_atom_types(), 1e10); for (int ia = 0; ia < num_atoms(); ia++) { int id1 = atom(ia).type_id(); if (nearest_neighbours_[ia].size() > 1) { int ja = nearest_neighbours_[ia][1].atom_id; int id2 = atom(ja).type_id(); double d = nearest_neighbours_[ia][1].distance; double s = 0.95 * d / (atom_type(id1).mt_radius() + atom_type(id2).mt_radius()); scale[id1] = std::min(s, scale[id1]); scale[id2] = std::min(s, scale[id2]); } else { scale[id1] = parameters_.rmt_max() / atom_type(id1).mt_radius(); } } for (int iat = 0; iat < num_atom_types(); iat++) { Rmt[iat] = std::min(parameters_.rmt_max(), atom_type(iat).mt_radius() * scale[iat]); } } /* Suppose we have 3 different atoms. 
First we determint Rmt between 1st and 2nd atom, * then we determine Rmt between (let's say) 2nd and 3rd atom and at this point we reduce * the Rmt of the 2nd atom. This means that the 1st atom gets a possibility to expand if * it is far from the 3rd atom. */ bool inflate = true; if (inflate) { std::vector<bool> scale_Rmt(num_atom_types(), true); for (int ia = 0; ia < num_atoms(); ia++) { int id1 = atom(ia).type_id(); if (nearest_neighbours_[ia].size() > 1) { int ja = nearest_neighbours_[ia][1].atom_id; int id2 = atom(ja).type_id(); double dist = nearest_neighbours_[ia][1].distance; if (Rmt[id1] + Rmt[id2] > dist * 0.94) { scale_Rmt[id1] = false; scale_Rmt[id2] = false; } } } for (int ia = 0; ia < num_atoms(); ia++) { int id1 = atom(ia).type_id(); if (nearest_neighbours_[ia].size() > 1) { int ja = nearest_neighbours_[ia][1].atom_id; int id2 = atom(ja).type_id(); double dist = nearest_neighbours_[ia][1].distance; if (scale_Rmt[id1]) { Rmt[id1] = std::min(parameters_.rmt_max(), 0.95 * (dist - Rmt[id2])); } } } } for (int i = 0; i < num_atom_types(); i++) { if (Rmt[i] < 0.3) { std::stringstream s; s << "muffin-tin radius for atom type " << i << " (" << atom_types_[i].label() << ") is too small: " << Rmt[i]; TERMINATE(s); } } return Rmt; } inline bool Unit_cell::check_mt_overlap(int& ia__, int& ja__) { if (num_atoms() != 0 && nearest_neighbours_.size() == 0) { TERMINATE("array of nearest neighbours is empty"); } for (int ia = 0; ia < num_atoms(); ia++) { /* first atom is always the central one itself */ if (nearest_neighbours_[ia].size() <= 1) { continue; } int ja = nearest_neighbours_[ia][1].atom_id; double dist = nearest_neighbours_[ia][1].distance; if (atom(ia).mt_radius() + atom(ja).mt_radius() >= dist) { ia__ = ia; ja__ = ja; return true; } } return false; } inline void Unit_cell::print_info(int verbosity_) { printf("\n"); printf("Unit cell\n"); for (int i = 0; i < 80; i++) { printf("-"); } printf("\n"); printf("lattice vectors\n"); for (int i = 0; i < 3; i++) { 
printf(" a%1i : %18.10f %18.10f %18.10f \n", i + 1, lattice_vectors_(0, i), lattice_vectors_(1, i), lattice_vectors_(2, i)); } printf("reciprocal lattice vectors\n"); for (int i = 0; i < 3; i++) { printf(" b%1i : %18.10f %18.10f %18.10f \n", i + 1, reciprocal_lattice_vectors_(0, i), reciprocal_lattice_vectors_(1, i), reciprocal_lattice_vectors_(2, i)); } printf("\n"); printf("unit cell volume : %18.8f [a.u.^3]\n", omega()); printf("1/sqrt(omega) : %18.8f\n", 1.0 / sqrt(omega())); printf("MT volume : %f (%5.2f%%)\n", volume_mt(), volume_mt() * 100 / omega()); printf("IT volume : %f (%5.2f%%)\n", volume_it(), volume_it() * 100 / omega()); printf("\n"); printf("number of atom types : %i\n", num_atom_types()); for (int i = 0; i < num_atom_types(); i++) { int id = atom_type(i).id(); printf("type id : %i symbol : %2s mt_radius : %10.6f\n", id, atom_type(i).symbol().c_str(), atom_type(i).mt_radius()); } printf("number of atoms : %i\n", num_atoms()); printf("number of symmetry classes : %i\n", num_atom_symmetry_classes()); if (!parameters_.full_potential()) { printf("number of PAW atoms : %i\n", num_paw_atoms()); } if (verbosity_ >= 2) { printf("\n"); printf("atom id position vector_field type id class id\n"); printf("----------------------------------------------------------------------------------------\n"); for (int i = 0; i < num_atoms(); i++) { auto pos = atom(i).position(); auto vf = atom(i).vector_field(); printf("%6i %f %f %f %f %f %f %6i %6i\n", i, pos[0], pos[1], pos[2], vf[0], vf[1], vf[2], atom(i).type_id(), atom(i).symmetry_class_id()); } printf("\n"); for (int ic = 0; ic < num_atom_symmetry_classes(); ic++) { printf("class id : %i atom id : ", ic); for (int i = 0; i < atom_symmetry_class(ic).num_atoms(); i++) { printf("%i ", atom_symmetry_class(ic).atom_id(i)); } printf("\n"); } printf("\n"); printf("atom id position (Cartesian, a.u.)\n"); printf("----------------------------------------------------------------------------------------\n"); for (int i = 0; i < 
num_atoms(); i++) { auto pos = atom(i).position(); auto vc = get_cartesian_coordinates(pos); printf("%6i %18.12f %18.12f %18.12f\n", i, vc[0], vc[1], vc[2]); } printf("\n"); for (int ic = 0; ic < num_atom_symmetry_classes(); ic++) { printf("class id : %i atom id : ", ic); for (int i = 0; i < atom_symmetry_class(ic).num_atoms(); i++) { printf("%i ", atom_symmetry_class(ic).atom_id(i)); } printf("\n"); } } if (symmetry_ != nullptr) { printf("\n"); printf("space group number : %i\n", symmetry_->spacegroup_number()); printf("international symbol : %s\n", symmetry_->international_symbol().c_str()); printf("Hall symbol : %s\n", symmetry_->hall_symbol().c_str()); printf("number of operations : %i\n", symmetry_->num_mag_sym()); printf("transformation matrix : \n"); auto tm = symmetry_->transformation_matrix(); for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { printf("%12.6f ", tm(i, j)); } printf("\n"); } printf("origin shift : \n"); auto t = symmetry_->origin_shift(); printf("%12.6f %12.6f %12.6f\n", t[0], t[1], t[2]); if (verbosity_ >= 2) { printf("symmetry operations : \n"); for (int isym = 0; isym < symmetry_->num_mag_sym(); isym++) { auto R = symmetry_->magnetic_group_symmetry(isym).spg_op.R; auto t = symmetry_->magnetic_group_symmetry(isym).spg_op.t; auto S = symmetry_->magnetic_group_symmetry(isym).spin_rotation; printf("isym : %i\n", isym); printf("R : "); for (int i = 0; i < 3; i++) { if (i) { printf(" "); } for (int j = 0; j < 3; j++) { printf("%3i ", R(i, j)); } printf("\n"); } printf("T : "); for (int j = 0; j < 3; j++) { printf("%8.4f ", t[j]); } printf("\n"); printf("S : "); for (int i = 0; i < 3; i++) { if (i) { printf(" "); } for (int j = 0; j < 3; j++) { printf("%8.4f ", S(i, j)); } printf("\n"); } printf("\n"); } } } } inline unit_cell_parameters_descriptor Unit_cell::unit_cell_parameters() { unit_cell_parameters_descriptor d; vector3d<double> v0(lattice_vectors_(0, 0), lattice_vectors_(1, 0), lattice_vectors_(2, 0)); vector3d<double> 
v1(lattice_vectors_(0, 1), lattice_vectors_(1, 1), lattice_vectors_(2, 1)); vector3d<double> v2(lattice_vectors_(0, 2), lattice_vectors_(1, 2), lattice_vectors_(2, 2)); d.a = v0.length(); d.b = v1.length(); d.c = v2.length(); d.alpha = std::acos(dot(v1, v2) / d.b / d.c) * 180 / pi; d.beta = std::acos(dot(v0, v2) / d.a / d.c) * 180 / pi; d.gamma = std::acos(dot(v0, v1) / d.a / d.b) * 180 / pi; return d; } inline void Unit_cell::write_cif() { if (comm_.rank() == 0) { FILE* fout = fopen("unit_cell.cif", "w"); auto d = unit_cell_parameters(); fprintf(fout, "_cell_length_a %f\n", d.a); fprintf(fout, "_cell_length_b %f\n", d.b); fprintf(fout, "_cell_length_c %f\n", d.c); fprintf(fout, "_cell_angle_alpha %f\n", d.alpha); fprintf(fout, "_cell_angle_beta %f\n", d.beta); fprintf(fout, "_cell_angle_gamma %f\n", d.gamma); // fprintf(fout, "loop_\n"); // fprintf(fout, "_symmetry_equiv_pos_as_xyz\n"); fprintf(fout, "loop_\n"); fprintf(fout, "_atom_site_label\n"); fprintf(fout, "_atom_type_symbol\n"); fprintf(fout, "_atom_site_fract_x\n"); fprintf(fout, "_atom_site_fract_y\n"); fprintf(fout, "_atom_site_fract_z\n"); for (int ia = 0; ia < num_atoms(); ia++) { auto pos = atom(ia).position(); fprintf(fout, "%i %s %f %f %f\n", ia + 1, atom(ia).type().label().c_str(), pos[0], pos[1], pos[2]); } fclose(fout); } } inline json Unit_cell::serialize() { json dict; dict["lattice_vectors"] = {{lattice_vectors_(0, 0), lattice_vectors_(1, 0), lattice_vectors_(2, 0)}, {lattice_vectors_(0, 1), lattice_vectors_(1, 1), lattice_vectors_(2, 1)}, {lattice_vectors_(0, 2), lattice_vectors_(1, 2), lattice_vectors_(2, 2)}}; dict["atom_types"] = json::array(); for (int iat = 0; iat < num_atom_types(); iat++) { dict["atom_types"].push_back(atom_type(iat).label()); } dict["atom_files"] = json::object(); for (int iat = 0; iat < num_atom_types(); iat++) { dict["atom_files"][atom_type(iat).label()] = atom_type(iat).file_name(); } dict["atoms"] = json::object(); for (int iat = 0; iat < num_atom_types(); iat++) 
// --- Tail of a JSON-serialization routine; its signature lies before this
// view and is reproduced here unchanged. For each atom type it appends the
// position triplet of every atom of that type under dict["atoms"][<label>].
{
    dict["atoms"][atom_type(iat).label()] = json::array();
    for (int i = 0; i < atom_type(iat).num_atoms(); i++) {
        int ia = atom_type(iat).atom_id(i);
        auto v = atom(ia).position();
        dict["atoms"][atom_type(iat).label()].push_back({v[0], v[1], v[2]});
    }
}
// NOTE(review): `return std::move(dict);` inhibits NRVO; a plain
// `return dict;` is preferable (left unchanged in this comment-only pass).
return std::move(dict);
}

/// Build the nearest-neighbour list for every atom within cluster_radius (a.u.).
///
/// For each atom ia, scans all atoms ja in all lattice translations that can
/// fall inside the radius (bounds from find_translations()), records each
/// neighbour's id, translation and Cartesian distance, and stores the per-atom
/// lists sorted by increasing distance in nearest_neighbours_.
/// Parallelized over central atoms with OpenMP; each thread writes only its
/// own nearest_neighbours_[ia] slot, so no synchronization is needed.
/// Optionally prints the table on MPI rank 0 when print_neighbors_ is set.
inline void Unit_cell::find_nearest_neighbours(double cluster_radius)
{
    PROFILE("sirius::Unit_cell::find_nearest_neighbours");

    /* upper bounds (per lattice direction) on the translations to scan */
    auto max_frac_coord = find_translations(cluster_radius, lattice_vectors_);

    nearest_neighbours_.clear();
    nearest_neighbours_.resize(num_atoms());
    #pragma omp parallel for default(shared)
    for (int ia = 0; ia < num_atoms(); ia++) {
        /* Cartesian position of the central atom */
        auto iapos = get_cartesian_coordinates(atom(ia).position());

        std::vector<nearest_neighbour_descriptor> nn;
        /* (distance, index-into-nn) pairs; sorted to order neighbours by distance */
        std::vector<std::pair<double, int>> nn_sort;

        for (int i0 = -max_frac_coord[0]; i0 <= max_frac_coord[0]; i0++) {
            for (int i1 = -max_frac_coord[1]; i1 <= max_frac_coord[1]; i1++) {
                for (int i2 = -max_frac_coord[2]; i2 <= max_frac_coord[2]; i2++) {
                    nearest_neighbour_descriptor nnd;
                    nnd.translation[0] = i0;
                    nnd.translation[1] = i1;
                    nnd.translation[2] = i2;
                    /* Cartesian vector of this lattice translation */
                    auto vt = get_cartesian_coordinates<int>(nnd.translation);
                    for (int ja = 0; ja < num_atoms(); ja++) {
                        nnd.atom_id = ja;
                        auto japos = get_cartesian_coordinates(atom(ja).position());
                        /* vector from central atom to the translated image of ja */
                        vector3d<double> v = japos + vt - iapos;
                        nnd.distance = v.length();
                        /* note: ia itself at zero translation enters with distance 0 */
                        if (nnd.distance <= cluster_radius) {
                            nn.push_back(nnd);
                            nn_sort.push_back(std::pair<double, int>(nnd.distance, (int)nn.size() - 1));
                        }
                    }
                }
            }
        }
        /* order by distance and store */
        std::sort(nn_sort.begin(), nn_sort.end());
        nearest_neighbours_[ia].resize(nn.size());
        for (int i = 0; i < (int)nn.size(); i++) {
            nearest_neighbours_[ia][i] = nn[nn_sort[i].second];
        }
    }

    /* optional report, rank 0 only */
    if (parameters_.control().print_neighbors_ && comm_.rank() == 0) {
        printf("Nearest neighbors\n");
        printf("=================\n");
        for (int ia = 0; ia < num_atoms(); ia++) {
            printf("Central atom: %s (%i)\n", atom(ia).type().symbol().c_str(), ia);
            for (int i = 0; i < 80; i++) {
                printf("-");
            }
            printf("\n");
            printf("atom ( id) D [a.u.] translation\n");
            for (int i = 0; i < 80; i++) {
                printf("-");
            }
            printf("\n");
            for (int i = 0; i < (int)nearest_neighbours_[ia].size(); i++) {
                int ja = nearest_neighbours_[ia][i].atom_id;
                printf("%4s (%4i) %12.6f %4i %4i %4i\n", atom(ja).type().symbol().c_str(), ja,
                       nearest_neighbours_[ia][i].distance, nearest_neighbours_[ia][i].translation[0],
                       nearest_neighbours_[ia][i].translation[1], nearest_neighbours_[ia][i].translation[2]);
            }
            printf("\n");
        }
    }
}

/// Test whether Cartesian point vc lies inside any muffin-tin sphere.
///
/// The point is first reduced to the primitive cell; all atoms in the 3x3x3
/// block of neighbouring cells are then checked, so spheres crossing the cell
/// boundary are handled. On a hit: ja = atom index, tp[0]/tp[1] = theta/phi of
/// the point relative to the atom centre, jr = index of the radial-grid
/// interval containing the radius, dr = offset from grid point jr; returns
/// true. On a miss: ja = jr = -1 (dr and tp are left untouched), returns false.
/// Returns at the FIRST sphere found; overlapping spheres resolve to the
/// lowest (ia, i0, i1, i2) in scan order.
inline bool Unit_cell::is_point_in_mt(vector3d<double> vc, int& ja, int& jr, double& dr, double tp[2]) const
{
    /* reduce coordinates to the primitive unit cell */
    auto vr = reduce_coordinates(get_fractional_coordinates(vc));

    for (int ia = 0; ia < num_atoms(); ia++) {
        for (int i0 = -1; i0 <= 1; i0++) {
            for (int i1 = -1; i1 <= 1; i1++) {
                for (int i2 = -1; i2 <= 1; i2++) {
                    /* atom position (possibly in a neighbouring cell) */
                    vector3d<double> posf = vector3d<double>(i0, i1, i2) + atom(ia).position();
                    /* vector connecting center of atom and reduced point */
                    vector3d<double> vf = vr.first - posf;
                    /* convert to spherical coordinates: vs = (r, theta, phi) */
                    auto vs = SHT::spherical_coordinates(get_cartesian_coordinates(vf));
                    if (vs[0] < atom(ia).mt_radius()) {
                        ja    = ia;
                        tp[0] = vs[1]; // theta
                        tp[1] = vs[2]; // phi

                        if (vs[0] < atom(ia).type().radial_grid(0)) {
                            /* inside the innermost grid point */
                            jr = 0;
                            dr = 0.0;
                        } else {
                            /* locate the enclosing radial interval [ir, ir+1) */
                            for (int ir = 0; ir < atom(ia).num_mt_points() - 1; ir++) {
                                if (vs[0] >= atom(ia).type().radial_grid(ir) &&
                                    vs[0] < atom(ia).type().radial_grid(ir + 1)) {
                                    jr = ir;
                                    dr = vs[0] - atom(ia).type().radial_grid(ir);
                                    break;
                                }
                            }
                        }
                        return true;
                    }
                }
            }
        }
    }
    ja = -1;
    jr = -1;
    return false;
}

/// Generate the muffin-tin radial functions for all atom symmetry classes.
///
/// Work is distributed over MPI ranks by the symmetry-class splindex: each
/// rank generates its local classes, then results are broadcast to all ranks
/// via sync_radial_functions(). At verbosity >= 1 the linearization energies
/// are collected (rank-ordered pstdout); at verbosity >= 4 rank 0 dumps the
/// local-orbital details.
inline void Unit_cell::generate_radial_functions()
{
    PROFILE("sirius::Unit_cell::generate_radial_functions");

    /* compute local portion */
    for (int icloc = 0; icloc < (int)spl_num_atom_symmetry_classes().local_size(); icloc++) {
        int ic = spl_num_atom_symmetry_classes(icloc);
        atom_symmetry_class(ic).generate_radial_functions(parameters_.valence_relativity());
    }
    /* broadcast each class from the rank that owns it */
    for (int ic = 0; ic < num_atom_symmetry_classes(); ic++) {
        int rank = spl_num_atom_symmetry_classes().local_rank(ic);
        atom_symmetry_class(ic).sync_radial_functions(comm_, rank);
    }

    if (parameters_.control().verbosity_ >= 1) {
        runtime::pstdout pout(comm_);
        for (int icloc = 0; icloc < (int)spl_num_atom_symmetry_classes().local_size(); icloc++) {
            int ic = spl_num_atom_symmetry_classes(icloc);
            atom_symmetry_class(ic).write_enu(pout);
        }
        if (comm_.rank() == 0) {
            printf("\n");
            printf("Linearization energies\n");
        }
    }
    if (parameters_.control().verbosity_ >= 4 && comm_.rank() == 0) {
        for (int ic = 0; ic < num_atom_symmetry_classes(); ic++) {
            atom_symmetry_class(ic).dump_lo();
        }
    }
}

/// Generate radial integrals, first per symmetry class then per atom.
///
/// Mirrors generate_radial_functions(): each MPI rank computes its local
/// share (classes via the class splindex, atoms via spl_num_atoms_) and the
/// results are synchronized from the owning rank to all ranks.
inline void Unit_cell::generate_radial_integrals()
{
    PROFILE("sirius::Unit_cell::generate_radial_integrals");

    /* per-symmetry-class integrals: compute local, then broadcast */
    for (int icloc = 0; icloc < spl_num_atom_symmetry_classes().local_size(); icloc++) {
        int ic = spl_num_atom_symmetry_classes(icloc);
        atom_symmetry_class(ic).generate_radial_integrals(parameters_.valence_relativity());
    }
    for (int ic = 0; ic < num_atom_symmetry_classes(); ic++) {
        int rank = spl_num_atom_symmetry_classes().local_rank(ic);
        atom_symmetry_class(ic).sync_radial_integrals(comm_, rank);
    }

    /* per-atom integrals: compute local (self-communicator), then broadcast */
    for (int ialoc = 0; ialoc < spl_num_atoms_.local_size(); ialoc++) {
        int ia = spl_num_atoms_[ialoc];
        atom(ia).generate_radial_integrals(parameters_.processing_unit(), mpi_comm_self());
    }
    for (int ia = 0; ia < num_atoms(); ia++) {
        int rank = spl_num_atoms().local_rank(ia);
        atom(ia).sync_radial_integrals(comm_, rank);
    }
}

/// Compose the chemical formula string, e.g. "Si2O4" (a count of 1 is
/// omitted, so "SiO2" style output requires exactly one atom of the type).
/// Types appear in atom-type registration order, not alphabetically.
inline std::string Unit_cell::chemical_formula()
{
    std::string name;
    for (int iat = 0; iat < num_atom_types(); iat++) {
        name += atom_type(iat).symbol();
        /* count atoms of this type */
        int n = 0;
        for (int ia = 0; ia < num_atoms(); ia++) {
            if (atom(ia).type_id() == atom_type(iat).id())
                n++;
        }
        if (n != 1) {
            /* NOTE(review): std::to_string(n) would be simpler than a
             * stringstream here (comment-only pass, left unchanged). */
            std::stringstream s;
            s << n;
            name = (name + s.str());
        }
    }

    return name;
}

} // namespace sirius

#endif // __UNIT_CELL_H__
GB_binop__pair_bool.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): any fix below must be made in the Generator/ sources, or it
// will be lost on the next regeneration.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__pair_bool)
// A.*B function (eWiseMult):      GB ((none))
// A.*B function (eWiseMult):      GB ((none))
// A.*B function (eWiseMult):      GB ((none))
// A.*B function (eWiseMult):      GB ((none))
// A*D function (colscale):        GB ((none))
// D*A function (rowscale):        GB ((none))
// C+=B function (dense accum):    GB (_Cdense_accumB__pair_bool)
// C+=b function (dense accum):    GB (_Cdense_accumb__pair_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__pair_bool)
// C=scalar+B                      GB ((none))
// C=scalar+B'                     GB ((none))
// C=A+scalar                      GB ((none))
// C=A'+scalar                     GB ((none))

// C type:   bool
// A type:   bool
// A pattern?  1
// B type:   bool
// B pattern?  1

// BinaryOp: cij = 1
// (PAIR: the result is always 1 regardless of the input values, so both A
// and B are "pattern only" and their numerical values are never read.)

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA] (a no-op here: A is pattern-only for PAIR)
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// true if values of A are not used
#define GB_A_IS_PATTERN \
    1 \

// bij = Bx [pB] (a no-op here: B is pattern-only for PAIR)
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (PAIR: the inputs are ignored)
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_BOOL || GxB_NO_PAIR_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (PAIR is not in that list, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pair_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_bool)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_bool)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returns; this
    // duplicate return is a generator artifact (harmless; fix in Generator/*).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    bool alpha_scalar ;
    bool beta_scalar ;
    if (is_eWiseUnion)
    {
        // the alpha/beta defaults are only read in the eWiseUnion case
        alpha_scalar = (*((bool *) alpha_scalar_in)) ;
        beta_scalar = (*((bool *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ;                       \
    ;                       \
    Cx [pC] = 1 ;           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ;                       \
    ;                       \
    Cx [pC] = 1 ;           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
yescrypt-simd_c.h
/*-
 * Copyright 2009 Colin Percival
 * Copyright 2012-2014 Alexander Peslyak
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This file was originally written by Colin Percival as part of the Tarsnap
 * online backup system.
 */

/*
 * On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding
 * gcc bug 54349 (fixed for gcc 4.9+).  On 32-bit, it's of direct help.  AVX
 * and XOP are of further help either way.
 */
#ifndef __SSE4_1__
#warning "Consider enabling SSE4.1, AVX, or XOP in the C compiler for significantly better performance"
#endif

#include <emmintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "sha256.h"
#include "sysendian.h"

#include "yescrypt.h"

#include "yescrypt-platform_c.h"

#if __STDC_VERSION__ >= 199901L
/* have restrict */
#elif defined(__GNUC__)
#define restrict __restrict
#else
#define restrict
#endif

/* Read-prefetch hint; note the macro carries its own trailing semicolon. */
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
/* Write-side prefetch is intentionally compiled out. */
#define PREFETCH_OUT(x, hint) /* disabled */

/*
 * ARX: add-rotate-xor step of Salsa20.  With XOP a single _mm_roti_epi32
 * does the rotate; otherwise it is emulated with shift-left/shift-right/xor.
 */
#ifdef __XOP__
#define ARX(out, in1, in2, s) \
	out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
#else
#define ARX(out, in1, in2, s) \
	{ \
		__m128i T = _mm_add_epi32(in1, in2); \
		out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \
		out = _mm_xor_si128(out, _mm_srli_epi32(T, 32-s)); \
	}
#endif

/* Two rounds (one column round + one row round) of the Salsa20 core on the
 * four SSE registers X0..X3, using shuffles to realign lanes in between. */
#define SALSA20_2ROUNDS \
	/* Operate on "columns" */ \
	ARX(X1, X0, X3, 7) \
	ARX(X2, X1, X0, 9) \
	ARX(X3, X2, X1, 13) \
	ARX(X0, X3, X2, 18) \
\
	/* Rearrange data */ \
	X1 = _mm_shuffle_epi32(X1, 0x93); \
	X2 = _mm_shuffle_epi32(X2, 0x4E); \
	X3 = _mm_shuffle_epi32(X3, 0x39); \
\
	/* Operate on "rows" */ \
	ARX(X3, X0, X1, 7) \
	ARX(X2, X3, X0, 9) \
	ARX(X1, X2, X3, 13) \
	ARX(X0, X1, X2, 18) \
\
	/* Rearrange data */ \
	X1 = _mm_shuffle_epi32(X1, 0x39); \
	X2 = _mm_shuffle_epi32(X2, 0x4E); \
	X3 = _mm_shuffle_epi32(X3, 0x93);

/**
 * Apply the salsa20/8 core to the block provided in (X0 ... X3).
 * Stores the result to *out and leaves it in X0..X3 for chaining.
 */
#define SALSA20_8_BASE(maybe_decl, out) \
	{ \
		maybe_decl Y0 = X0; \
		maybe_decl Y1 = X1; \
		maybe_decl Y2 = X2; \
		maybe_decl Y3 = X3; \
		SALSA20_2ROUNDS \
		SALSA20_2ROUNDS \
		SALSA20_2ROUNDS \
		SALSA20_2ROUNDS \
		(out)[0] = X0 = _mm_add_epi32(X0, Y0); \
		(out)[1] = X1 = _mm_add_epi32(X1, Y1); \
		(out)[2] = X2 = _mm_add_epi32(X2, Y2); \
		(out)[3] = X3 = _mm_add_epi32(X3, Y3); \
	}

#define SALSA20_8(out) \
	SALSA20_8_BASE(__m128i, out)

/**
 * Apply the salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3).
 */
#define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \
	X0 = _mm_xor_si128(X0, Z0); \
	X1 = _mm_xor_si128(X1, Z1); \
	X2 = _mm_xor_si128(X2, Z2); \
	X3 = _mm_xor_si128(X3, Z3); \
	SALSA20_8_BASE(maybe_decl, out)

#define SALSA20_8_XOR_MEM(in, out) \
	SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out)

/* Variant that reuses the Y0..Y3 registers already set by the XOR4 macro
 * (redefined further below) instead of reloading from memory. */
#define SALSA20_8_XOR_REG(out) \
	SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out)

/* One 64-byte Salsa20 block, viewable as 16 words or 4 SSE registers. */
typedef union {
	uint32_t w[16];
	__m128i q[4];
} salsa20_blk_t;

/**
 * blockmix_salsa8(Bin, Bout, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin).  The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.
 * The loop is structured so 64-byte sub-blocks land in Bout in the standard
 * scrypt shuffled order (even sub-blocks first, then odd).
 */
static inline void
blockmix_salsa8(const salsa20_blk_t *restrict Bin,
    salsa20_blk_t *restrict Bout, size_t r)
{
	__m128i X0, X1, X2, X3;
	size_t i;

	r--;
	PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin[i * 2], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
	}
	PREFETCH(&Bin[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	X0 = Bin[r * 2 + 1].q[0];
	X1 = Bin[r * 2 + 1].q[1];
	X2 = Bin[r * 2 + 1].q[2];
	X3 = Bin[r * 2 + 1].q[3];

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	SALSA20_8_XOR_MEM(Bin[0].q, Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	SALSA20_8_XOR_MEM(Bin[r * 2 + 1].q, Bout[r * 2 + 1].q)
}

/*
 * (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
 * starting with Sandy Bridge.  Additionally, PSHUFD uses separate source and
 * destination registers, whereas the shifts would require an extra move
 * instruction for our code when building without AVX.  Unfortunately, PSHUFD
 * is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
 * and somewhat slower on some non-Intel CPUs (luckily not including AMD
 * Bulldozer and Piledriver).  Since for many other CPUs using (V)PSHUFD is a
 * win in terms of throughput or/and not needing a move instruction, we
 * currently use it despite of the higher latency on some older CPUs.  As an
 * alternative, the #if below may be patched to only enable use of (V)PSHUFD
 * when building with SSE4.1 or newer, which is not available on older CPUs
 * where this instruction has higher latency.
 */
#if 1
#define HI32(X) \
	_mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1))
#elif 0
#define HI32(X) \
	_mm_srli_si128((X), 4)
#else
#define HI32(X) \
	_mm_srli_epi64((X), 32)
#endif

#if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__))
/* Intel's name, also supported by recent gcc */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64x(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
 * intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
	((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
	((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */
#define EXTRACT64(X) \
	((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
	((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif

/* This is tunable */
#define S_BITS 8

/* Not tunable in this implementation, hard-coded in a few places */
#define S_SIMD 2
#define S_P 4

/* Number of S-boxes.  Not tunable by design, hard-coded in a few places. */
#define S_N 2

/* Derived values.  Not tunable except via S_BITS above. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD * 8)

#if !defined(__x86_64__) && defined(__SSE4_1__)
/* 32-bit with SSE4.1: extract the two S-box indices straight from the
 * vector register instead of going through a 64-bit GPR. */
#define PWXFORM_X_T __m128i
#define PWXFORM_SIMD(X, x, s0, s1) \
	x = _mm_and_si128(X, _mm_set1_epi64x(S_MASK2)); \
	s0 = *(const __m128i *)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \
	s1 = *(const __m128i *)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \
	X = _mm_mul_epu32(HI32(X), X); \
	X = _mm_add_epi64(X, s0); \
	X = _mm_xor_si128(X, s1);
#else
/* 64-bit, or 32-bit without SSE4.1: go through a 64-bit scalar. */
#define PWXFORM_X_T uint64_t
#define PWXFORM_SIMD(X, x, s0, s1) \
	x = EXTRACT64(X) & S_MASK2; \
	s0 = *(const __m128i *)(S0 + (uint32_t)x); \
	s1 = *(const __m128i *)(S1 + (x >> 32)); \
	X = _mm_mul_epu32(HI32(X), X); \
	X = _mm_add_epi64(X, s0); \
	X = _mm_xor_si128(X, s1);
#endif

#define PWXFORM_ROUND \
	PWXFORM_SIMD(X0, x0, s00, s01) \
	PWXFORM_SIMD(X1, x1, s10, s11) \
	PWXFORM_SIMD(X2, x2, s20, s21) \
	PWXFORM_SIMD(X3, x3, s30, s31)

/* Six pwxform rounds over the four 128-bit lanes. */
#define PWXFORM \
	{ \
		PWXFORM_X_T x0, x1, x2, x3; \
		__m128i s00, s01, s10, s11, s20, s21, s30, s31; \
		PWXFORM_ROUND PWXFORM_ROUND \
		PWXFORM_ROUND PWXFORM_ROUND \
		PWXFORM_ROUND PWXFORM_ROUND \
	}

#define XOR4(in) \
	X0 = _mm_xor_si128(X0, (in)[0]); \
	X1 = _mm_xor_si128(X1, (in)[1]); \
	X2 = _mm_xor_si128(X2, (in)[2]); \
	X3 = _mm_xor_si128(X3, (in)[3]);

#define OUT(out) \
	(out)[0] = X0; \
	(out)[1] = X1; \
	(out)[2] = X2; \
	(out)[3] = X3;

/**
 * blockmix_pwxform(Bin, Bout, r, S):
 * Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin).  The input Bin must
 * be 128r bytes in length; the output Bout must also be the same size.
 * Falls back to classic blockmix_salsa8() when no S-boxes are supplied.
 */
static void
blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3;
	size_t i;

	if (!S) {
		blockmix_salsa8(Bin, Bout, r);
		return;
	}

	/* the two halves of S form the two S-boxes */
	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;

	r--;
	PREFETCH(&Bin[r], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)

	/* X <-- B_{r1 - 1} */
	X0 = Bin[r].q[0];
	X1 = Bin[r].q[1];
	X2 = Bin[r].q[2];
	X3 = Bin[r].q[3];

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		/* X <-- H'(X \xor B_i) */
		XOR4(Bin[i].q)
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin[i].q)
	PWXFORM

	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)
}

#define XOR4_2(in1, in2) \
	X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
	X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
	X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
	X3 = _mm_xor_si128((in1)[3], (in2)[3]);

/*
 * Same as blockmix_salsa8() but the input is Bin1 ^ Bin2; returns the low
 * 32 bits of the final X0 (used by the caller for Integerify).
 * Bin2_in_ROM selects non-temporal prefetch hints for the ROM operand.
 */
static inline uint32_t
blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM)
{
	__m128i X0, X1, X2, X3;
	size_t i;

	r--;
	if (Bin2_in_ROM) {
		PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_NTA)
		PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i * 2], _MM_HINT_NTA)
			PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_NTA)
			PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
		}
		/* NOTE(review): T0 hint here although this is the ROM operand;
		 * the loop above used NTA — possibly intentional, confirm
		 * against upstream yescrypt before "fixing". */
		PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	} else {
		PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
		PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
			PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
		}
		PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	}
	PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[0].q)
	SALSA20_8_XOR_MEM(Bin2[0].q, Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2 + 1].q)
		SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2].q)
		SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[r * 2 + 1].q)
	SALSA20_8_XOR_MEM(Bin2[r * 2 + 1].q, Bout[r * 2 + 1].q)

	return _mm_cvtsi128_si32(X0);
}

/*
 * pwxform counterpart of blockmix_salsa8_xor(); dispatches to it when no
 * S-boxes are supplied.  Returns the low 32 bits of the final X0.
 */
static uint32_t
blockmix_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3;
	size_t i;

	if (!S)
		return blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM);

	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;

	r--;
	if (Bin2_in_ROM) {
		PREFETCH(&Bin2[r], _MM_HINT_NTA)
		PREFETCH(&Bin1[r], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i], _MM_HINT_NTA)
			PREFETCH(&Bin1[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		}
	} else {
		PREFETCH(&Bin2[r], _MM_HINT_T0)
		PREFETCH(&Bin1[r], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i], _MM_HINT_T0)
			PREFETCH(&Bin1[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		}
	}
	/* trailing ';' after the macro is an empty statement (PREFETCH_OUT
	 * expands to nothing) — harmless */
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

	/* X <-- B_{r1 - 1} */
	XOR4_2(Bin1[r].q, Bin2[r].q)

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		/* X <-- H'(X \xor B_i) */
		XOR4(Bin1[i].q)
		XOR4(Bin2[i].q)
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin1[i].q)
	XOR4(Bin2[i].q)
	PWXFORM

	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)

	return _mm_cvtsi128_si32(X0);
}

/* Redefined XOR4: additionally writes the XOR result back into `out`
 * (the "save" variants below overwrite Bin2 in place) and latches it in
 * Y0..Y3 for SALSA20_8_XOR_REG. */
#undef XOR4
#define XOR4(in, out) \
	(out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \
	(out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \
	(out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \
	(out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]);

/*
 * Like blockmix_salsa8_xor(), but also stores Bin1 ^ Bin2 back into Bin2
 * (the YESCRYPT_RW write-back).  Returns the low 32 bits of the final X0.
 */
static inline uint32_t
blockmix_salsa8_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r)
{
	__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
	size_t i;

	r--;
	PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
	PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
		PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
		PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
		PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
	}
	PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[0].q, Bin2[0].q)
	SALSA20_8_XOR_REG(Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2 + 1].q, Bin2[i * 2 + 1].q)
		SALSA20_8_XOR_REG(Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2].q, Bin2[i * 2].q)
		SALSA20_8_XOR_REG(Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
	SALSA20_8_XOR_REG(Bout[r * 2 + 1].q)

	return _mm_cvtsi128_si32(X0);
}

#define XOR4_Y \
	X0 = _mm_xor_si128(X0, Y0); \
	X1 = _mm_xor_si128(X1, Y1); \
	X2 = _mm_xor_si128(X2, Y2); \
	X3 = _mm_xor_si128(X3, Y3);

/*
 * pwxform counterpart of blockmix_salsa8_xor_save(); dispatches to it when
 * no S-boxes are supplied.  Returns the low 32 bits of the final X0.
 */
static uint32_t
blockmix_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
	size_t i;

	if (!S)
		return blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r);

	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;

	r--;
	PREFETCH(&Bin2[r], _MM_HINT_T0)
	PREFETCH(&Bin1[r], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin2[i], _MM_HINT_T0)
		PREFETCH(&Bin1[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

	/* X <-- B_{r1 - 1} */
	XOR4_2(Bin1[r].q, Bin2[r].q)

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		XOR4(Bin1[i].q, Bin2[i].q)
		/* X <-- H'(X \xor B_i) */
		XOR4_Y
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin1[i].q, Bin2[i].q)
	XOR4_Y
	PWXFORM

	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)

	return _mm_cvtsi128_si32(X0);
}

/* NOTE(review): PWXFORM_SIMD_1 / PWXFORM_SIMD_2 are never defined above —
 * the defined name is PWXFORM_SIMD.  #undef of an undefined name is legal
 * and harmless, but the list looks stale; PWXFORM_SIMD, PWXFORM_X_T and
 * HI32/EXTRACT64 are the ones left defined.  Confirm against upstream. */
#undef ARX
#undef SALSA20_2ROUNDS
#undef SALSA20_8
#undef SALSA20_8_XOR_ANY
#undef SALSA20_8_XOR_MEM
#undef SALSA20_8_XOR_REG
#undef PWXFORM_SIMD_1
#undef PWXFORM_SIMD_2
#undef PWXFORM_ROUND
#undef PWXFORM
#undef OUT
#undef XOR4
#undef XOR4_2
#undef XOR4_Y

/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
*/ static inline uint32_t integerify(const salsa20_blk_t * B, size_t r) { return B[2 * r - 1].w[0]; } /** * smix1(B, r, N, flags, V, NROM, shared, XY, S): * Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in * length; the temporary storage V must be 128rN bytes in length; the temporary * storage XY must be 128r bytes in length. The value N must be even and no * smaller than 2. The array V must be aligned to a multiple of 64 bytes, and * arrays B and XY to a multiple of at least 16 bytes (aligning them to 64 * bytes as well saves cache lines, but might result in cache bank conflicts). */ static void smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S) { const salsa20_blk_t * VROM = shared->shared1.aligned; uint32_t VROM_mask = shared->mask1; size_t s = 2 * r; salsa20_blk_t * X = V, * Y; uint32_t i, j; size_t k; /* 1: X <-- B */ /* 3: V_i <-- X */ for (k = 0; k < 2 * r; k++) { for (i = 0; i < 16; i++) { X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]); } } if (NROM && (VROM_mask & 1)) { uint32_t n; salsa20_blk_t * V_n; const salsa20_blk_t * V_j; /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V[s]; blockmix(X, Y, r, S); X = &V[2 * s]; if ((1 & VROM_mask) == 1) { /* j <-- Integerify(X) mod NROM */ j = integerify(Y, r) & (NROM - 1); V_j = &VROM[j * s]; /* X <-- H(X \xor VROM_j) */ j = blockmix_xor(Y, V_j, X, r, 1, S); } else { /* X <-- H(X) */ blockmix(Y, X, r, S); j = integerify(X, r); } for (n = 2; n < N; n <<= 1) { uint32_t m = (n < N / 2) ? 
n : (N - 1 - n); V_n = &V[n * s]; /* 2: for i = 0 to N - 1 do */ for (i = 1; i < m; i += 2) { /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += i - 1; V_j = &V[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V_n[i * s]; j = blockmix_xor(X, V_j, Y, r, 0, S); if (((n + i) & VROM_mask) == 1) { /* j <-- Integerify(X) mod NROM */ j &= NROM - 1; V_j = &VROM[j * s]; } else { /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += i; V_j = &V[j * s]; } /* X <-- H(X \xor VROM_j) */ X = &V_n[(i + 1) * s]; j = blockmix_xor(Y, V_j, X, r, 1, S); } } n >>= 1; /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += N - 2 - n; V_j = &V[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V[(N - 1) * s]; j = blockmix_xor(X, V_j, Y, r, 0, S); if (((N - 1) & VROM_mask) == 1) { /* j <-- Integerify(X) mod NROM */ j &= NROM - 1; V_j = &VROM[j * s]; } else { /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += N - 1 - n; V_j = &V[j * s]; } /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ X = XY; blockmix_xor(Y, V_j, X, r, 1, S); } else if (flags & YESCRYPT_RW) { uint32_t n; salsa20_blk_t * V_n, * V_j; /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V[s]; blockmix(X, Y, r, S); /* 4: X <-- H(X) */ /* 3: V_i <-- X */ X = &V[2 * s]; blockmix(Y, X, r, S); j = integerify(X, r); for (n = 2; n < N; n <<= 1) { uint32_t m = (n < N / 2) ? 
n : (N - 1 - n); V_n = &V[n * s]; /* 2: for i = 0 to N - 1 do */ for (i = 1; i < m; i += 2) { Y = &V_n[i * s]; /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += i - 1; V_j = &V[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ /* 3: V_i <-- X */ j = blockmix_xor(X, V_j, Y, r, 0, S); /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += i; V_j = &V[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ /* 3: V_i <-- X */ X = &V_n[(i + 1) * s]; j = blockmix_xor(Y, V_j, X, r, 0, S); } } n >>= 1; /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += N - 2 - n; V_j = &V[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V[(N - 1) * s]; j = blockmix_xor(X, V_j, Y, r, 0, S); /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += N - 1 - n; V_j = &V[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ X = XY; blockmix_xor(Y, V_j, X, r, 0, S); } else { /* 2: for i = 0 to N - 1 do */ for (i = 1; i < N - 1; i += 2) { /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V[i * s]; blockmix(X, Y, r, S); /* 4: X <-- H(X) */ /* 3: V_i <-- X */ X = &V[(i + 1) * s]; blockmix(Y, X, r, S); } /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V[i * s]; blockmix(X, Y, r, S); /* 4: X <-- H(X) */ X = XY; blockmix(Y, X, r, S); } /* B' <-- X */ for (k = 0; k < 2 * r; k++) { for (i = 0; i < 16; i++) { le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]); } } } /** * smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S): * Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in * length; the temporary storage V must be 128rN bytes in length; the temporary * storage XY must be 256r bytes in length. The value N must be a power of 2 * greater than 1. The value Nloop must be even. The array V must be aligned * to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16 * bytes (aligning them to 64 bytes as well saves cache lines, but might result * in cache bank conflicts). 
*/ static void smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop, yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S) { const salsa20_blk_t * VROM = shared->shared1.aligned; uint32_t VROM_mask = shared->mask1; size_t s = 2 * r; salsa20_blk_t * X = XY, * Y = &XY[s]; uint64_t i; uint32_t j; size_t k; if (Nloop == 0) return; /* X <-- B' */ /* 3: V_i <-- X */ for (k = 0; k < 2 * r; k++) { for (i = 0; i < 16; i++) { X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]); } } i = Nloop / 2; /* 7: j <-- Integerify(X) mod N */ j = integerify(X, r) & (N - 1); /* * Normally, NROM implies YESCRYPT_RW, but we check for these separately * because YESCRYPT_PARALLEL_SMIX resets YESCRYPT_RW for the smix2() calls * operating on the entire V. */ if (NROM && (flags & YESCRYPT_RW)) { /* 6: for i = 0 to N - 1 do */ for (i = 0; i < Nloop; i += 2) { salsa20_blk_t * V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* V_j <-- Xprev \xor V_j */ /* j <-- Integerify(X) mod NROM */ j = blockmix_xor_save(X, V_j, Y, r, S); if (((i + 1) & VROM_mask) == 1) { const salsa20_blk_t * VROM_j; j &= NROM - 1; VROM_j = &VROM[j * s]; /* X <-- H(X \xor VROM_j) */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_xor(Y, VROM_j, X, r, 1, S); } else { j &= N - 1; V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* V_j <-- Xprev \xor V_j */ /* j <-- Integerify(X) mod NROM */ j = blockmix_xor_save(Y, V_j, X, r, S); } j &= N - 1; V_j = &V[j * s]; } } else if (NROM) { /* 6: for i = 0 to N - 1 do */ for (i = 0; i < Nloop; i += 2) { const salsa20_blk_t * V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* V_j <-- Xprev \xor V_j */ /* j <-- Integerify(X) mod NROM */ j = blockmix_xor(X, V_j, Y, r, 0, S); if (((i + 1) & VROM_mask) == 1) { j &= NROM - 1; V_j = &VROM[j * s]; } else { j &= N - 1; V_j = &V[j * s]; } /* X <-- H(X \xor VROM_j) */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_xor(Y, V_j, X, r, 1, S); j &= N - 1; V_j = &V[j * s]; } } else if 
(flags & YESCRYPT_RW) { /* 6: for i = 0 to N - 1 do */ do { salsa20_blk_t * V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* V_j <-- Xprev \xor V_j */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_xor_save(X, V_j, Y, r, S); j &= N - 1; V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* V_j <-- Xprev \xor V_j */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_xor_save(Y, V_j, X, r, S); j &= N - 1; } while (--i); } else { /* 6: for i = 0 to N - 1 do */ do { const salsa20_blk_t * V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_xor(X, V_j, Y, r, 0, S); j &= N - 1; V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_xor(Y, V_j, X, r, 0, S); j &= N - 1; } while (--i); } /* 10: B' <-- X */ for (k = 0; k < 2 * r; k++) { for (i = 0; i < 16; i++) { le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]); } } } /** * p2floor(x): * Largest power of 2 not greater than argument. */ static uint64_t p2floor(uint64_t x) { uint64_t y; while ((y = x & (x - 1))) x = y; return x; } /** * smix(B, r, N, p, t, flags, V, NROM, shared, XY, S): * Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the * temporary storage V must be 128rN bytes in length; the temporary storage XY * must be 256r or 256rp bytes in length (the larger size is required with * OpenMP-enabled builds). The value N must be a power of 2 greater than 1. * The array V must be aligned to a multiple of 64 bytes, and arrays B and * XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well * saves cache lines and helps avoid false sharing in OpenMP-enabled builds * when p > 1, but it might also result in cache bank conflicts). 
*/ static void smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t, yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S) { size_t s = 2 * r; uint32_t Nchunk = N / p; uint64_t Nloop_all, Nloop_rw; uint32_t i; Nloop_all = Nchunk; if (flags & YESCRYPT_RW) { if (t <= 1) { if (t) Nloop_all *= 2; /* 2/3 */ Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */ } else { Nloop_all *= t - 1; } } else if (t) { if (t == 1) Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */ Nloop_all *= t; } Nloop_rw = 0; if (flags & __YESCRYPT_INIT_SHARED) Nloop_rw = Nloop_all; else if (flags & YESCRYPT_RW) Nloop_rw = Nloop_all / p; Nchunk &= ~(uint32_t)1; /* round down to even */ Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */ Nloop_rw &= ~(uint64_t)1; /* round down to even */ #ifdef _OPENMP #pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw) { #pragma omp for #endif for (i = 0; i < p; i++) { uint32_t Vchunk = i * Nchunk; uint8_t * Bp = &B[128 * r * i]; salsa20_blk_t * Vp = &V[Vchunk * s]; #ifdef _OPENMP salsa20_blk_t * XYp = &XY[i * (2 * s)]; #else salsa20_blk_t * XYp = XY; #endif uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk); void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S; if (Sp) smix1(Bp, 1, S_SIZE_ALL / 128, flags & ~YESCRYPT_PWXFORM, Sp, NROM, shared, XYp, NULL); if (!(flags & __YESCRYPT_INIT_SHARED_2)) smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp); smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp, NROM, shared, XYp, Sp); } if (Nloop_all > Nloop_rw) { #ifdef _OPENMP #pragma omp for #endif for (i = 0; i < p; i++) { uint8_t * Bp = &B[128 * r * i]; #ifdef _OPENMP salsa20_blk_t * XYp = &XY[i * (2 * s)]; #else salsa20_blk_t * XYp = XY; #endif void * Sp = S ? 
((uint8_t *)S + i * S_SIZE_ALL) : S; smix2(Bp, r, N, Nloop_all - Nloop_rw, flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp); } } #ifdef _OPENMP } #endif } /** * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen, * N, r, p, t, flags, buf, buflen): * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r, * p, buflen), or a revision of scrypt as requested by flags and shared, and * write the result into buf. The parameters r, p, and buflen must satisfy * r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N must be a power * of 2 greater than 1. (This optimized implementation currently additionally * limits N to the range from 8 to 2^31, but other implementation might not.) * * t controls computation time while not affecting peak memory usage. shared * and flags may request special modes as described in yescrypt.h. local is * the thread-local data structure, allowing to preserve and reuse a memory * allocation across calls, thereby reducing its overhead. * * Return 0 on success; or -1 on error. */ static int yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local, const uint8_t * passwd, size_t passwdlen, const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags, uint8_t * buf, size_t buflen) { yescrypt_region_t tmp; uint64_t NROM; size_t B_size, V_size, XY_size, need; uint8_t * B, * S; salsa20_blk_t * V, * XY; uint8_t sha256[32]; /* * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose, * so don't let it have side-effects. Without this adjustment, it'd * enable the SHA-256 password pre-hashing and output post-hashing, * because any deviation from classic scrypt implies those. 
*/ if (p == 1) flags &= ~YESCRYPT_PARALLEL_SMIX; /* Sanity-check parameters */ if (flags & ~YESCRYPT_KNOWN_FLAGS) { errno = EINVAL; return -1; } #if SIZE_MAX > UINT32_MAX if (buflen > (((uint64_t)(1) << 32) - 1) * 32) { errno = EFBIG; return -1; } #endif if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) { errno = EFBIG; return -1; } if (N > UINT32_MAX) { errno = EFBIG; return -1; } if (((N & (N - 1)) != 0) || (N <= 7) || (r < 1) || (p < 1)) { errno = EINVAL; return -1; } if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 7)) { errno = EINVAL; return -1; } if ((r > SIZE_MAX / 256 / p) || (N > SIZE_MAX / 128 / r)) { errno = ENOMEM; return -1; } #ifdef _OPENMP if (!(flags & YESCRYPT_PARALLEL_SMIX) && (N > SIZE_MAX / 128 / (r * p))) { errno = ENOMEM; return -1; } #endif if ((flags & YESCRYPT_PWXFORM) && #ifndef _OPENMP (flags & YESCRYPT_PARALLEL_SMIX) && #endif p > SIZE_MAX / S_SIZE_ALL) { errno = ENOMEM; return -1; } NROM = 0; if (shared->shared1.aligned) { NROM = shared->shared1.aligned_size / ((size_t)128 * r); if (NROM > UINT32_MAX) { errno = EFBIG; return -1; } if (((NROM & (NROM - 1)) != 0) || (NROM <= 7) || !(flags & YESCRYPT_RW)) { errno = EINVAL; return -1; } } /* Allocate memory */ V = NULL; V_size = (size_t)128 * r * N; #ifdef _OPENMP if (!(flags & YESCRYPT_PARALLEL_SMIX)) V_size *= p; #endif need = V_size; if (flags & __YESCRYPT_INIT_SHARED) { if (local->aligned_size < need) { if (local->base || local->aligned || local->base_size || local->aligned_size) { errno = EINVAL; return -1; } if (!alloc_region(local, need)) return -1; } V = (salsa20_blk_t *)local->aligned; need = 0; } B_size = (size_t)128 * r * p; need += B_size; if (need < B_size) { errno = ENOMEM; return -1; } XY_size = (size_t)256 * r; #ifdef _OPENMP XY_size *= p; #endif need += XY_size; if (need < XY_size) { errno = ENOMEM; return -1; } if (flags & YESCRYPT_PWXFORM) { size_t S_size = S_SIZE_ALL; #ifdef _OPENMP S_size *= p; #else if (flags & YESCRYPT_PARALLEL_SMIX) S_size *= p; #endif need += 
S_size; if (need < S_size) { errno = ENOMEM; return -1; } } if (flags & __YESCRYPT_INIT_SHARED) { if (!alloc_region(&tmp, need)) return -1; B = (uint8_t *)tmp.aligned; XY = (salsa20_blk_t *)((uint8_t *)B + B_size); } else { init_region(&tmp); if (local->aligned_size < need) { if (free_region(local)) return -1; if (!alloc_region(local, need)) return -1; } B = (uint8_t *)local->aligned; V = (salsa20_blk_t *)((uint8_t *)B + B_size); XY = (salsa20_blk_t *)((uint8_t *)V + V_size); } S = NULL; if (flags & YESCRYPT_PWXFORM) S = (uint8_t *)XY + XY_size; if (t || flags) { SHA256_CTX ctx; SHA256_Init(&ctx); SHA256_Update(&ctx, passwd, passwdlen); SHA256_Final(sha256, &ctx); passwd = sha256; passwdlen = sizeof(sha256); } /* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */ PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size); if (t || flags) memcpy(sha256, B, sizeof(sha256)); if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) { smix(B, r, N, p, t, flags, V, NROM, shared, XY, S); } else { uint32_t i; /* 2: for i = 0 to p - 1 do */ #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S) #endif for (i = 0; i < p; i++) { /* 3: B_i <-- MF(B_i, N) */ #ifdef _OPENMP smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, &V[(size_t)2 * r * i * N], NROM, shared, &XY[(size_t)4 * r * i], S ? &S[S_SIZE_ALL * i] : S); #else smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V, NROM, shared, XY, S); #endif } } /* 5: DK <-- PBKDF2(P, B, 1, dkLen) */ PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen); /* * Except when computing classic scrypt, allow all computation so far * to be performed on the client. The final steps below match those of * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of * SCRAM's use of SHA-1) would be usable with yescrypt hashes. 
*/ if ((t || flags) && buflen == sizeof(sha256)) { /* Compute ClientKey */ { HMAC_SHA256_CTX ctx; HMAC_SHA256_Init(&ctx, buf, buflen); HMAC_SHA256_Update(&ctx, "WaviBanana", 10); HMAC_SHA256_Final(sha256, &ctx); } /* Compute StoredKey */ { SHA256_CTX ctx; SHA256_Init(&ctx); SHA256_Update(&ctx, sha256, sizeof(sha256)); SHA256_Final(buf, &ctx); } } if (free_region(&tmp)) return -1; /* Success! */ return 0; }
counting.h
//===------------------------------------------------------------*- C++ -*-===// // // Ripples: A C++ Library for Influence Maximization // Marco Minutoli <marco.minutoli@pnnl.gov> // Pacific Northwest National Laboratory // //===----------------------------------------------------------------------===// // // Copyright (c) 2019, Battelle Memorial Institute // // Battelle Memorial Institute (hereinafter Battelle) hereby grants permission // to any person or entity lawfully obtaining a copy of this software and // associated documentation files (hereinafter “the Software”) to redistribute // and use the Software in source and binary forms, with or without // modification. Such person or entity may use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and may permit // others to do so, subject to the following conditions: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimers. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Other than as used herein, neither the name Battelle Memorial Institute or // Battelle may be used in any form whatsoever without the express written // consent of Battelle. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. 
IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // //===----------------------------------------------------------------------===// #ifndef RIPPLES_COUNTING_H #define RIPPLES_COUNTING_H #include <algorithm> #include <iterator> #include <omp.h> #include "spdlog/fmt/ostr.h" #include "spdlog/sinks/stdout_color_sinks.h" #include "spdlog/spdlog.h" #include "ripples/utility.h" namespace ripples { //! \brief Count the occurrencies of vertices in the RRR sets. //! //! \tparam InItr The input sequence iterator type. //! \tparam OutItr The output sequence iterator type. //! //! \param in_begin The begin of the sequence of RRR sets. //! \param in_end The end of the sequence of RRR sets. //! \param out_begin The begin of the sequence storing the counters for each //! vertex. //! \param out_end The end of the sequence storing the counters for each vertex. 
template <typename InItr, typename OutItr> void CountOccurrencies(InItr in_begin, InItr in_end, OutItr out_begin, OutItr out_end, sequential_tag &&) { using rrr_set_type = typename std::iterator_traits<InItr>::value_type; using vertex_type = typename rrr_set_type::value_type; for (; in_begin != in_end; ++in_begin) { std::for_each(in_begin->begin(), in_begin->end(), [&](const vertex_type v) { *(out_begin + v) += 1; }); } } template <typename InItr, typename OutItr> void CountOccurrencies(InItr in_begin, InItr in_end, OutItr out_begin, OutItr out_end, size_t num_threads) { using rrr_set_type = typename std::iterator_traits<InItr>::value_type; using vertex_type = typename rrr_set_type::value_type; #pragma omp parallel num_threads(num_threads) { size_t num_elements = std::distance(out_begin, out_end); size_t threadnum = omp_get_thread_num(), numthreads = omp_get_num_threads(); vertex_type low = num_elements * threadnum / numthreads, high = num_elements * (threadnum + 1) / numthreads; for (auto itr = in_begin; itr != in_end; ++itr) { auto begin = std::lower_bound(itr->begin(), itr->end(), low); auto end = std::upper_bound(begin, itr->end(), high - 1); std::for_each(begin, end, [&](const vertex_type v) { *(out_begin + v) += 1; }); } } } //! \brief Count the occurrencies of vertices in the RRR sets. //! //! \tparam InItr The input sequence iterator type. //! \tparam OutItr The output sequence iterator type. //! //! \param in_begin The begin of the sequence of RRR sets. //! \param in_end The end of the sequence of RRR sets. //! \param out_begin The begin of the sequence storing the counters for each //! vertex. //! \param out_end The end of the sequence storing the counters for each vertex. 
template <typename InItr, typename OutItr>
void CountOccurrencies(InItr in_begin, InItr in_end, OutItr out_begin,
                       OutItr out_end, omp_parallel_tag &&) {
  // Tag-dispatch wrapper: resolve the OpenMP thread budget once, then
  // delegate to the count-based implementation.
  size_t num_threads(1);
#pragma omp single
  { num_threads = omp_get_max_threads(); }

  CountOccurrencies(in_begin, in_end, out_begin, out_end, num_threads);
}

//! \brief Update the coverage counters.
//!
//! \tparam RRRsetsItrTy The iterator type of the sequence of RRR sets.
//! \tparam VertexCoverageVectorTy The type of the vector storing counters.
//!
//! \param B The begin of the sequence of RRRsets covered by the just selected
//! seed.
//! \param E The end of the sequence of RRRsets covered by the just selected
//! seed.
//! \param vertexCoverage The vector storing the counters to be updated.
template <typename RRRsetsItrTy, typename VertexCoverageVectorTy>
void UpdateCounters(RRRsetsItrTy B, RRRsetsItrTy E,
                    VertexCoverageVectorTy &vertexCoverage, sequential_tag &&) {
  // A newly selected seed covers the sets in [B, E); every vertex appearing
  // in a covered set loses one unit of marginal coverage.
  for (; B != E; ++B) {
    for (auto v : *B) {
      vertexCoverage[v] -= 1;
    }
  }
}

//! \brief Parallel counter update: the sets are processed in order, but the
//! vertices of each covered set are decremented in parallel.
//! NOTE(review): assumes a single set never lists the same vertex twice —
//! otherwise two iterations would race on the same counter; verify against
//! the RRR-set generator.
template <typename RRRsetsItrTy, typename VertexCoverageVectorTy>
void UpdateCounters(RRRsetsItrTy B, RRRsetsItrTy E,
                    VertexCoverageVectorTy &vertexCoverage,
                    size_t num_threads) {
  for (; B != E; ++B) {
#pragma omp parallel for num_threads(num_threads)
    for (size_t j = 0; j < (*B).size(); ++j) {
      vertexCoverage[(*B)[j]] -= 1;
    }
  }
}

//! \brief Update the coverage counters.
//!
//! \tparam RRRsetsItrTy The iterator type of the sequence of RRR sets.
//! \tparam VertexCoverageVectorTy The type of the vector storing counters.
//!
//! \param B The begin of the sequence of RRRsets covered by the just selected
//! seed.
//! \param E The end of the sequence of RRRsets covered by the just selected
//! seed.
//! \param vertexCoverage The vector storing the counters to be updated.
template <typename RRRsetsItrTy, typename VertexCoverageVectorTy> void UpdateCounters(RRRsetsItrTy B, RRRsetsItrTy E, VertexCoverageVectorTy &vertexCoverage, omp_parallel_tag &&) { size_t num_threads(1); #pragma omp single { num_threads = omp_get_max_threads(); } UpdateCounters(B, E, vertexCoverage, num_threads); } //! \brief Initialize the Heap storage. //! //! \tparam InItr The input sequence iterator type. //! \tparam OutItr The output sequence iterator type. //! //! \param in_begin The begin of the sequence of vertex counters. //! \param in_end The end of the sequence of vertex counters. //! \param out_begin The begin of the sequence used as storage in the Heap. //! \param out_end The end of the sequence used as storage in the Heap. template <typename InItr, typename OutItr> void InitHeapStorage(InItr in_begin, InItr in_end, OutItr out_begin, OutItr out_end, sequential_tag &&) { using value_type = typename std::iterator_traits<OutItr>::value_type; using vertex_type = typename value_type::first_type; for (vertex_type v = 0; in_begin != in_end; ++in_begin, ++v, ++out_begin) { *out_begin = {v, *in_begin}; } } template <typename InItr, typename OutItr> void InitHeapStorage(InItr in_begin, InItr in_end, OutItr out_begin, OutItr out_end, size_t num_threads) { using value_type = typename std::iterator_traits<OutItr>::value_type; using vertex_type = typename value_type::first_type; #pragma omp parallel for num_threads(num_threads) for (vertex_type v = 0; v < std::distance(in_begin, in_end); ++v) { *(out_begin + v) = {v, *(in_begin + v)}; } } //! \brief Initialize the Heap storage. //! //! \tparam InItr The input sequence iterator type. //! \tparam OutItr The output sequence iterator type. //! //! \param in_begin The begin of the sequence of vertex counters. //! \param in_end The end of the sequence of vertex counters. //! \param out_begin The begin of the sequence used as storage in the Heap. //! \param out_end The end of the sequence used as storage in the Heap. 
template <typename InItr, typename OutItr>
void InitHeapStorage(InItr in_begin, InItr in_end, OutItr out_begin,
                     OutItr out_end, omp_parallel_tag &&) {
  // Tag-dispatch wrapper: query the OpenMP thread budget once, then delegate
  // to the count-based overload.
  size_t num_threads(1);
#pragma omp single
  { num_threads = omp_get_max_threads(); }

  InitHeapStorage(in_begin, in_end, out_begin, out_end, num_threads);
}

}  // namespace ripples

#endif /* RIPPLES_COUNTING_H */
convolution_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Direct int8 convolution, input elempack=1 -> output elempack=4: each output
// pixel stores 4 int32 channel accumulators in a single __m128i.  The weight
// blob is consumed 4 int8 values at a time (kptr += 4), i.e. it is presumably
// pre-interleaved so that 4 consecutive bytes are the 4 output channels of one
// (input-channel, kernel-tap) pair — confirm against the weight-packing
// routine.  No bias/activation/requantization here: raw int32 sums only.
static void convolution_pack1to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flat element offset of every kernel tap inside the
    // (dilated) input window, relative to the window's top-left element.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        // gap skips from the end of one dilated kernel row to the start of
        // the next, in input elements.
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    // num_output: one packed output channel group (4 channels) per p.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                __m128i _sum0 = _mm_setzero_si128();

                const signed char* kptr = weight_data_int8.channel(p);

                // channels: accumulate over every input channel and tap.
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w;

                    for (int k = 0; k < maxk; k++)
                    {
                        // Broadcast the (sign-extended) input sample to all
                        // eight int16 lanes.
                        __m128i _val = _mm_set1_epi16((short)sptr[space_ofs[k]]);

                        // Sign-extend 8 int8 weights to int16: cmpgt(0, w)
                        // yields 0xFF for negative bytes, giving the correct
                        // high byte on unpack.
                        // TODO use _mm_cvtepi8_epi16 on sse4.1
                        __m128i _w = _mm_loadl_epi64((const __m128i*)kptr);
                        _w = _mm_unpacklo_epi8(_w, _mm_cmpgt_epi8(_mm_setzero_si128(), _w));

                        // Widening 16x16->32 multiply: interleave the low and
                        // high halves of the products; only the low 4 int32
                        // lanes (the 4 output channels) are accumulated.
                        __m128i _sl = _mm_mullo_epi16(_val, _w);
                        __m128i _sh = _mm_mulhi_epi16(_val, _w);
                        __m128i _s0 = _mm_unpacklo_epi16(_sl, _sh);

                        _sum0 = _mm_add_epi32(_sum0, _s0);

                        kptr += 4;
                    }
                }

                // 4 int32 sums per output pixel (elempack=4 layout).
                _mm_storeu_si128((__m128i*)(outptr + j * 4), _sum0);
            }

            outptr += outw * 4;
        }
    }
}
valid.mob5.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_256_28_28_256_3_3.h"
#include "gen_ukr_A4B2gemm_1_256_28_28_256_3_3.h"

// Auto-generated ("push button") micro-kernel driver for one convolution
// layer; judging by the generated-header names it targets a
// 1 x 256 x 28 x 28 input with 256 filters of 3x3 — confirm against the
// generator configuration.
//
//   A    — input tensor (padded rows: offsetA uses a row stride of 30).
//   B    — packed/transposed weights, filled here from oriB.
//   C    — output tensor (28x28 per output channel).
//   oriB — weights in their original layout.
//
// NOTE(review): assumes uNf, uNc, uNw, uNh, Tf2, Tc1, Txy3 and min() come
// from the included headers, and that this function is called from inside an
// OpenMP parallel region (it calls omp_get_thread_num() and uses a bare
// "#pragma omp barrier") — verify at the call site.
void testrun(float* A ,float*B, float*C, float*oriB ){
    int tid = omp_get_thread_num();
    // Layer extents kept for reference by the generator (unused below).
    int Nx = 28;
    int Ny = 28;
    int Nh = 3;
    // Per-output-pixel strides handed to the scatter micro-kernels;
    // temporarily adjusted below when a 6-pixel tile wraps a row boundary.
    long long Astrides[6] = {0,1,2,3,4,5};
    int b1 = 0;
    // Weight repack: transpose 8x8 blocks of oriB into the interleaved
    // layout the micro-kernels expect (16 filters per packed panel, split
    // into two 8-filter transposes).
    for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
        for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
            transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
            transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
        }
    }
    // All threads must finish repacking B before any thread consumes it.
#pragma omp barrier
    // begin push button generated block
    // Generated loop nest: multi-level cache/register tiling over input
    // channels (c5..c1), output pixels (xy5..xy1) and filters (f5..f1).
    for(int c5=0;c5<256+0;c5+=256) {
    for(int xy5=0;xy5<784+0;xy5+=784) {
    for(int f5=0;f5<256+0;f5+=256) {
    for(int c4=c5;c4<min(256, 256+c5);c4+=256) {
    for(int xy4=xy5;xy4<min(784, 784+xy5);xy4+=784) {
    for(int f4=f5;f4<min(256, 256+f5);f4+=Tf2) {
    for(int c3=c4;c3<min(256, 256+c4);c3+=Tc1) {
    for(int xy3=xy4;xy3<min(784, 784+xy4);xy3+=Txy3) {
    for(int f3=f4;f3<min(256, Tf2+f4);f3+=Tf2) {
    for(int xy2=xy3;xy2<min(784, Txy3+xy3);xy2+=6) {
    for(int f2=f3;f2<min(256, Tf2+f3);f2+=16) {
    for(int c2=c3;c2<min(256, Tc1+c3);c2+=Tc1) {
    for(int c1=c2;c1<min(256, Tc1+c2);c1+=Tc1) {
    for(int xy1=xy2;xy1<min(784, 6+xy2);xy1+=6) {
    for(int f1=f2;f1<min(256, 16+f2);f1+=16) {
        int ctile=min(Tc1, 256-c1);
        // Decompose linear indices into spatial/channel/filter coordinates.
        int x1=xy1/28;
        int y1=xy1%28/1;
        int c1_1=c1/1;
        int c1_2=c1%1/1;
        int kf1_1=f1/16;
        int kf1_2=f1%16/1;
        int of1_1=f1/1;
        int of1_2=f1%1/1;
        // Flat offsets into A (30-wide padded rows), B (packed weights) and
        // C (28x28 output planes).  NOTE(review): 230400/36864/200704 are
        // generator-baked buffer sizes — confirm against the layer config.
        int offsetA=0+b1*230400+c1_1*900+1*x1*30+1*y1*1+c1_2*1;
        int offsetB=0+kf1_1*36864+c1*144+0*48+0*16+kf1_2*1;
        int offsetC=0+b1*200704+of1_1*784+x1*28+y1*1+of1_2*1;
        if(28-y1>=6){
            // Whole 6-pixel tile fits in the current output row.
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        } else if(28*28-xy1>=6){
            // Tile straddles a row boundary: the wrapped pixels sit one
            // padded input row further (30 vs 28 elements, hence +2), so
            // bump their strides, run the 6-wide kernel, then restore.
            for(int sti=28-y1;sti<6;sti+=1) {
                Astrides[sti]+=2;
            }
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
            for(int sti=28-y1;sti<6;sti+=1) {
                Astrides[sti]-=2;
            }
        } else{
            // Fewer than 6 output pixels remain: fall back to the narrower
            // 4-wide micro-kernel.
            cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    // end push button generated block
}
hermv_c_dia_n_hi_trans.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include <stdlib.h>

// Hermitian sparse matrix-vector product for the DIA (diagonal) storage
// format: y := alpha * op(A) * x + beta * y, where only the main diagonal and
// the super-diagonals (distance > 0, i.e. the upper triangle — "hi" in the
// file name) are stored; each stored off-diagonal element is applied to both
// triangles, once as-is and once conjugated, to realize hermitian symmetry.
//
// Parallelization: each OpenMP thread accumulates into its own zero-initialized
// scratch row tmp[tid] (avoids races, since one diagonal touches two output
// positions), and the scratch rows are reduced into y at the end.
//
// Returns ALPHA_SPARSE_STATUS_INVALID_VALUE for non-square inputs, otherwise
// ALPHA_SPARSE_STATUS_SUCCESS.
//
// NOTE(review): the malloc/calloc results are not checked for NULL.
// NOTE(review): tmp is declared ALPHA_Number while the rest of the routine
// works in ALPHA_Complex — presumably ALPHA_Number is typedef'd to
// ALPHA_Complex in this complex instantiation; verify against kernel.h.
alphasparse_status_t ONAME(const ALPHA_Complex alpha,
                           const ALPHA_SPMAT_DIA *A,
                           const ALPHA_Complex *x,
                           const ALPHA_Complex beta,
                           ALPHA_Complex *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    // A hermitian operator must be square.
    if(m != n)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    const ALPHA_INT thread_num = alpha_get_thread_num();
    // One scratch accumulator row per thread, zero-initialized in parallel.
    ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(int i = 0; i < thread_num; ++i)
    {
        tmp[i] = malloc(sizeof(ALPHA_Number) * m);
        memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
    }
    const ALPHA_INT diags = A->ndiag;
    // Distribute the stored diagonals across threads; each diagonal is
    // processed wholly by one thread into that thread's scratch row.
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < diags; ++i)
    {
        const ALPHA_INT threadId = alpha_get_thread_id();
        const ALPHA_INT dis = A->distance[i];   // diagonal offset from main
        if(dis == 0)
        {
            // Main diagonal: contributes once per row.
            // A->lval is the stride between stored diagonals in A->values.
            const ALPHA_INT start = i * A->lval;
            for(ALPHA_INT j = 0; j < m; ++j)
            {
                ALPHA_Number v;
                // alpha_mul_3c: multiply with the conjugated matrix element —
                // presumably matching the transposed/conjugated op ("trans" in
                // the file name); verify against the macro's definition.
                alpha_mul_3c(v, alpha, A->values[start + j]);
                alpha_madde(tmp[threadId][j], v, x[j]);
            }
        }
        else if(dis > 0)
        {
            // Stored super-diagonal: element (row_start+j, col_start+j) of the
            // upper triangle. Apply it to both output positions — as-is for
            // one triangle and conjugated (val_conj) for the mirrored one.
            const ALPHA_INT row_start = 0;
            const ALPHA_INT col_start = dis;
            const ALPHA_INT nnz = m - dis;      // valid entries on this diagonal
            const ALPHA_INT start = i * A->lval;
            for(ALPHA_INT j = 0; j < nnz; ++j)
            {
                ALPHA_Complex v,v_c;
                ALPHA_Complex val_orig = A->values[start + j];
                // Manual complex conjugate of the stored element.
                ALPHA_Complex val_conj = {A->values[start + j].real,-A->values[start + j].imag};
                alpha_mul(v, alpha, val_orig);
                alpha_mul(v_c, alpha, val_conj);
                // Unconjugated product scatters to the lower-triangle position,
                // conjugated one to the upper — the swap relative to the usual
                // hermitian kernel reflects the transposed op; confirm against
                // the non-trans sibling kernel.
                alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]);
                alpha_madde(tmp[threadId][row_start + j], v_c, x[col_start + j]);
            }
        }
        // dis < 0 (sub-diagonals) are intentionally ignored: only the upper
        // triangle is stored for a hermitian DIA matrix.
    }
    // Reduction: y[i] = beta*y[i] + sum over all per-thread scratch rows.
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);
        for(ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(y[i], y[i], tmp[j][i]);
        }
    }
    // Release the scratch rows (freeing in parallel mirrors the allocation).
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        alpha_free(tmp[i]);
    }
    alpha_free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
convolutiondepthwise_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Depthwise 3x3 convolution, stride 1, pack-4 layout (4 channels interleaved
// per pixel as one float32x4). One group (== one channel quad) per OpenMP
// iteration. On aarch64 the main path computes 2 output rows x 4 output
// pixels per inline-asm iteration, with 2-pixel and 1-pixel column tails;
// remaining rows (and the whole image on armv7) use a single-row path with
// 4/2-pixel asm kernels and a scalar-intrinsics 1-pixel tail.
// NOTE(review): reads rows r0..r3 and 2 extra pixels per row beyond outw —
// assumes bottom_blob was padded by the caller; confirm against the caller.
static void convdw3x3s1_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
#if __aarch64__
    const int w = bottom_blob.w;
#endif

    const int outw = top_blob.w;
    const int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // Per-group bias (4 lanes), or zero when no bias was given.
        float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);

        const float* k0 = kernel.row(g);

        float* outptr0 = out.row(0);

        const Mat img0 = bottom_blob.channel(g);

        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);

        // Nine 3x3 taps, each replicated across the 4 packed channels.
        float32x4_t _k00 = vld1q_f32(k0);
        float32x4_t _k01 = vld1q_f32(k0 + 4);
        float32x4_t _k02 = vld1q_f32(k0 + 8);
        float32x4_t _k10 = vld1q_f32(k0 + 12);
        float32x4_t _k11 = vld1q_f32(k0 + 16);
        float32x4_t _k12 = vld1q_f32(k0 + 20);
        float32x4_t _k20 = vld1q_f32(k0 + 24);
        float32x4_t _k21 = vld1q_f32(k0 + 28);
        float32x4_t _k22 = vld1q_f32(k0 + 32);

        int i = 0;

#if __aarch64__
        float* outptr1 = out.row(1);
        const float* r3 = img0.row(3);

        // Two output rows per iteration: rows i and i+1 share input rows
        // r1/r2, so each loaded input row is reused by both accumulator sets
        // (v16-v19 for row i, v20-v23 for row i+1).
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            for (; j + 3 < outw; j += 4)
            {
                asm volatile(
                    "prfm   pldl1keep, [%3, #256]       \n"
                    "ld1    {v10.4s, v11.4s}, [%3], #32 \n" // r10 r11

                    "mov    v16.16b, %21.16b            \n" // sum00
                    "mov    v17.16b, %21.16b            \n" // sum01
                    "mov    v18.16b, %21.16b            \n" // sum02
                    "mov    v19.16b, %21.16b            \n" // sum03

                    "prfm   pldl1keep, [%3, #512]       \n"
                    "ld1    {v12.4s, v13.4s, v14.4s, v15.4s}, [%3] \n" // r12 r13 r14 r15

                    "mov    v20.16b, %21.16b            \n" // sum10
                    "mov    v21.16b, %21.16b            \n" // sum11
                    "mov    v22.16b, %21.16b            \n" // sum12
                    "mov    v23.16b, %21.16b            \n" // sum13

                    "fmla   v16.4s, %15.4s, v10.4s      \n"
                    "fmla   v17.4s, %15.4s, v11.4s      \n"
                    "fmla   v18.4s, %15.4s, v12.4s      \n"
                    "fmla   v19.4s, %15.4s, v13.4s      \n"
                    "fmla   v20.4s, %12.4s, v10.4s      \n"
                    "fmla   v21.4s, %12.4s, v11.4s      \n"
                    "fmla   v22.4s, %12.4s, v12.4s      \n"
                    "fmla   v23.4s, %12.4s, v13.4s      \n"

                    "add    %3, %3, #32                 \n"

                    "fmla   v16.4s, %16.4s, v11.4s      \n"
                    "fmla   v17.4s, %16.4s, v12.4s      \n"
                    "fmla   v18.4s, %16.4s, v13.4s      \n"
                    "fmla   v19.4s, %16.4s, v14.4s      \n"
                    "fmla   v20.4s, %13.4s, v11.4s      \n"
                    "fmla   v21.4s, %13.4s, v12.4s      \n"
                    "fmla   v22.4s, %13.4s, v13.4s      \n"
                    "fmla   v23.4s, %13.4s, v14.4s      \n"

                    "prfm   pldl1keep, [%4, #256]       \n"
                    "ld1    {v10.4s, v11.4s}, [%4], #32 \n" // r20 r21

                    "fmla   v16.4s, %17.4s, v12.4s      \n"
                    "fmla   v17.4s, %17.4s, v13.4s      \n"
                    "fmla   v18.4s, %17.4s, v14.4s      \n"
                    "fmla   v19.4s, %17.4s, v15.4s      \n"
                    "fmla   v20.4s, %14.4s, v12.4s      \n"
                    "fmla   v21.4s, %14.4s, v13.4s      \n"
                    "fmla   v22.4s, %14.4s, v14.4s      \n"
                    "fmla   v23.4s, %14.4s, v15.4s      \n"

                    "prfm   pldl1keep, [%4, #512]       \n"
                    "ld1    {v12.4s, v13.4s, v14.4s, v15.4s}, [%4] \n" // r22 r23 r24 r25

                    "fmla   v16.4s, %18.4s, v10.4s      \n"
                    "fmla   v17.4s, %18.4s, v11.4s      \n"
                    "fmla   v18.4s, %18.4s, v12.4s      \n"
                    "fmla   v19.4s, %18.4s, v13.4s      \n"
                    "fmla   v20.4s, %15.4s, v10.4s      \n"
                    "fmla   v21.4s, %15.4s, v11.4s      \n"
                    "fmla   v22.4s, %15.4s, v12.4s      \n"
                    "fmla   v23.4s, %15.4s, v13.4s      \n"

                    "add    %4, %4, #32                 \n"

                    "fmla   v16.4s, %19.4s, v11.4s      \n"
                    "fmla   v17.4s, %19.4s, v12.4s      \n"
                    "fmla   v18.4s, %19.4s, v13.4s      \n"
                    "fmla   v19.4s, %19.4s, v14.4s      \n"
                    "fmla   v20.4s, %16.4s, v11.4s      \n"
                    "fmla   v21.4s, %16.4s, v12.4s      \n"
                    "fmla   v22.4s, %16.4s, v13.4s      \n"
                    "fmla   v23.4s, %16.4s, v14.4s      \n"

                    "prfm   pldl1keep, [%2, #256]       \n"
                    "ld1    {v10.4s, v11.4s}, [%2], #32 \n" // r00 r01

                    "prfm   pldl1keep, [%5, #256]       \n"
                    "ld1    {v24.4s, v25.4s}, [%5], #32 \n" // r30 r31

                    "fmla   v16.4s, %20.4s, v12.4s      \n"
                    "fmla   v17.4s, %20.4s, v13.4s      \n"
                    "fmla   v18.4s, %20.4s, v14.4s      \n"
                    "fmla   v19.4s, %20.4s, v15.4s      \n"
                    "fmla   v20.4s, %17.4s, v12.4s      \n"
                    "fmla   v21.4s, %17.4s, v13.4s      \n"
                    "fmla   v22.4s, %17.4s, v14.4s      \n"
                    "fmla   v23.4s, %17.4s, v15.4s      \n"

                    "prfm   pldl1keep, [%2, #512]       \n"
                    "ld1    {v12.4s, v13.4s, v14.4s, v15.4s}, [%2] \n" // r02 r03 r04 r05

                    "prfm   pldl1keep, [%5, #512]       \n"
                    "ld1    {v26.4s, v27.4s, v28.4s, v29.4s}, [%5] \n" // r32 r33 r34 r35

                    "fmla   v16.4s, %12.4s, v10.4s      \n"
                    "fmla   v17.4s, %12.4s, v11.4s      \n"
                    "fmla   v18.4s, %12.4s, v12.4s      \n"
                    "fmla   v19.4s, %12.4s, v13.4s      \n"
                    "fmla   v20.4s, %18.4s, v24.4s      \n"
                    "fmla   v21.4s, %18.4s, v25.4s      \n"
                    "fmla   v22.4s, %18.4s, v26.4s      \n"
                    "fmla   v23.4s, %18.4s, v27.4s      \n"

                    "add    %2, %2, #32                 \n"

                    "fmla   v16.4s, %13.4s, v11.4s      \n"
                    "fmla   v17.4s, %13.4s, v12.4s      \n"
                    "fmla   v18.4s, %13.4s, v13.4s      \n"
                    "fmla   v19.4s, %13.4s, v14.4s      \n"
                    "fmla   v20.4s, %19.4s, v25.4s      \n"
                    "fmla   v21.4s, %19.4s, v26.4s      \n"
                    "fmla   v22.4s, %19.4s, v27.4s      \n"
                    "fmla   v23.4s, %19.4s, v28.4s      \n"

                    "add    %5, %5, #32                 \n"

                    "fmla   v16.4s, %14.4s, v12.4s      \n"
                    "fmla   v17.4s, %14.4s, v13.4s      \n"
                    "fmla   v18.4s, %14.4s, v14.4s      \n"
                    "fmla   v19.4s, %14.4s, v15.4s      \n"
                    "fmla   v20.4s, %20.4s, v26.4s      \n"
                    "fmla   v21.4s, %20.4s, v27.4s      \n"
                    "fmla   v22.4s, %20.4s, v28.4s      \n"
                    "fmla   v23.4s, %20.4s, v29.4s      \n"

                    "st1    {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n"
                    "st1    {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"

                    : "=r"(outptr0), // %0
                    "=r"(outptr1),   // %1
                    "=r"(r0),        // %2
                    "=r"(r1),        // %3
                    "=r"(r2),        // %4
                    "=r"(r3)         // %5
                    : "0"(outptr0),
                    "1"(outptr1),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "w"(_k00),  // %12
                    "w"(_k01),  // %13
                    "w"(_k02),  // %14
                    "w"(_k10),  // %15
                    "w"(_k11),  // %16
                    "w"(_k12),  // %17
                    "w"(_k20),  // %18
                    "w"(_k21),  // %19
                    "w"(_k22),  // %20
                    "w"(_bias0) // %21
                    : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29");
            }
            // 2-pixel column tail (still two output rows).
            for (; j + 1 < outw; j += 2)
            {
                asm volatile(
                    "prfm   pldl1keep, [%3, #512]       \n"
                    "ld1    {v10.4s, v11.4s, v12.4s, v13.4s}, [%3] \n" // r10 r11 r12 r13

                    "mov    v16.16b, %21.16b            \n" // sum00
                    "mov    v17.16b, %21.16b            \n" // sum01
                    "mov    v18.16b, %21.16b            \n" // sum10
                    "mov    v19.16b, %21.16b            \n" // sum11

                    "fmla   v16.4s, %15.4s, v10.4s      \n"
                    "fmla   v17.4s, %15.4s, v11.4s      \n"
                    "fmla   v18.4s, %12.4s, v10.4s      \n"
                    "fmla   v19.4s, %12.4s, v11.4s      \n"

                    "add    %3, %3, #32                 \n"

                    "fmla   v16.4s, %16.4s, v11.4s      \n"
                    "fmla   v17.4s, %16.4s, v12.4s      \n"
                    "fmla   v18.4s, %13.4s, v11.4s      \n"
                    "fmla   v19.4s, %13.4s, v12.4s      \n"

                    "prfm   pldl1keep, [%4, #512]       \n"
                    "ld1    {v20.4s, v21.4s, v22.4s, v23.4s}, [%4] \n" // r20 r21 r22 r23

                    "fmla   v16.4s, %17.4s, v12.4s      \n"
                    "fmla   v17.4s, %17.4s, v13.4s      \n"
                    "fmla   v18.4s, %14.4s, v12.4s      \n"
                    "fmla   v19.4s, %14.4s, v13.4s      \n"

                    "add    %4, %4, #32                 \n"

                    "fmla   v16.4s, %18.4s, v20.4s      \n"
                    "fmla   v17.4s, %18.4s, v21.4s      \n"
                    "fmla   v18.4s, %15.4s, v20.4s      \n"
                    "fmla   v19.4s, %15.4s, v21.4s      \n"

                    "prfm   pldl1keep, [%2, #512]       \n"
                    "ld1    {v10.4s, v11.4s, v12.4s, v13.4s}, [%2] \n" // r00 r01 r02 r03

                    "fmla   v16.4s, %19.4s, v21.4s      \n"
                    "fmla   v17.4s, %19.4s, v22.4s      \n"
                    "fmla   v18.4s, %16.4s, v21.4s      \n"
                    "fmla   v19.4s, %16.4s, v22.4s      \n"

                    "prfm   pldl1keep, [%5, #512]       \n"
                    "ld1    {v24.4s, v25.4s, v26.4s, v27.4s}, [%5] \n" // r30 r31 r32 r33

                    "fmla   v16.4s, %20.4s, v22.4s      \n"
                    "fmla   v17.4s, %20.4s, v23.4s      \n"
                    "fmla   v18.4s, %17.4s, v22.4s      \n"
                    "fmla   v19.4s, %17.4s, v23.4s      \n"

                    "add    %2, %2, #32                 \n"

                    "fmla   v16.4s, %12.4s, v10.4s      \n"
                    "fmla   v17.4s, %12.4s, v11.4s      \n"
                    "fmla   v18.4s, %18.4s, v24.4s      \n"
                    "fmla   v19.4s, %18.4s, v25.4s      \n"

                    "add    %5, %5, #32                 \n"

                    "fmla   v16.4s, %13.4s, v11.4s      \n"
                    "fmla   v17.4s, %13.4s, v12.4s      \n"
                    "fmla   v18.4s, %19.4s, v25.4s      \n"
                    "fmla   v19.4s, %19.4s, v26.4s      \n"

                    "fmla   v16.4s, %14.4s, v12.4s      \n"
                    "fmla   v17.4s, %14.4s, v13.4s      \n"
                    "fmla   v18.4s, %20.4s, v26.4s      \n"
                    "fmla   v19.4s, %20.4s, v27.4s      \n"

                    "st1    {v16.4s, v17.4s}, [%0], #32 \n"
                    "st1    {v18.4s, v19.4s}, [%1], #32 \n"

                    : "=r"(outptr0), // %0
                    "=r"(outptr1),   // %1
                    "=r"(r0),        // %2
                    "=r"(r1),        // %3
                    "=r"(r2),        // %4
                    "=r"(r3)         // %5
                    : "0"(outptr0),
                    "1"(outptr1),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "w"(_k00),  // %12
                    "w"(_k01),  // %13
                    "w"(_k02),  // %14
                    "w"(_k10),  // %15
                    "w"(_k11),  // %16
                    "w"(_k12),  // %17
                    "w"(_k20),  // %18
                    "w"(_k21),  // %19
                    "w"(_k22),  // %20
                    "w"(_bias0) // %21
                    : "memory", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
            }
            // 1-pixel column tail (two output rows).
            for (; j < outw; j++)
            {
                asm volatile(
                    "prfm   pldl1keep, [%3, #384]       \n"
                    "ld1    {v10.4s, v11.4s, v12.4s}, [%3] \n" // r10 r11 r12

                    "mov    v16.16b, %21.16b            \n" // sum0
                    "mov    v17.16b, %21.16b            \n" // sum1

                    "fmla   v16.4s, %15.4s, v10.4s      \n"
                    "fmla   v17.4s, %12.4s, v10.4s      \n"

                    "add    %3, %3, #16                 \n"

                    "fmla   v16.4s, %16.4s, v11.4s      \n"
                    "fmla   v17.4s, %13.4s, v11.4s      \n"

                    "prfm   pldl1keep, [%4, #384]       \n"
                    "ld1    {v20.4s, v21.4s, v22.4s}, [%4] \n" // r20 r21 r22

                    "fmla   v16.4s, %17.4s, v12.4s      \n"
                    "fmla   v17.4s, %14.4s, v12.4s      \n"

                    "add    %4, %4, #16                 \n"

                    "fmla   v16.4s, %18.4s, v20.4s      \n"
                    "fmla   v17.4s, %15.4s, v20.4s      \n"

                    "prfm   pldl1keep, [%2, #384]       \n"
                    "ld1    {v10.4s, v11.4s, v12.4s}, [%2] \n" // r00 r01 r02

                    "fmla   v16.4s, %19.4s, v21.4s      \n"
                    "fmla   v17.4s, %16.4s, v21.4s      \n"

                    "prfm   pldl1keep, [%5, #384]       \n"
                    "ld1    {v24.4s, v25.4s, v26.4s}, [%5] \n" // r30 r31 r32

                    "fmla   v16.4s, %20.4s, v22.4s      \n"
                    "fmla   v17.4s, %17.4s, v22.4s      \n"

                    "add    %2, %2, #16                 \n"

                    "fmla   v16.4s, %12.4s, v10.4s      \n"
                    "fmla   v17.4s, %18.4s, v24.4s      \n"

                    "add    %5, %5, #16                 \n"

                    "fmla   v16.4s, %13.4s, v11.4s      \n"
                    "fmla   v17.4s, %19.4s, v25.4s      \n"

                    "fmla   v16.4s, %14.4s, v12.4s      \n"
                    "fmla   v17.4s, %20.4s, v26.4s      \n"

                    "st1    {v16.4s}, [%0], #16         \n"
                    "st1    {v17.4s}, [%1], #16         \n"

                    : "=r"(outptr0), // %0
                    "=r"(outptr1),   // %1
                    "=r"(r0),        // %2
                    "=r"(r1),        // %3
                    "=r"(r2),        // %4
                    "=r"(r3)         // %5
                    : "0"(outptr0),
                    "1"(outptr1),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "w"(_k00),  // %12
                    "w"(_k01),  // %13
                    "w"(_k02),  // %14
                    "w"(_k10),  // %15
                    "w"(_k11),  // %16
                    "w"(_k12),  // %17
                    "w"(_k20),  // %18
                    "w"(_k21),  // %19
                    "w"(_k22),  // %20
                    "w"(_bias0) // %21
                    : "memory", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v24", "v25", "v26");
            }

            // Advance past the 2-pixel halo plus one full row (two rows consumed).
            r0 += 2 * 4 + w * 4;
            r1 += 2 * 4 + w * 4;
            r2 += 2 * 4 + w * 4;
            r3 += 2 * 4 + w * 4;

            outptr0 += outw * 4;
            outptr1 += outw * 4;
        }
#endif // __aarch64__

        // Single-output-row path (remaining row on aarch64; all rows on armv7).
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j + 3 < outw; j += 4)
            {
#if __aarch64__
                asm volatile(
                    "prfm   pldl1keep, [%1, #256]       \n"
                    "ld1    {v10.4s, v11.4s}, [%1], #32 \n" // r00 r01

                    "mov    v16.16b, %17.16b            \n" // sum00
                    "mov    v17.16b, %17.16b            \n" // sum01
                    "mov    v18.16b, %17.16b            \n" // sum02
                    "mov    v19.16b, %17.16b            \n" // sum03

                    "prfm   pldl1keep, [%1, #512]       \n"
                    "ld1    {v12.4s, v13.4s, v14.4s, v15.4s}, [%1] \n" // r02 r03 r04 r05

                    "fmla   v16.4s, %8.4s, v10.4s       \n"
                    "fmla   v17.4s, %8.4s, v11.4s       \n"
                    "fmla   v18.4s, %8.4s, v12.4s       \n"
                    "fmla   v19.4s, %8.4s, v13.4s       \n"

                    "add    %1, %1, #32                 \n"

                    "fmla   v16.4s, %9.4s, v11.4s       \n"
                    "fmla   v17.4s, %9.4s, v12.4s       \n"
                    "fmla   v18.4s, %9.4s, v13.4s       \n"
                    "fmla   v19.4s, %9.4s, v14.4s       \n"

                    "prfm   pldl1keep, [%2, #256]       \n"
                    "ld1    {v10.4s, v11.4s}, [%2], #32 \n" // r10 r11

                    "fmla   v16.4s, %10.4s, v12.4s      \n"
                    "fmla   v17.4s, %10.4s, v13.4s      \n"
                    "fmla   v18.4s, %10.4s, v14.4s      \n"
                    "fmla   v19.4s, %10.4s, v15.4s      \n"

                    "prfm   pldl1keep, [%2, #512]       \n"
                    "ld1    {v12.4s, v13.4s, v14.4s, v15.4s}, [%2] \n" // r12 r13 r14 r15

                    "fmla   v16.4s, %11.4s, v10.4s      \n"
                    "fmla   v17.4s, %11.4s, v11.4s      \n"
                    "fmla   v18.4s, %11.4s, v12.4s      \n"
                    "fmla   v19.4s, %11.4s, v13.4s      \n"

                    "add    %2, %2, #32                 \n"

                    "fmla   v16.4s, %12.4s, v11.4s      \n"
                    "fmla   v17.4s, %12.4s, v12.4s      \n"
                    "fmla   v18.4s, %12.4s, v13.4s      \n"
                    "fmla   v19.4s, %12.4s, v14.4s      \n"

                    "prfm   pldl1keep, [%3, #256]       \n"
                    "ld1    {v10.4s, v11.4s}, [%3], #32 \n" // r20 r21

                    "fmla   v16.4s, %13.4s, v12.4s      \n"
                    "fmla   v17.4s, %13.4s, v13.4s      \n"
                    "fmla   v18.4s, %13.4s, v14.4s      \n"
                    "fmla   v19.4s, %13.4s, v15.4s      \n"

                    "prfm   pldl1keep, [%3, #512]       \n"
                    "ld1    {v12.4s, v13.4s, v14.4s, v15.4s}, [%3] \n" // r22 r23 r24 r25

                    "fmla   v16.4s, %14.4s, v10.4s      \n"
                    "fmla   v17.4s, %14.4s, v11.4s      \n"
                    "fmla   v18.4s, %14.4s, v12.4s      \n"
                    "fmla   v19.4s, %14.4s, v13.4s      \n"

                    "add    %3, %3, #32                 \n"

                    "fmla   v16.4s, %15.4s, v11.4s      \n"
                    "fmla   v17.4s, %15.4s, v12.4s      \n"
                    "fmla   v18.4s, %15.4s, v13.4s      \n"
                    "fmla   v19.4s, %15.4s, v14.4s      \n"

                    "fmla   v16.4s, %16.4s, v12.4s      \n"
                    "fmla   v17.4s, %16.4s, v13.4s      \n"
                    "fmla   v18.4s, %16.4s, v14.4s      \n"
                    "fmla   v19.4s, %16.4s, v15.4s      \n"

                    "st1    {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n"

                    : "=r"(outptr0), // %0
                    "=r"(r0),        // %1
                    "=r"(r1),        // %2
                    "=r"(r2)         // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
#else
                asm volatile(
                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d28-d31}, [%1 :128]! \n" // r00 r01

                    "vmov       q10, %q17           \n" // sum00
                    "vmov       q11, %q17           \n" // sum01

                    "vmla.f32   q10, %q8, q14       \n"
                    "vmla.f32   q11, %q8, q15       \n"
                    "vmla.f32   q10, %q9, q15       \n"

                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d28-d31}, [%1 :128]! \n" // r02 r03

                    "vmov       q12, %q17           \n" // sum02
                    "vmov       q13, %q17           \n" // sum03

                    "vmla.f32   q12, %q8, q14       \n"
                    "vmla.f32   q11, %q9, q14       \n"
                    "vmla.f32   q13, %q8, q15       \n"
                    "vmla.f32   q10, %q10, q14      \n"
                    "vmla.f32   q12, %q9, q15       \n"
                    "vmla.f32   q11, %q10, q15      \n"

                    // "pld        [%1, #256]          \n"
                    "vld1.f32   {d28-d31}, [%1 :128] \n" // r04 r05

                    "vmla.f32   q13, %q9, q14       \n"
                    "vmla.f32   q12, %q10, q14      \n"
                    "vmla.f32   q13, %q10, q15      \n"

                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d28-d31}, [%2 :128]! \n" // r10 r11

                    "vmla.f32   q10, %q11, q14      \n"
                    "vmla.f32   q11, %q11, q15      \n"
                    "vmla.f32   q10, %q12, q15      \n"

                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d28-d31}, [%2 :128]! \n" // r12 r13

                    "vmla.f32   q12, %q11, q14      \n"
                    "vmla.f32   q11, %q12, q14      \n"
                    "vmla.f32   q13, %q11, q15      \n"
                    "vmla.f32   q10, %q13, q14      \n"
                    "vmla.f32   q12, %q12, q15      \n"
                    "vmla.f32   q11, %q13, q15      \n"

                    // "pld        [%2, #256]          \n"
                    "vld1.f32   {d28-d31}, [%2 :128] \n" // r14 r15

                    "vmla.f32   q13, %q12, q14      \n"
                    "vmla.f32   q12, %q13, q14      \n"
                    "vmla.f32   q13, %q13, q15      \n"

                    "pld        [%3, #256]          \n"
                    "vld1.f32   {d28-d31}, [%3 :128]! \n" // r20 r21

                    "vmla.f32   q10, %q14, q14      \n"
                    "vmla.f32   q11, %q14, q15      \n"
                    "vmla.f32   q10, %q15, q15      \n"

                    "pld        [%3, #256]          \n"
                    "vld1.f32   {d28-d31}, [%3 :128]! \n" // r22 r23

                    "vmla.f32   q12, %q14, q14      \n"
                    "vmla.f32   q11, %q15, q14      \n"
                    "vmla.f32   q13, %q14, q15      \n"
                    "vmla.f32   q10, %q16, q14      \n"
                    "vmla.f32   q12, %q15, q15      \n"
                    "vmla.f32   q11, %q16, q15      \n"

                    // "pld        [%3, #256]          \n"
                    "vld1.f32   {d28-d31}, [%3 :128] \n" // r24 r25

                    "vmla.f32   q13, %q15, q14      \n"
                    "vmla.f32   q12, %q16, q14      \n"
                    "vmla.f32   q13, %q16, q15      \n"

                    "vstm       %0!, {d20-d27}      \n"

                    : "=r"(outptr0), // %0
                    "=r"(r0),        // %1
                    "=r"(r1),        // %2
                    "=r"(r2)         // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
            }
            for (; j + 1 < outw; j += 2)
            {
#if __aarch64__
                asm volatile(
                    "prfm   pldl1keep, [%1, #512]       \n"
                    "ld1    {v12.4s, v13.4s, v14.4s, v15.4s}, [%1] \n" // r00 r01 r02 r03

                    "mov    v16.16b, %17.16b            \n" // sum00
                    "mov    v17.16b, %17.16b            \n" // sum01
                    "eor    v18.16b, v18.16b, v18.16b   \n"
                    "eor    v19.16b, v19.16b, v19.16b   \n"

                    "fmla   v16.4s, %8.4s, v12.4s       \n"
                    "fmla   v17.4s, %8.4s, v13.4s       \n"

                    "add    %1, %1, #32                 \n"

                    "fmla   v18.4s, %9.4s, v13.4s       \n"
                    "fmla   v19.4s, %9.4s, v14.4s       \n"

                    "prfm   pldl1keep, [%2, #512]       \n"
                    "ld1    {v20.4s, v21.4s, v22.4s, v23.4s}, [%2] \n" // r10 r11 r12 r13

                    "fmla   v16.4s, %10.4s, v14.4s      \n"
                    "fmla   v17.4s, %10.4s, v15.4s      \n"

                    "add    %2, %2, #32                 \n"

                    "fmla   v18.4s, %11.4s, v20.4s      \n"
                    "fmla   v19.4s, %11.4s, v21.4s      \n"

                    "fmla   v16.4s, %12.4s, v21.4s      \n"
                    "fmla   v17.4s, %12.4s, v22.4s      \n"

                    "prfm   pldl1keep, [%3, #512]       \n"
                    "ld1    {v12.4s, v13.4s, v14.4s, v15.4s}, [%3] \n" // r20 r21 r22 r23

                    "fmla   v18.4s, %13.4s, v22.4s      \n"
                    "fmla   v19.4s, %13.4s, v23.4s      \n"

                    "fmla   v16.4s, %14.4s, v12.4s      \n"
                    "fmla   v17.4s, %14.4s, v13.4s      \n"

                    "fmla   v18.4s, %15.4s, v13.4s      \n"
                    "fmla   v19.4s, %15.4s, v14.4s      \n"

                    "fmla   v16.4s, %16.4s, v14.4s      \n"
                    "fmla   v17.4s, %16.4s, v15.4s      \n"

                    "add    %3, %3, #32                 \n"

                    "fadd   v16.4s, v16.4s, v18.4s      \n"
                    "fadd   v17.4s, v17.4s, v19.4s      \n"

                    "st1    {v16.4s, v17.4s}, [%0], #32 \n"

                    : "=r"(outptr0), // %0
                    "=r"(r0),        // %1
                    "=r"(r1),        // %2
                    "=r"(r2)         // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
                asm volatile(
                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d24-d27}, [%1 :128]! \n" // r00 r01

                    "vmov       q10, %q17           \n" // sum00
                    "vmov       q11, %q17           \n" // sum01

                    "vmla.f32   q10, %q8, q12       \n"
                    "vmla.f32   q11, %q8, q13       \n"

                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d28-d31}, [%1 :128] \n" // r02 r03

                    "vmla.f32   q10, %q9, q13       \n"
                    "vmla.f32   q11, %q9, q14       \n"
                    "vmla.f32   q10, %q10, q14      \n"

                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d24-d27}, [%2 :128]! \n" // r10 r11

                    "vmla.f32   q11, %q10, q15      \n"
                    "vmla.f32   q10, %q11, q12      \n"
                    "vmla.f32   q11, %q11, q13      \n"

                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d28-d31}, [%2 :128] \n" // r12 r13

                    "vmla.f32   q10, %q12, q13      \n"
                    "vmla.f32   q11, %q12, q14      \n"
                    "vmla.f32   q10, %q13, q14      \n"

                    "pld        [%3, #256]          \n"
                    "vld1.f32   {d24-d27}, [%3 :128]! \n" // r20 r21

                    "vmla.f32   q11, %q13, q15      \n"
                    "vmla.f32   q10, %q14, q12      \n"
                    "vmla.f32   q11, %q14, q13      \n"

                    "pld        [%3, #256]          \n"
                    "vld1.f32   {d28-d31}, [%3 :128] \n" // r22 r23

                    "vmla.f32   q10, %q15, q13      \n"
                    "vmla.f32   q11, %q15, q14      \n"
                    "vmla.f32   q10, %q16, q14      \n"
                    "vmla.f32   q11, %q16, q15      \n"

                    "vst1.f32   {d20-d23}, [%0 :128]! \n"

                    : "=r"(outptr0), // %0
                    "=r"(r0),        // %1
                    "=r"(r1),        // %2
                    "=r"(r2)         // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
            }
            // Scalar-intrinsics tail: one output pixel = bias + 9 fused
            // multiply-adds over the 3x3 input window.
            for (; j < outw; j++)
            {
                float32x4_t _sum0 = _bias0;

                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r01 = vld1q_f32(r0 + 4);
                float32x4_t _r02 = vld1q_f32(r0 + 8);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r11 = vld1q_f32(r1 + 4);
                float32x4_t _r12 = vld1q_f32(r1 + 8);
                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r21 = vld1q_f32(r2 + 4);
                float32x4_t _r22 = vld1q_f32(r2 + 8);

                _sum0 = vmlaq_f32(_sum0, _k00, _r00);
                _sum0 = vmlaq_f32(_sum0, _k01, _r01);
                _sum0 = vmlaq_f32(_sum0, _k02, _r02);
                _sum0 = vmlaq_f32(_sum0, _k10, _r10);
                _sum0 = vmlaq_f32(_sum0, _k11, _r11);
                _sum0 = vmlaq_f32(_sum0, _k12, _r12);
                _sum0 = vmlaq_f32(_sum0, _k20, _r20);
                _sum0 = vmlaq_f32(_sum0, _k21, _r21);
                _sum0 = vmlaq_f32(_sum0, _k22, _r22);

                vst1q_f32(outptr0, _sum0);

                r0 += 4;
                r1 += 4;
                r2 += 4;
                outptr0 += 4;
            }

            // Skip the 2-pixel halo at the end of each input row.
            r0 += 2 * 4;
            r1 += 2 * 4;
            r2 += 2 * 4;
        }
    }
}

// Depthwise 3x3 convolution, stride 2, pack-4 layout. Same per-group
// structure as the stride-1 variant, but every output pixel consumes input
// pixels 2 apart, so one output row consumes one input row plus the next
// (tailstep skips the remainder of the current input row and the whole next
// input row). 4-pixel and 2-pixel asm kernels with a scalar-intrinsics tail.
static void convdw3x3s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // Bytes... floats to skip per output row: leftover of the current input
    // row plus one full input row (stride 2 consumes two input rows per
    // output row), times 4 packed channels.
    const int tailstep = (w - 2 * outw + w) * 4;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);

        const float* k0 = kernel.row(g);

        float* outptr0 = out;

        const Mat img0 = bottom_blob.channel(g);

        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);

        float32x4_t _k00 = vld1q_f32(k0);
        float32x4_t _k01 = vld1q_f32(k0 + 4);
        float32x4_t _k02 = vld1q_f32(k0 + 8);
        float32x4_t _k10 = vld1q_f32(k0 + 12);
        float32x4_t _k11 = vld1q_f32(k0 + 16);
        float32x4_t _k12 = vld1q_f32(k0 + 20);
        float32x4_t _k20 = vld1q_f32(k0 + 24);
        float32x4_t _k21 = vld1q_f32(k0 + 28);
        float32x4_t _k22 = vld1q_f32(k0 + 32);

        int i = 0;

        for (; i < outh; i++)
        {
            int j = 0;
            // 4 output pixels per iteration: needs input pixels r?0..r?8.
            for (; j + 3 < outw; j += 4)
            {
#if __aarch64__
                asm volatile(
                    "prfm   pldl1keep, [%1, #512]       \n"
                    "ld1    {v10.4s, v11.4s, v12.4s, v13.4s}, [%1], #64 \n" // r00 r01 r02 r03

                    "mov    v28.16b, %17.16b            \n" // sum00
                    "mov    v29.16b, %17.16b            \n" // sum01
                    "mov    v30.16b, %17.16b            \n" // sum02
                    "mov    v31.16b, %17.16b            \n" // sum03

                    "prfm   pldl1keep, [%1, #512]       \n"
                    "ld1    {v14.4s, v15.4s, v16.4s, v17.4s}, [%1], #64 \n" // r04 r05 r06 r07

                    "fmla   v28.4s, %8.4s, v10.4s       \n"
                    "fmla   v29.4s, %8.4s, v12.4s       \n"
                    "fmla   v30.4s, %8.4s, v14.4s       \n"
                    "fmla   v31.4s, %8.4s, v16.4s       \n"

                    "prfm   pldl1keep, [%1, #128]       \n"
                    "ld1    {v18.4s}, [%1]              \n" // r08

                    "fmla   v28.4s, %9.4s, v11.4s       \n"
                    "fmla   v29.4s, %9.4s, v13.4s       \n"
                    "fmla   v30.4s, %9.4s, v15.4s       \n"
                    "fmla   v31.4s, %9.4s, v17.4s       \n"

                    "prfm   pldl1keep, [%2, #512]       \n"
                    "ld1    {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" // r10 r11 r12 r13

                    "fmla   v28.4s, %10.4s, v12.4s      \n"
                    "fmla   v29.4s, %10.4s, v14.4s      \n"
                    "fmla   v30.4s, %10.4s, v16.4s      \n"
                    "fmla   v31.4s, %10.4s, v18.4s      \n"

                    "prfm   pldl1keep, [%2, #512]       \n"
                    "ld1    {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" // r14 r15 r16 r17

                    "fmla   v28.4s, %11.4s, v20.4s      \n"
                    "fmla   v29.4s, %11.4s, v22.4s      \n"
                    "fmla   v30.4s, %11.4s, v24.4s      \n"
                    "fmla   v31.4s, %11.4s, v26.4s      \n"

                    "prfm   pldl1keep, [%2, #128]       \n"
                    "ld1    {v19.4s}, [%2]              \n" // r18

                    "fmla   v28.4s, %12.4s, v21.4s      \n"
                    "fmla   v29.4s, %12.4s, v23.4s      \n"
                    "fmla   v30.4s, %12.4s, v25.4s      \n"
                    "fmla   v31.4s, %12.4s, v27.4s      \n"

                    "prfm   pldl1keep, [%3, #512]       \n"
                    "ld1    {v10.4s, v11.4s, v12.4s, v13.4s}, [%3], #64 \n" // r20 r21 r22 r23

                    "fmla   v28.4s, %13.4s, v22.4s      \n"
                    "fmla   v29.4s, %13.4s, v24.4s      \n"
                    "fmla   v30.4s, %13.4s, v26.4s      \n"
                    "fmla   v31.4s, %13.4s, v19.4s      \n"

                    "prfm   pldl1keep, [%3, #512]       \n"
                    "ld1    {v14.4s, v15.4s, v16.4s, v17.4s}, [%3], #64 \n" // r24 r25 r26 r27

                    "fmla   v28.4s, %14.4s, v10.4s      \n"
                    "fmla   v29.4s, %14.4s, v12.4s      \n"
                    "fmla   v30.4s, %14.4s, v14.4s      \n"
                    "fmla   v31.4s, %14.4s, v16.4s      \n"

                    "prfm   pldl1keep, [%3, #128]       \n"
                    "ld1    {v18.4s}, [%3]              \n" // r28

                    "fmla   v28.4s, %15.4s, v11.4s      \n"
                    "fmla   v29.4s, %15.4s, v13.4s      \n"
                    "fmla   v30.4s, %15.4s, v15.4s      \n"
                    "fmla   v31.4s, %15.4s, v17.4s      \n"

                    "fmla   v28.4s, %16.4s, v12.4s      \n"
                    "fmla   v29.4s, %16.4s, v14.4s      \n"
                    "fmla   v30.4s, %16.4s, v16.4s      \n"
                    "fmla   v31.4s, %16.4s, v18.4s      \n"

                    "st1    {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n"

                    : "=r"(outptr0), // %0
                    "=r"(r0),        // %1
                    "=r"(r1),        // %2
                    "=r"(r2)         // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#else
                asm volatile(
                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d28-d31}, [%1 :128]! \n" // r00 r01

                    "vmov       q10, %q17           \n" // sum00

                    "vmla.f32   q10, %q8, q14       \n"

                    "vmov       q11, %q17           \n" // sum01

                    "vmla.f32   q10, %q9, q15       \n"

                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d28-d31}, [%1 :128]! \n" // r02 r03

                    "vmla.f32   q11, %q8, q14       \n"
                    "vmla.f32   q10, %q10, q14      \n"

                    "vmov       q12, %q17           \n" // sum02

                    "vmla.f32   q11, %q9, q15       \n"

                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d28-d31}, [%1 :128]! \n" // r04 r05

                    "vmla.f32   q12, %q8, q14       \n"
                    "vmla.f32   q11, %q10, q14      \n"
                    "vmla.f32   q12, %q9, q15       \n"

                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d28-d31}, [%2 :128]! \n" // r10 r11

                    "vmla.f32   q10, %q11, q14      \n"

                    "vmov       q13, %q17           \n" // sum03

                    "vmla.f32   q10, %q12, q15      \n"

                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d28-d31}, [%1 :128]! \n" // r06 r07

                    "vmla.f32   q13, %q8, q14       \n"
                    "vmla.f32   q12, %q10, q14      \n"
                    "vmla.f32   q13, %q9, q15       \n"

                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d28-d31}, [%2 :128]! \n" // r12 r13

                    "vmla.f32   q11, %q11, q14      \n"
                    "vmla.f32   q10, %q13, q14      \n"
                    "vmla.f32   q11, %q12, q15      \n"

                    "vld1.f32   {d28-d29}, [%1 :128] \n" // r08

                    "vmla.f32   q13, %q10, q14      \n"

                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d28-d31}, [%2 :128]! \n" // r14 r15

                    "vmla.f32   q12, %q11, q14      \n"
                    "vmla.f32   q11, %q13, q14      \n"
                    "vmla.f32   q12, %q12, q15      \n"

                    "pld        [%3, #256]          \n"
                    "vld1.f32   {d28-d31}, [%3 :128]! \n" // r20 r21

                    "vmla.f32   q10, %q14, q14      \n"
                    "vmla.f32   q10, %q15, q15      \n"

                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d28-d31}, [%2 :128]! \n" // r16 r17

                    "vmla.f32   q13, %q11, q14      \n"
                    "vmla.f32   q12, %q13, q14      \n"
                    "vmla.f32   q13, %q12, q15      \n"

                    "pld        [%3, #256]          \n"
                    "vld1.f32   {d28-d31}, [%3 :128]! \n" // r22 r23

                    "vmla.f32   q11, %q14, q14      \n"
                    "vmla.f32   q10, %q16, q14      \n"
                    "vmla.f32   q11, %q15, q15      \n"

                    "vld1.f32   {d28-d29}, [%2 :128] \n" // r18

                    "vmla.f32   q13, %q13, q14      \n"

                    "pld        [%3, #256]          \n"
                    "vld1.f32   {d28-d31}, [%3 :128]! \n" // r24 r25

                    "vmla.f32   q12, %q14, q14      \n"
                    "vmla.f32   q11, %q16, q14      \n"
                    "vmla.f32   q12, %q15, q15      \n"

                    "pld        [%3, #256]          \n"
                    "vld1.f32   {d28-d31}, [%3 :128]! \n" // r26 r27

                    "vmla.f32   q13, %q14, q14      \n"
                    "vmla.f32   q12, %q16, q14      \n"
                    "vmla.f32   q13, %q15, q15      \n"

                    "vld1.f32   {d28-d29}, [%3 :128] \n" // r28

                    "vmla.f32   q13, %q16, q14      \n"

                    "vstm       %0!, {d20-d27}      \n"

                    : "=r"(outptr0), // %0
                    "=r"(r0),        // %1
                    "=r"(r1),        // %2
                    "=r"(r2)         // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
            }
            // 2-pixel column tail.
            for (; j + 1 < outw; j += 2)
            {
#if __aarch64__
                asm volatile(
                    "prfm   pldl1keep, [%1, #512]       \n"
                    "ld1    {v10.4s, v11.4s, v12.4s, v13.4s}, [%1], #64 \n" // r00 r01 r02 r03

                    "mov    v20.16b, %17.16b            \n" // sum00
                    "mov    v21.16b, %17.16b            \n" // sum01
                    "eor    v22.16b, v22.16b, v22.16b   \n"
                    "eor    v23.16b, v23.16b, v23.16b   \n"

                    "fmla   v20.4s, %8.4s, v10.4s       \n"
                    "fmla   v21.4s, %8.4s, v12.4s       \n"

                    "prfm   pldl1keep, [%1, #128]       \n"
                    "ld1    {v14.4s}, [%1]              \n" // r04

                    "fmla   v22.4s, %9.4s, v11.4s       \n"
                    "fmla   v23.4s, %9.4s, v13.4s       \n"

                    "prfm   pldl1keep, [%2, #512]       \n"
                    "ld1    {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n" // r10 r11 r12 r13

                    "fmla   v20.4s, %10.4s, v12.4s      \n"
                    "fmla   v21.4s, %10.4s, v14.4s      \n"

                    "fmla   v22.4s, %11.4s, v16.4s      \n"
                    "fmla   v23.4s, %11.4s, v18.4s      \n"

                    "prfm   pldl1keep, [%2, #128]       \n"
                    "ld1    {v15.4s}, [%2]              \n" // r14

                    "fmla   v20.4s, %12.4s, v17.4s      \n"
                    "fmla   v21.4s, %12.4s, v19.4s      \n"

                    "prfm   pldl1keep, [%3, #512]       \n"
                    "ld1    {v10.4s, v11.4s, v12.4s, v13.4s}, [%3], #64 \n" // r20 r21 r22 r23

                    "fmla   v22.4s, %13.4s, v18.4s      \n"
                    "fmla   v23.4s, %13.4s, v15.4s      \n"

                    "fmla   v20.4s, %14.4s, v10.4s      \n"
                    "fmla   v21.4s, %14.4s, v12.4s      \n"

                    "prfm   pldl1keep, [%3, #128]       \n"
                    "ld1    {v14.4s}, [%3]              \n" // r24

                    "fmla   v22.4s, %15.4s, v11.4s      \n"
                    "fmla   v23.4s, %15.4s, v13.4s      \n"

                    "fmla   v20.4s, %16.4s, v12.4s      \n"
                    "fmla   v21.4s, %16.4s, v14.4s      \n"

                    "fadd   v20.4s, v20.4s, v22.4s      \n"
                    "fadd   v21.4s, v21.4s, v23.4s      \n"

                    "st1    {v20.4s, v21.4s}, [%0], #32 \n"

                    : "=r"(outptr0), // %0
                    "=r"(r0),        // %1
                    "=r"(r1),        // %2
                    "=r"(r2)         // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
                asm volatile(
                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d24-d27}, [%1 :128]! \n" // r00 r01

                    "vmov       q10, %q17           \n" // sum00
                    "vmov       q11, %q17           \n" // sum01

                    "vmla.f32   q10, %q8, q12       \n"

                    "pld        [%1, #256]          \n"
                    "vld1.f32   {d28-d31}, [%1 :128]! \n" // r02 r03

                    "vmla.f32   q10, %q9, q13       \n"
                    "vmla.f32   q11, %q8, q14       \n"
                    "vmla.f32   q10, %q10, q14      \n"

                    "vld1.f32   {d24-d25}, [%1 :128] \n" // r04

                    "vmla.f32   q11, %q9, q15       \n"

                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d28-d31}, [%2 :128]! \n" // r10 r11

                    "vmla.f32   q11, %q10, q12      \n"
                    "vmla.f32   q10, %q11, q14      \n"

                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d24-d27}, [%2 :128]! \n" // r12 r13

                    "vmla.f32   q10, %q12, q15      \n"
                    "vmla.f32   q11, %q11, q12      \n"
                    "vmla.f32   q10, %q13, q12      \n"

                    "vld1.f32   {d28-d29}, [%2 :128] \n" // r14

                    "vmla.f32   q11, %q12, q13      \n"

                    "pld        [%3, #256]          \n"
                    "vld1.f32   {d24-d27}, [%3 :128]! \n" // r20 r21

                    "vmla.f32   q11, %q13, q14      \n"
                    "vmla.f32   q10, %q14, q12      \n"

                    "pld        [%3, #256]          \n"
                    "vld1.f32   {d28-d31}, [%3 :128]! \n" // r22 r23

                    "vmla.f32   q10, %q15, q13      \n"
                    "vmla.f32   q11, %q14, q14      \n"
                    "vmla.f32   q10, %q16, q14      \n"

                    "vld1.f32   {d24-d25}, [%3 :128] \n" // r24

                    "vmla.f32   q11, %q15, q15      \n"
                    "vmla.f32   q11, %q16, q12      \n"

                    "vst1.f32   {d20-d23}, [%0 :128]! \n"

                    : "=r"(outptr0), // %0
                    "=r"(r0),        // %1
                    "=r"(r1),        // %2
                    "=r"(r2)         // %3
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "w"(_k00),  // %8
                    "w"(_k01),  // %9
                    "w"(_k02),  // %10
                    "w"(_k10),  // %11
                    "w"(_k11),  // %12
                    "w"(_k12),  // %13
                    "w"(_k20),  // %14
                    "w"(_k21),  // %15
                    "w"(_k22),  // %16
                    "w"(_bias0) // %17
                    : "memory", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
            }
            // Scalar-intrinsics tail: one output pixel per iteration,
            // advancing the input pointers by 2 pixels (stride 2).
            for (; j < outw; j++)
            {
                float32x4_t _sum0 = _bias0;

                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r01 = vld1q_f32(r0 + 4);
                float32x4_t _r02 = vld1q_f32(r0 + 8);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r11 = vld1q_f32(r1 + 4);
                float32x4_t _r12 = vld1q_f32(r1 + 8);
                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r21 = vld1q_f32(r2 + 4);
                float32x4_t _r22 = vld1q_f32(r2 + 8);

                _sum0 = vmlaq_f32(_sum0, _k00, _r00);
                _sum0 = vmlaq_f32(_sum0, _k01, _r01);
                _sum0 = vmlaq_f32(_sum0, _k02, _r02);
                _sum0 = vmlaq_f32(_sum0, _k10, _r10);
                _sum0 = vmlaq_f32(_sum0, _k11, _r11);
                _sum0 = vmlaq_f32(_sum0, _k12, _r12);
                _sum0 = vmlaq_f32(_sum0, _k20, _r20);
                _sum0 = vmlaq_f32(_sum0, _k21, _r21);
                _sum0 = vmlaq_f32(_sum0, _k22, _r22);

                vst1q_f32(outptr0, _sum0);

                r0 += 2 * 4;
                r1 += 2 * 4;
                r2 += 2 * 4;
                outptr0 += 4;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(1,ceild(8*t2-Nz+9,4)),t1+1);t3<=min(floord(4*Nt+Ny-9,4),floord(4*t1+Ny-1,4));t3++) { for (t4=max(max(ceild(t1-6,8),ceild(8*t2-Nz-19,32)),ceild(4*t3-Ny-19,32));t4<=min(min(floord(4*Nt+Nx-9,32),floord(4*t1+Nx-1,32)),floord(4*t3+Nx-9,32));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),t3-1),8*t4+6);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(32*t4,4*t5+4); ubv=min(32*t4+31,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ 
(-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
ex5-norm-mean-stddev-openmp.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <omp.h> #define VALIDATE 0 #if VALIDATE #include "validate.h" #endif double *normrnd(const size_t, const size_t, const double, const double); double mean(const size_t, const size_t, const double * restrict); double std(const size_t, const size_t, const double, const double * restrict); void usage(char**); int main(int argc, char **argv) { double *A,avg,stddev; size_t n; double t0,t1; srand(time(0)); if(argc==2) sscanf(argv[1],"%zu",&n); else { usage(argv); return 1; } A = normrnd(n,n,0,1); /* standard normal distribution: (mu,sigma)=(0,1) */ t0 = omp_get_wtime(); avg = mean(n,n,A); stddev = std(n,n,avg,A); t1 = omp_get_wtime(); #if VALIDATE if(!validate_mean(n,n,A,avg)) { printf("Mean value validation failed.\n"); return 1; } if(!validate_std(n,n,avg,A,stddev)) { printf("Standard deviation validation failed.\n"); return 1; } #endif printf("mean(A) = %f\nstd(A) = %f\n",avg,stddev); printf("Total time taken: %f.\n",t1-t0); free(A); return 0; } double *normrnd(const size_t n, const size_t m, const double mu, const double sigma) { double *A = (double*)malloc(n*m*sizeof(double)); double u1,u2,z0; for(size_t i=0; i<n*m; ++i) { u1 = (double)rand()/(double)RAND_MAX; u2 = (double)rand()/(double)RAND_MAX; z0 = sqrt(-2*log(u1))*cos(6.28*u2); /* Box–Muller transform */ A[i] = mu+sigma*z0; } return A; } double mean(const size_t n, const size_t m, const double * restrict A) { double sum=0; size_t i; #pragma omp parallel for default(none) shared(A) private(i) reduction(+:sum) for(i=0; i<n*m; ++i) sum += A[i]; return sum/(n*m); } double std(const size_t n, const size_t m, const double avg, const double * restrict A) { double sum=0; size_t i; #pragma omp parallel for default(none) shared(A) private(i) reduction(+:sum) for(i=0; i<n*m; ++i) sum += (A[i]-avg)*(A[i]-avg); return sqrt(sum/(n*m-1)); } void usage(char **argv) { printf("Usage: %s <length>\n",argv[0]); }
Interp1PrimFifthOrderCRWENO.c
/*! @file Interp1PrimFifthOrderCRWENO.c * @brief CRWENO5 Scheme (Component-wise application to vectors). * @author Debojyoti Ghosh */ #include <stdio.h> #include <basic.h> #include <arrayfunctions.h> #include <mathfunctions.h> #include <interpolation.h> #include <tridiagLU.h> #include <mpivars.h> #include <hypar.h> #ifdef with_omp #include <omp.h> #endif #undef _MINIMUM_GHOSTS_ /*! \def _MINIMUM_GHOSTS_ * Minimum number of ghost points required for this interpolation * method. */ #define _MINIMUM_GHOSTS_ 3 /*! @brief 5th order CRWENO reconstruction (component-wise) on a uniform grid Computes the interpolated values of the first primitive of a function \f${\bf f}\left({\bf u}\right)\f$ at the interfaces from the cell-centered values of the function using the fifth order CRWENO scheme on a uniform grid. The first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ that satisfies: \f{equation}{ {\bf f}\left({\bf u}\left(x\right)\right) = \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf u}\left(\zeta\right)\right)d\zeta, \f} where \f$x\f$ is the spatial coordinate along the dimension of the interpolation. 
This function computes the 5th order CRWENO numerical approximation \f$\hat{\bf f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as the convex combination of three 3rd order methods: \f{align}{ &\ \omega_1\ \times\ \left[ \frac{2}{3}\hat{\bf f}_{j-1/2} + \frac{1}{3}\hat{\bf f}_{j+1/2} = \frac{1}{6} \left( f_{j-1} + 5f_j \right) \right]\\ + &\ \omega_2\ \times\ \left[ \frac{1}{3}\hat{\bf f}_{j-1/2}+\frac{2}{3}\hat{\bf f}_{j+1/2} = \frac{1}{6} \left( 5f_j + f_{j+1} \right) \right] \\ + &\ \omega_3\ \times\ \left[ \frac{2}{3}\hat{\bf f}_{j+1/2} + \frac{1}{3}\hat{\bf f}_{j+3/2} = \frac{1}{6} \left( f_j + 5f_{j+1} \right) \right] \\ \Rightarrow &\ \left(\frac{2}{3}\omega_1+\frac{1}{3}\omega_2\right)\hat{\bf f}_{j-1/2} + \left[\frac{1}{3}\omega_1+\frac{2}{3}(\omega_2+\omega_3)\right]\hat{\bf f}_{j+1/2} + \frac{1}{3}\omega_3\hat{\bf f}_{j+3/2} = \frac{\omega_1}{6}{\bf f}_{j-1} + \frac{5(\omega_1+\omega_2)+\omega_3}{6}{\bf f}_j + \frac{\omega_2+5\omega_3}{6}{\bf f}_{j+1}, \f} where \f$\omega_k; k=1,2,3\f$ are the nonlinear WENO weights computed in WENOFifthOrderCalculateWeights() (note that the \f$\omega\f$ are different for each component of the vector \f$\hat{\bf f}\f$). The resulting tridiagonal system is solved using tridiagLU() (see also #TridiagLU, tridiagLU.h). \b Implementation \b Notes: + This method assumes a uniform grid in the spatial dimension corresponding to the interpolation. + The method described above corresponds to a left-biased interpolation. The corresponding right-biased interpolation can be obtained by reflecting the equations about interface j+1/2. + The scalar interpolation method is applied to the vector function in a component-wise manner. + The function computes the interpolant for the entire grid in one call. It loops over all the grid lines along the interpolation direction and carries out the 1D interpolation along these grid lines. 
+ Location of cell-centers and cell interfaces along the spatial dimension of the interpolation is shown in the following figure: @image html chap1_1Ddomain.png @image latex chap1_1Ddomain.eps width=0.9\textwidth \b Function \b arguments: Argument | Type | Explanation --------- | --------- | --------------------------------------------- fI | double* | Array to hold the computed interpolant at the grid interfaces. This array must have the same layout as the solution, but with \b no \b ghost \b points. Its size should be the same as u in all dimensions, except dir (the dimension along which to interpolate) along which it should be larger by 1 (number of interfaces is 1 more than the number of interior cell centers). fC | double* | Array with the cell-centered values of the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have the same layout and size as the solution, \b with \b ghost \b points. u | double* | The solution array \f${\bf u}\f$ (with ghost points). If the interpolation is characteristic based, this is needed to compute the eigendecomposition. For a multidimensional problem, the layout is as follows: u is a contiguous 1D array of size (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional solution, with the following ordering - nvars, dim[0], dim[1], ..., dim[D-1], where nvars is the number of solution components (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the number of spatial dimensions. x | double* | The grid array (with ghost points). This is used only by non-uniform-grid interpolation methods. For multidimensional problems, the layout is as follows: x is a contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so forth. 
upw | int | Upwinding direction: if positive, a left-biased interpolant will be computed; if negative, a right-biased interpolant will be computed. If the interpolation method is central, then this has no effect. dir | int | Spatial dimension along which to interpolate (eg: 0 for 1D; 0 or 1 for 2D; 0,1 or 2 for 3D) s | void* | Solver object of type #HyPar: the following variables are needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local. m | void* | MPI object of type #MPIVariables: this is needed only by compact interpolation method that need to solve a global implicit system across MPI ranks. uflag | int | A flag indicating if the function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$). \b Reference: + Ghosh, D., Baeder, J. D., Compact Reconstruction Schemes with Weighted ENO Limiting for Hyperbolic Conservation Laws, SIAM Journal on Scientific Computing, 34 (3), 2012, A1678–A1706, http://dx.doi.org/10.1137/110857659 + Ghosh, D., Constantinescu, E. 
    M., Brown, J., Efficient Implementation of Nonlinear Compact Schemes on
    Massively Parallel Platforms, SIAM Journal on Scientific Computing, 37 (3),
    2015, C354-C383, http://dx.doi.org/10.1137/140989261
 */
int Interp1PrimFifthOrderCRWENO(
                                 double *fI,  /*!< Array of interpolated function values at the interfaces */
                                 double *fC,  /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */
                                 double *u,   /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */
                                 double *x,   /*!< Grid coordinates */
                                 int    upw,  /*!< Upwind direction (left or right biased) */
                                 int    dir,  /*!< Spatial dimension along which to interpolation */
                                 void   *s,   /*!< Object of type #HyPar containing solver-related variables */
                                 void   *m,   /*!< Object of type #MPIVariables containing MPI-related variables */
                                 int    uflag /*!< Flag to indicate if \f$f(u) \equiv u\f$, i.e, if the solution is being reconstructed */
                               )
{
  HyPar          *solver = (HyPar*)          s;
  MPIVariables   *mpi    = (MPIVariables*)   m;
  CompactScheme  *compact= (CompactScheme*)  solver->compact;
  WENOParameters *weno   = (WENOParameters*) solver->interp;
  TridiagLU      *lu     = (TridiagLU*)      solver->lusolver;
  int            sys, Nsys, d;
  _DECLARE_IERR_;

  int ghosts = solver->ghosts;
  int ndims  = solver->ndims;
  int nvars  = solver->nvars;
  int *dim   = solver->dim_local;
  int *stride= solver->stride_with_ghosts;

  /* define some constants */
  static const double one_third = 1.0/3.0;
  static const double one_sixth = 1.0/6.0;

  /* The WENO weights are stored in one flat buffer per stencil, partitioned
   * as [left-biased | right-biased] x [flux | solution] x dimension; the
   * three offsets below select the (upw, uflag, dir) slice. */
  double *ww1, *ww2, *ww3;
  ww1 = weno->w1 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
  ww2 = weno->w2 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
  ww3 = weno->w3 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];

  /* create index and bounds for the outer loop, i.e., to loop over all 1D lines
     along dimension "dir" */
  int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
  _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1;
  _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;  /* one more interface than cells */
  int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer);

  /* calculate total number of tridiagonal systems to solve */
  _ArrayProduct1D_(bounds_outer,ndims,Nsys); Nsys *= nvars;

  /* Allocate arrays for tridiagonal system (preallocated in the compact-scheme object) */
  double *A = compact->A;
  double *B = compact->B;
  double *C = compact->C;
  double *R = compact->R;

  /* Assemble the tridiagonal system: one independent system per grid line and
   * solution component, so the outer loop parallelizes cleanly. */
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
  for (sys=0; sys < N_outer; sys++) {
    _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
    _ArrayCopy1D_(index_outer,indexC,ndims);
    _ArrayCopy1D_(index_outer,indexI,ndims);
    for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
      int qm1,qm2,qm3,qp1,qp2,p;
      _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
      /* 1D offsets of the five stencil cells; for a right-biased interpolant
       * the stencil is the mirror image of the left-biased one. */
      if (upw > 0) {
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        qm3 = qm1 - 2*stride[dir];
        qm2 = qm1 -   stride[dir];
        qp1 = qm1 +   stride[dir];
        qp2 = qm1 + 2*stride[dir];
      } else {
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        qm3 = qm1 + 2*stride[dir];
        qm2 = qm1 +   stride[dir];
        qp1 = qm1 -   stride[dir];
        qp2 = qm1 - 2*stride[dir];
      }

      /* Defining stencil points */
      double *fm3, *fm2, *fm1, *fp1, *fp2;
      fm3 = fC+qm3*nvars;
      fm2 = fC+qm2*nvars;
      fm1 = fC+qm1*nvars;
      fp1 = fC+qp1*nvars;
      fp2 = fC+qp2*nvars;

      /* Candidate stencils and their optimal weights*/
      double f1[nvars], f2[nvars], f3[nvars];
      if (   ((mpi->ip[dir] == 0                ) && (indexI[dir] == 0       ))
          || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
        /* Use WENO5 at the physical boundaries (explicit 3rd-order candidates) */
        _ArrayAXBYCZ_(f1,(2*one_sixth),fm3,(-7*one_sixth) ,fm2,(11*one_sixth) ,fm1,nvars);
        _ArrayAXBYCZ_(f2,(-one_sixth) ,fm2,(5*one_sixth) ,fm1,(2*one_sixth) ,fp1,nvars);
        _ArrayAXBYCZ_(f3,(2*one_sixth),fm1,(5*one_sixth) ,fp1,(-one_sixth) ,fp2,nvars);
      } else {
        /* CRWENO5 at the interior points (implicit 3rd-order candidates) */
        _ArrayAXBY_(f1,(one_sixth) ,fm2,(5*one_sixth),fm1,nvars);
        _ArrayAXBY_(f2,(5*one_sixth),fm1,(one_sixth) ,fp1,nvars);
        _ArrayAXBY_(f3,(one_sixth) ,fm1,(5*one_sixth),fp1,nvars);
      }

      /* retrieve the WENO weights (precomputed in WENOFifthOrderCalculateWeights) */
      double *w1, *w2, *w3;
      w1 = (ww1+p*nvars);
      w2 = (ww2+p*nvars);
      w3 = (ww3+p*nvars);

      if (   ((mpi->ip[dir] == 0                ) && (indexI[dir] == 0       ))
          || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
        /* boundary rows: identity row -> fI equals the explicit WENO5 RHS */
        _ArraySetValue_ ((A+Nsys*indexI[dir]+sys*nvars),nvars,0.0)
        _ArraySetValue_ ((B+Nsys*indexI[dir]+sys*nvars),nvars,1.0)
        _ArraySetValue_ ((C+Nsys*indexI[dir]+sys*nvars),nvars,0.0)
      } else {
        /* interior rows: convex combination of the three implicit candidates */
        if (upw > 0) {
          _ArrayAXBY_ ((A+Nsys*indexI[dir]+sys*nvars),(2*one_third) ,w1,(one_third) ,w2,nvars);
          _ArrayAXBYCZ_ ((B+Nsys*indexI[dir]+sys*nvars),(one_third) ,w1,(2*one_third),w2,(2*one_third),w3,nvars);
          _ArrayScaleCopy1D_(w3,(one_third),(C+Nsys*indexI[dir]+sys*nvars),nvars);
        } else {
          /* right-biased: sub- and super-diagonals swap roles */
          _ArrayAXBY_ ((C+Nsys*indexI[dir]+sys*nvars),(2*one_third) ,w1,(one_third) ,w2,nvars);
          _ArrayAXBYCZ_ ((B+Nsys*indexI[dir]+sys*nvars),(one_third) ,w1,(2*one_third),w2,(2*one_third),w3,nvars);
          _ArrayScaleCopy1D_(w3,(one_third),(A+Nsys*indexI[dir]+sys*nvars),nvars);
        }
      }
      /* RHS: w1*f1 + w2*f2 + w3*f3 */
      _ArrayMultiply3Add1D_ ((R+Nsys*indexI[dir]+sys*nvars),w1,f1,w2,f2,w3,f3,nvars);
    }
  }

#ifdef serial
  /* Solve the tridiagonal system */
  IERR tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,NULL); CHECKERR(ierr);
#else
  /* Solve the tridiagonal system */
  /* all processes except the last will solve without the last interface to avoid overlap */
  if (mpi->ip[dir] != mpi->iproc[dir]-1) { IERR tridiagLU(A,B,C,R,dim[dir]  ,Nsys,lu,&mpi->comm[dir]); CHECKERR(ierr); }
  else                                   { IERR tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,&mpi->comm[dir]); CHECKERR(ierr); }
  /* Now get the solution to the last interface from the next proc */
  double *sendbuf = compact->sendbuf;
  double *recvbuf = compact->recvbuf;
  MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL};
  if (mpi->ip[dir]) for (d=0; d<Nsys; d++) sendbuf[d] = R[d];
  if (mpi->ip[dir] != mpi->iproc[dir]-1) MPI_Irecv(recvbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]+1,214,mpi->comm[dir],&req[0]);
  if (mpi->ip[dir])                      MPI_Isend(sendbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]-1,214,mpi->comm[dir],&req[1]);
  /* NOTE(review): MPI_Waitall formally takes MPI_STATUSES_IGNORE, not
   * MPI_STATUS_IGNORE; most implementations accept either -- confirm. */
  MPI_Waitall(2,&req[0],MPI_STATUS_IGNORE);
  if (mpi->ip[dir] != mpi->iproc[dir]-1) for (d=0; d<Nsys; d++) R[d+Nsys*dim[dir]] = recvbuf[d];
#endif

  /* save the solution to fI */
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
  for (sys=0; sys < N_outer; sys++) {
    _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
    _ArrayCopy1D_(index_outer,indexI,ndims);
    for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
      int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
      _ArrayCopy1D_((R+sys*nvars+Nsys*indexI[dir]),(fI+nvars*p),nvars);
    }
  }

  return(0);
}
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ''fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ''classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. 
%
%
*/

#include "MagickCore/studio.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"

/*
  Define declarations.
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f

/*
  Typedef declarations.
*/
/* One interval (class extent) along a single color component. */
typedef struct _ExtentPacket
{
  double
    center;  /* accumulated component sum; divided by the pixel count to
                become the class mean */

  ssize_t
    index,   /* scan cursor used when extracting the next region */
    left,    /* inclusive lower histogram bin of the interval */
    right;   /* inclusive upper histogram bin of the interval */
} ExtentPacket;

/* A candidate color class: one extent per RGB component, linked-list node. */
typedef struct _Cluster
{
  struct _Cluster
    *next;   /* next cluster in the singly-linked list */

  ExtentPacket
    red,
    green,
    blue;

  ssize_t
    count,   /* number of pixels assigned to this cluster */
    id;      /* colormap index assigned after thinning */
} Cluster;

/* Node of the scale-space interval tree of zero crossings.
   (Member semantics are established in OptimalTau(), outside this view.) */
typedef struct _IntervalTree
{
  double
    tau;

  ssize_t
    left,
    right;

  double
    mean_stability,
    stability;

  struct _IntervalTree
    *sibling,
    *child;
} IntervalTree;

/* Histogram smoothed at scale tau together with its second-derivative
   zero-crossing map. */
typedef struct _ZeroCrossing
{
  double
    tau,
    histogram[256];

  short
    crossings[256];
} ZeroCrossing;

/*
  Constant declarations.
*/
static const int
  Blue = 2,        /* indices into the per-component extrema/histogram arrays */
  Green = 1,
  Red = 0,
  SafeMargin = 3,  /* slack (in bins) added around each class extent */
  TreeLength = 600;

/*
  Method prototypes.
*/
static double
  OptimalTau(const ssize_t *,const double,const double,const double,
    const double,short *);

static ssize_t
  DefineRegion(const short *,ExtentPacket *);

static void
  FreeNodes(IntervalTree *),
  InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
  ScaleSpace(const ssize_t *,const double,double *),
  ZeroCrossHistogram(double *,const double,short *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Classify() defines one or more classes.  Each pixel is thresholded to
%  determine which class it belongs to.  If the class is not identified it is
%  assigned to the closest class based on the fuzzy c-Means technique.
%
%  The format of the Classify method is:
%
%      MagickBooleanType Classify(Image *image,short **extrema,
%        const double cluster_threshold,const double weighting_exponent,
%        const MagickBooleanType verbose,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o cluster_threshold:  This double represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o weighting_exponent: Specifies the membership weighting exponent.
%
%    o verbose:  A value greater than zero prints detailed information about
%      the identified classes.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  Classify() builds the initial set of classes (clusters) from the per-channel
  histogram extrema, prunes weak clusters, then assigns every pixel of `image'
  either to the matching coarse class or, failing that, by fuzzy c-means
  membership against the cluster centers.  The cluster centers become the
  image colormap.
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const double cluster_threshold, const double weighting_exponent,
  const MagickBooleanType verbose, ExceptionInfo *exception)
{
#define SegmentImageTag "Segment/Image"

  CacheView
    *image_view;

  Cluster
    *cluster,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickOffsetType
    progress;

  double
    *free_squares;

  MagickStatusType
    status;

  register ssize_t
    i;

  register double
    *squares;

  size_t
    number_clusters;

  ssize_t
    count,
    y;

  /*
    Form clusters: one cluster per (red,green,blue) triple of peak regions
    found in the extrema arrays (assumes 256 entries per channel).
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  status=MagickTrue;
  count=0;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* A pixel belongs to the first cluster whose (widened) RGB extents
         contain it; SafeMargin widens each extent on both sides. */
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold.  `count'
    here becomes the running number of surviving clusters, so the threshold
    is relative to the surviving population seen so far.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  if (number_clusters > 256)
    /* NOTE(review): ThrowBinaryException returns here without freeing the
       cluster list allocated above -- presumably a leak; confirm against
       upstream. */
    ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: squares[-255..255] caches i*i so the
    channel differences can index it directly (hence the +255 bias).
  */
  squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(double) i*(double) i;
  /*
    Allocate image colormap.
  */
  if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do coarse grain classes.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *clust;

    register const PixelInfo
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,0,q);
      for (clust=head; clust != (Cluster *) NULL; clust=clust->next)
      {
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
             (clust->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
             (clust->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
             (clust->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
             (clust->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
             (clust->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
             (clust->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(image,(Quantum) clust->id,q);
            break;
          }
      }
      if (clust == (Cluster *) NULL)
        {
          double
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          register ssize_t
            j,
            k;

          /*
            Compute fuzzy membership: pick the colormap entry j that
            maximizes 1/sum_k (d_j/d_k)^p, i.e. the strongest membership.
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=squares[(ssize_t) ScaleQuantumToChar(
              GetPixelRed(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->blue))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=squares[(ssize_t) ScaleQuantumToChar(
                GetPixelRed(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->blue))];
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
                */
                local_minima=1.0/sum;
                SetPixelIndex(image,(Quantum) j,q);
              }
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status&=SyncImage(image,exception);
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  squares-=255;
  free_squares=squares;
  free_squares=(double *) RelinquishMagickMemory(free_squares);
  /* NOTE(review): `status' is accumulated above but MagickTrue is returned
     unconditionally -- verify against upstream whether `status' should be
     returned here. */
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C o n s o l i d a t e C r o s s i n g s                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCrossings() guarantees that an even number of zero crossings
%  always lie between two crossings.
%
%  The format of the ConsolidateCrossings method is:
%
%      ConsolidateCrossings(ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings, working from the coarsest scale (highest
    index) toward the finest.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      /* NOTE(review): the loop above never leaves k < 0 (it stops at 1 or
         exits with k == 0), so this clamp is dead code. */
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /* Move the crossing at j to the corrected position (or drop it). */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e f i n e R e g i o n                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineRegion() defines the left and right boundaries of a peak region.
%
%  The format of the DefineRegion method is:
%
%      ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
%  A description of each parameter follows.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o extents:  This pointer to an ExtentPacket represent the extends
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.  Note: extents->index is both input (scan
    start) and output (resume position for the next call), which is how the
    nested while-loops in Classify() enumerate successive regions.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse); /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e r i v a t i v e H i s t o g r a m                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DerivativeHistogram() determines the derivative of the histogram using
%  central differencing.
%
%  The format of the DerivativeHistogram method is:
%
%      DerivativeHistogram(const double *histogram,
%        double *derivative)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of doubles representing the number
%      of pixels for each intensity of a particular color component.
%
%    o derivative: This array of doubles is initialized by
%      DerivativeHistogram to the derivative of the histogram using central
%      differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  register ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation.
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e D y n a m i c T h r e s h o l d                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
%  The format of the GetImageDynamicThreshold method is:
%
%      MagickBooleanType GetImageDynamicThreshold(const Image *image,
%        const double cluster_threshold,const double smooth_threshold,
%        PixelInfo *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cluster_threshold:  This double represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the
%      second derivative of the histogram.  As the value is increased, you
%      can expect a smoother second derivative.
%
%    o pixel: return the dynamic threshold here.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  double
    threshold;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /* NOTE(review): extrema[i] is sized with sizeof(**histogram) (ssize_t)
       rather than sizeof(**extrema) (short); harmless over-allocation, but
       SegmentImage() below uses sizeof(**extrema) -- inconsistent. */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* Unwind the channels already allocated before failing. */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest surviving cluster as `object' and the largest as
    `background'.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      /* NOTE(review): both scans start at head->next and stop when
         cluster->next is NULL, so head and the final node are never
         compared -- verify intent against upstream. */
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      /*
        The dynamic threshold is the midpoint between the background and
        object cluster centers, per channel.
      */
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I n i t i a l i z e H i s t o g r a m                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeHistogram() computes the histogram for an image.
%
%  The format of the InitializeHistogram method is:
%
%      InitializeHistogram(const Image *image,ssize_t **histogram)
%
%  A description of each parameter follows.
%
%    o image: Specifies a pointer to an Image structure;  returned from
%      ReadImage.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
% */

static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const Quantum
    *pixels;

  register ssize_t
    bin,
    column;

  ssize_t
    row;

  /*
    Accumulate a 256-entry intensity histogram per color channel by
    walking the image one row of virtual pixels at a time.
  */
  for (bin=0; bin <= 255; bin++)
  {
    histogram[Red][bin]=0;
    histogram[Green][bin]=0;
    histogram[Blue][bin]=0;
  }
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    pixels=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (pixels == (const Quantum *) NULL)
      break;  /* pixel cache failure: stop with a partial histogram */
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,
        pixels))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,
        pixels))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,
        pixels))]++;
      pixels+=GetPixelChannels(image);
    }
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I n i t i a l i z e I n t e r v a l T r e e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeIntervalTree() initializes an interval tree from the lists of
%  zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes,
%        IntervalTree *node)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
% */

/* Append every leaf (childless) node of the tree to `list', depth first,
   siblings before children; *number_nodes is advanced past each entry. */
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    list[(*number_nodes)++]=node;
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}

/* For each node, set mean_stability to the average stability of its direct
   children (0.0 for leaves); recurses over the whole sibling/child tree. */
static void MeanStability(IntervalTree *node)
{
  register IntervalTree
    *child;

  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  child=node->child;
  if (child != (IntervalTree *) NULL)
    {
      register ssize_t
        count;

      register double
        sum;

      sum=0.0;
      count=0;
      for ( ; child != (IntervalTree *) NULL; child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      node->mean_stability=sum/(double) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}

/* Stability of a node is the tau gap between it and its first child
   (0.0 for leaves); recurses over the whole sibling/child tree. */
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    node->stability=0.0;
  else
    node->stability=node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}

/* Build the scale-space interval tree: starting from a root spanning the
   whole [0,255] histogram, each finer zero-crossing list splits the current
   leaf intervals at its crossing positions.  Returns NULL on allocation
   failure (partially built tree and list are released). */
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /* i starts at -1 so the first pass (i+1 == 0) consumes the coarsest
     zero-crossing list against the lone root leaf. */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            /* First split becomes the child of `head'; subsequent splits
               chain on as siblings. */
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /* Close the final sub-interval if any split occurred. */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   O p t i m a l T a u                                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OptimalTau() finds the optimal tau for each band of the histogram.
%
%  The format of the OptimalTau method is:
%
%    double OptimalTau(const ssize_t *histogram,const double max_tau,
%      const double min_tau,const double delta_tau,
%      const double smooth_threshold,short *extrema)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
% % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % */ static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->stability >= node->mean_stability) { list[(*number_nodes)++]=node; ActiveNodes(list,number_nodes,node->sibling); } else { ActiveNodes(list,number_nodes,node->sibling); ActiveNodes(list,number_nodes,node->child); } } static void FreeNodes(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; FreeNodes(node->sibling); FreeNodes(node->child); node=(IntervalTree *) RelinquishMagickMemory(node); } static double OptimalTau(const ssize_t *histogram,const double max_tau, const double min_tau,const double delta_tau,const double smooth_threshold, short *extrema) { IntervalTree **list, *node, *root; MagickBooleanType peak; double average_tau, *derivative, *second_derivative, tau, value; register ssize_t i, x; size_t count, number_crossings; ssize_t index, j, k, number_nodes; ZeroCrossing *zero_crossing; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return(0.0); /* Allocate zero crossing list. */ count=(size_t) ((max_tau-min_tau)/delta_tau)+2; zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count, sizeof(*zero_crossing)); if (zero_crossing == (ZeroCrossing *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } for (i=0; i < (ssize_t) count; i++) zero_crossing[i].tau=(-1.0); /* Initialize zero crossing list. 
*/ derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative)); second_derivative=(double *) AcquireCriticalMemory(256* sizeof(*second_derivative)); i=0; for (tau=max_tau; tau >= min_tau; tau-=delta_tau) { zero_crossing[i].tau=tau; ScaleSpace(histogram,tau,zero_crossing[i].histogram); DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); i++; } /* Add an entry for the original histogram. */ zero_crossing[i].tau=0.0; for (j=0; j <= 255; j++) zero_crossing[i].histogram[j]=(double) histogram[j]; DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); number_crossings=(size_t) i; derivative=(double *) RelinquishMagickMemory(derivative); second_derivative=(double *) RelinquishMagickMemory(second_derivative); /* Ensure the scale-space fingerprints form lines in scale-space, not loops. */ ConsolidateCrossings(zero_crossing,number_crossings); /* Force endpoints to be included in the interval. */ for (i=0; i <= (ssize_t) number_crossings; i++) { for (j=0; j < 255; j++) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]); for (j=255; j > 0; j--) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]); } /* Initialize interval tree. */ root=InitializeIntervalTree(zero_crossing,number_crossings); if (root == (IntervalTree *) NULL) { zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } /* Find active nodes: stability is greater (or equal) to the mean stability of its children. */ number_nodes=0; ActiveNodes(list,&number_nodes,root->child); /* Initialize extrema. 
*/ for (i=0; i <= 255; i++) extrema[i]=0; for (i=0; i < number_nodes; i++) { /* Find this tau in zero crossings list. */ k=0; node=list[i]; for (j=0; j <= (ssize_t) number_crossings; j++) if (zero_crossing[j].tau == node->tau) k=j; /* Find the value of the peak. */ peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue : MagickFalse; index=node->left; value=zero_crossing[k].histogram[index]; for (x=node->left; x <= node->right; x++) { if (peak != MagickFalse) { if (zero_crossing[k].histogram[x] > value) { value=zero_crossing[k].histogram[x]; index=x; } } else if (zero_crossing[k].histogram[x] < value) { value=zero_crossing[k].histogram[x]; index=x; } } for (x=node->left; x <= node->right; x++) { if (index == 0) index=256; if (peak != MagickFalse) extrema[x]=(short) index; else extrema[x]=(short) (-index); } } /* Determine the average tau. */ average_tau=0.0; for (i=0; i < number_nodes; i++) average_tau+=list[i]->tau; average_tau/=(double) number_nodes; /* Relinquish resources. */ FreeNodes(root); zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(average_tau); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S c a l e S p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleSpace() performs a scale-space filter on the 1D histogram. % % The format of the ScaleSpace method is: % % ScaleSpace(const ssize_t *histogram,const double tau, % double *scale_histogram) % % A description of each parameter follows. % % o histogram: Specifies an array of doubles representing the number % of pixels for each intensity of a particular color component. 
% */

static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    exponent,
    *kernel,
    normalize,
    total;

  register ssize_t
    bin,
    tap;

  /*
    Convolve the 256-bin histogram with a Gaussian of width tau.  The
    kernel is evaluated once per offset and shared by every output bin.
  */
  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  normalize=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  exponent=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  for (bin=0; bin <= 255; bin++)
    kernel[bin]=0.0;
  for (bin=0; bin <= 255; bin++)
  {
    kernel[bin]=exp((double) exponent*bin*bin);
    if (kernel[bin] < MagickEpsilon)
      break;  /* tail is negligible; remaining taps stay zero */
  }
  for (bin=0; bin <= 255; bin++)
  {
    total=0.0;
    for (tap=0; tap <= 255; tap++)
      total+=(double) histogram[tap]*kernel[MagickAbsoluteValue(bin-tap)];
    scale_histogram[bin]=normalize*total;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e g m e n t I m a g e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segment an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
%  The format of the SegmentImage method is:
%
%      MagickBooleanType SegmentImage(Image *image,
%        const ColorspaceType colorspace,const MagickBooleanType verbose,
%        const double cluster_threshold,const double smooth_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o colorspace: Indicate the colorspace.
%
%    o verbose:  Set to MagickTrue to print detailed information about the
%      identified classes.
%
%    o cluster_threshold:  This represents the minimum number of pixels
%      contained in a hexahedra before it can be considered valid (expressed
%      as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the
%      second derivative of the histogram.  As the value is increased, you
%      can expect a smoother second derivative.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* Unwind the channels allocated so far before throwing.  Note:
           ThrowBinaryException supplies its own statement, hence no
           trailing semicolon. */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram: work in the requested colorspace, then restore
    the original colorspace before returning.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   Z e r o C r o s s H i s t o g r a m                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as:  1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%      ZeroCrossHistogram(double *second_derivative,
%        const double smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%    o second_derivative: Specifies an array of doubles representing the
%      second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.  Note this clamps the
    caller's second_derivative array in place.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings: `parity' remembers the sign of the last nonzero
    sample, so a sign change is recorded at the first sample of the new
    sign.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=(-1);
        }
  }
}
ex_particle_OPENMP_seq.ref.c
#include <sys/time.h> #include <time.h> #include <stdio.h> static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif } /** * @file ex_particle_OPENMP_seq.c * @author Michael Trotter & Matt Goodrum * @brief Particle filter implementation in C/OpenMP */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <omp.h> #include <limits.h> #include <time.h> #include <string.h> #define PI 3.1415926535897932 /** @var M value for Linear Congruential Generator (LCG); use GCC's value */ long M = INT_MAX; /** @var A value for LCG */ int A = 1103515245; /** @var C value for LCG */ int C = 12345; /***************************** *GET_TIME *returns a long int representing the time *****************************/ long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; } // Returns the number of seconds elapsed between the two specified times float elapsed_time(long long start_time, long long end_time) { return (float) (end_time - start_time) / (1000 * 1000); } /** * Takes in a double and returns an integer that approximates to that double * @return if the mantissa < .5 => return value < input value; else return value > input value */ double roundDouble(double value){ int newValue = (int)(value); if(value - newValue < .5) return newValue; else return newValue++; } /** * Set values of the 3D array to a newValue if that value is equal to the testValue * @param testValue The value to be replaced * @param newValue The 
value to replace testValue with
 * @param array3D The image vector
 * @param dimX The x dimension of the frame
 * @param dimY The y dimension of the frame
 * @param dimZ The number of frames
 */
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
	int x, y, z;
	for(x = 0; x < *dimX; x++){
		for(y = 0; y < *dimY; y++){
			for(z = 0; z < *dimZ; z++){
				/* Row-major (x, y, z) indexing: x strides by dimY*dimZ, y by dimZ. */
				if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
					array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
			}
		}
	}
}
/**
 * Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
 * @see http://en.wikipedia.org/wiki/Linear_congruential_generator
 * @note This function is thread-safe
 * @param seed The seed array
 * @param index The specific index of the seed to be advanced
 * @return a uniformly distributed number [0, 1)
 *
 * NOTE(review): A*seed[index] + C can overflow int and leave seed[index]
 * negative; the fabs() below masks the sign rather than fixing the stream —
 * presumably intentional to match the reference implementation; confirm.
 */
double randu(int * seed, int index)
{
	int num = A*seed[index] + C;
	seed[index] = num % M;
	return fabs(seed[index]/((double) M));
}
/**
 * Generates a normally distributed random number using the Box-Muller transformation
 * @note This function is thread-safe
 * @param seed The seed array
 * @param index The specific index of the seed to be advanced
 * @return a double representing random number generated using the Box-Muller algorithm
 * @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
 */
double randn(int * seed, int index){
	/*Box-Muller algorithm: two uniform draws -> one standard-normal draw
	  (only the cosine branch of the pair is used). */
	double u = randu(seed, index);
	double v = randu(seed, index);
	double cosine = cos(2*PI*v);
	double rt = -2*log(u);
	return sqrt(rt)*cosine;
}
/**
 * Sets values of 3D matrix using randomly generated numbers from a normal distribution
 * @param array3D The video to be modified
 * @param dimX The x dimension of the frame
 * @param dimY The y dimension of the frame
 * @param dimZ The number of frames
 * @param seed The seed array
 */
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
	int x, y, z;
	for(x = 0; x < *dimX; x++){
		for(y = 0; y < *dimY; y++){
			for(z = 0; z < *dimZ; z++){
				/* Additive Gaussian noise, sigma 5; always advances seed[0],
				   so this loop must stay sequential. */
				array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
			}
		}
	}
}
/**
 * Fills a radius x radius matrix representing the disk
 * @param disk The pointer to the disk to be made
 * @param radius The radius of the disk to be made
 */
void strelDisk(int * disk, int radius)
{
	/* disk is a (2*radius-1) x (2*radius-1) grid; cells strictly inside the
	   circle centered at (radius-1, radius-1) are set to 1.  Cells outside
	   are left untouched, so callers must pass a zeroed buffer —
	   NOTE(review): particleFilter() mallocs this without memset; verify. */
	int diameter = radius*2 - 1;
	int x, y;
	for(x = 0; x < diameter; x++){
		for(y = 0; y < diameter; y++){
			double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
			if(distance < radius)
				disk[x*diameter + y] = 1;
		}
	}
}
/**
 * Dilates the provided video
 * @param matrix The video to be dilated
 * @param posX The x location of the pixel to be dilated
 * @param posY The y location of the pixel to be dilated
 * @param poxZ The z location of the pixel to be dilated
 * @param dimX The x dimension of the frame
 * @param dimY The y dimension of the frame
 * @param dimZ The number of frames
 * @param error The error radius
 */
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
	/* Clamp the dilation window to the frame bounds. */
	int startX = posX - error;
	while(startX < 0)
		startX++;
	int startY = posY - error;
	while(startY < 0)
		startY++;
	int endX = posX + error;
	while(endX > dimX)
		endX--;
	int endY = posY + error;
	while(endY > dimY)
		endY--;
	int x,y;
	for(x = startX; x < endX; x++){
		for(y = startY; y < endY; y++){
			double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
			if(distance < error)
				matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
		}
	}
}
/**
 * Dilates the target matrix using the radius as a guide
 * @param matrix The reference matrix
 * @param dimX The x dimension of the video
 * @param dimY The y dimension of the video
 * @param dimZ The z dimension of the video
 * @param error The error radius to be dilated
 * @param newMatrix The target matrix
 */
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
	/* For every set pixel of the reference matrix, stamp a dilated disk of
	   radius `error` into newMatrix (same frame z). */
	int x, y, z;
	for(z = 0; z < dimZ; z++){
		for(x = 0; x < dimX; x++){
			for(y = 0; y < dimY; y++){
				if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
					dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
				}
			}
		}
	}
}
/**
 * Fills a 2D array describing the offsets of the disk object
 * @param se The disk object
 * @param numOnes The number of ones in the disk
 * @param neighbors The array that will contain the offsets
 * @param radius The radius used for dilation
 */
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
	int x, y;
	int neighY = 0;
	int center = radius - 1;
	int diameter = radius*2 -1;
	for(x = 0; x < diameter; x++){
		for(y = 0; y < diameter; y++){
			if(se[x*diameter + y]){
				/* Offsets are stored (y, x) relative to the disk center:
				   even slot = column offset, odd slot = row offset. */
				neighbors[neighY*2] = (int)(y - center);
				neighbors[neighY*2 + 1] = (int)(x - center);
				neighY++;
			}
		}
	}
}
/**
 * The synthetic video sequence we will work with here is composed of a
 * single moving object, circular in shape (fixed radius)
 * The motion here is a linear motion
 * the foreground intensity and the backgrounf intensity is known
 * the image is corrupted with zero mean Gaussian noise
 * @param I The video itself
 * @param IszX The x dimension of the video
 * @param IszY The y dimension of the video
 * @param Nfr The number of frames of the video
 * @param seed The seed array used for number generation
 */
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
	int k;
	int max_size = IszX*IszY*Nfr;
	/*get object centers*/
	/* NOTE(review): x0 comes from IszY and y0 from IszX — looks swapped but
	   matches the (x,y) convention used by the move-point indexing below;
	   confirm before "fixing". */
	int x0 = (int)roundDouble(IszY/2.0);
	int y0 = (int)roundDouble(IszX/2.0);
	I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
	/*move point: +1 in x and -2 in y per frame; out-of-range positions wrap
	  to index 0 rather than being dropped. */
	int xk, yk, pos;
	for(k = 1; k < Nfr; k++){
		xk = abs(x0 + (k-1));
		yk = abs(y0 - 2*(k-1));
		pos = yk * IszY * Nfr + xk *Nfr + k;
		if(pos >= max_size)
			pos = 0;
		I[pos] = 1;
	}
	/*dilate matrix*/
	int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
	imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
	int x, y;
	for(x = 0; x < IszX; x++){
		for(y = 0; y < IszY; y++){
			for(k = 0; k < Nfr; k++){
				I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr +
y*Nfr + k];
			}
		}
	}
	free(newMatrix);
	/*define background, add noise: foreground intensity 228, background 100*/
	setIf(0, 100, I, &IszX, &IszY, &Nfr);
	setIf(1, 228, I, &IszX, &IszY, &Nfr);
	/*add noise*/
	addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
 * Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100
 * @param I The 3D matrix
 * @param ind The current ind array
 * @param numOnes The length of ind array
 * @return A double representing the sum
 *
 * NOTE(review): the code divides by 50.0, not the 100 stated above — the
 * divisor in the doc appears stale; confirm against the MATLAB reference.
 */
double calcLikelihoodSum(int * I, int * ind, int numOnes){
	double likelihoodSum = 0.0;
	int y;
	for(y = 0; y < numOnes; y++)
		likelihoodSum += (pow((I[ind[y]] - 100),2) - pow((I[ind[y]]-228),2))/50.0;
	return likelihoodSum;
}
/**
 * Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
 * @note This function uses sequential search
 * @param CDF The CDF
 * @param lengthCDF The length of CDF
 * @param value The value to be found
 * @return The index of value in the CDF; if value is never found, returns the last index
 */
int findIndex(double * CDF, int lengthCDF, double value){
	int index = -1;
	int x;
	for(x = 0; x < lengthCDF; x++){
		if(CDF[x] >= value){
			index = x;
			break;
		}
	}
	if(index == -1){
		return lengthCDF-1;
	}
	return index;
}
/**
 * Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
 * @note This function uses binary search before switching to sequential search
 * @param CDF The CDF
 * @param beginIndex The index to start searching from
 * @param endIndex The index to stop searching
 * @param value The value to find
 * @return The index of value in the CDF; if value is never found, returns the last index
 * @warning Use at your own risk; not fully tested
 */
int findIndexBin(double * CDF, int beginIndex, int endIndex, double value){
	if(endIndex < beginIndex)
		return -1;
	int middleIndex = beginIndex + ((endIndex - beginIndex)/2);
	/*check the value*/
	if(CDF[middleIndex] >= value)
	{
		/*check that it's good*/
		if(middleIndex == 0)
			return
middleIndex;
		else if(CDF[middleIndex-1] < value)
			return middleIndex;
		else if(CDF[middleIndex-1] == value)
		{
			/* Walk left to the FIRST index holding this exact value. */
			while(middleIndex > 0 && CDF[middleIndex-1] == value)
				middleIndex--;
			return middleIndex;
		}
	}
	/* NOTE(review): the recursion bounds below look inverted (upper half is
	   searched with middleIndex+1 as the END, lower half with middleIndex-1
	   as the BEGIN).  The function is flagged "not fully tested" above and
	   is unused — particleFilter() calls findIndex() — so left as-is. */
	if(CDF[middleIndex] > value)
		return findIndexBin(CDF, beginIndex, middleIndex+1, value);
	return findIndexBin(CDF, middleIndex-1, endIndex, value);
}
/**
 * The implementation of the particle filter using OpenMP for many frames
 * @see http://openmp.org/wp/
 * @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
 * @param I The video to be run
 * @param IszX The x dimension of the video
 * @param IszY The y dimension of the video
 * @param Nfr The number of frames
 * @param seed The seed array used for random number generation
 * @param Nparticles The number of particles to be used
 *
 * The pragmaNNN_omp_parallel blocks below are timing instrumentation wrapped
 * around each original `#pragma omp parallel for`; each prints the wall time
 * of one parallel region in nanoseconds.
 */
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
	int max_size = IszX*IszY*Nfr;
	long long start = get_time();
	//original particle centroid
	double xe = roundDouble(IszY/2.0);
	double ye = roundDouble(IszX/2.0);
	//expected object locations, compared to center
	int radius = 5;
	int diameter = radius*2 - 1;
	int * disk = (int *)malloc(diameter*diameter*sizeof(int));
	strelDisk(disk, radius);
	int countOnes = 0;
	int x, y;
	for(x = 0; x < diameter; x++){
		for(y = 0; y < diameter; y++){
			if(disk[x*diameter + y] == 1)
				countOnes++;
		}
	}
	double * objxy = (double *)malloc(countOnes*2*sizeof(double));
	getneighbors(disk, countOnes, objxy, radius);
	long long get_neighbors = get_time();
	printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
	//initial weights are all equal (1/Nparticles)
	double * weights = (double *)malloc(sizeof(double)*Nparticles);
	{
		const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(weights, Nparticles) private(x)
		for(x = 0; x < Nparticles; x++){
			weights[x] = 1/((double)(Nparticles));
		}
		;
		const unsigned long long parallel_for_end = current_time_ns();
		printf("pragma373_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start);
	}
	long long get_weights = get_time();
	printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
	//initial likelihood to 0.0
	double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
	double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
	double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
	double * xj = (double *)malloc(sizeof(double)*Nparticles);
	double * yj = (double *)malloc(sizeof(double)*Nparticles);
	double * CDF = (double *)malloc(sizeof(double)*Nparticles);
	double * u = (double *)malloc(sizeof(double)*Nparticles);
	int * ind = (int*)malloc(sizeof(int)*countOnes*Nparticles);
	{
		const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(arrayX, arrayY, xe, ye) private(x)
		for(x = 0; x < Nparticles; x++){
			arrayX[x] = xe;
			arrayY[x] = ye;
		}
		;
		const unsigned long long parallel_for_end = current_time_ns();
		printf("pragma388_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start);
	}
	int k;
	printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, get_time()));
	int indX, indY;
	for(k = 1; k < Nfr; k++){
		long long set_arrays = get_time();
		//apply motion model
		//draws sample from motion model (random walk). The only prior information
		//is that the object moves 2x as fast as in the y direction
		{
			const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(arrayX, arrayY, Nparticles, seed) private(x)
			for(x = 0; x < Nparticles; x++){
				/* seed index x => each particle has its own LCG stream. */
				arrayX[x] += 1 + 5*randn(seed, x);
				arrayY[x] += -2 + 2*randn(seed, x);
			}
			;
			const unsigned long long parallel_for_end = current_time_ns();
			printf("pragma402_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start);
		}
		long long error = get_time();
		printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
		//particle filter likelihood
		{
			const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(likelihood, I, arrayX, arrayY, objxy, ind) private(x, y, indX, indY)
			for(x = 0; x < Nparticles; x++){
				//compute the likelihood: remember our assumption is that you know
				// foreground and the background image intensity distribution.
				// Notice that we consider here a likelihood ratio, instead of
				// p(z|x). It is possible in this case. why? a hometask for you.
				//calc ind
				for(y = 0; y < countOnes; y++){
					indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
					indY = roundDouble(arrayY[x]) + objxy[y*2];
					ind[x*countOnes + y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
					if(ind[x*countOnes + y] >= max_size)
						ind[x*countOnes + y] = 0;
				}
				/* Same ratio as calcLikelihoodSum(), averaged over the disk. */
				likelihood[x] = 0;
				for(y = 0; y < countOnes; y++)
					likelihood[x] += (pow((I[ind[x*countOnes + y]] - 100),2) - pow((I[ind[x*countOnes + y]]-228),2))/50.0;
				likelihood[x] = likelihood[x]/((double) countOnes);
			}
			;
			const unsigned long long parallel_for_end = current_time_ns();
			printf("pragma410_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start);
		}
		long long likelihood_time = get_time();
		printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
		// update & normalize weights
		// using equation (63) of Arulampalam Tutorial
		{
			const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(Nparticles, weights, likelihood) private(x)
			for(x = 0; x < Nparticles; x++){
				weights[x] = weights[x] * exp(likelihood[x]);
			}
			;
			const unsigned long long parallel_for_end = current_time_ns();
			printf("pragma433_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start);
		}
		long long exponential = get_time();
		printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
		double sumWeights = 0;
		{
			const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for private(x) reduction(+:sumWeights)
			for(x = 0; x < Nparticles; x++){
				sumWeights += weights[x];
			}
			;
			const unsigned long long parallel_for_end = current_time_ns();
			printf("pragma440_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start);
		}
		long long sum_time = get_time();
		printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
		{
			const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(sumWeights, weights) private(x)
			for(x = 0; x < Nparticles; x++){
				weights[x] = weights[x]/sumWeights;
			}
			;
			const unsigned long long parallel_for_end = current_time_ns();
			printf("pragma446_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start);
		}
		long long normalize = get_time();
		printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
		xe = 0;
		ye = 0;
		// estimate the object location by expected values
		{
			const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for private(x) reduction(+:xe, ye)
			for(x = 0; x < Nparticles; x++){
				xe += arrayX[x] * weights[x];
				ye += arrayY[x] * weights[x];
			}
			;
			const unsigned long long parallel_for_end = current_time_ns();
			printf("pragma455_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start);
		}
		long long move_time = get_time();
		printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
		printf("XE: %lf\n", xe);
		printf("YE: %lf\n", ye);
		/* Distance of the estimate from the (noise-free) true start center. */
		double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
		printf("%lf\n", distance);
		//display(hold off for now)
		//pause(hold off for now)
		//resampling: build the CDF of the normalized weights (sequential scan)
		CDF[0] = weights[0];
		for(x = 1; x < Nparticles; x++){
			CDF[x] = weights[x] + CDF[x-1];
		}
		long long cum_sum = get_time();
		printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
		/* Systematic resampling: one uniform offset u1, then evenly spaced u. */
		double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
		{
			const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(u, u1, Nparticles) private(x)
			for(x = 0; x < Nparticles; x++){
				u[x] = u1 + x/((double)(Nparticles));
			}
			;
			const unsigned long long parallel_for_end = current_time_ns();
			printf("pragma480_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start);
		}
		long long u_time = get_time();
		printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
		int j, i;
		{
			const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for shared(CDF, Nparticles, xj, yj, u, arrayX, arrayY) private(i, j)
			for(j = 0; j < Nparticles; j++){
				i =
findIndex(CDF, Nparticles, u[j]);
				if(i == -1)
					i = Nparticles-1;
				xj[j] = arrayX[i];
				yj[j] = arrayY[i];
			}
			;
			const unsigned long long parallel_for_end = current_time_ns();
			printf("pragma488_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start);
		}
		long long xyj_time = get_time();
		printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
		/* Adopt the resampled particle set and reset to uniform weights.
		   (The parallel-for here was deliberately disabled, see below.) */
		//#pragma omp parallel for shared(weights, Nparticles) private(x)
		for(x = 0; x < Nparticles; x++){
			//reassign arrayX and arrayY
			arrayX[x] = xj[x];
			arrayY[x] = yj[x];
			weights[x] = 1/((double)(Nparticles));
		}
		long long reset = get_time();
		printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
	}
	/* Release all per-call scratch buffers. */
	free(disk);
	free(objxy);
	free(weights);
	free(likelihood);
	free(xj);
	free(yj);
	free(arrayX);
	free(arrayY);
	free(CDF);
	free(u);
	free(ind);
}
/**
 * Program entry point: parses -x/-y/-z/-np arguments, synthesizes the video,
 * then runs (and times) the particle filter.
 */
int main(int argc, char * argv[]){
	char* usage = "openmp.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
	//check number of arguments
	if(argc != 9)
	{
		printf("%s\n", usage);
		return 0;
	}
	//check args deliminators
	if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
		printf( "%s\n",usage );
		return 0;
	}
	int IszX, IszY, Nfr, Nparticles;
	//converting a string to a integer
	if( sscanf( argv[2], "%d", &IszX ) == EOF ) {
		printf("ERROR: dimX input is incorrect");
		return 0;
	}
	if( IszX <= 0 ) {
		printf("dimX must be > 0\n");
		return 0;
	}
	//converting a string to a integer
	if( sscanf( argv[4], "%d", &IszY ) == EOF ) {
		printf("ERROR: dimY input is incorrect");
		return 0;
	}
	if( IszY <= 0 ) {
		printf("dimY must be > 0\n");
		return 0;
	}
	//converting a string to a integer
	if( sscanf( argv[6], "%d", &Nfr ) == EOF ) {
		printf("ERROR: Number of frames input is incorrect");
		return 0;
	}
	if( Nfr <= 0 ) {
		printf("number of frames must be > 0\n");
		return 0;
	}
	//converting a string to a integer
	if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) {
		printf("ERROR: Number of particles input is incorrect");
		return 0;
	}
	if( Nparticles <= 0 ) {
		printf("Number of particles must be > 0\n");
		return 0;
	}
	//establish seed
	/* NOTE(review): seed[0] = time(0)*0 = 0 always, so particle 0's LCG
	   stream is fixed; presumably acceptable for a benchmark — confirm. */
	int * seed = (int *)malloc(sizeof(int)*Nparticles);
	int i;
	for(i = 0; i < Nparticles; i++)
		seed[i] = time(0)*i;
	//malloc matrix
	int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
	long long start = get_time();
	//call video sequence
	videoSequence(I, IszX, IszY, Nfr, seed);
	long long endVideoSequence = get_time();
	printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
	//call particle filter
	const unsigned long long full_program_start = current_time_ns();
	particleFilter(I, IszX, IszY, Nfr, seed, Nparticles) ;
	const unsigned long long full_program_end = current_time_ns();
	printf("full_program %llu ns\n", full_program_end - full_program_start);
	;
	long long endParticleFilter = get_time();
	printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
	printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
	free(seed);
	free(I);
	return 0;
}
compiler_cgen.c
/* Generated by Nim Compiler v0.15.0 */ /* (c) 2016 Andreas Rumpf */ /* The generated code is subject to the original license. */ #define NIM_INTBITS 32 #include "nimbase.h" #include <string.h> typedef struct Tcgen530027 Tcgen530027; typedef struct TNimType TNimType; typedef struct TNimNode TNimNode; typedef struct Ropeobj179006 Ropeobj179006; typedef struct NimStringDesc NimStringDesc; typedef struct TGenericSeq TGenericSeq; typedef struct Cell47305 Cell47305; typedef struct Cellseq47321 Cellseq47321; typedef struct Gcheap49818 Gcheap49818; typedef struct Gcstack49816 Gcstack49816; typedef struct Memregion29485 Memregion29485; typedef struct Smallchunk29439 Smallchunk29439; typedef struct Llchunk29479 Llchunk29479; typedef struct Bigchunk29441 Bigchunk29441; typedef struct Intset29414 Intset29414; typedef struct Trunk29410 Trunk29410; typedef struct Avlnode29483 Avlnode29483; typedef struct Gcstat49814 Gcstat49814; typedef struct Cellset47317 Cellset47317; typedef struct Pagedesc47313 Pagedesc47313; typedef struct Ttypeseq293836 Ttypeseq293836; typedef struct Ttype293840 Ttype293840; typedef struct Intset269030 Intset269030; typedef struct Trunk269026 Trunk269026; typedef struct Trunkseq269028 Trunkseq269028; typedef struct Tpasscontext342002 Tpasscontext342002; typedef struct Tsym293834 Tsym293834; typedef struct Tidobj200004 Tidobj200004; typedef struct TNimObject TNimObject; typedef struct TY293929 TY293929; typedef struct Tstrtable293806 Tstrtable293806; typedef struct Tsymseq293804 Tsymseq293804; typedef struct Tident200010 Tident200010; typedef struct Tlineinfo192336 Tlineinfo192336; typedef struct Tnode293802 Tnode293802; typedef struct Tloc293816 Tloc293816; typedef struct Tlib293820 Tlib293820; typedef struct TY530153 TY530153; typedef struct TY204018 TY204018; typedef struct Tidtable293850 Tidtable293850; typedef struct Tidpairseq293848 Tidpairseq293848; typedef struct Tlinkedlist147013 Tlinkedlist147013; typedef struct Tlistentry147007 Tlistentry147007; 
typedef struct Tcproc530021 Tcproc530021; typedef struct Tnodetable293862 Tnodetable293862; typedef struct Tnodepairseq293860 Tnodepairseq293860; typedef struct Debuginfo204009 Debuginfo204009; typedef struct TY204021 TY204021; typedef struct TY204023 TY204023; typedef struct Tnodeseq293796 Tnodeseq293796; typedef struct TY192350 TY192350; typedef struct TY530095 TY530095; typedef struct Trodreader333021 Trodreader333021; typedef struct TY293960 TY293960; typedef struct TY204017 TY204017; typedef struct Enumdesc204007 Enumdesc204007; typedef struct Tinfocc274008 Tinfocc274008; typedef struct Tblock530019 Tblock530019; typedef struct Ttraversalclosure538019 Ttraversalclosure538019; typedef struct TY135002 TY135002; typedef struct Tbitset340004 Tbitset340004; typedef struct TY192612 TY192612; typedef struct Tfileinfo192334 Tfileinfo192334; typedef struct Tinfoos177035 Tinfoos177035; typedef struct Tinfocpu177476 Tinfocpu177476; typedef struct Tstrentry147009 Tstrentry147009; typedef struct TY128506 TY128506; typedef struct Basechunk29437 Basechunk29437; typedef struct Freecell29429 Freecell29429; typedef struct Tinstantiation293824 Tinstantiation293824; typedef struct Tidpair293846 Tidpair293846; typedef struct Tnodepair293858 Tnodepair293858; typedef struct Filenamemapping204005 Filenamemapping204005; typedef struct TY333033 TY333033; typedef struct Tindex333019 Tindex333019; typedef struct Tiitable300142 Tiitable300142; typedef struct Tiipairseq300140 Tiipairseq300140; typedef struct Table333054 Table333054; typedef struct Keyvaluepairseq333057 Keyvaluepairseq333057; typedef struct Memfile331202 Memfile331202; typedef struct TY293961 TY293961; typedef struct Tiipair300138 Tiipair300138; typedef struct Keyvaluepair333060 Keyvaluepair333060; typedef NU8 Tnimkind3403; typedef NU8 Tnimtypeflag3409Set; typedef N_NIMCALL_PTR(void, TY3489) (void* p0, NI op0); typedef N_NIMCALL_PTR(void*, TY3494) (void* p0); struct TNimType { NI size; Tnimkind3403 kind; Tnimtypeflag3409Set 
flags; TNimType* base; TNimNode* node; void* finalizer; TY3489 marker; TY3494 deepcopy; }; typedef NU8 Tnimnodekind3405; struct TNimNode { Tnimnodekind3405 kind; NI offset; TNimType* typ; NCSTRING name; NI len; TNimNode** sons; }; typedef N_NIMCALL_PTR(void, Globalmarkerproc55802) (void); struct TGenericSeq { NI len; NI reserved; }; struct NimStringDesc { TGenericSeq Sup; NIM_CHAR data[SEQ_DECL_SIZE]; }; struct Cell47305 { NI refcount; TNimType* typ; }; struct Cellseq47321 { NI len; NI cap; Cell47305** d; }; typedef Smallchunk29439* TY29500[512]; typedef Trunk29410* Trunkbuckets29412[256]; struct Intset29414 { Trunkbuckets29412 data; }; struct Memregion29485 { NI minlargeobj; NI maxlargeobj; TY29500 freesmallchunks; Llchunk29479* llmem; NI currmem; NI maxmem; NI freemem; NI lastsize; Bigchunk29441* freechunkslist; Intset29414 chunkstarts; Avlnode29483* root; Avlnode29483* deleted; Avlnode29483* last; Avlnode29483* freeavlnodes; NIM_BOOL locked; }; struct Gcstat49814 { NI stackscans; NI cyclecollections; NI maxthreshold; NI maxstacksize; NI maxstackcells; NI cycletablesize; NI64 maxpause; }; struct Cellset47317 { NI counter; NI max; Pagedesc47313* head; Pagedesc47313** data; }; struct Gcheap49818 { Gcstack49816* stack; void* stackbottom; NI cyclethreshold; Cellseq47321 zct; Cellseq47321 decstack; Cellseq47321 tempstack; NI recgclock; Memregion29485 region; Gcstat49814 stat; Cellset47317 marked; Cellseq47321 additionalroots; }; struct Intset269030 { NI counter; NI max; Trunk269026* head; Trunkseq269028* data; }; struct TNimObject { TNimType* m_type; }; struct Tidobj200004 { TNimObject Sup; NI id; }; typedef NU8 Tsymkind293435; struct Tstrtable293806 { NI counter; Tsymseq293804* data; }; typedef NU16 Tmagic293524; struct Tlineinfo192336 { NI16 line; NI16 col; NI32 fileindex; }; typedef NU32 Tsymflag293184Set; typedef NU32 Toption170009Set; typedef NU8 Tlockind293808; typedef NU8 Tstorageloc293812; typedef NU16 Tlocflag293810Set; struct Tloc293816 { Tlockind293808 k; 
Tstorageloc293812 s; Tlocflag293810Set flags; Ttype293840* t; Ropeobj179006* r; }; struct Tsym293834 { Tidobj200004 Sup; Tsymkind293435 kind; union{ struct {Ttypeseq293836* typeinstcache; } S1; struct {TY293929* procinstcache; Tsym293834* gcunsafetyreason; } S2; struct {TY293929* usedgenerics; Tstrtable293806 tab; } S3; struct {Tsym293834* guard; NI bitsize; } S4; } kindU; Tmagic293524 magic; Ttype293840* typ; Tident200010* name; Tlineinfo192336 info; Tsym293834* owner; Tsymflag293184Set flags; Tnode293802* ast; Toption170009Set options; NI position; NI offset; Tloc293816 loc; Tlib293820* annex; Tnode293802* constraint; }; struct TY204018 { NimStringDesc* Field0; NI Field1; }; struct Tpasscontext342002 { TNimObject Sup; NIM_BOOL fromcache; }; typedef Ropeobj179006* Tcfilesections530009[18]; typedef NU8 Codegenflag530025Set; struct Tidtable293850 { NI counter; Tidpairseq293848* data; }; struct Tlinkedlist147013 { Tlistentry147007* head; Tlistentry147007* tail; NI counter; }; struct Tnodetable293862 { NI counter; Tnodepairseq293860* data; }; typedef Ropeobj179006* TY530136[10]; struct Tcgen530027 { Tpasscontext342002 Sup; Tcfilesections530009 s; Codegenflag530025Set flags; Tsym293834* module; NimStringDesc* filename; NimStringDesc* cfilename; Ropeobj179006* tmpbase; Tidtable293850 typecache; Tidtable293850 forwtypecache; Intset269030 declaredthings; Intset269030 declaredprotos; Tlinkedlist147013 headerfiles; Intset269030 typeinfomarker; Tcproc530021* initproc; Tcproc530021* postinitproc; Tcproc530021* preinitproc; Ttypeseq293836* typestack; Tnodetable293862 datacache; Tsymseq293804* forwardedprocs; NI typenodes; NI nimtypes; Ropeobj179006* typenodesname; Ropeobj179006* nimtypesname; NI labels; TY530136 extensionloaders; Ropeobj179006* injectstmt; }; struct Debuginfo204009 { NI version; TY204021* files; TY204023* enums; NIM_BOOL conflicts; }; struct Tident200010 { Tidobj200004 Sup; NimStringDesc* s; Tident200010* next; NI h; }; struct Tcproc530021 { Tsym293834* prc; 
NIM_BOOL beforeretneeded; NIM_BOOL threadvaraccessed; Tlineinfo192336 lastlineinfo; Tnodeseq293796* nestedtrystmts; NI inexceptblock; TY192350* finallysafepoints; NI labels; TY530095* blocks; NI breakidx; Toption170009Set options; NI maxframelen; Tcgen530027* module; NI withinloop; NI splitdecls; NI gcframeid; Ropeobj179006* gcframetype; }; typedef NU8 Tsymflag293184; typedef NU8 Codegenflag530025; typedef NU8 Toption170009; typedef NU64 Tglobaloption170013Set; typedef NU8 Tglobaloption170013; typedef NU8 Tcommands170076; typedef NU16 Tnodeflag293427Set; typedef NU8 Tnodekind293020; struct Tnode293802 { Ttype293840* typ; Tlineinfo192336 info; Tnodeflag293427Set flags; Tnodekind293020 kind; union{ struct {NI64 intval; } S1; struct {NF floatval; } S2; struct {NimStringDesc* strval; } S3; struct {Tsym293834* sym; } S4; struct {Tident200010* ident; } S5; struct {Tnodeseq293796* sons; } S6; } kindU; NimStringDesc* comment; }; typedef Ropeobj179006* TY534289[1]; typedef NU8 Tlocflag293810; struct Tlistentry147007 { TNimObject Sup; Tlistentry147007* prev; Tlistentry147007* next; }; typedef NU8 Tlibkind293818; struct Tlib293820 { Tlistentry147007 Sup; Tlibkind293818 kind; NIM_BOOL generated; NIM_BOOL isoverriden; Ropeobj179006* name; Tnode293802* path; }; typedef NU8 Tcfilesection530005; typedef NU8 Ttypekind293244; typedef NU8 Tcallingconvention293002; typedef NU32 Ttypeflag293431Set; struct Ttype293840 { Tidobj200004 Sup; Ttypekind293244 kind; Tcallingconvention293002 callconv; Ttypeflag293431Set flags; Ttypeseq293836* sons; Tnode293802* n; Tsym293834* owner; Tsym293834* sym; Tsym293834* destructor; Tsym293834* deepcopy; Tsym293834* assignment; TY293960* methods; NI64 size; NI16 align; NI16 locklevel; Tloc293816 loc; }; typedef Ropeobj179006* TY533811[2]; typedef NU8 Tctypekind530007; typedef NU64 Ttypekind293244Set; typedef NU8 Ttypeflag293431; typedef NimStringDesc* TY534943[14]; typedef NU8 Tprefereddesc321011; typedef Ropeobj179006* TY179507[1]; struct Enumdesc204007 
{ NI size; NU32 owner; NI id; NimStringDesc* name; TY204017* values; }; typedef Ropeobj179006* TY536235[4]; typedef NimStringDesc* TY293016[10]; typedef Ropeobj179006* TY536238[3]; struct Ropeobj179006 { TNimObject Sup; Ropeobj179006* left; Ropeobj179006* right; NI length; NimStringDesc* data; }; typedef NU8 Tinfoccprop274004Set; struct Tinfocc274008 { NimStringDesc* Field0; NimStringDesc* Field1; NimStringDesc* Field2; NimStringDesc* Field3; NimStringDesc* Field4; NimStringDesc* Field5; NimStringDesc* Field6; NimStringDesc* Field7; NimStringDesc* Field8; NimStringDesc* Field9; NimStringDesc* Field10; NimStringDesc* Field11; NimStringDesc* Field12; NimStringDesc* Field13; NimStringDesc* Field14; NimStringDesc* Field15; NimStringDesc* Field16; NimStringDesc* Field17; NimStringDesc* Field18; NimStringDesc* Field19; Tinfoccprop274004Set Field20; }; typedef Tinfocc274008 TY274427[13]; typedef NU8 Tsystemcc274002; typedef NU8 Tnodeflag293427; typedef NU8 Tcprocsection530011; typedef Ropeobj179006* Tcprocsections530013[3]; struct Tblock530019 { NI id; Ropeobj179006* label; Tcprocsections530013 sections; NIM_BOOL isloop; NI16 nestedtrystmts; NI16 nestedexceptstmts; NI16 framelen; }; typedef NU8 Tgcmode170080; typedef NU8 Ttypeinforeason538016; struct Ttraversalclosure538019 { Tcproc530021* p; NimStringDesc* visitorfrmt; }; typedef NU8 Ttypefieldresult321145; typedef NU8 Tinfoccprop274004; typedef Ropeobj179006* TY537847[6]; typedef Ropeobj179006* TY537401[7]; typedef Ropeobj179006* TY537475[5]; typedef NU16 Tmsgkind192002; typedef NU8 Tassignmentflag539302Set; typedef NU8 Tassignmentflag539302; typedef NimStringDesc* TY553655[19]; typedef NimStringDesc* TY552642[3]; typedef NimStringDesc* TY557764[4]; typedef NimStringDesc* TY552828[42]; typedef NimStringDesc* TY552281[7]; typedef NU8 Trenderflag312004Set; typedef NimStringDesc* TY558052[2]; typedef NU8 Tclosuretypekind536679; typedef NimStringDesc* TY557428[6]; typedef NU8 Tanalysisresult474003; typedef NU8 
char136Set[32]; typedef NU8 Tdistinctcompare325427; typedef NU8 Ttypecmpflag325429Set; typedef NU16 Tspecialword276003; typedef NU8 Tsystemos177004; struct Tfileinfo192334 { NimStringDesc* fullpath; NimStringDesc* projpath; NimStringDesc* shortname; Ropeobj179006* quotedname; Ropeobj179006* quotedfullname; TY192350* lines; NimStringDesc* dirtyfile; }; typedef NU8 Tinfoosprop177031Set; struct Tinfoos177035 { NimStringDesc* Field0; NimStringDesc* Field1; NimStringDesc* Field2; NimStringDesc* Field3; NimStringDesc* Field4; NimStringDesc* Field5; NimStringDesc* Field6; NimStringDesc* Field7; NimStringDesc* Field8; NimStringDesc* Field9; NimStringDesc* Field10; NimStringDesc* Field11; Tinfoosprop177031Set Field12; }; typedef Tinfoos177035 TY177082[24]; typedef NU8 Tendian177474; struct Tinfocpu177476 { NimStringDesc* Field0; NI Field1; Tendian177474 Field2; NI Field3; NI Field4; }; typedef Tinfocpu177476 TY177510[19]; typedef NU8 Tsystemcpu177452; struct Tstrentry147009 { Tlistentry147007 Sup; NimStringDesc* data; }; struct TY128506 { NimStringDesc* Field0; NimStringDesc* Field1; NimStringDesc* Field2; }; struct Gcstack49816 { Gcstack49816* prev; Gcstack49816* next; void* starts; void* pos; NI maxstacksize; }; struct Basechunk29437 { NI prevsize; NI size; NIM_BOOL used; }; struct Smallchunk29439 { Basechunk29437 Sup; Smallchunk29439* next; Smallchunk29439* prev; Freecell29429* freelist; NI free; NI acc; NF data; }; struct Llchunk29479 { NI size; NI acc; Llchunk29479* next; }; struct Bigchunk29441 { Basechunk29437 Sup; Bigchunk29441* next; Bigchunk29441* prev; NI align; NF data; }; typedef NI TY29418[16]; struct Trunk29410 { Trunk29410* next; NI key; TY29418 bits; }; typedef Avlnode29483* TY29490[2]; struct Avlnode29483 { TY29490 link; NI key; NI upperbound; NI level; }; struct Pagedesc47313 { Pagedesc47313* next; NI key; TY29418 bits; }; struct Trunk269026 { Trunk269026* next; NI key; TY29418 bits; }; struct Tidpair293846 { Tidobj200004* key; TNimObject* val; }; struct 
Tnodepair293858 { NI h; Tnode293802* key; NI val; }; struct Filenamemapping204005 { NimStringDesc* package; NimStringDesc* file; NU32 mangled; }; typedef NU8 Treasonforrecompile333002; struct Tiitable300142 { NI counter; Tiipairseq300140* data; }; struct Tindex333019 { NI lastidxkey; NI lastidxval; Tiitable300142 tab; NimStringDesc* r; NI offset; }; struct Table333054 { Keyvaluepairseq333057* data; NI counter; }; struct Memfile331202 { void* mem; NI size; int handle; }; struct Trodreader333021 { TNimObject Sup; NI pos; NCSTRING s; Toption170009Set options; Treasonforrecompile333002 reason; TY333033* moddeps; TY333033* files; NI dataidx; NI convertersidx; NI initidx; NI interfidx; NI compilerprocsidx; NI methodsidx; NimStringDesc* filename; Tindex333019 index; Tindex333019 imports; NI readerindex; NI line; NI moduleid; Table333054 syms; Memfile331202 memfile; Tsymseq293804* methods; NimStringDesc* origfile; NIM_BOOL inviewmode; }; struct TY293961 { NI Field0; Tsym293834* Field1; }; struct Freecell29429 { Freecell29429* next; NI zerofield; }; struct Tinstantiation293824 { Tsym293834* sym; Ttypeseq293836* concretetypes; NI compilesid; }; struct Tiipair300138 { NI key; NI val; }; struct Keyvaluepair333060 { NI Field0; NI Field1; Tsym293834* Field2; }; struct Ttypeseq293836 { TGenericSeq Sup; Ttype293840* data[SEQ_DECL_SIZE]; }; struct TY530153 { TGenericSeq Sup; Tcgen530027* data[SEQ_DECL_SIZE]; }; struct Tsymseq293804 { TGenericSeq Sup; Tsym293834* data[SEQ_DECL_SIZE]; }; struct TY204017 { TGenericSeq Sup; TY204018 data[SEQ_DECL_SIZE]; }; struct TY135002 { TGenericSeq Sup; NimStringDesc* data[SEQ_DECL_SIZE]; }; struct Tbitset340004 { TGenericSeq Sup; NI8 data[SEQ_DECL_SIZE]; }; struct TY530095 { TGenericSeq Sup; Tblock530019 data[SEQ_DECL_SIZE]; }; struct TY192350 { TGenericSeq Sup; Ropeobj179006* data[SEQ_DECL_SIZE]; }; struct Tnodeseq293796 { TGenericSeq Sup; Tnode293802* data[SEQ_DECL_SIZE]; }; struct TY192612 { TGenericSeq Sup; Tfileinfo192334 data[SEQ_DECL_SIZE]; 
}; struct Trunkseq269028 { TGenericSeq Sup; Trunk269026* data[SEQ_DECL_SIZE]; }; struct TY293929 { TGenericSeq Sup; Tinstantiation293824* data[SEQ_DECL_SIZE]; }; struct Tidpairseq293848 { TGenericSeq Sup; Tidpair293846 data[SEQ_DECL_SIZE]; }; struct Tnodepairseq293860 { TGenericSeq Sup; Tnodepair293858 data[SEQ_DECL_SIZE]; }; struct TY204021 { TGenericSeq Sup; Filenamemapping204005 data[SEQ_DECL_SIZE]; }; struct TY204023 { TGenericSeq Sup; Enumdesc204007 data[SEQ_DECL_SIZE]; }; struct TY293960 { TGenericSeq Sup; TY293961 data[SEQ_DECL_SIZE]; }; struct TY333033 { TGenericSeq Sup; NI32 data[SEQ_DECL_SIZE]; }; struct Tiipairseq300140 { TGenericSeq Sup; Tiipair300138 data[SEQ_DECL_SIZE]; }; struct Keyvaluepairseq333057 { TGenericSeq Sup; Keyvaluepair333060 data[SEQ_DECL_SIZE]; }; N_NIMCALL(void, nimGCvisit)(void* d0, NI op0); N_NIMCALL(void, T839829468_2)(void); N_NIMCALL(void, nimRegisterGlobalMarker)(Globalmarkerproc55802 markerproc0); N_NIMCALL(void, T839829468_3)(void); N_NIMCALL(Ropeobj179006*, rope_179277_2381377266)(NimStringDesc* s0); static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0); static N_INLINE(Cell47305*, usrtocell_51440_1689653243)(void* usr0); static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47305* c0); N_NOINLINE(void, addzct_51417_1689653243)(Cellseq47321* s0, Cell47305* c0); N_NIMCALL(void, T839829468_5)(void); N_NIMCALL(void, T839829468_6)(void); static N_INLINE(void, nimGCunrefNoCycle)(void* p0); N_NIMCALL(void*, newSeqRC1)(TNimType* typ0, NI len0); N_NIMCALL(void, T839829468_7)(void); N_NIMCALL(void, initintset_269885_2627731572)(Intset269030* Result); N_NOINLINE(void, chckNil)(void* p0); N_NIMCALL(void, genericReset)(void* dest0, TNimType* mt0); N_NIMCALL(void, T839829468_8)(void); N_NIMCALL(Tcgen530027*, newmodule_564045_839829468)(Tsym293834* module0); N_NIMCALL(Tcgen530027*, getcgenmodule_533226_839829468)(Tsym293834* s0); N_NIMCALL(void, internalerror_197113_155036129)(NimStringDesc* errmsg0); N_NIMCALL(NimStringDesc*, 
HEX24_197185_1689653243)(TY204018 x0); N_NIMCALL(Tcgen530027*, rawnewmodule_564038_839829468)(Tsym293834* module0); N_NIMCALL(Tcgen530027*, rawnewmodule_563663_839829468)(Tsym293834* module0, NimStringDesc* filename0); N_NIMCALL(void*, newObj)(TNimType* typ0, NI size0); static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0); static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0); N_NIMCALL(NimStringDesc*, HEX24_8401_1689653243)(NU64 x0); N_NIMCALL(NU32, hashowner_533977_839829468)(Tsym293834* s0); N_NIMCALL(NU32, register_204121_1926258066)(Debuginfo204009* self0, NimStringDesc* package0, NimStringDesc* file0); N_NIMCALL(NimStringDesc*, rawNewString)(NI space0); N_NIMCALL(void, initlinkedlist_147031_3771138726)(Tlinkedlist147013* list0); N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src0); N_NIMCALL(void, initidtable_297019_850551059)(Tidtable293850* x0); N_NIMCALL(Tcproc530021*, newproc_530206_3723162438)(Tsym293834* prc0, Tcgen530027* module0); static N_INLINE(void, asgnRef)(void** dest0, void* src0); static N_INLINE(void, incref_53419_1689653243)(Cell47305* c0); static N_INLINE(void, decref_53001_1689653243)(Cell47305* c0); N_NIMCALL(Toption170009Set, initprocoptions_563635_839829468)(Tcgen530027* m0); N_NIMCALL(Tcproc530021*, newpreinitproc_563625_839829468)(Tcgen530027* m0); N_NIMCALL(Tcproc530021*, newpostinitproc_563630_839829468)(Tcgen530027* m0); N_NIMCALL(void, initnodetable_297085_850551059)(Tnodetable293862* x0); N_NIMCALL(Ropeobj179006*, gettempname_534596_839829468)(Tcgen530027* m0); N_NIMCALL(Ropeobj179006*, HEX26_179418_2381377266)(Ropeobj179006* a0, Ropeobj179006* b0); N_NIMCALL(Ropeobj179006*, rope_179401_2381377266)(NI64 i0); N_NIMCALL(NimStringDesc*, tofullpath_193264_155036129)(NI32 fileidx0); N_NIMCALL(TGenericSeq*, setLengthSeq)(TGenericSeq* seq0, NI elemsize0, NI newlen0); N_NIMCALL(NimStringDesc*, tofilename_193260_155036129)(NI32 fileidx0); N_NIMCALL(NimStringDesc*, 
noschangeFileExt)(NimStringDesc* filename0, NimStringDesc* ext0); N_NIMCALL(NimStringDesc*, completecfilepath_274854_2528170400)(NimStringDesc* cfile0, NIM_BOOL createsubdir0); N_NIMCALL(void, readmergeinfo_531613_2760143328)(NimStringDesc* cfilename0, Tcgen530027* m0); N_NIMCALL(NimStringDesc*, getcfile_564204_839829468)(Tcgen530027* m0); N_NIMCALL(NimStringDesc*, copyString)(NimStringDesc* src0); N_NIMCALL(NimStringDesc*, withpackagename_171073_2607990831)(NimStringDesc* path0); static N_INLINE(NIM_BOOL, skipcodegen_342085_2355241294)(Tnode293802* n0); N_NIMCALL(void, genstmts_540244_839829468)(Tcproc530021* p0, Tnode293802* t0); N_NIMCALL(void, expr_540248_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(void, fillprocloc_540201_839829468)(Tsym293834* sym0); N_NIMCALL(void, fillloc_533282_839829468)(Tloc293816* a0, Tlockind293808 k0, Ttype293840* typ0, Ropeobj179006* r0, Tstorageloc293812 s0); N_NIMCALL(void, unsureAsgnRef)(void** dest0, void* src0); N_NIMCALL(Ropeobj179006*, manglename_534205_839829468)(Tsym293834* s0); N_NIMCALL(NIM_BOOL, iskeyword_533960_839829468)(Tident200010* w0); N_NIMCALL(NimStringDesc*, mangle_529847_2036603609)(NimStringDesc* name0); N_NIMCALL(void, add_179487_2381377266)(Ropeobj179006** a0, NimStringDesc* b0); N_NIMCALL(void, add_179482_2381377266)(Ropeobj179006** a0, Ropeobj179006* b0); N_NIMCALL(Ropeobj179006*, HEX25_179905_2381377266)(NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0); N_NIMCALL(void, genprocprototype_540254_839829468)(Tcgen530027* m0, Tsym293834* sym0); N_NIMCALL(void, useheader_533369_839829468)(Tcgen530027* m0, Tsym293834* sym0); N_NIMCALL(NIM_BOOL, includestr_147249_3771138726)(Tlinkedlist147013* list0, NimStringDesc* data0); N_NIMCALL(NimStringDesc*, getstr_298230_850551059)(Tnode293802* a0); N_NIMCALL(Tsym293834*, getmodule_300123_2984716966)(Tsym293834* s0); N_NIMCALL(NIM_BOOL, containsorincl_269862_2627731572)(Intset269030* s0, NI key0); N_NIMCALL(Ropeobj179006*, 
ropecg_533407_839829468)(Tcgen530027* m0, NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0); N_NIMCALL(NimStringDesc*, nimIntToStr)(NI x0); static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0); N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI start_79210_1689653243, NI last0); N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI first0, NI last0); N_NIMCALL(Ropeobj179006*, cgsym_533403_839829468)(Tcgen530027* m0, NimStringDesc* name0); N_NIMCALL(Tsym293834*, getcompilerproc_339746_3937434831)(NimStringDesc* name0); N_NIMCALL(void, genproc_533951_839829468)(Tcgen530027* m0, Tsym293834* prc0); N_NIMCALL(NIM_BOOL, isactivated_562431_839829468)(Tsym293834* prc0); N_NIMCALL(void, addforwardedproc_533203_839829468)(Tcgen530027* m0, Tsym293834* prc0); N_NIMCALL(TGenericSeq*, incrSeqV2)(TGenericSeq* seq0, NI elemsize0); N_NIMCALL(void, genprocnoforward_561906_839829468)(Tcgen530027* m0, Tsym293834* prc0); N_NIMCALL(void, genprocaux_561284_839829468)(Tcgen530027* m0, Tsym293834* prc0); N_NIMCALL(Ropeobj179006*, genprocheader_536867_839829468)(Tcgen530027* m0, Tsym293834* prc0); N_NIMCALL(void, genclinedir_533813_839829468)(Ropeobj179006** r0, Tlineinfo192336 info0); N_NIMCALL(void, genclinedir_533725_839829468)(Ropeobj179006** r0, NimStringDesc* filename0, NI line0); N_NIMCALL(void, addf_180205_2381377266)(Ropeobj179006** c0, NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0); N_NIMCALL(NimStringDesc*, makesinglelinecstring_529835_2036603609)(NimStringDesc* s0); N_NIMCALL(NI, safelinenm_533721_839829468)(Tlineinfo192336 info0); static N_INLINE(NI, tolinenumber_193415_155036129)(Tlineinfo192336 info0); N_NIMCALL(void, genprocparams_535115_839829468)(Tcgen530027* m0, Ttype293840* t0, Ropeobj179006** rettype0, Ropeobj179006** params0, Intset269030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0); N_NIMCALL(NIM_BOOL, isinvalidreturntype_534548_839829468)(Ttype293840* rettype0); N_NIMCALL(Tctypekind530007, 
maptype_534393_839829468)(Ttype293840* typ0); N_NIMCALL(Tctypekind530007, mapsettype_534389_839829468)(Ttype293840* typ0); N_NIMCALL(NI64, getsize_321135_3876443242)(Ttype293840* typ0); N_NIMCALL(Ttype293840*, lastson_296377_850551059)(Ttype293840* n0); N_NIMCALL(NI64, firstord_321001_3876443242)(Ttype293840* t0); N_NIMCALL(Ttype293840*, skiptypes_297099_850551059)(Ttype293840* t0, Ttypekind293244Set kinds0); N_NIMCALL(NIM_BOOL, isimportedcpptype_534476_839829468)(Ttype293840* t0); N_NIMCALL(NIM_BOOL, needscomplexassignment_534509_839829468)(Ttype293840* typ0); N_NIMCALL(NIM_BOOL, containsgarbagecollectedref_321117_3876443242)(Ttype293840* typ0); static N_INLINE(NIM_BOOL, isobjlackingtypefield_534513_839829468)(Ttype293840* typ0); N_NIMCALL(NIM_BOOL, ispureobject_321138_3876443242)(Ttype293840* typ0); N_NIMCALL(Ropeobj179006*, gettypedescaux_534503_839829468)(Tcgen530027* m0, Ttype293840* typ0, Intset269030* check0); N_NIMCALL(Ttype293840*, getuniquetype_529640_2036603609)(Ttype293840* key0); N_NIMCALL(Ropeobj179006*, gettypepre_534972_839829468)(Tcgen530027* m0, Ttype293840* typ0); N_NIMCALL(Ropeobj179006*, getsimpletypedesc_534936_839829468)(Tcgen530027* m0, Ttype293840* typ0); N_NIMCALL(Ropeobj179006*, typenameorliteral_534898_839829468)(Ttype293840* t0, NimStringDesc* literal0); N_NIMCALL(Ropeobj179006*, gettypename_534313_839829468)(Ttype293840* typ0); N_NIMCALL(Ropeobj179006*, typename_534292_839829468)(Ttype293840* typ0); N_NIMCALL(NimStringDesc*, reprEnum)(NI e0, TNimType* typ0); N_NIMCALL(Ropeobj179006*, cachegettype_534591_839829468)(Tidtable293850 tab0, Ttype293840* key0); N_NIMCALL(TNimObject*, idtableget_300086_2984716966)(Tidtable293850 t0, Tidobj200004* key0); N_NIMCALL(NimStringDesc*, typetostring_321017_3876443242)(Ttype293840* typ0, Tprefereddesc321011 prefer0); N_NIMCALL(Ttype293840*, elemtype_321394_3876443242)(Ttype293840* t0); N_NIMCALL(Ropeobj179006*, HEX26_179447_2381377266)(Ropeobj179006* a0, NimStringDesc* b0); N_NIMCALL(Ropeobj179006*, 
gettypeforward_535039_839829468)(Tcgen530027* m0, Ttype293840* typ0); N_NIMCALL(NIM_BOOL, isimportedtype_534449_839829468)(Ttype293840* t0); N_NIMCALL(NimStringDesc*, getforwardstructformat_535015_839829468)(Tcgen530027* m0); N_NIMCALL(Ropeobj179006*, structorunion_535001_839829468)(Ttype293840* t0); N_NIMCALL(void, idtableput_300094_2984716966)(Tidtable293850* t0, Tidobj200004* key0, TNimObject* val0); N_NIMCALL(void, pushtype_534958_839829468)(Tcgen530027* m0, Ttype293840* typ0); N_NIMCALL(Ropeobj179006*, gettypedescweak_535079_839829468)(Tcgen530027* m0, Ttype293840* t0, Intset269030* check0); N_NIMCALL(void, internalerror_197100_155036129)(Tlineinfo192336 info0, NimStringDesc* errmsg0); N_NIMCALL(NIM_BOOL, hasenum_204230_1926258066)(Debuginfo204009 self0, NimStringDesc* ename0, NI id0, NU32 owner0); N_NIMCALL(void*, newSeq)(TNimType* typ0, NI len0); static N_INLINE(NI, len_294081_850551059)(Tnode293802* n0); N_NIMCALL(void, registerenum_204419_1926258066)(Debuginfo204009* self0, Enumdesc204007* ed0); N_NIMCALL(void, genericSeqAssign)(void* dest0, void* src_86404_1689653243, TNimType* mt0); N_NIMCALL(void, appcg_533632_839829468)(Tcgen530027* m0, Ropeobj179006** c0, NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0); N_NIMCALL(NI64, lengthord_321007_3876443242)(Ttype293840* t0); N_NIMCALL(NIM_BOOL, scancppgenericslot_535827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0); N_NIMCALL(Ttype293840*, resolvestarsincpptype_535891_839829468)(Ttype293840* typ0, NI idx0, NI stars0); N_NIMCALL(NI, len_296339_850551059)(Ttype293840* n0); N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI start0); N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI first0); N_NIMCALL(Ropeobj179006*, getrecorddesc_535643_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ropeobj179006* name0, Intset269030* check0); N_NIMCALL(Ropeobj179006*, getrecordfields_535636_839829468)(Tcgen530027* m0, Ttype293840* typ0, Intset269030* check0); 
N_NIMCALL(Ropeobj179006*, genrecordfieldsaux_535421_839829468)(Tcgen530027* m0, Tnode293802* n0, Ropeobj179006* accessexpr0, Ttype293840* rectype0, Intset269030* check0); N_NIMCALL(NI, sonslen_296351_850551059)(Tnode293802* n0); N_NIMCALL(Tnode293802*, lastson_296364_850551059)(Tnode293802* n0); N_NIMCALL(Ropeobj179006*, HEX26_179452_2381377266)(NimStringDesc* a0, Ropeobj179006* b0); N_NIMCALL(Ropeobj179006*, manglerecfieldname_535361_839829468)(Tsym293834* field0, Ttype293840* rectype0); N_NIMCALL(NimStringDesc*, manglefield_533973_839829468)(Tident200010* name0); N_NIMCALL(NIM_CHAR, nsuToUpperAsciiChar)(NIM_CHAR c0); N_NIMCALL(Ropeobj179006*, gettupledesc_535777_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ropeobj179006* name0, Intset269030* check0); N_NIMCALL(NI, sonslen_296327_850551059)(Ttype293840* n0); N_NIMCALL(void, excl_269841_2627731572)(Intset269030* s0, NI key0); static N_INLINE(NIM_BOOL, iscompiletimeonly_329706_3876443242)(Ttype293840* t0); N_NIMCALL(Tstorageloc293812, paramstorageloc_535098_839829468)(Tsym293834* param0); N_NIMCALL(NIM_BOOL, ccgintroducedptr_534609_839829468)(Tsym293834* s0); N_NIMCALL(Tctypekind530007, mapreturntype_534445_839829468)(Ttype293840* typ0); N_NIMCALL(Tnode293802*, easyresultasgn_561191_839829468)(Tnode293802* n0); static N_INLINE(Tnode293802*, HEX5BHEX5D_294238_850551059)(Tnode293802* n0, NI i0); N_NIMCALL(Tnode293802*, getbody_336227_1724185294)(Tsym293834* s0); N_NIMCALL(Ropeobj179006*, localvardecl_539532_839829468)(Tcproc530021* p0, Tsym293834* s0); N_NIMCALL(Ropeobj179006*, gettypedesc_536671_839829468)(Tcgen530027* m0, Ttype293840* typ0); N_NIMCALL(void, initlocexprsingleuse_540289_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* result0); N_NIMCALL(void, initloc_533273_839829468)(Tloc293816* result0, Tlockind293808 k0, Ttype293840* typ0, Tstorageloc293812 s0); N_NIMCALL(void, linefmt_533714_839829468)(Tcproc530021* p0, Tcprocsection530011 s0, NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0); 
static N_INLINE(Ropeobj179006**, s_530179_3723162438)(Tcproc530021* p0, Tcprocsection530011 s0); N_NIMCALL(Ropeobj179006*, indentline_533656_839829468)(Tcproc530021* p0, Ropeobj179006* r0); N_NIMCALL(void, prepend_179893_2381377266)(Ropeobj179006** a0, Ropeobj179006* b0); N_NIMCALL(Ropeobj179006*, rdloc_539188_839829468)(Tloc293816 a0); N_NIMCALL(void, assignlocalvar_539614_839829468)(Tcproc530021* p0, Tsym293834* s0); N_NIMCALL(void, line_533690_839829468)(Tcproc530021* p0, Tcprocsection530011 s0, Ropeobj179006* r0); N_NIMCALL(void, localdebuginfo_539449_839829468)(Tcproc530021* p0, Tsym293834* s0); N_NIMCALL(void, linef_533700_839829468)(Tcproc530021* p0, Tcprocsection530011 s0, NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0); N_NIMCALL(Ropeobj179006*, makecstring_192638_155036129)(NimStringDesc* s0); N_NIMCALL(NimStringDesc*, nsuNormalize)(NimStringDesc* s0); N_NIMCALL(Ropeobj179006*, gentypeinfo_536941_839829468)(Tcgen530027* m0, Ttype293840* t_536944_839829468); N_NIMCALL(Tcgen530027*, bmod_530201_3723162438)(Tsym293834* module0); N_NIMCALL(void, gentypeinfoauxbase_536960_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ttype293840* origtype0, Ropeobj179006* name0, Ropeobj179006* base0); N_NIMCALL(NIM_BOOL, canformacycle_321123_3876443242)(Ttype293840* typ0); N_NIMCALL(void, gentupleinfo_537549_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ropeobj179006* name0); N_NIMCALL(Ropeobj179006*, getnimnode_536945_839829468)(Tcgen530027* m0); N_NIMCALL(Ttype293840*, fakeclosuretype_538010_839829468)(Tsym293834* owner0); N_NIMCALL(Ttype293840*, newtype_296107_850551059)(Ttypekind293244 kind0, Tsym293834* owner0); N_NIMCALL(void, rawaddson_297394_850551059)(Ttype293840* father0, Ttype293840* son0); N_NIMCALL(void, gentypeinfoaux_537027_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ttype293840* origtype0, Ropeobj179006* name0); N_NIMCALL(Ropeobj179006*, gentraverseproc_538632_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ttypeinforeason538016 reason0); 
N_NIMCALL(void, gentraverseprocseq_538399_839829468)(Ttraversalclosure538019* c0, Ropeobj179006* accessor0, Ttype293840* typ0); N_NIMCALL(void, gettemp_538032_839829468)(Tcproc530021* p0, Ttype293840* t0, Tloc293816* result0, NIM_BOOL needsinit0); N_NIMCALL(void, constructloc_539388_839829468)(Tcproc530021* p0, Tloc293816 loc0, NIM_BOOL istemp0); static N_INLINE(NIM_BOOL, iscomplexvaluetype_539317_839829468)(Ttype293840* t0); N_NIMCALL(void, usestringh_533345_839829468)(Tcgen530027* m0); N_NIMCALL(Ropeobj179006*, addrloc_539204_839829468)(Tloc293816 a0); N_NIMCALL(void, genobjectinit_539242_839829468)(Tcproc530021* p0, Tcprocsection530011 section0, Ttype293840* t0, Tloc293816 a0, NIM_BOOL takeaddr0); N_NIMCALL(Ttypefieldresult321145, analyseobjectwithtypefield_321149_3876443242)(Ttype293840* t0); N_NIMCALL(Ttype293840*, getsystype_339150_3937434831)(Ttypekind293244 kind0); N_NIMCALL(void, gentraverseproc_538022_839829468)(Ttraversalclosure538019* c0, Ropeobj179006* accessor0, Ttype293840* typ_538027_839829468); static N_INLINE(Ropeobj179006*, parentobj_538257_839829468)(Ropeobj179006* accessor0, Tcgen530027* m0); N_NIMCALL(void, gentraverseproc_538039_839829468)(Ttraversalclosure538019* c0, Ropeobj179006* accessor0, Tnode293802* n0); N_NIMCALL(void, gencaserange_538028_839829468)(Tcproc530021* p0, Tnode293802* branch0); N_NIMCALL(Ropeobj179006*, genliteral_540273_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(Ropeobj179006*, genliteral_550476_839829468)(Tcproc530021* p0, Tnode293802* n0, Ttype293840* ty0); N_NIMCALL(Ropeobj179006*, intliteral_540270_839829468)(NI64 i0); N_NIMCALL(Ropeobj179006*, int64literal_550430_839829468)(NI64 i0); N_NIMCALL(Ropeobj179006*, uint64literal_550442_839829468)(NU64 i0); N_NIMCALL(NI, nodetabletestorset_343682_1142335848)(Tnodetable293862* t0, Tnode293802* key0, NI val0); N_NIMCALL(Ropeobj179006*, getstrlit_550468_839829468)(Tcgen530027* m0, NimStringDesc* s0); N_NIMCALL(NimStringDesc*, 
tostrmaxprecision_299007_3471544153)(NF f0); N_NIMCALL(Tnode293802*, copynode_297528_850551059)(Tnode293802* src0); N_NIMCALL(void, linecg_533707_839829468)(Tcproc530021* p0, Tcprocsection530011 s0, NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0); N_NIMCALL(void, genarrayinfo_538005_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ropeobj179006* name0); N_NIMCALL(void, gensetinfo_537867_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ropeobj179006* name0); N_NIMCALL(void, genenuminfo_537597_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ropeobj179006* name0); N_NIMCALL(void, genobjectinfo_537506_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ttype293840* origtype0, Ropeobj179006* name0); N_NIMCALL(void, genobjectfields_537104_839829468)(Tcgen530027* m0, Ttype293840* typ0, Tnode293802* n0, Ropeobj179006* expr0); N_NIMCALL(Ropeobj179006*, discriminatortablename_537057_839829468)(Tcgen530027* m0, Ttype293840* objtype_537060_839829468, Tsym293834* d0); N_NIMCALL(Tsym293834*, lookupinrecord_300119_2984716966)(Tnode293802* n0, Tident200010* field0); N_NIMCALL(NI64, getordvalue_321129_3876443242)(Tnode293802* n0); N_NIMCALL(void, gendeepcopyproc_539066_839829468)(Tcgen530027* m0, Tsym293834* s0, Ropeobj179006* result0); N_NIMCALL(void, initlocalvar_539398_839829468)(Tcproc530021* p0, Tsym293834* v0, NIM_BOOL immediateasgn0); N_NIMCALL(void, fillresult_534865_839829468)(Tsym293834* param0); N_NIMCALL(void, assignparam_539994_839829468)(Tcproc530021* p0, Tsym293834* s0); N_NIMCALL(void, closuresetup_561158_839829468)(Tcproc530021* p0, Tsym293834* prc0); N_NIMCALL(Ropeobj179006*, initgcframe_539435_839829468)(Tcproc530021* p0); N_NIMCALL(Ropeobj179006*, initframe_561140_839829468)(Tcproc530021* p0, Ropeobj179006* procname0, Ropeobj179006* filename0); N_NIMCALL(Ropeobj179006*, quotedfilename_197818_155036129)(Tlineinfo192336 i0); N_NIMCALL(void, appcg_533648_839829468)(Tcproc530021* p0, Tcprocsection530011 s0, NimStringDesc* frmt0, Ropeobj179006** args0, NI 
args0Len0); N_NIMCALL(Ropeobj179006*, deinitgcframe_539441_839829468)(Tcproc530021* p0); N_NIMCALL(Ropeobj179006*, deinitframe_561150_839829468)(Tcproc530021* p0); N_NIMCALL(Tcgen530027*, findpendingmodule_533241_839829468)(Tcgen530027* m0, Tsym293834* s0); N_NIMCALL(void, symindynamiclib_560929_839829468)(Tcgen530027* m0, Tsym293834* sym0); N_NIMCALL(NIM_BOOL, isgetprocaddr_560442_839829468)(Tlib293820* lib0); N_NIMCALL(void, loaddynamiclib_560480_839829468)(Tcgen530027* m0, Tlib293820* lib0); N_NIMCALL(void, libcandidates_171605_2607990831)(NimStringDesc* s0, TY135002** dest0); N_NIMCALL(void, rawmessage_195612_155036129)(Tmsgkind192002 msg0, NimStringDesc* arg0); N_NIMCALL(void, initlocexpr_540283_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* result0); N_NIMCALL(Ropeobj179006*, mangledynlibproc_539816_839829468)(Tsym293834* sym0); N_NIMCALL(NimStringDesc*, HEX24_179856_2381377266)(Ropeobj179006* r0); N_NIMCALL(void, symindynamiclibpartial_561071_839829468)(Tcgen530027* m0, Tsym293834* sym0); N_NIMCALL(void, genvarprototype_540236_839829468)(Tcgen530027* m0, Tsym293834* sym0); N_NIMCALL(void, genvarprototypeaux_545254_839829468)(Tcgen530027* m0, Tsym293834* sym0); N_NIMCALL(void, declarethreadvar_539676_839829468)(Tcgen530027* m0, Tsym293834* s0, NIM_BOOL isextern0); static N_INLINE(NIM_BOOL, emulatedthreadvars_533949_839829468)(void); static N_INLINE(NIM_BOOL, crossescppboundary_561754_839829468)(Tcgen530027* m0, Tsym293834* sym0); N_NIMCALL(void, putlocintodest_540258_839829468)(Tcproc530021* p0, Tloc293816* d0, Tloc293816 s0); N_NIMCALL(void, genassignment_540264_839829468)(Tcproc530021* p0, Tloc293816 dest0, Tloc293816 src0, Tassignmentflag539302Set flags0); N_NIMCALL(void, genrefassign_539311_839829468)(Tcproc530021* p0, Tloc293816 dest0, Tloc293816 src0, Tassignmentflag539302Set flags0); static N_INLINE(NIM_BOOL, usesnativegc_170177_2607990831)(void); N_NIMCALL(void, optasgnloc_550788_839829468)(Tloc293816 a0, Ttype293840* t0, Ropeobj179006* 
field0, Tloc293816* Result); N_NIMCALL(void, genoptasgntuple_551001_839829468)(Tcproc530021* p0, Tloc293816 dest0, Tloc293816 src0, Tassignmentflag539302Set flags0); N_NIMCALL(void, gengenericasgn_551167_839829468)(Tcproc530021* p0, Tloc293816 dest0, Tloc293816 src0, Tassignmentflag539302Set flags0); N_NIMCALL(NI, asgncomplexity_550750_839829468)(Tnode293802* n0); N_NIMCALL(void, genoptasgnobject_551084_839829468)(Tcproc530021* p0, Tloc293816 dest0, Tloc293816 src0, Tassignmentflag539302Set flags0, Tnode293802* t0); N_NIMCALL(void, genericAssign)(void* dest0, void* src0, TNimType* mt0); N_NIMCALL(void, localerror_197085_155036129)(Tlineinfo192336 info0, NimStringDesc* arg0); N_NIMCALL(NIM_BOOL, issimpleconst_533311_839829468)(Ttype293840* typ0); N_NIMCALL(void, putintodest_551468_839829468)(Tcproc530021* p0, Tloc293816* d0, Ttype293840* t0, Ropeobj179006* r0, Tstorageloc293812 s0); N_NIMCALL(void, gencomplexconst_559249_839829468)(Tcproc530021* p0, Tsym293834* sym0, Tloc293816* d0); N_NIMCALL(void, requestconstimpl_540240_839829468)(Tcproc530021* p0, Tsym293834* sym0); N_NIMCALL(Ropeobj179006*, genconstexpr_555849_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(void, tobitset_341001_452470228)(Tnode293802* s0, Tbitset340004** b0); N_NIMCALL(Ropeobj179006*, genrawsetdata_550629_839829468)(Tbitset340004* cs0, NI size0); N_NIMCALL(NimStringDesc*, nsuToHex)(NI64 x0, NI len0); N_NIMCALL(NI64, bitsettoword_550578_839829468)(Tbitset340004* s0, NI size0); N_NIMCALL(Ropeobj179006*, genconstseq_560371_839829468)(Tcproc530021* p0, Tnode293802* n0, Ttype293840* t0); N_NIMCALL(void, appcg_533640_839829468)(Tcgen530027* m0, Tcfilesection530005 s0, NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0); N_NIMCALL(Ropeobj179006*, genconstsimplelist_560299_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(Ropeobj179006*, gennamedconstexpr_560284_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(void, accessthreadlocalvar_533945_839829468)(Tcproc530021* 
p0, Tsym293834* s0); static N_INLINE(Ropeobj179006**, procsec_530194_3723162438)(Tcproc530021* p0, Tcprocsection530011 s0); static N_INLINE(NIM_BOOL, isemptytype_298440_850551059)(Ttype293840* t0); N_NIMCALL(void, putdataintodest_551436_839829468)(Tcproc530021* p0, Tloc293816* d0, Ttype293840* t0, Ropeobj179006* r0); N_NIMCALL(void, genlinedir_533823_839829468)(Tcproc530021* p0, Tnode293802* t0); N_NIMCALL(Ropeobj179006*, sourceline_193068_155036129)(Tlineinfo192336 i0); N_NIMCALL(NIM_BOOL, freshlineinfo_533818_839829468)(Tcproc530021* p0, Tlineinfo192336 info0); N_NIMCALL(void, genmagicexpr_558033_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 op0); N_NIMCALL(void, genandor_555311_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 m0); N_NIMCALL(Ropeobj179006*, getlabel_540217_839829468)(Tcproc530021* p0); N_NIMCALL(void, fixlabel_540230_839829468)(Tcproc530021* p0, Ropeobj179006* labl0); N_NIMCALL(void, unaryarith_553646_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 op0); N_NIMCALL(void, unaryarithoverflow_552633_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 m0); N_NIMCALL(void, binaryfloatarith_557728_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 m0); N_NIMCALL(void, binaryarith_552819_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 op0); N_NIMCALL(void, geneqproc_553214_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, binaryarithoverflow_552262_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 m0); N_NIMCALL(Ropeobj179006*, binaryarithoverflowraw_552235_839829468)(Tcproc530021* p0, Ttype293840* t0, Tloc293816 a0, Tloc293816 b0, NimStringDesc* frmt0); N_NIMCALL(Ropeobj179006*, rdcharloc_539227_839829468)(Tloc293816 a0); N_NIMCALL(NI64, lastord_321004_3876443242)(Ttype293840* t0); N_NIMCALL(void, genrepr_556339_839829468)(Tcproc530021* p0, 
Tnode293802* e0, Tloc293816* d0); N_NIMCALL(Ropeobj179006*, lenfield_540305_839829468)(Tcproc530021* p0); N_NIMCALL(void, gcusage_555439_839829468)(Tnode293802* n0); N_NIMCALL(void, message_197095_155036129)(Tlineinfo192336 info0, Tmsgkind192002 msg0, NimStringDesc* arg0); N_NIMCALL(NimStringDesc*, rendertree_312044_382274130)(Tnode293802* n0, Trenderflag312004Set renderflags0); N_NIMCALL(void, gengettypeinfo_556383_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, genswap_556638_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, unaryexpr_552209_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0); N_NIMCALL(void, binarystmt_551501_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genstrconcat_555452_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, genstrappend_555554_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, genseqelemappend_555683_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, genstrequals_557666_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, binaryexpr_551549_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genisnil_553620_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, gendollar_556391_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genof_556331_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(void, genof_556201_839829468)(Tcproc530021* p0, Tnode293802* x0, Ttype293840* typ0, Tloc293816* d0); N_NIMCALL(void, globalerror_197071_155036129)(Tlineinfo192336 info0, Tmsgkind192002 msg0, NimStringDesc* arg0); N_NIMCALL(Ropeobj179006*, genofhelper_556139_839829468)(Tcproc530021* p0, Ttype293840* dest0, Ropeobj179006* a0); N_NIMCALL(void, 
gennew_555782_839829468)(Tcproc530021* p0, Tnode293802* e0); N_NIMCALL(void, rawgennew_555741_839829468)(Tcproc530021* p0, Tloc293816 a0, Ropeobj179006* sizeexpr_555745_839829468); N_NIMCALL(void, gennewfinalize_556110_839829468)(Tcproc530021* p0, Tnode293802* e0); N_NIMCALL(void, gennewseq_555824_839829468)(Tcproc530021* p0, Tnode293802* e0); N_NIMCALL(void, gennewseqaux_555795_839829468)(Tcproc530021* p0, Tloc293816 dest0, Ropeobj179006* length0); N_NIMCALL(void, gennewseqofcap_555836_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, gensomecast_557480_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(Ropeobj179006*, getclosuretype_536683_839829468)(Tcgen530027* m0, Ttype293840* t0, Tclosuretypekind536679 kind0); N_NIMCALL(void, genord_557474_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, unaryexprchar_552222_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genarraylen_556415_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 op0); N_NIMCALL(void, unarystmt_551527_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0); N_NIMCALL(void, gensetlengthstr_556632_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, gensetlengthseq_556500_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, gensetop_557419_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 op0); N_NIMCALL(void, binarystmtinexcl_556857_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0); N_NIMCALL(Ropeobj179006*, rdsetelemloc_556662_839829468)(Tloc293816 a0, Ttype293840* settype0); N_NIMCALL(void, binaryexprchar_551809_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0); N_NIMCALL(void, geninop_557009_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); 
N_NIMCALL(NIM_BOOL, fewcmps_556803_839829468)(Tnode293802* s0); N_NIMCALL(void, geninexpraux_554496_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* a0, Tloc293816* b0, Tloc293816* d0); N_NIMCALL(void, binaryexprin_556837_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* a0, Tloc293816* b0, Tloc293816* d0, NimStringDesc* frmt0); N_NIMCALL(void, gencall_544632_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, genclosurecall_541452_839829468)(Tcproc530021* p0, Tnode293802* le0, Tnode293802* ri0, Tloc293816* d0); N_NIMCALL(Ropeobj179006*, genarg_540787_839829468)(Tcproc530021* p0, Tnode293802* n_540790_839829468, Tsym293834* param0, Tnode293802* call0); static N_INLINE(Ropeobj179006*, genargstringtocstring_540776_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(Ropeobj179006*, openarrayloc_540665_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(Tnode293802*, skipconv_329882_3876443242)(Tnode293802* n0); N_NIMCALL(Tmagic293524, getmagic_319502_2616423590)(Tnode293802* op0); N_NIMCALL(Ropeobj179006*, genargnoparam_540938_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(Ropeobj179006*, getrawproctype_541459_839829468)(Tcproc530021* p0, Ttype293840* t0); N_NIMCALL(NIM_BOOL, leftappearsonrightside_540329_839829468)(Tnode293802* le0, Tnode293802* ri0); N_NIMCALL(Tanalysisresult474003, ispartof_474340_788060399)(Tnode293802* a0, Tnode293802* b0); static N_INLINE(NIM_BOOL, hasnoinit_540383_839829468)(Tnode293802* call0); N_NIMCALL(void, resetloc_539350_839829468)(Tcproc530021* p0, Tloc293816* loc0); N_NIMCALL(Ropeobj179006*, addcomma_541464_839829468)(Ropeobj179006* r0); N_NIMCALL(void, geninfixcall_542929_839829468)(Tcproc530021* p0, Tnode293802* le0, Tnode293802* ri0, Tloc293816* d0); N_NIMCALL(NIM_BOOL, contains_110056_4286263276)(NimStringDesc* s0, char136Set chars0); N_NIMCALL(Ropeobj179006*, genpatterncall_542699_839829468)(Tcproc530021* p0, Tnode293802* ri_542702_839829468, NimStringDesc* pat0, 
Ttype293840* typ_542704_839829468); N_NIMCALL(Ropeobj179006*, genotherarg_540277_839829468)(Tcproc530021* p0, Tnode293802* ri0, NI i0, Ttype293840* typ0); N_NIMCALL(Ropeobj179006*, genthisarg_542475_839829468)(Tcproc530021* p0, Tnode293802* ri_542478_839829468, NI i0, Ttype293840* typ0); N_NIMCALL(Tnode293802*, skipaddrderef_542433_839829468)(Tnode293802* node0); N_NIMCALL(void, fixupcall_540410_839829468)(Tcproc530021* p0, Tnode293802* le0, Tnode293802* ri0, Tloc293816* d0, Ropeobj179006* callee0, Ropeobj179006* params0); N_NIMCALL(void, gennamedparamcall_543616_839829468)(Tcproc530021* p0, Tnode293802* ri0, Tloc293816* d0); N_NIMCALL(NIM_BOOL, contains_110046_4286263276)(NimStringDesc* s0, NIM_CHAR c0); N_NIMCALL(void, genprefixcall_540960_839829468)(Tcproc530021* p0, Tnode293802* le0, Tnode293802* ri0, Tloc293816* d0); static N_INLINE(void, poststmtactions_533942_839829468)(Tcproc530021* p0); N_NIMCALL(void, genreset_555731_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(void, genecho_555369_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(NimStringDesc*, nsuRepeatStr)(NimStringDesc* s0, NI n0); N_NIMCALL(void, genarrtoseq_556046_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0); N_NIMCALL(void, genseqconstr_556004_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0); N_NIMCALL(void, localerror_197080_155036129)(Tlineinfo192336 info0, Tmsgkind192002 msg0, NimStringDesc* arg0); N_NIMCALL(Tnode293802*, wrapprocforspawn_436501_2218250499)(Tsym293834* owner0, Tnode293802* spawnexpr0, Ttype293840* rettype0, Tnode293802* barrier0, Tnode293802* dest0); N_NIMCALL(Tnode293802*, liftparallel_479822_1773027539)(Tsym293834* owner0, Tnode293802* n0); N_NIMCALL(void, gendeepcopy_551374_839829468)(Tcproc530021* p0, Tloc293816 dest0, Tloc293816 src0); N_NIMCALL(NIM_BOOL, isdeepconstexpr_319566_2616423590)(Tnode293802* n0); N_NIMCALL(Ropeobj179006*, gensetnode_550664_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(void, 
gensetconstr_558496_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x0); N_NIMCALL(void, exprcomplexconst_559684_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(void, genarrayconstr_559207_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(NIM_BOOL, handleconstexpr_555853_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(void, gentupleconstr_558618_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(void, genobjconstr_555903_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(Tsym293834*, lookupfieldagain_554153_839829468)(Tcproc530021* p0, Ttype293840* ty_554156_839829468, Tsym293834* field0, Ropeobj179006** r0); N_NIMCALL(void, genfieldcheck_554504_839829468)(Tcproc530021* p0, Tnode293802* e0, Ropeobj179006* obj0, Tsym293834* field0, Ttype293840* origty0); N_NIMCALL(Tnode293802*, newstrnode_294678_850551059)(Tnodekind293020 kind0, NimStringDesc* strval0); N_NIMCALL(void, gencast_557537_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, genconv_557632_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(NIM_BOOL, comparetypes_327214_3876443242)(Ttype293840* x0, Ttype293840* y0, Tdistinctcompare325427 cmp0, Ttypecmpflag325429Set flags0); N_NIMCALL(void, genaddr_554051_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); static N_INLINE(NIM_BOOL, iscppref_553807_839829468)(Tcproc530021* p0, Ttype293840* typ0); N_NIMCALL(void, genbracketexpr_555277_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(void, genarrayelem_555093_839829468)(Tcproc530021* p0, Tnode293802* x0, Tnode293802* y0, Tloc293816* d0); N_NIMCALL(NIM_BOOL, isconstexpr_319510_2616423590)(Tnode293802* n0); N_NIMCALL(void, genopenarrayelem_555169_839829468)(Tcproc530021* p0, Tnode293802* x0, Tnode293802* y0, Tloc293816* d0); N_NIMCALL(void, 
genseqelem_555205_839829468)(Tcproc530021* p0, Tnode293802* x0, Tnode293802* y0, Tloc293816* d0); N_NIMCALL(void, gencstringelem_555144_839829468)(Tcproc530021* p0, Tnode293802* x0, Tnode293802* y0, Tloc293816* d0); N_NIMCALL(void, gentupleelem_554124_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, genderef_544921_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NIM_BOOL enforcederef0); N_NIMCALL(void, genrecordfield_554448_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(Ttype293840*, genrecordfieldaux_554096_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tloc293816* a0); N_NIMCALL(void, gencheckedrecordfield_555046_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0); N_NIMCALL(void, genblock_547083_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(NI, startblock_544978_839829468)(Tcproc530021* p0, NimStringDesc* start0, Ropeobj179006** args0, NI args0Len0); N_NIMCALL(void, endblock_545060_839829468)(Tcproc530021* p0); N_NIMCALL(void, endblock_545035_839829468)(Tcproc530021* p0, Ropeobj179006* blockend0); N_NIMCALL(Ropeobj179006*, blockbody_545025_839829468)(Tblock530019* b0); N_NIMCALL(void, genstmtlistexpr_559402_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(void, genif_545982_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(void, downconv_559581_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(NI, inheritancediff_327252_3876443242)(Ttype293840* a0, Ttype293840* b0); N_NIMCALL(void, upconv_559431_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(void, genrangechck_557590_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0, NimStringDesc* magic0); N_NIMCALL(void, convstrtocstr_557642_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(void, convcstrtostr_557654_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); 
N_NIMCALL(void, genclosure_558836_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); static N_INLINE(NIM_BOOL, isconstclosure_558810_839829468)(Tnode293802* n0); static N_INLINE(NIM_BOOL, isroutine_298323_850551059)(Tsym293834* s0); N_NIMCALL(void, genwhilestmt_546984_839829468)(Tcproc530021* p0, Tnode293802* t0); static N_INLINE(Ropeobj179006*, assignlabel_545020_839829468)(Tblock530019* b0); N_NIMCALL(NIM_BOOL, stmtscontainpragma_529083_2036603609)(Tnode293802* n0, Tspecialword276003 w0); N_NIMCALL(void, gencomputedgoto_546744_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(void, genvarstmt_545854_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(void, gensinglevar_545276_839829468)(Tcproc530021* p0, Tnode293802* a0); N_NIMCALL(void, gengotovar_545258_839829468)(Tcproc530021* p0, Tnode293802* value0); N_NIMCALL(void, assignglobalvar_539819_839829468)(Tcproc530021* p0, Tsym293834* s0); N_NIMCALL(void, varindynamiclib_539812_839829468)(Tcgen530027* m0, Tsym293834* sym0); N_NIMCALL(void, registergcroot_544762_839829468)(Tcproc530021* p0, Tsym293834* v0); N_NIMCALL(Ropeobj179006*, gentraverseprocforglobal_539032_839829468)(Tcgen530027* m0, Tsym293834* s0); static N_INLINE(NIM_BOOL, isassignedimmediately_544781_839829468)(Tnode293802* n0); N_NIMCALL(NIM_BOOL, containshiddenpointer_321120_3876443242)(Ttype293840* typ0); static N_INLINE(void, loadinto_544928_839829468)(Tcproc530021* p0, Tnode293802* le0, Tnode293802* ri0, Tloc293816* a0); N_NIMCALL(void, genasgncall_544695_839829468)(Tcproc530021* p0, Tnode293802* le0, Tnode293802* ri0, Tloc293816* d0); N_NIMCALL(void, genclosurevar_545832_839829468)(Tcproc530021* p0, Tnode293802* a0); N_NIMCALL(void, genvartuple_544794_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(Tnode293802*, lowertupleunpacking_434037_2218250499)(Tnode293802* n0, Tsym293834* owner0); N_NIMCALL(void, genconststmt_545909_839829468)(Tcproc530021* p0, Tnode293802* t0); N_NIMCALL(NIM_BOOL, 
containscompiletimeonly_329721_3876443242)(Ttype293840* t0); static N_INLINE(NIM_BOOL, emitlazily_533248_839829468)(Tsym293834* s0); N_NIMCALL(void, gencase_548826_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0); N_NIMCALL(void, genstringcase_548416_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0); N_NIMCALL(NI, nextpoweroftwo_101629_1009420244)(NI x0); N_NIMCALL(void, gencasestringbranch_548100_839829468)(Tcproc530021* p0, Tnode293802* b0, Tloc293816 e0, Ropeobj179006* labl0, Ropeobj179006** branches0, NI branches0Len0); N_NIMCALL(NI64, hashstring_529100_2036603609)(NimStringDesc* s0); N_NIMCALL(Ropeobj179006*, gencasesecondpass_547965_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0, NI labid0, NI until0); N_NIMCALL(void, exprblock_545103_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(void, gencasegeneric_548087_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0); N_NIMCALL(Ropeobj179006*, genifforcaseuntil_548021_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc293816 a0); N_NIMCALL(void, gencasegenericbranch_547910_839829468)(Tcproc530021* p0, Tnode293802* b0, Tloc293816 e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj179006* labl0); N_NIMCALL(void, gengotoforcase_546673_839829468)(Tcproc530021* p0, Tnode293802* casestmt0); N_NIMCALL(void, genordinalcase_548724_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0); N_NIMCALL(NI, ifswitchsplitpoint_548615_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(NIM_BOOL, branchhastoobigrange_548575_839829468)(Tnode293802* b0); N_NIMCALL(void, genreturnstmt_546617_839829468)(Tcproc530021* p0, Tnode293802* t0); N_NIMCALL(void, blockleaveactions_546442_839829468)(Tcproc530021* p0, NI howmanytrys0, NI howmanyexcepts0); static N_INLINE(Tnode293802*, 
pop_319246_1689653243)(Tnodeseq293796** s0); N_NIMCALL(void, genbreakstmt_547444_839829468)(Tcproc530021* p0, Tnode293802* t0); N_NIMCALL(void, genasgn_550239_839829468)(Tcproc530021* p0, Tnode293802* e0, NIM_BOOL fastasgn0); N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_550080_839829468)(Tcproc530021* p0, Tnode293802* asgn0); N_NIMCALL(void, asgnfielddiscriminant_550209_839829468)(Tcproc530021* p0, Tnode293802* e0); N_NIMCALL(void, gendiscriminantcheck_550144_839829468)(Tcproc530021* p0, Tloc293816 a0, Tloc293816 tmp0, Ttype293840* objtype0, Tsym293834* field0); N_NIMCALL(Ropeobj179006*, discriminatortabledecl_537094_839829468)(Tcgen530027* m0, Ttype293840* objtype0, Tsym293834* d0); N_NIMCALL(void, genasmstmt_549659_839829468)(Tcproc530021* p0, Tnode293802* t0); N_NIMCALL(Ropeobj179006*, genasmoremitstmt_549529_839829468)(Tcproc530021* p0, Tnode293802* t0, NIM_BOOL isasmstmt0); N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest0, NI addlen0); N_NIMCALL(void, gentrycpp_548865_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0); static N_INLINE(void, gensimpleblock_545095_839829468)(Tcproc530021* p0, Tnode293802* stmts0); N_NIMCALL(void, gentry_549114_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0); N_NIMCALL(NIM_BOOL, isdefined_201011_1967573533)(NimStringDesc* symbol0); N_NIMCALL(void, line_533695_839829468)(Tcproc530021* p0, Tcprocsection530011 s0, NimStringDesc* r0); static N_INLINE(Ropeobj179006*, pop_179530_1689653243)(TY192350** s0); N_NIMCALL(void, genraisestmt_547828_839829468)(Tcproc530021* p0, Tnode293802* t0); N_NIMCALL(NimStringDesc*, getraisefrmt_547824_839829468)(Tcproc530021* p0); N_NIMCALL(void, gentypesection_539184_839829468)(Tcgen530027* m0, Tnode293802* n0); N_NIMCALL(void, genpragma_550039_839829468)(Tcproc530021* p_550041_839829468, Tnode293802* n0); N_NIMCALL(Tspecialword276003, whichpragma_319911_2616423590)(Tnode293802* n0); N_NIMCALL(void, genemit_549839_839829468)(Tcproc530021* p0, Tnode293802* t0); 
N_NIMCALL(Tcfilesection530005, determinesection_549819_839829468)(Tnode293802* n0); N_NIMCALL(NIM_BOOL, nsuStartsWith)(NimStringDesc* s0, NimStringDesc* prefix0); N_NIMCALL(void, genbreakpoint_549862_839829468)(Tcproc530021* p0, Tnode293802* t0); N_NIMCALL(void, genwatchpoint_550016_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(Tsym293834*, skipgenericowner_298279_850551059)(Tsym293834* s0); N_NIMCALL(void, genparforstmt_547208_839829468)(Tcproc530021* p0, Tnode293802* t0); N_NIMCALL(void, genstate_545117_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(void, gengotostate_545144_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(void, genbreakstate_545229_839829468)(Tcproc530021* p0, Tnode293802* n0); N_NIMCALL(void, registermoduletomain_563243_839829468)(Tsym293834* m0); N_NIMCALL(Ropeobj179006*, getinitname_563235_839829468)(Tsym293834* m0); N_NIMCALL(Ropeobj179006*, getsomeinitname_562904_839829468)(Tsym293834* m0, NimStringDesc* suffix0); N_NIMCALL(Ropeobj179006*, getdatinitname_563239_839829468)(Tsym293834* m0); N_NIMCALL(Tnode293802*, generatemethoddispatchers_433151_3853300031)(void); N_NIMCALL(void, genmainproc_562729_839829468)(Tcgen530027* m0); N_NIMCALL(Ropeobj179006*, genfilenames_562688_839829468)(Tcgen530027* m0); N_NIMCALL(void, finishmodule_564420_839829468)(Tcgen530027* m0); N_NIMCALL(void, updatecachedmodule_564813_839829468)(Tcgen530027* m0); N_NIMCALL(NIM_BOOL, mergerequired_531832_2760143328)(Tcgen530027* m0); N_NIMCALL(void, mergefiles_532241_2760143328)(NimStringDesc* cfilename0, Tcgen530027* m0); N_NIMCALL(void, geninitcode_563286_839829468)(Tcgen530027* m0); N_NIMCALL(Ropeobj179006*, gensectionstart_531081_2760143328)(Tcprocsection530011 ps0); N_NIMCALL(Ropeobj179006*, gensectionend_531116_2760143328)(Tcprocsection530011 ps0); N_NIMCALL(Ropeobj179006*, gensectionstart_531015_2760143328)(Tcfilesection530005 fs0); N_NIMCALL(Ropeobj179006*, gensectionend_531050_2760143328)(Tcfilesection530005 fs0); N_NIMCALL(void, 
finishtypedescriptions_536842_839829468)(Tcgen530027* m0); N_NIMCALL(Ropeobj179006*, genmodule_563491_839829468)(Tcgen530027* m0, NimStringDesc* cfile0); N_NIMCALL(Ropeobj179006*, getfileheader_562683_839829468)(NimStringDesc* cfile0); N_NIMCALL(Ropeobj179006*, getcopyright_562665_839829468)(NimStringDesc* cfile0); N_NIMCALL(NimStringDesc*, getcompilecfilecmd_275284_2528170400)(NimStringDesc* cfilename0, NIM_BOOL isexternal0); static N_INLINE(void, addinttypes_562659_839829468)(Ropeobj179006** result0); N_NIMCALL(Ropeobj179006*, genmergeinfo_531203_2760143328)(Tcgen530027* m0); N_NIMCALL(void, generatethreadlocalstorage_539717_839829468)(Tcgen530027* m0); N_NIMCALL(void, generateheaders_561104_839829468)(Tcgen530027* m0); N_NIMCALL(NimStringDesc*, nsuReplaceChar)(NimStringDesc* s0, NIM_CHAR sub0, NIM_CHAR by0); N_NIMCALL(void, writerope_179836_2381377266)(Ropeobj179006* head0, NimStringDesc* filename0, NIM_BOOL usewarning0); N_NIMCALL(void, addfiletocompile_274863_2528170400)(NimStringDesc* filename0); N_NIMCALL(void, addfiletolink_274872_2528170400)(NimStringDesc* filename0); N_NIMCALL(void, writemodule_564637_839829468)(Tcgen530027* m0, NIM_BOOL pending0); N_NIMCALL(void, generatethreadvarssize_539771_839829468)(Tcgen530027* m0); N_NIMCALL(NIM_BOOL, shouldrecompile_564621_839829468)(Ropeobj179006* code0, NimStringDesc* cfile0); N_NIMCALL(NimStringDesc*, toobjfile_274859_2528170400)(NimStringDesc* filename0); N_NIMCALL(NIM_BOOL, writeropeifnotequal_180511_2381377266)(Ropeobj179006* r0, NimStringDesc* filename0); N_NIMCALL(NIM_BOOL, nosexistsFile)(NimStringDesc* filename0); N_NIMCALL(NIM_BOOL, nosfileNewer)(NimStringDesc* a0, NimStringDesc* b0); N_NIMCALL(void, writemapping_275789_2528170400)(Ropeobj179006* gsymbolmapping0); N_NIMCALL(void, writeheader_564152_839829468)(Tcgen530027* m0); N_NIMCALL(void, nossplitFile)(NimStringDesc* path0, TY128506* Result); N_NIMCALL(void, resetmodule_563763_839829468)(Tcgen530027* m0); N_NIMCALL(void, 
nullify_563833_839829468)(Ropeobj179006** arr0); N_NIMCALL(void, nullify_563858_839829468)(Ropeobj179006** arr0); STRING_LITERAL(T839829468_4, "\011", 1); STRING_LITERAL(T839829468_10, "compiler/cgen.nim", 17); NIM_CONST TY204018 T839829468_9 = {((NimStringDesc*) &T839829468_10), ((NI) 1158)} ; STRING_LITERAL(T839829468_11, "T", 1); STRING_LITERAL(T839829468_12, "_", 1); STRING_LITERAL(T839829468_13, "added pending module twice: ", 28); STRING_LITERAL(T839829468_14, ".h", 2); STRING_LITERAL(T839829468_15, ".cpp", 4); STRING_LITERAL(T839829468_16, ".m", 2); STRING_LITERAL(T839829468_17, ".c", 2); STRING_LITERAL(T839829468_18, "0", 1); STRING_LITERAL(T839829468_19, "$", 1); STRING_LITERAL(T839829468_20, "ropes: invalid format string $", 30); STRING_LITERAL(T839829468_21, "$N#line $2 $1$N", 15); STRING_LITERAL(T839829468_22, "N_LIB_IMPORT ", 13); STRING_LITERAL(T839829468_23, "N_LIB_EXPORT ", 13); STRING_LITERAL(T839829468_24, "static ", 7); STRING_LITERAL(T839829468_25, "mapType", 7); STRING_LITERAL(T839829468_26, "void", 4); STRING_LITERAL(T839829468_27, "getTypeDescAux: t == nil", 24); STRING_LITERAL(T839829468_28, "TY", 2); STRING_LITERAL(T839829468_29, "getTypeName: ", 13); STRING_LITERAL(T839829468_30, "void*", 5); STRING_LITERAL(T839829468_31, "NimStringDesc", 13); STRING_LITERAL(T839829468_32, "NimStringDesc*", 14); STRING_LITERAL(T839829468_33, "NCSTRING", 8); STRING_LITERAL(T839829468_34, "NIM_BOOL", 8); STRING_LITERAL(T839829468_35, "NIM_CHAR", 8); STRING_LITERAL(T839829468_36, "NI", 2); STRING_LITERAL(T839829468_37, "NI8", 3); STRING_LITERAL(T839829468_38, "NI16", 4); STRING_LITERAL(T839829468_39, "NI32", 4); STRING_LITERAL(T839829468_40, "NI64", 4); STRING_LITERAL(T839829468_41, "NF", 2); STRING_LITERAL(T839829468_42, "NF32", 4); STRING_LITERAL(T839829468_43, "NF64", 4); STRING_LITERAL(T839829468_44, "NF128", 5); STRING_LITERAL(T839829468_45, "NU", 2); STRING_LITERAL(T839829468_46, "NU8", 3); STRING_LITERAL(T839829468_47, "NU16", 4); 
STRING_LITERAL(T839829468_48, "NU32", 4); STRING_LITERAL(T839829468_49, "NU64", 4); NIM_CONST TY534943 Numericaltypetostr_534941_839829468 = {((NimStringDesc*) &T839829468_36), ((NimStringDesc*) &T839829468_37), ((NimStringDesc*) &T839829468_38), ((NimStringDesc*) &T839829468_39), ((NimStringDesc*) &T839829468_40), ((NimStringDesc*) &T839829468_41), ((NimStringDesc*) &T839829468_42), ((NimStringDesc*) &T839829468_43), ((NimStringDesc*) &T839829468_44), ((NimStringDesc*) &T839829468_45), ((NimStringDesc*) &T839829468_46), ((NimStringDesc*) &T839829468_47), ((NimStringDesc*) &T839829468_48), ((NimStringDesc*) &T839829468_49)} ; STRING_LITERAL(T839829468_50, "tyStatic for getSimpleTypeDesc", 30); STRING_LITERAL(T839829468_51, "cannot generate C type for: ", 28); STRING_LITERAL(T839829468_52, "&", 1); STRING_LITERAL(T839829468_53, "*", 1); STRING_LITERAL(T839829468_54, "$1 $2;$n", 8); STRING_LITERAL(T839829468_55, "typedef $1 $2 $2;$n", 19); STRING_LITERAL(T839829468_56, "union", 5); STRING_LITERAL(T839829468_57, "struct", 6); STRING_LITERAL(T839829468_58, "getTypeForward(", 15); STRING_LITERAL(T839829468_59, "typedef NI32 $1;$n", 18); STRING_LITERAL(T839829468_60, "typedef NU8 $1;$n", 17); STRING_LITERAL(T839829468_61, "typedef NU16 $1;$n", 18); STRING_LITERAL(T839829468_62, "typedef NI64 $1;$n", 18); STRING_LITERAL(T839829468_63, "getTypeDescAux: enum", 20); STRING_LITERAL(T839829468_64, "typedef $1_PTR($2, $3) $4;$n", 28); STRING_LITERAL(T839829468_65, "N_NIMCALL", 9); STRING_LITERAL(T839829468_66, "N_STDCALL", 9); STRING_LITERAL(T839829468_67, "N_CDECL", 7); STRING_LITERAL(T839829468_68, "N_SAFECALL", 10); STRING_LITERAL(T839829468_69, "N_SYSCALL", 9); STRING_LITERAL(T839829468_70, "N_INLINE", 8); STRING_LITERAL(T839829468_71, "N_NOINLINE", 10); STRING_LITERAL(T839829468_72, "N_FASTCALL", 10); STRING_LITERAL(T839829468_73, "N_CLOSURE", 9); STRING_LITERAL(T839829468_74, "N_NOCONV", 8); NIM_CONST TY293016 Callingconvtostr_534585_839829468 = {((NimStringDesc*) 
&T839829468_65), ((NimStringDesc*) &T839829468_66), ((NimStringDesc*) &T839829468_67), ((NimStringDesc*) &T839829468_68), ((NimStringDesc*) &T839829468_69), ((NimStringDesc*) &T839829468_70), ((NimStringDesc*) &T839829468_71), ((NimStringDesc*) &T839829468_72), ((NimStringDesc*) &T839829468_73), ((NimStringDesc*) &T839829468_74)} ; STRING_LITERAL(T839829468_75, "typedef struct {$nN_NIMCALL_PTR($2, ClPrc) $3;$nvoid* ClEnv;$n}" " $1;$n", 69); STRING_LITERAL(T839829468_76, "struct $2 : #TGenericSeq {$n", 28); STRING_LITERAL(T839829468_77, "struct $2 {$n #TGenericSeq Sup;$n", 34); STRING_LITERAL(T839829468_78, " $1 data[SEQ_DECL_SIZE];$n};$n", 31); STRING_LITERAL(T839829468_79, "TGenericSeq", 11); STRING_LITERAL(T839829468_80, "typedef $1 $2[$3];$n", 20); STRING_LITERAL(T839829468_81, "invalid apostrophe type parameter index", 39); STRING_LITERAL(T839829468_82, "<", 1); STRING_LITERAL(T839829468_83, " COMMA ", 7); STRING_LITERAL(T839829468_84, "> ", 2); extern NIM_CONST TY274427 Cc_274413_2528170400; STRING_LITERAL(T839829468_85, " {$n", 4); STRING_LITERAL(T839829468_86, " {$n#TNimType* m_type;$n", 24); STRING_LITERAL(T839829468_87, " : public $1 {$n", 16); STRING_LITERAL(T839829468_88, " {$n $1 Sup;$n", 15); STRING_LITERAL(T839829468_89, "genRecordFieldsAux", 18); STRING_LITERAL(T839829468_90, "$1.$2", 5); STRING_LITERAL(T839829468_91, "S", 1); STRING_LITERAL(T839829468_92, "struct {", 8); STRING_LITERAL(T839829468_93, "} $1;$n", 7); STRING_LITERAL(T839829468_94, "genRecordFieldsAux(record case branch)", 38); STRING_LITERAL(T839829468_95, "union{$n$1} $2;$n", 17); STRING_LITERAL(T839829468_96, "mangleRecFieldName", 18); STRING_LITERAL(T839829468_97, "$1 $2[SEQ_DECL_SIZE];$n", 23); STRING_LITERAL(T839829468_98, "$1 $2:$3;$n", 11); STRING_LITERAL(T839829468_99, "genRecordFieldsAux()", 20); STRING_LITERAL(T839829468_100, "char dummy;$n", 13); STRING_LITERAL(T839829468_101, "};", 2); STRING_LITERAL(T839829468_102, "$1 $2 {$n", 9); STRING_LITERAL(T839829468_103, "$1 
Field$2;$n", 13); STRING_LITERAL(T839829468_104, "char dummy;", 11); STRING_LITERAL(T839829468_105, "Set", 3); STRING_LITERAL(T839829468_106, "typedef NU$2 $1;$n", 18); STRING_LITERAL(T839829468_107, "typedef NU8 $1[$2];$n", 21); STRING_LITERAL(T839829468_108, "getTypeDescAux(", 15); STRING_LITERAL(T839829468_109, "genProcParams", 13); STRING_LITERAL(T839829468_110, ", ", 2); STRING_LITERAL(T839829468_111, " ", 1); STRING_LITERAL(T839829468_112, ", NI $1Len$2", 12); STRING_LITERAL(T839829468_113, " Result", 7); STRING_LITERAL(T839829468_114, "void* ClEnv", 11); STRING_LITERAL(T839829468_115, "...", 3); STRING_LITERAL(T839829468_116, "void)", 5); STRING_LITERAL(T839829468_117, ")", 1); STRING_LITERAL(T839829468_118, "(", 1); STRING_LITERAL(T839829468_119, "$1($2, $3)$4", 12); STRING_LITERAL(T839829468_120, "proc has no result symbol", 25); STRING_LITERAL(T839829468_121, " register", 9); STRING_LITERAL(T839829468_122, " volatile", 9); STRING_LITERAL(T839829468_123, "$1 = $2;$n", 10); STRING_LITERAL(T839829468_124, "(*$1)", 5); STRING_LITERAL(T839829468_125, ";", 1); STRING_LITERAL(T839829468_126, "FR.s[$1].address = (void*)$3; FR.s[$1].typ = $4; FR.s[$1].name " "= $2;$n", 70); STRING_LITERAL(T839829468_127, "NTI$1", 5); STRING_LITERAL(T839829468_128, "(&", 2); STRING_LITERAL(T839829468_129, "TNimType", 8); STRING_LITERAL(T839829468_130, "TNimNode", 8); STRING_LITERAL(T839829468_131, "extern TNimType $1; /* $2 */$n", 30); STRING_LITERAL(T839829468_132, "0", 1); STRING_LITERAL(T839829468_133, "void*", 5); STRING_LITERAL(T839829468_134, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53); STRING_LITERAL(T839829468_135, "$1.flags = $2;$n", 16); STRING_LITERAL(T839829468_136, "TNimType $1; /* $2 */$n", 23); STRING_LITERAL(T839829468_137, "genTypeInfo(", 12); STRING_LITERAL(T839829468_138, "$1[$2]", 6); STRING_LITERAL(T839829468_139, "static TNimNode* $1[$2];$n", 26); STRING_LITERAL(T839829468_140, "$1[$2] = &$3;$n", 15); STRING_LITERAL(T839829468_141, "$1.kind = 
1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$" "n$1.name = \"Field$3\";$n", 86); STRING_LITERAL(T839829468_142, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45); STRING_LITERAL(T839829468_143, "$1.len = $2; $1.kind = 2;$n", 27); STRING_LITERAL(T839829468_144, "$1.node = &$2;$n", 16); STRING_LITERAL(T839829468_145, "#nimGCvisit((void*)$1, op);$n", 29); STRING_LITERAL(T839829468_146, "N_NIMCALL(void, $1)(void* p, NI op)", 35); STRING_LITERAL(T839829468_147, "$1 a;$n", 7); STRING_LITERAL(T839829468_148, "a = ($1)p;$n", 12); STRING_LITERAL(T839829468_149, "LOC", 3); STRING_LITERAL(T839829468_150, "$1 = ($2)0;$n", 13); STRING_LITERAL(T839829468_151, "<string.h>", 10); STRING_LITERAL(T839829468_152, "memset((void*)$1, 0, sizeof($2));$n", 35); STRING_LITERAL(T839829468_153, ".Sup", 4); STRING_LITERAL(T839829468_154, "$1.m_type = $2;$n", 17); STRING_LITERAL(T839829468_155, "#objectInit($1, $2);$n", 22); STRING_LITERAL(T839829468_156, "for ($1 = 0; $1 < $2->$3; $1++) {$n", 35); STRING_LITERAL(T839829468_157, "len", 3); STRING_LITERAL(T839829468_158, "Sup.len", 7); STRING_LITERAL(T839829468_159, "for ($1 = 0; $1 < $2; $1++) {$n", 31); STRING_LITERAL(T839829468_160, "}$n", 3); STRING_LITERAL(T839829468_161, "$1.Sup", 6); STRING_LITERAL(T839829468_162, "genTraverseProc", 15); STRING_LITERAL(T839829468_163, "switch ($1.$2) {$n", 18); STRING_LITERAL(T839829468_164, "case $1 ... 
$2:$n", 17); STRING_LITERAL(T839829468_165, "genLiteral: ty is nil", 21); STRING_LITERAL(T839829468_166, "(-2147483647 -1)", 16); STRING_LITERAL(T839829468_167, "IL64($1)", 8); STRING_LITERAL(T839829468_168, "(IL64(-9223372036854775807) - IL64(1))", 38); STRING_LITERAL(T839829468_169, "NIM_TRUE", 8); STRING_LITERAL(T839829468_170, "NIM_FALSE", 9); STRING_LITERAL(T839829468_171, "ULL", 3); STRING_LITERAL(T839829468_172, "(($1) $2)", 9); STRING_LITERAL(T839829468_173, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45); STRING_LITERAL(T839829468_174, "NIM_NIL", 7); STRING_LITERAL(T839829468_175, "((#NimStringDesc*) NIM_NIL)", 27); STRING_LITERAL(T839829468_176, "((#NimStringDesc*) &$1)", 23); STRING_LITERAL(T839829468_177, "STRING_LITERAL($1, $2, $3);$n", 29); STRING_LITERAL(T839829468_178, "((#NimStringDesc*) &$1$2)", 25); STRING_LITERAL(T839829468_179, "genLiteral(", 11); STRING_LITERAL(T839829468_180, "case $1:$n", 10); STRING_LITERAL(T839829468_181, "default:$n", 10); STRING_LITERAL(T839829468_182, "break;$n", 8); STRING_LITERAL(T839829468_183, "} $n", 4); STRING_LITERAL(T839829468_184, "genTraverseProc()", 17); STRING_LITERAL(T839829468_185, "$1.Field$2", 10); STRING_LITERAL(T839829468_186, "$1.ClEnv", 8); STRING_LITERAL(T839829468_187, "$1->data[$2]", 12); STRING_LITERAL(T839829468_188, "a", 1); STRING_LITERAL(T839829468_189, "(*a)", 4); STRING_LITERAL(T839829468_190, "$1 {$n$2$3$4}$n", 15); STRING_LITERAL(T839829468_191, "$1;$n", 5); STRING_LITERAL(T839829468_192, "$1.marker = $2;$n", 17); STRING_LITERAL(T839829468_193, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43); STRING_LITERAL(T839829468_194, "$1.offset = $2;$n", 17); STRING_LITERAL(T839829468_195, "NI $1;$n", 8); STRING_LITERAL(T839829468_196, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41); STRING_LITERAL(T839829468_197, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o" "ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127); STRING_LITERAL(T839829468_198, "$1.len 
= $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61); STRING_LITERAL(T839829468_199, "$1.flags = 1<<2;$n", 18); STRING_LITERAL(T839829468_200, "anonymous obj with discriminator", 32); STRING_LITERAL(T839829468_201, "NimDT_$1_$2", 11); STRING_LITERAL(T839829468_202, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107); STRING_LITERAL(T839829468_203, "TNimNode* $1[$2];$n", 19); STRING_LITERAL(T839829468_204, "genObjectFields; nkOfBranch broken", 34); STRING_LITERAL(T839829468_205, "genObjectFields(nkRecCase)", 26); STRING_LITERAL(T839829468_206, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n", 74); STRING_LITERAL(T839829468_207, "genObjectFields", 15); STRING_LITERAL(T839829468_208, "$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49); STRING_LITERAL(T839829468_209, "\011return $1;$n", 13); STRING_LITERAL(T839829468_210, "Result", 6); STRING_LITERAL(T839829468_211, "closure generation failed", 25); STRING_LITERAL(T839829468_212, "$1 = ($2) ClEnv;$n", 18); STRING_LITERAL(T839829468_213, "__declspec(noreturn) ", 21); STRING_LITERAL(T839829468_214, "__declspec(naked) ", 18); STRING_LITERAL(T839829468_215, "$N$1 {$n$2$3$4}$N$N", 19); STRING_LITERAL(T839829468_216, "$N$1 {$N", 8); STRING_LITERAL(T839829468_217, "struct {$1} GCFRAME;$n", 22); STRING_LITERAL(T839829468_218, "nimFrame", 8); STRING_LITERAL(T839829468_219, "VarSlot", 7); STRING_LITERAL(T839829468_220, "\011nimfrs($1, $2, $3, $4)$N", 25); STRING_LITERAL(T839829468_221, "\011nimfr($1, $2)$N", 16); STRING_LITERAL(T839829468_222, "\011#nimProfile();$n", 17); STRING_LITERAL(T839829468_223, "{", 1); STRING_LITERAL(T839829468_224, "\011}BeforeRet: ;$n", 16); STRING_LITERAL(T839829468_225, "if (((NU)&GCFRAME) < 4096) #nimGCFrame(&GCFRAME);$n", 51); STRING_LITERAL(T839829468_226, "\011#popFrame();$n", 15); STRING_LITERAL(T839829468_227, "}$N", 3); STRING_LITERAL(T839829468_228, "static void* $1;$n", 18); 
STRING_LITERAL(T839829468_229, "||", 2); STRING_LITERAL(T839829468_230, "($1 = #nimLoadLibrary((#NimStringDesc*) &$2))$n", 47); STRING_LITERAL(T839829468_231, "if (!($1)) #nimLoadLibraryError((#NimStringDesc*) &$2);$n", 57); STRING_LITERAL(T839829468_232, "if (!($1 = #nimLoadLibrary($2))) #nimLoadLibraryError($2);$n", 60); STRING_LITERAL(T839829468_233, "loadDynamicLib", 14); STRING_LITERAL(T839829468_234, "Dl_$1", 5); STRING_LITERAL(T839829468_235, "\011$1 = ($2) ($3$4));$n", 21); NIM_CONST TY204018 T839829468_236 = {((NimStringDesc*) &T839829468_10), ((NI) 535)} ; STRING_LITERAL(T839829468_237, "wrong index: ", 13); STRING_LITERAL(T839829468_238, "\011$1 = ($2) #nimGetProcAddr($3, $4);$n", 37); STRING_LITERAL(T839829468_239, "$2 $1;$n", 8); STRING_LITERAL(T839829468_240, "extern ", 7); STRING_LITERAL(T839829468_241, "NIM_THREADVAR ", 14); STRING_LITERAL(T839829468_242, " $1;$n", 6); STRING_LITERAL(T839829468_243, "cgsym: ", 7); STRING_LITERAL(T839829468_244, ": ", 2); STRING_LITERAL(T839829468_245, "extern $1 $2;$n", 15); STRING_LITERAL(T839829468_246, "extern \"C\" ", 11); STRING_LITERAL(T839829468_247, " __attribute__((naked))", 23); STRING_LITERAL(T839829468_248, " __attribute__((noreturn))", 26); STRING_LITERAL(T839829468_249, "#asgnRef((void**) $1, $2);$n", 28); STRING_LITERAL(T839829468_250, "#asgnRefNoCycle((void**) $1, $2);$n", 35); STRING_LITERAL(T839829468_251, "#unsureAsgnRef((void**) $1, $2);$n", 34); STRING_LITERAL(T839829468_252, "#genericSeqAssign($1, $2, $3);$n", 32); STRING_LITERAL(T839829468_253, "$1 = #copyString($2);$n", 23); STRING_LITERAL(T839829468_254, "$3 = $1; $1 = #copyStringRC1($2);$n", 35); STRING_LITERAL(T839829468_255, "if ($1) #nimGCunrefNoCycle($1);$n", 33); STRING_LITERAL(T839829468_256, "#unsureAsgnRef((void**) $1, #copyString($2));$n", 47); STRING_LITERAL(T839829468_257, ".", 1); STRING_LITERAL(T839829468_258, "ClEnv", 5); STRING_LITERAL(T839829468_259, "$1.ClPrc = $2.ClPrc;$n", 22); STRING_LITERAL(T839829468_260, "Field$1", 
7); STRING_LITERAL(T839829468_261, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($3));$n", 53); STRING_LITERAL(T839829468_262, "#genericShallowAssign((void*)$1, (void*)$2, $3);$n", 50); STRING_LITERAL(T839829468_263, "#genericAssign((void*)$1, (void*)$2, $3);$n", 43); STRING_LITERAL(T839829468_265, "compiler/ccgexprs.nim", 21); NIM_CONST TY204018 T839829468_264 = {((NimStringDesc*) &T839829468_265), ((NI) 320)} ; STRING_LITERAL(T839829468_266, "#genericAssignOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 60); STRING_LITERAL(T839829468_267, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($1[0])*$1Len0);$n", 63); STRING_LITERAL(T839829468_268, "memcpy((void*)$1, (NIM_CONST void*)$2, $3);$n", 45); STRING_LITERAL(T839829468_269, "genAssignment: ", 15); STRING_LITERAL(T839829468_270, "request to generate code for .compileTime proc: ", 48); STRING_LITERAL(T839829468_271, "expr: proc not init ", 20); STRING_LITERAL(T839829468_272, "NIM_CONST $1 $2 = $3;$n", 23); STRING_LITERAL(T839829468_273, "{$n", 3); STRING_LITERAL(T839829468_274, "0x$1,$n", 7); STRING_LITERAL(T839829468_275, "0x$1, ", 6); STRING_LITERAL(T839829468_276, "0x$1}$n", 7); STRING_LITERAL(T839829468_277, "{{$1, $1}", 9); STRING_LITERAL(T839829468_278, ", {", 3); STRING_LITERAL(T839829468_279, ",$n", 3); STRING_LITERAL(T839829468_280, "}", 1); STRING_LITERAL(T839829468_281, "NIM_CONST struct {$n #TGenericSeq Sup;$n $1 data[$2];$n} $3 =" " $4;$n", 69); STRING_LITERAL(T839829468_282, "(($1)&$2)", 9); STRING_LITERAL(T839829468_283, "$1,$n", 5); STRING_LITERAL(T839829468_284, "extern NIM_CONST $1 $2;$n", 25); STRING_LITERAL(T839829468_285, "expr: var not init ", 19); STRING_LITERAL(T839829468_286, "\011NimThreadVars* NimTV;$n", 24); STRING_LITERAL(T839829468_287, "\011NimTV = (NimThreadVars*) #GetThreadLocalVars();$n", 50); STRING_LITERAL(T839829468_288, "NimTV->", 7); STRING_LITERAL(T839829468_289, "expr: temp not init ", 20); STRING_LITERAL(T839829468_290, "expr: param not init ", 21); 
STRING_LITERAL(T839829468_291, "expr(", 5); STRING_LITERAL(T839829468_292, "); unknown symbol", 17); STRING_LITERAL(T839829468_293, "//", 2); STRING_LITERAL(T839829468_294, "#endb($1, $2);$n", 16); STRING_LITERAL(T839829468_295, "nimln($1, $2);$n", 16); STRING_LITERAL(T839829468_296, "LA", 2); STRING_LITERAL(T839829468_297, "if ($1) goto $2;$n", 18); STRING_LITERAL(T839829468_298, "if (!($1)) goto $2;$n", 21); STRING_LITERAL(T839829468_299, "$1: ;$n", 7); STRING_LITERAL(T839829468_300, "!($1)", 5); STRING_LITERAL(T839829468_301, "$1", 2); STRING_LITERAL(T839829468_302, "($3)((NU$2) ~($1))", 18); STRING_LITERAL(T839829468_303, "-($1)", 5); STRING_LITERAL(T839829468_304, "($1 > 0? ($1) : -($1))", 22); STRING_LITERAL(T839829468_305, "(($3)(NU)(NU8)($1))", 19); STRING_LITERAL(T839829468_306, "(($3)(NU64)(NU8)($1))", 21); STRING_LITERAL(T839829468_307, "(($3)(NU)(NU16)($1))", 20); STRING_LITERAL(T839829468_308, "(($3)(NU64)(NU16)($1))", 22); STRING_LITERAL(T839829468_309, "(($3)(NU64)(NU32)($1))", 22); STRING_LITERAL(T839829468_310, "(($3)(NU64)(NU)($1))", 20); STRING_LITERAL(T839829468_311, "(($3)(NU8)(NU)($1))", 19); STRING_LITERAL(T839829468_312, "(($3)(NU16)(NU)($1))", 20); STRING_LITERAL(T839829468_313, "(($3)(NU32)(NU64)($1))", 22); STRING_LITERAL(T839829468_314, "((double) ($1))", 15); STRING_LITERAL(T839829468_315, "float64ToInt32($1)", 18); STRING_LITERAL(T839829468_316, "float64ToInt64($1)", 18); NIM_CONST TY553655 unarithtab_553653_839829468 = {((NimStringDesc*) &T839829468_300), ((NimStringDesc*) &T839829468_301), ((NimStringDesc*) &T839829468_302), ((NimStringDesc*) &T839829468_301), ((NimStringDesc*) &T839829468_303), ((NimStringDesc*) &T839829468_304), ((NimStringDesc*) &T839829468_305), ((NimStringDesc*) &T839829468_306), ((NimStringDesc*) &T839829468_307), ((NimStringDesc*) &T839829468_308), ((NimStringDesc*) &T839829468_309), ((NimStringDesc*) &T839829468_310), ((NimStringDesc*) &T839829468_311), ((NimStringDesc*) &T839829468_312), ((NimStringDesc*) 
&T839829468_313), ((NimStringDesc*) &T839829468_314), ((NimStringDesc*) &T839829468_314), ((NimStringDesc*) &T839829468_315), ((NimStringDesc*) &T839829468_316)} ; STRING_LITERAL(T839829468_317, "if ($1 == $2) #raiseOverflow();$n", 33); STRING_LITERAL(T839829468_318, "((NI$2)-($1))", 13); NIM_CONST TY552642 opr_552640_839829468 = {((NimStringDesc*) &T839829468_318), ((NimStringDesc*) &T839829468_303), ((NimStringDesc*) &T839829468_304)} ; STRING_LITERAL(T839829468_319, "(($4)($2) $1 ($4)($3))", 22); STRING_LITERAL(T839829468_320, "+", 1); STRING_LITERAL(T839829468_321, "-", 1); STRING_LITERAL(T839829468_322, "/", 1); NIM_CONST TY557764 opr_557762_839829468 = {((NimStringDesc*) &T839829468_320), ((NimStringDesc*) &T839829468_321), ((NimStringDesc*) &T839829468_53), ((NimStringDesc*) &T839829468_322)} ; STRING_LITERAL(T839829468_323, "#nanCheck($1);$n", 16); STRING_LITERAL(T839829468_324, "#infCheck($1);$n", 16); STRING_LITERAL(T839829468_325, "(($4)($1) + ($4)($2))", 21); STRING_LITERAL(T839829468_326, "(($4)($1) - ($4)($2))", 21); STRING_LITERAL(T839829468_327, "(($4)($1) * ($4)($2))", 21); STRING_LITERAL(T839829468_328, "(($4)($1) / ($4)($2))", 21); STRING_LITERAL(T839829468_329, "($4)((NU$3)($1) >> (NU$3)($2))", 30); STRING_LITERAL(T839829468_330, "($4)((NU$3)($1) << (NU$3)($2))", 30); STRING_LITERAL(T839829468_331, "($4)($1 & $2)", 13); STRING_LITERAL(T839829468_332, "($4)($1 | $2)", 13); STRING_LITERAL(T839829468_333, "($4)($1 ^ $2)", 13); STRING_LITERAL(T839829468_334, "(($1 <= $2) ? $1 : $2)", 22); STRING_LITERAL(T839829468_335, "(($1 >= $2) ? 
$1 : $2)", 22); STRING_LITERAL(T839829468_336, "($4)((NU$3)($1) + (NU$3)($2))", 29); STRING_LITERAL(T839829468_337, "($4)((NU$3)($1) - (NU$3)($2))", 29); STRING_LITERAL(T839829468_338, "($4)((NU$3)($1) * (NU$3)($2))", 29); STRING_LITERAL(T839829468_339, "($4)((NU$3)($1) / (NU$3)($2))", 29); STRING_LITERAL(T839829468_340, "($4)((NU$3)($1) % (NU$3)($2))", 29); STRING_LITERAL(T839829468_341, "($1 == $2)", 10); STRING_LITERAL(T839829468_342, "($1 <= $2)", 10); STRING_LITERAL(T839829468_343, "($1 < $2)", 9); STRING_LITERAL(T839829468_344, "((NU$3)($1) <= (NU$3)($2))", 26); STRING_LITERAL(T839829468_345, "((NU$3)($1) < (NU$3)($2))", 25); STRING_LITERAL(T839829468_346, "((NU64)($1) <= (NU64)($2))", 26); STRING_LITERAL(T839829468_347, "((NU64)($1) < (NU64)($2))", 25); STRING_LITERAL(T839829468_348, "((NU8)($1) == (NU8)($2))", 24); STRING_LITERAL(T839829468_349, "((NU8)($1) <= (NU8)($2))", 24); STRING_LITERAL(T839829468_350, "((NU8)($1) < (NU8)($2))", 23); STRING_LITERAL(T839829468_351, "($1 != $2)", 10); NIM_CONST TY552828 binarithtab_552826_839829468 = {((NimStringDesc*) &T839829468_325), ((NimStringDesc*) &T839829468_326), ((NimStringDesc*) &T839829468_327), ((NimStringDesc*) &T839829468_328), ((NimStringDesc*) &T839829468_329), ((NimStringDesc*) &T839829468_330), ((NimStringDesc*) &T839829468_331), ((NimStringDesc*) &T839829468_332), ((NimStringDesc*) &T839829468_333), ((NimStringDesc*) &T839829468_334), ((NimStringDesc*) &T839829468_335), ((NimStringDesc*) &T839829468_334), ((NimStringDesc*) &T839829468_335), ((NimStringDesc*) &T839829468_336), ((NimStringDesc*) &T839829468_337), ((NimStringDesc*) &T839829468_338), ((NimStringDesc*) &T839829468_339), ((NimStringDesc*) &T839829468_340), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_344), ((NimStringDesc*) &T839829468_345), 
((NimStringDesc*) &T839829468_346), ((NimStringDesc*) &T839829468_347), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_348), ((NimStringDesc*) &T839829468_349), ((NimStringDesc*) &T839829468_350), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_351)} ; STRING_LITERAL(T839829468_352, "($1.ClPrc == $2.ClPrc && $1.ClEnv == $2.ClEnv)", 46); STRING_LITERAL(T839829468_353, "($#)($# + $#)", 13); STRING_LITERAL(T839829468_354, "($#)($# - $#)", 13); STRING_LITERAL(T839829468_355, "($#)($# * $#)", 13); STRING_LITERAL(T839829468_356, "($#)($# / $#)", 13); STRING_LITERAL(T839829468_357, "($#)($# % $#)", 13); NIM_CONST TY552281 opr_552279_839829468 = {((NimStringDesc*) &T839829468_353), ((NimStringDesc*) &T839829468_354), ((NimStringDesc*) &T839829468_355), ((NimStringDesc*) &T839829468_356), ((NimStringDesc*) &T839829468_357), ((NimStringDesc*) &T839829468_353), ((NimStringDesc*) &T839829468_354)} ; STRING_LITERAL(T839829468_358, "((NU8)($1))", 11); STRING_LITERAL(T839829468_359, "if ($1 < $2 || $1 > $3) #raiseOverflow();$n", 43); STRING_LITERAL(T839829468_360, "$# = #addInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_361, "$# = #subInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_362, "$# = #mulInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_363, "$# = #divInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_364, "$# = #modInt64($#, $#);$n", 25); NIM_CONST TY552281 prc64_552274_839829468 = {((NimStringDesc*) &T839829468_360), ((NimStringDesc*) &T839829468_361), ((NimStringDesc*) &T839829468_362), ((NimStringDesc*) &T839829468_363), ((NimStringDesc*) &T839829468_364), ((NimStringDesc*) &T839829468_360), ((NimStringDesc*) &T839829468_361)} ; 
STRING_LITERAL(T839829468_365, "$# = #addInt($#, $#);$n", 23); STRING_LITERAL(T839829468_366, "$# = #subInt($#, $#);$n", 23); STRING_LITERAL(T839829468_367, "$# = #mulInt($#, $#);$n", 23); STRING_LITERAL(T839829468_368, "$# = #divInt($#, $#);$n", 23); STRING_LITERAL(T839829468_369, "$# = #modInt($#, $#);$n", 23); NIM_CONST TY552281 prc_552269_839829468 = {((NimStringDesc*) &T839829468_365), ((NimStringDesc*) &T839829468_366), ((NimStringDesc*) &T839829468_367), ((NimStringDesc*) &T839829468_368), ((NimStringDesc*) &T839829468_369), ((NimStringDesc*) &T839829468_365), ((NimStringDesc*) &T839829468_366)} ; STRING_LITERAL(T839829468_370, "($#)($#)", 8); STRING_LITERAL(T839829468_371, "#reprInt((NI64)$1)", 18); STRING_LITERAL(T839829468_372, "#reprFloat($1)", 14); STRING_LITERAL(T839829468_373, "#reprBool($1)", 13); STRING_LITERAL(T839829468_374, "#reprChar($1)", 13); STRING_LITERAL(T839829468_375, "#reprEnum((NI)$1, $2)", 21); STRING_LITERAL(T839829468_376, "#reprStr($1)", 12); STRING_LITERAL(T839829468_377, "#reprSet($1, $2)", 16); STRING_LITERAL(T839829468_378, "$1, $1Len0", 10); STRING_LITERAL(T839829468_379, "$1->data, $1->$2", 16); STRING_LITERAL(T839829468_380, "$1, $2", 6); STRING_LITERAL(T839829468_381, "genRepr()", 9); STRING_LITERAL(T839829468_382, "#reprOpenArray($1, $2)", 22); STRING_LITERAL(T839829468_383, "#reprAny($1, $2)", 16); STRING_LITERAL(T839829468_384, "\'repr\' doesn\'t support \'void\' type", 34); STRING_LITERAL(T839829468_385, "($1 - 1)", 8); STRING_LITERAL(T839829468_386, "#subInt($1, 1)", 14); STRING_LITERAL(T839829468_387, "binaryStmt", 10); STRING_LITERAL(T839829468_388, "$1 += $2;$n", 11); STRING_LITERAL(T839829468_389, "$1 -= $2;$n", 11); NIM_CONST TY558052 opr_558050_839829468 = {((NimStringDesc*) &T839829468_388), ((NimStringDesc*) &T839829468_389)} ; NIM_CONST TY558052 fun64_558055_839829468 = {((NimStringDesc*) &T839829468_360), ((NimStringDesc*) &T839829468_361)} ; NIM_CONST TY558052 fun_558060_839829468 = {((NimStringDesc*) 
&T839829468_365), ((NimStringDesc*) &T839829468_366)} ; STRING_LITERAL(T839829468_390, "#appendChar($1, $2);$n", 22); STRING_LITERAL(T839829468_391, "$1->$2 + ", 9); STRING_LITERAL(T839829468_392, "#appendString($1, $2);$n", 24); STRING_LITERAL(T839829468_393, "$1 = #rawNewString($2$3);$n", 27); STRING_LITERAL(T839829468_394, "$1 = #addChar($1, $2);$n", 24); STRING_LITERAL(T839829468_395, "$1 = #resizeString($1, $2$3);$n", 31); STRING_LITERAL(T839829468_396, "$1 = ($2) #incrSeqV2(&($1)->Sup, sizeof($3));$n", 47); STRING_LITERAL(T839829468_397, "$1 = ($2) #incrSeqV2($1, sizeof($3));$n", 39); STRING_LITERAL(T839829468_398, "$1->data[$1->$2]", 16); STRING_LITERAL(T839829468_399, "++$1->$2;$n", 11); STRING_LITERAL(T839829468_400, "(($1) && ($1)->$2 == 0)", 23); STRING_LITERAL(T839829468_401, "#eqStrings($1, $2)", 18); STRING_LITERAL(T839829468_402, "(#cmpStrings($1, $2) <= 0)", 26); STRING_LITERAL(T839829468_403, "(#cmpStrings($1, $2) < 0)", 25); STRING_LITERAL(T839829468_404, "$1.ClPrc == 0", 13); STRING_LITERAL(T839829468_405, "$1 == 0", 7); STRING_LITERAL(T839829468_406, "#nimIntToStr($1)", 16); STRING_LITERAL(T839829468_407, "#nimInt64ToStr($1)", 18); STRING_LITERAL(T839829468_408, "#nimBoolToStr($1)", 17); STRING_LITERAL(T839829468_409, "#nimCharToStr($1)", 17); STRING_LITERAL(T839829468_410, "#nimFloatToStr($1)", 18); STRING_LITERAL(T839829468_411, "#cstrToNimstr($1)", 17); STRING_LITERAL(T839829468_412, "no \'of\' operator available for pure objects", 43); STRING_LITERAL(T839829468_413, "(($1) && ($2))", 14); STRING_LITERAL(T839829468_414, "$1.m_type == $2", 15); STRING_LITERAL(T839829468_415, "Nim_OfCheck_CACHE", 17); STRING_LITERAL(T839829468_416, "static TNimType* $#[2];$n", 25); STRING_LITERAL(T839829468_417, "#isObjWithCache($#.m_type, $#, $#)", 34); STRING_LITERAL(T839829468_418, "($1)", 4); STRING_LITERAL(T839829468_419, "sizeof($1)", 10); STRING_LITERAL(T839829468_420, "if ($1) #nimGCunref($1);$n", 26); STRING_LITERAL(T839829468_421, "($1) #newObjRC1($2, 
$3)", 23); STRING_LITERAL(T839829468_422, "($1) #newObj($2, $3)", 20); STRING_LITERAL(T839829468_423, "$1->finalizer = (void*)$2;$n", 28); STRING_LITERAL(T839829468_424, "($1) #newObj($2, sizeof($3))", 28); STRING_LITERAL(T839829468_425, "($1) #newSeqRC1($2, $3)", 23); STRING_LITERAL(T839829468_426, "($1) #newSeq($2, $3)", 20); STRING_LITERAL(T839829468_427, "($1)#nimNewSeqOfCap($2, $3)", 27); STRING_LITERAL(T839829468_428, "((NI)sizeof($1))", 16); STRING_LITERAL(T839829468_429, "(*($1*) ($2))", 13); STRING_LITERAL(T839829468_430, "(($1) ($2))", 11); STRING_LITERAL(T839829468_431, "($1Len0-1)", 10); STRING_LITERAL(T839829468_432, "$1Len0", 6); STRING_LITERAL(T839829468_433, "($1 ? (strlen($1)-1) : -1)", 26); STRING_LITERAL(T839829468_434, "($1 ? strlen($1) : 0)", 21); STRING_LITERAL(T839829468_435, "($1 ? ($1->Sup.len-1) : -1)", 27); STRING_LITERAL(T839829468_436, "($1 ? $1->Sup.len : 0)", 22); STRING_LITERAL(T839829468_437, "($1 ? ($1->len-1) : -1)", 23); STRING_LITERAL(T839829468_438, "($1 ? 
$1->len : 0)", 18); STRING_LITERAL(T839829468_439, "genArrayLen()", 13); STRING_LITERAL(T839829468_440, "($1->Sup.len)", 13); STRING_LITERAL(T839829468_441, "$1->len", 7); STRING_LITERAL(T839829468_442, "unaryStmt", 9); STRING_LITERAL(T839829468_443, "#nimGCref($1);$n", 16); STRING_LITERAL(T839829468_444, "#nimGCunref($1);$n", 18); STRING_LITERAL(T839829468_445, "$1 = #setLengthStr($1, $2);$n", 29); STRING_LITERAL(T839829468_446, "$1 = ($3) #setLengthSeq(&($1)->Sup, sizeof($4), $2);$n", 54); STRING_LITERAL(T839829468_447, "$1 = ($3) #setLengthSeq($1, sizeof($4), $2);$n", 46); STRING_LITERAL(T839829468_448, "($1- $2)", 8); STRING_LITERAL(T839829468_449, "$1 |= ((", 8); STRING_LITERAL(T839829468_450, ")1)<<(($2)%(sizeof(", 19); STRING_LITERAL(T839829468_451, ")*8));$n", 8); STRING_LITERAL(T839829468_452, "$1 &= ~(((", 10); STRING_LITERAL(T839829468_453, ")1) << (($2) % (sizeof(", 23); STRING_LITERAL(T839829468_454, ")*8)));$n", 9); STRING_LITERAL(T839829468_455, "#countBits32($1)", 16); STRING_LITERAL(T839829468_456, "#countBits64($1)", 16); STRING_LITERAL(T839829468_457, "(($1 & ~ $2 ==0)&&($1 != $2))", 29); STRING_LITERAL(T839829468_458, "(($1 & ~ $2)==0)", 16); STRING_LITERAL(T839829468_459, "($1 & $2)", 9); STRING_LITERAL(T839829468_460, "($1 | $2)", 9); STRING_LITERAL(T839829468_461, "($1 & ~ $2)", 11); STRING_LITERAL(T839829468_462, "($1 ^ $2)", 9); STRING_LITERAL(T839829468_463, "fewCmps", 7); STRING_LITERAL(T839829468_464, "$1 >= $2 && $1 <= $3", 20); STRING_LITERAL(T839829468_465, "$1 == $2", 8); STRING_LITERAL(T839829468_466, " || ", 4); STRING_LITERAL(T839829468_467, "(($1 &(1U<<((NU)($2)&7U)))!=0)", 30); STRING_LITERAL(T839829468_468, "(($1 &(1U<<((NU)($2)&15U)))!=0)", 31); STRING_LITERAL(T839829468_469, "(($1 &(1U<<((NU)($2)&31U)))!=0)", 31); STRING_LITERAL(T839829468_470, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36); STRING_LITERAL(T839829468_471, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43); STRING_LITERAL(T839829468_472, "genSetOp()", 10); 
STRING_LITERAL(T839829468_473, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34); STRING_LITERAL(T839829468_474, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36); STRING_LITERAL(T839829468_475, "#cardSet($1, ", 13); STRING_LITERAL(T839829468_476, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == " "0);$n if (!$3) break;}$n", 88); STRING_LITERAL(T839829468_477, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == " "0);$n if (!$3) break;}$nif ($3) $3 = (memcmp($4, $5, $2) != 0);" "$n", 129); STRING_LITERAL(T839829468_478, "|", 1); STRING_LITERAL(T839829468_479, "& ~", 3); STRING_LITERAL(T839829468_480, "^", 1); NIM_CONST TY557428 lookupopr_557426_839829468 = {((NimStringDesc*) &T839829468_476), ((NimStringDesc*) &T839829468_477), ((NimStringDesc*) &T839829468_52), ((NimStringDesc*) &T839829468_478), ((NimStringDesc*) &T839829468_479), ((NimStringDesc*) &T839829468_480)} ; STRING_LITERAL(T839829468_481, "(memcmp($1, $2, ", 16); STRING_LITERAL(T839829468_482, ")==0)", 5); STRING_LITERAL(T839829468_483, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60); STRING_LITERAL(T839829468_484, "genSetOp", 8); STRING_LITERAL(T839829468_485, "$1->data", 8); STRING_LITERAL(T839829468_486, "($1)+($2), ($3)-($2)+1", 22); STRING_LITERAL(T839829468_487, "(*$1)->data+($2), ($3)-($2)+1", 29); STRING_LITERAL(T839829468_488, "$1->data+($2), ($3)-($2)+1", 26); STRING_LITERAL(T839829468_489, "openArrayLoc: ", 14); STRING_LITERAL(T839829468_490, "", 0); STRING_LITERAL(T839829468_491, "(*$1)->data, (*$1)->$2", 22); STRING_LITERAL(T839829468_492, "$1.ClPrc($3$1.ClEnv)", 20); STRING_LITERAL(T839829468_493, "$1.ClEnv? 
$1.ClPrc($3$1.ClEnv):(($4)($1.ClPrc))($2)", 51); STRING_LITERAL(T839829468_494, "$1 = 0;$n", 9); STRING_LITERAL(T839829468_495, "#chckNil((void*)$1);$n", 22); STRING_LITERAL(T839829468_496, "#genericReset((void*)$1, $2);$n", 31); STRING_LITERAL(T839829468_497, ";$n", 3); STRING_LITERAL(T839829468_499, "compiler/ccgcalls.nim", 21); NIM_CONST TY204018 T839829468_498 = {((NimStringDesc*) &T839829468_499), ((NI) 423)} ; static NIM_CONST char136Set T839829468_500 = { 0x00, 0x00, 0x00, 0x00, 0x88, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} ; STRING_LITERAL(T839829468_501, "wrong argument count", 20); STRING_LITERAL(T839829468_502, "call expression expected for C++ pattern", 40); NIM_CONST TY204018 T839829468_503 = {((NimStringDesc*) &T839829468_499), ((NI) 328)} ; STRING_LITERAL(T839829468_504, "->", 2); STRING_LITERAL(T839829468_505, ");$n", 4); STRING_LITERAL(T839829468_506, "[", 1); NIM_CONST TY204018 T839829468_507 = {((NimStringDesc*) &T839829468_499), ((NI) 472)} ; STRING_LITERAL(T839829468_508, "varargs for objective C method?", 31); STRING_LITERAL(T839829468_509, "Result: ", 8); STRING_LITERAL(T839829468_510, "];$n", 4); STRING_LITERAL(T839829468_511, "]", 1); NIM_CONST TY204018 T839829468_512 = {((NimStringDesc*) &T839829468_265), ((NI) 925)} ; STRING_LITERAL(T839829468_513, "<stdio.h>", 9); STRING_LITERAL(T839829468_514, ", \"nil\"", 7); STRING_LITERAL(T839829468_515, ", $1? 
($1)->data:\"nil\"", 22); STRING_LITERAL(T839829468_516, "printf($1$2);$n", 15); STRING_LITERAL(T839829468_517, "%s", 2); STRING_LITERAL(T839829468_518, "fflush(stdout);$n", 17); STRING_LITERAL(T839829468_519, "#genericDeepCopy((void*)$1, (void*)$2, $3);$n", 45); STRING_LITERAL(T839829468_520, "#genericSeqDeepCopy($1, $2, $3);$n", 34); STRING_LITERAL(T839829468_521, "#genericDeepCopyOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 62); STRING_LITERAL(T839829468_522, "genDeepCopy: ", 13); STRING_LITERAL(T839829468_523, "genMagicExpr: ", 14); STRING_LITERAL(T839829468_524, "static NIM_CONST $1 $2 = $3;$n", 30); STRING_LITERAL(T839829468_525, "memset($1, 0, sizeof($1));$n", 28); STRING_LITERAL(T839829468_526, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1" ")&7U));$n", 72); STRING_LITERAL(T839829468_527, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40); STRING_LITERAL(T839829468_528, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=((", 39); STRING_LITERAL(T839829468_529, ")(1)<<(($1)%(sizeof(", 20); STRING_LITERAL(T839829468_530, "$1 |=((", 7); STRING_LITERAL(T839829468_531, ")(1)<<(($2)%(sizeof(", 20); STRING_LITERAL(T839829468_532, "genCheckedRecordField", 21); STRING_LITERAL(T839829468_533, "genObjConstr", 12); STRING_LITERAL(T839829468_534, "if ($1) #raiseFieldError(((#NimStringDesc*) &$2));$n", 52); STRING_LITERAL(T839829468_535, "if (!($1)) #raiseFieldError(((#NimStringDesc*) &$2));$n", 55); STRING_LITERAL(T839829468_536, "LOC$1.source", 12); STRING_LITERAL(T839829468_537, "union { $1 source; $2 dest; } LOC$3;$n", 38); STRING_LITERAL(T839829468_538, "LOC$#.dest", 10); STRING_LITERAL(T839829468_539, "if ((NU)($1) > (NU)($2)) #raiseIndexError();$n", 46); STRING_LITERAL(T839829468_540, "if ($1 < $2 || $1 > $3) #raiseIndexError();$n", 45); STRING_LITERAL(T839829468_541, "$1[($2)- $3]", 12); STRING_LITERAL(T839829468_542, "if ((NU)($1) >= (NU)($2Len0)) #raiseIndexError();$n", 51); STRING_LITERAL(T839829468_543, "if ((NU)($1) > (NU)($2->$3)) 
#raiseIndexError();$n", 50); STRING_LITERAL(T839829468_544, "if ((NU)($1) >= (NU)($2->$3)) #raiseIndexError();$n", 51); STRING_LITERAL(T839829468_545, "genTupleElem", 12); STRING_LITERAL(T839829468_546, ".Field$1", 8); STRING_LITERAL(T839829468_547, "expr(nkBracketExpr, ", 20); STRING_LITERAL(T839829468_548, "genDeref ", 9); STRING_LITERAL(T839829468_549, "genRecordFieldAux", 17); STRING_LITERAL(T839829468_550, "genRecordField 3", 16); STRING_LITERAL(T839829468_551, ".$1", 3); STRING_LITERAL(T839829468_552, "} $1: ;$n", 9); STRING_LITERAL(T839829468_553, "FR.len-=$1;$n", 13); STRING_LITERAL(T839829468_554, "FR.len+=$1;$n", 13); STRING_LITERAL(T839829468_555, "if (!$1) goto $2;$n", 19); STRING_LITERAL(T839829468_556, "goto $1;$n", 10); STRING_LITERAL(T839829468_557, "genIf()", 7); STRING_LITERAL(T839829468_558, "->Sup", 5); STRING_LITERAL(T839829468_559, "$1 = &$2;$n", 11); STRING_LITERAL(T839829468_560, "if ($1) #chckObj($2.m_type, $3);$n", 34); STRING_LITERAL(T839829468_561, "#chckObj($1.m_type, $2);$n", 26); STRING_LITERAL(T839829468_562, "(($1)#$5($2, $3, $4))", 21); STRING_LITERAL(T839829468_563, "chckRangeF", 10); STRING_LITERAL(T839829468_564, "chckRange64", 11); STRING_LITERAL(T839829468_565, "chckRange", 9); STRING_LITERAL(T839829468_566, "CNSTCLOSURE", 11); STRING_LITERAL(T839829468_567, "closure to closure created", 26); STRING_LITERAL(T839829468_568, "$1.ClPrc = $2; $1.ClEnv = $3;$n", 31); STRING_LITERAL(T839829468_569, "while (1) {$n", 13); STRING_LITERAL(T839829468_570, "case statement must be exhaustive for computed goto", 51); STRING_LITERAL(T839829468_571, "case statement has too many cases for computed goto", 51); STRING_LITERAL(T839829468_572, "case statement has to start at 0 for computed goto", 50); STRING_LITERAL(T839829468_573, "no case statement found for computed goto", 41); STRING_LITERAL(T839829468_574, "TMP$1", 5); STRING_LITERAL(T839829468_575, "static void* $#[$#] = {", 23); STRING_LITERAL(T839829468_576, "&&TMP$#, ", 9); 
STRING_LITERAL(T839829468_577, "&&TMP$#};$n", 11); STRING_LITERAL(T839829468_578, "goto *$#[$#];$n", 15); STRING_LITERAL(T839829468_579, "range notation not available for computed goto", 46); STRING_LITERAL(T839829468_580, "TMP$#:$n", 8); STRING_LITERAL(T839829468_581, "#nimProfile();$n", 16); STRING_LITERAL(T839829468_582, "\'goto\' target must be a literal value", 37); STRING_LITERAL(T839829468_583, "goto NIMSTATE_$#;$n", 19); STRING_LITERAL(T839829468_584, "$1 = ($2*) #nimGetProcAddr($3, $4);$n", 37); STRING_LITERAL(T839829468_585, "$2* $1;$n", 9); STRING_LITERAL(T839829468_586, "#dbgRegisterGlobal($1, &$2, $3);$n", 34); STRING_LITERAL(T839829468_587, "#nimGCvisit((void*)$1, 0);$n", 28); STRING_LITERAL(T839829468_588, "N_NIMCALL(void, $1)(void)", 25); STRING_LITERAL(T839829468_589, "#nimRegisterGlobalMarker($1);$n", 31); STRING_LITERAL(T839829468_590, "$#($#);$n", 9); STRING_LITERAL(T839829468_591, "$# = $#;$n", 10); STRING_LITERAL(T839829468_592, "genVarTuple", 11); STRING_LITERAL(T839829468_593, "genConstStmt", 12); STRING_LITERAL(T839829468_594, "for statement not eliminated", 28); STRING_LITERAL(T839829468_595, "if (#eqStrings($1, $2)) goto $3;$n", 34); STRING_LITERAL(T839829468_596, "switch (#hashString($1) & $2) {$n", 33); STRING_LITERAL(T839829468_597, "case $1: $n$2break;$n", 21); STRING_LITERAL(T839829468_598, "goto LA$1;$n", 12); STRING_LITERAL(T839829468_599, "LA$1: ;$n", 9); STRING_LITERAL(T839829468_600, "if ($1 >= $2 && $1 <= $3) goto $4;$n", 36); STRING_LITERAL(T839829468_601, "if ($1 == $2) goto $3;$n", 24); STRING_LITERAL(T839829468_602, "NIMSTATE_$#:$n", 14); STRING_LITERAL(T839829468_603, "switch ($1) {$n", 15); STRING_LITERAL(T839829468_604, "default: __assume(0);$n", 23); STRING_LITERAL(T839829468_605, "#popSafePoint();$n", 18); STRING_LITERAL(T839829468_606, "#popCurrentException();$n", 25); STRING_LITERAL(T839829468_607, "if ($1.status != 0) #popCurrentException();$n", 45); STRING_LITERAL(T839829468_608, "goto BeforeRet;$n", 17); 
STRING_LITERAL(T839829468_609, "no loop to break", 16); STRING_LITERAL(T839829468_610, "extern $1", 9); STRING_LITERAL(T839829468_611, "#FieldDiscriminantCheck((NI)(NU)($1), (NI)(NU)($2), $3, $4);$n", 62); STRING_LITERAL(T839829468_612, "genAsmOrEmitStmt()", 18); STRING_LITERAL(T839829468_613, "\"", 1); STRING_LITERAL(T839829468_614, "\\n\"\012", 4); STRING_LITERAL(T839829468_615, "Exception", 9); STRING_LITERAL(T839829468_616, "E_Base", 6); STRING_LITERAL(T839829468_617, "try {$n", 7); STRING_LITERAL(T839829468_618, "} catch (NimException& $1) {$n", 30); STRING_LITERAL(T839829468_619, "#setFrame((TFrame*)&FR);$n", 26); STRING_LITERAL(T839829468_620, "else ", 5); STRING_LITERAL(T839829468_621, "#isObj($1.exp->m_type, $2)", 26); STRING_LITERAL(T839829468_622, "if ($1) ", 8); STRING_LITERAL(T839829468_623, "throw;$n", 8); STRING_LITERAL(T839829468_624, "<setjmp.h>", 10); STRING_LITERAL(T839829468_625, "#TSafePoint $1;$n", 17); STRING_LITERAL(T839829468_626, "#pushSafePoint(&$1);$n", 22); STRING_LITERAL(T839829468_627, "nimStdSetjmp", 12); STRING_LITERAL(T839829468_628, "$1.status = setjmp($1.context);$n", 33); STRING_LITERAL(T839829468_629, "nimSigSetjmp", 12); STRING_LITERAL(T839829468_630, "$1.status = sigsetjmp($1.context, 0);$n", 39); STRING_LITERAL(T839829468_631, "nimRawSetjmp", 12); STRING_LITERAL(T839829468_632, "$1.status = _setjmp($1.context);$n", 34); STRING_LITERAL(T839829468_633, "if ($1.status == 0) {$n", 23); STRING_LITERAL(T839829468_634, "else {$n", 8); STRING_LITERAL(T839829468_635, "else", 4); STRING_LITERAL(T839829468_636, "$1.status = 0;$n", 16); STRING_LITERAL(T839829468_637, "#isObj(#getCurrentException()->Sup.m_type, $1)", 46); STRING_LITERAL(T839829468_638, "#isObj(#getCurrentException()->m_type, $1)", 42); STRING_LITERAL(T839829468_639, "if ($1) {$n", 11); STRING_LITERAL(T839829468_640, "if ($1.status != 0) #reraiseException();$n", 42); STRING_LITERAL(T839829468_641, "#raiseException((#Exception*)$1, $2);$n", 39); 
STRING_LITERAL(T839829468_642, "#reraiseException();$n", 22); STRING_LITERAL(T839829468_643, "/*TYPESECTION*/", 15); STRING_LITERAL(T839829468_644, "/*VARSECTION*/", 14); STRING_LITERAL(T839829468_645, "/*INCLUDESECTION*/", 18); STRING_LITERAL(T839829468_646, "bp", 2); STRING_LITERAL(T839829468_647, "#dbgRegisterBreakpoint($1, (NCSTRING)$2, (NCSTRING)$3);$n", 57); STRING_LITERAL(T839829468_648, "#dbgRegisterWatchpoint($1, (NCSTRING)$2, $3);$n", 47); STRING_LITERAL(T839829468_649, "#pragma omp parallel for $4$nfor ($1 = $2; $1 <= $3; ++$1)", 58); STRING_LITERAL(T839829468_651, "compiler/ccgstmts.nim", 21); NIM_CONST TY204018 T839829468_650 = {((NimStringDesc*) &T839829468_651), ((NI) 145)} ; STRING_LITERAL(T839829468_652, "STATE$1: ;$n", 12); STRING_LITERAL(T839829468_653, "case -1: goto BeforeRet;$n", 26); STRING_LITERAL(T839829468_654, "case $1: goto STATE$1;$n", 24); STRING_LITERAL(T839829468_655, "if (((NI*) $1)[0] < 0) break;$n", 31); STRING_LITERAL(T839829468_656, "if ((((NI*) $1.ClEnv)[0]) < 0) break;$n", 39); STRING_LITERAL(T839829468_657, "); unknown node kind", 20); NIM_CONST TY204018 T839829468_658 = {((NimStringDesc*) &T839829468_651), ((NI) 1122)} ; STRING_LITERAL(T839829468_659, "Init000", 7); STRING_LITERAL(T839829468_660, "DatInit000", 10); STRING_LITERAL(T839829468_661, "NIM_EXTERNC N_NOINLINE(void, $1)(void);$N", 41); STRING_LITERAL(T839829468_662, "\011$1();$N", 8); STRING_LITERAL(T839829468_663, "N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDECL(void, NimMa" "in)(void) {$N\011void (*volatile inner)();$N\011PreMain();$N\011inner = N" "imMainInner;$N$2\011(*inner)();$N}$N$N", 162); STRING_LITERAL(T839829468_664, "N_STDCALL(int, WinMain)(HINSTANCE hCurInstance, $N " " HINSTANCE hPrevInstance, $N LP" "STR lpCmdLine, int nCmdShow) {$N\011NimMain();$N\011return nim_program" "_result;$N}$N$N", 206); STRING_LITERAL(T839829468_665, "N_LIB_EXPORT N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDEC" "L(void, NimMain)(void) {$N\011void (*volatile 
inner)();$N\011PreMain()" ";$N\011inner = NimMainInner;$N$2\011(*inner)();$N}$N$N", 175); STRING_LITERAL(T839829468_666, "BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fwdreason, $N " " LPVOID lpvReserved) {$N\011if(fwdreason == DLL_PROC" "ESS_ATTACH) {$N\011NimMain();$N}$N\011return 1;$N}$N$N", 175); STRING_LITERAL(T839829468_667, "<windows.h>", 11); STRING_LITERAL(T839829468_668, "void NIM_POSIX_INIT NimMainInit(void) {$N\011NimMain();$N}$N$N", 59); STRING_LITERAL(T839829468_669, "int cmdCount;$Nchar** cmdLine;$Nchar** gEnv;$NN_CDECL(void, Nim" "MainInner)(void) {$N$1}$N$NN_CDECL(void, NimMain)(void) {$N\011void" " (*volatile inner)();$N\011PreMain();$N\011inner = NimMainInner;$N$2\011(" "*inner)();$N}$N$N", 208); STRING_LITERAL(T839829468_670, "int main(void) {$N\011NimMain();$N\011return 0;$N}$N$N", 48); STRING_LITERAL(T839829468_671, "int main(int argc, char** args, char** env) {$N\011cmdLine = args;" "$N\011cmdCount = argc;$N\011gEnv = env;$N\011NimMain();$N\011return nim_prog" "ram_result;$N}$N$N", 145); STRING_LITERAL(T839829468_672, "dbgRegisterBreakpoint", 21); STRING_LITERAL(T839829468_673, "dbgRegisterFilename", 19); STRING_LITERAL(T839829468_674, "dbgRegisterFilename($1);$N", 26); STRING_LITERAL(T839829468_675, "\011#initStackBottomWith((void *)&inner);$N", 40); STRING_LITERAL(T839829468_676, "void PreMainInner() {$N\011systemInit000();$N$1$2$3}$N$Nvoid PreMa" "in() {$N\011void (*volatile inner)();$N\011systemDatInit000();$N\011inner" " = PreMainInner;$N$4$5\011(*inner)();$N}$N$N", 168); STRING_LITERAL(T839829468_677, "\011#initThreadVarsEmulation();$N", 30); STRING_LITERAL(T839829468_678, "still forwarded: ", 17); STRING_LITERAL(T839829468_679, "NIM_EXTERNC N_NOINLINE(void, $1)(void) {$N", 42); STRING_LITERAL(T839829468_680, "static #TNimNode $1[$2];$n", 26); STRING_LITERAL(T839829468_681, "static #TNimType $1[$2];$n", 26); STRING_LITERAL(T839829468_682, "\011TFrame FR; FR.len = 0;$N", 25); STRING_LITERAL(T839829468_683, "}$N$N", 5); 
STRING_LITERAL(T839829468_684, "N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 46); STRING_LITERAL(T839829468_685, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N", 131); STRING_LITERAL(T839829468_686, "0.15.0", 6); STRING_LITERAL(T839829468_687, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n" " $5 */$N", 201); extern NIM_CONST TY177082 Os_177068_4151366050; extern NIM_CONST TY177510 Cpu_177496_4151366050; STRING_LITERAL(T839829468_688, "#define NIM_INTBITS $1", 22); STRING_LITERAL(T839829468_689, "typedef struct {$1} NimThreadVars;$n", 36); STRING_LITERAL(T839829468_690, "#include \"nimbase.h\"", 20); STRING_LITERAL(T839829468_691, "#include \"$1\"$N", 15); STRING_LITERAL(T839829468_692, "#include $1$N", 13); STRING_LITERAL(T839829468_693, "extern \"C\"", 10); STRING_LITERAL(T839829468_694, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61); STRING_LITERAL(T839829468_695, "__$1__", 6); STRING_LITERAL(T839829468_696, "#ifndef $1$n#define $1$n", 24); STRING_LITERAL(T839829468_697, "N_CDECL(void, NimMain)(void);$n", 31); STRING_LITERAL(T839829468_698, "#endif /* $1 */$n", 17); Tcgen530027* generatedheader_533201_839829468; extern TNimType NTI530015; /* BModule */ Ropeobj179006* indent_533655_839829468; extern TNimType NTI179004; /* Rope */ extern Gcheap49818 gch_49858_1689653243; Ropeobj179006* nimtv_539656_839829468; Ttypeseq293836* nimtvdeps_539674_839829468; extern TNimType NTI293836; /* TTypeSeq */ Intset269030 nimtvdeclared_539675_839829468; extern TNimType NTI269030; /* IntSet */ NI breakpointid_549860_839829468; Ropeobj179006* gbreakpoints_549861_839829468; extern TY530153* gmodules_530170_3723162438; extern TNimType NTI530027; /* TCGen */ extern Debuginfo204009 gdebuginfo_204470_1926258066; extern 
Toption170009Set goptions_170128_2607990831; extern TNimType NTI293804; /* TSymSeq */ extern Tglobaloption170013Set gglobaloptions_170130_2607990831; extern NimStringDesc* headerfile_170138_2607990831; extern NimStringDesc* gprojectfull_170211_2607990831; extern Tcommands170076 gcmd_170132_2607990831; extern NI gerrorcounter_193072_155036129; extern Ropeobj179006* rnl_179903_2381377266; extern NI gforwardedprocscounter_530171_3723162438; extern TNimType NTI293244; /* TTypeKind */ extern TNimType NTI204017; /* seq[(string, int)] */ extern Tsystemcc274002 ccompiler_274431_2528170400; extern NimStringDesc* tnl_177644_4151366050; extern NI floatsize_177642_4151366050; extern Tgcmode170080 gselectedgc_170133_2607990831; extern TNimType NTI293020; /* TNodeKind */ extern TNimType NTI135002; /* seq[string] */ extern TNimType NTI293435; /* TSymKind */ extern TNimType NTI293816; /* TLoc */ extern NI intsize_177641_4151366050; extern TNimType NTI293524; /* TMagic */ extern TNimType NTI192350; /* seq[Rope] */ extern TNimType NTI293796; /* TNodeSeq */ extern Ropeobj179006* mainmodprocs_530148_3723162438; extern Ropeobj179006* maindatinit_530151_3723162438; extern Ropeobj179006* mainmodinit_530149_3723162438; extern Ropeobj179006* othermodsinit_530150_3723162438; extern Tsystemos177004 targetos_177629_4151366050; extern TY192612* fileinfos_192629_155036129; extern Tsystemcpu177452 targetcpu_177627_4151366050; extern Ropeobj179006* gmapping_530152_3723162438; N_NIMCALL(void, T839829468_2)(void) { nimGCvisit((void*)generatedheader_533201_839829468, 0); } N_NIMCALL(void, T839829468_3)(void) { nimGCvisit((void*)indent_533655_839829468, 0); } static N_INLINE(Cell47305*, usrtocell_51440_1689653243)(void* usr0) { Cell47305* result0; result0 = (Cell47305*)0; result0 = ((Cell47305*) ((NI)((NU32)(((NI) (usr0))) - (NU32)(((NI)sizeof(Cell47305)))))); return result0; } static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47305* c0) { addzct_51417_1689653243((&gch_49858_1689653243.zct), c0); 
} static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0) { { Cell47305* c0; if (!!((src0 == NIM_NIL))) goto LA3; c0 = usrtocell_51440_1689653243(src0); (*c0).refcount += ((NI) 8); } LA3: ; { Cell47305* c0; if (!!(((*dest0) == NIM_NIL))) goto LA7; c0 = usrtocell_51440_1689653243((*dest0)); { (*c0).refcount -= ((NI) 8); if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA11; rtladdzct_52601_1689653243(c0); } LA11: ; } LA7: ; (*dest0) = src0; } N_NIMCALL(void, T839829468_5)(void) { nimGCvisit((void*)nimtv_539656_839829468, 0); } N_NIMCALL(void, T839829468_6)(void) { nimGCvisit((void*)nimtvdeps_539674_839829468, 0); } static N_INLINE(void, nimGCunrefNoCycle)(void* p0) { Cell47305* c0; c0 = usrtocell_51440_1689653243(p0); { (*c0).refcount -= ((NI) 8); if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA3; rtladdzct_52601_1689653243(c0); } LA3: ; } N_NIMCALL(void, T839829468_7)(void) { nimGCvisit((void*)nimtvdeclared_539675_839829468.head, 0); nimGCvisit((void*)nimtvdeclared_539675_839829468.data, 0); } N_NIMCALL(void, T839829468_8)(void) { nimGCvisit((void*)gbreakpoints_549861_839829468, 0); } N_NIMCALL(Tcgen530027*, getcgenmodule_533226_839829468)(Tsym293834* s0) { Tcgen530027* result0; result0 = (Tcgen530027*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (((NI) 0) <= (*s0).position); if (!(LOC3)) goto LA4; LOC3 = ((*s0).position < (gmodules_530170_3723162438 ? 
gmodules_530170_3723162438->Sup.len : 0)); LA4: ; if (!LOC3) goto LA5; result0 = gmodules_530170_3723162438->data[(*s0).position]; } goto LA1; LA5: ; { result0 = NIM_NIL; } LA1: ; return result0; } static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0) { void* LOC1; LOC1 = (void*)0; LOC1 = memcpy(dest0, source0, ((size_t) (size0))); } static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0) { copymem_7485_1689653243(((void*) ((&(*dest0).data[((*dest0).Sup.len)- 0]))), ((void*) ((*src0).data)), ((NI) ((NI)((*src0).Sup.len + ((NI) 1))))); (*dest0).Sup.len += (*src0).Sup.len; } N_NIMCALL(NU32, hashowner_533977_839829468)(Tsym293834* s0) { NU32 result0; Tsym293834* m0; Tsym293834* p0; result0 = (NU32)0; m0 = s0; { while (1) { if (!!(((*m0).kind == ((Tsymkind293435) 6)))) goto LA2; m0 = (*m0).owner; } LA2: ; } p0 = (*m0).owner; result0 = register_204121_1926258066((&gdebuginfo_204470_1926258066), (*(*p0).name).s, (*(*m0).name).s); return result0; } static N_INLINE(void, incref_53419_1689653243)(Cell47305* c0) { (*c0).refcount = (NI)((NU32)((*c0).refcount) + (NU32)(((NI) 8))); } static N_INLINE(void, decref_53001_1689653243)(Cell47305* c0) { { (*c0).refcount -= ((NI) 8); if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA3; rtladdzct_52601_1689653243(c0); } LA3: ; } static N_INLINE(void, asgnRef)(void** dest0, void* src0) { { Cell47305* LOC5; if (!!((src0 == NIM_NIL))) goto LA3; LOC5 = (Cell47305*)0; LOC5 = usrtocell_51440_1689653243(src0); incref_53419_1689653243(LOC5); } LA3: ; { Cell47305* LOC10; if (!!(((*dest0) == NIM_NIL))) goto LA8; LOC10 = (Cell47305*)0; LOC10 = usrtocell_51440_1689653243((*dest0)); decref_53001_1689653243(LOC10); } LA8: ; (*dest0) = src0; } N_NIMCALL(Toption170009Set, initprocoptions_563635_839829468)(Tcgen530027* m0) { Toption170009Set result0; memset((void*)(&result0), 0, sizeof(result0)); { if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 13))&31U)))!=0)) goto LA3; result0 = 
(goptions_170128_2607990831 & ~ 32768); } goto LA1; LA3: ; { result0 = goptions_170128_2607990831; } LA1: ; return result0; } N_NIMCALL(Tcproc530021*, newpreinitproc_563625_839829468)(Tcgen530027* m0) { Tcproc530021* result0; result0 = (Tcproc530021*)0; result0 = newproc_530206_3723162438(NIM_NIL, m0); (*result0).labels = ((NI) 100000); return result0; } N_NIMCALL(Tcproc530021*, newpostinitproc_563630_839829468)(Tcgen530027* m0) { Tcproc530021* result0; result0 = (Tcproc530021*)0; result0 = newproc_530206_3723162438(NIM_NIL, m0); (*result0).labels = ((NI) 200000); return result0; } N_NIMCALL(Ropeobj179006*, gettempname_534596_839829468)(Tcgen530027* m0) { Ropeobj179006* result0; Ropeobj179006* LOC1; result0 = (Ropeobj179006*)0; LOC1 = (Ropeobj179006*)0; LOC1 = rope_179401_2381377266(((NI64) ((*m0).labels))); result0 = HEX26_179418_2381377266((*m0).tmpbase, LOC1); (*m0).labels += ((NI) 1); return result0; } N_NIMCALL(Tcgen530027*, rawnewmodule_563663_839829468)(Tsym293834* module0, NimStringDesc* filename0) { Tcgen530027* result0; NimStringDesc* LOC1; NU32 LOC2; NimStringDesc* LOC3; NimStringDesc* LOC4; NimStringDesc* LOC5; result0 = (Tcgen530027*)0; result0 = (Tcgen530027*) newObj((&NTI530015), sizeof(Tcgen530027)); (*result0).Sup.Sup.m_type = (&NTI530027); LOC1 = (NimStringDesc*)0; LOC2 = (NU32)0; LOC2 = hashowner_533977_839829468(module0); LOC3 = (NimStringDesc*)0; LOC3 = HEX24_8401_1689653243(((NU64) (LOC2))); LOC1 = rawNewString(LOC3->Sup.len + 2); appendString(LOC1, ((NimStringDesc*) &T839829468_11)); appendString(LOC1, LOC3); appendString(LOC1, ((NimStringDesc*) &T839829468_12)); asgnRefNoCycle((void**) (&(*result0).tmpbase), rope_179277_2381377266(LOC1)); initlinkedlist_147031_3771138726((&(*result0).headerfiles)); initintset_269885_2627731572((&(*result0).declaredthings)); initintset_269885_2627731572((&(*result0).declaredprotos)); LOC4 = (NimStringDesc*)0; LOC4 = (*result0).cfilename; (*result0).cfilename = copyStringRC1(filename0); if (LOC4) 
nimGCunrefNoCycle(LOC4); LOC5 = (NimStringDesc*)0; LOC5 = (*result0).filename; (*result0).filename = copyStringRC1(filename0); if (LOC5) nimGCunrefNoCycle(LOC5); initidtable_297019_850551059((&(*result0).typecache)); initidtable_297019_850551059((&(*result0).forwtypecache)); asgnRefNoCycle((void**) (&(*result0).module), module0); initintset_269885_2627731572((&(*result0).typeinfomarker)); asgnRef((void**) (&(*result0).initproc), newproc_530206_3723162438(NIM_NIL, result0)); (*(*result0).initproc).options = initprocoptions_563635_839829468(result0); asgnRef((void**) (&(*result0).preinitproc), newpreinitproc_563625_839829468(result0)); asgnRef((void**) (&(*result0).postinitproc), newpostinitproc_563630_839829468(result0)); initnodetable_297085_850551059((&(*result0).datacache)); if ((*result0).typestack) nimGCunrefNoCycle((*result0).typestack); (*result0).typestack = (Ttypeseq293836*) newSeqRC1((&NTI293836), 0); if ((*result0).forwardedprocs) nimGCunrefNoCycle((*result0).forwardedprocs); (*result0).forwardedprocs = (Tsymseq293804*) newSeqRC1((&NTI293804), 0); asgnRefNoCycle((void**) (&(*result0).typenodesname), gettempname_534596_839829468(result0)); asgnRefNoCycle((void**) (&(*result0).nimtypesname), gettempname_534596_839829468(result0)); { if (!(((*module0).flags &(1U<<((NU)(((Tsymflag293184) 13))&31U)))!=0)) goto LA8; (*result0).flags |= ((NU8)1)<<((((Codegenflag530025) 0))%(sizeof(NU8)*8)); (*(*result0).preinitproc).options &= ~(((NU32)1) << ((((Toption170009) 15)) % (sizeof(NU32)*8))); (*(*result0).postinitproc).options &= ~(((NU32)1) << ((((Toption170009) 15)) % (sizeof(NU32)*8))); } LA8: ; return result0; } N_NIMCALL(Tcgen530027*, rawnewmodule_564038_839829468)(Tsym293834* module0) { Tcgen530027* result0; NimStringDesc* LOC1; result0 = (Tcgen530027*)0; LOC1 = (NimStringDesc*)0; LOC1 = tofullpath_193264_155036129(((NI32) ((*module0).position))); result0 = rawnewmodule_563663_839829468(module0, LOC1); return result0; } N_NIMCALL(Tcgen530027*, 
newmodule_564045_839829468)(Tsym293834* module0) { Tcgen530027* result0; result0 = (Tcgen530027*)0; { Tcgen530027* LOC3; NimStringDesc* LOC6; LOC3 = (Tcgen530027*)0; LOC3 = getcgenmodule_533226_839829468(module0); if (!!((LOC3 == NIM_NIL))) goto LA4; LOC6 = (NimStringDesc*)0; LOC6 = HEX24_197185_1689653243(T839829468_9); internalerror_197113_155036129(LOC6); } LA4: ; result0 = rawnewmodule_564038_839829468(module0); { if (!((gmodules_530170_3723162438 ? gmodules_530170_3723162438->Sup.len : 0) <= (*module0).position)) goto LA9; gmodules_530170_3723162438 = (TY530153*) setLengthSeq(&(gmodules_530170_3723162438)->Sup, sizeof(Tcgen530027*), ((NI) ((NI)((*module0).position + ((NI) 1))))); } LA9: ; asgnRef((void**) (&gmodules_530170_3723162438->data[(*module0).position]), result0); { if (!((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 2))&63U)))!=0)) goto LA13; { NimStringDesc* LOC19; NimStringDesc* LOC20; if (!(((*module0).flags &(1U<<((NU)(((Tsymflag293184) 25))&31U)))!=0)) goto LA17; LOC19 = (NimStringDesc*)0; LOC20 = (NimStringDesc*)0; LOC20 = tofilename_193260_155036129(((NI32) ((*module0).position))); LOC19 = rawNewString(LOC20->Sup.len + 28); appendString(LOC19, ((NimStringDesc*) &T839829468_13)); appendString(LOC19, LOC20); internalerror_197113_155036129(LOC19); } LA17: ; } LA13: ; return result0; } N_NIMCALL(Tpasscontext342002*, myopen_564115_839829468)(Tsym293834* module0) { Tpasscontext342002* result0; Tcgen530027* LOC1; result0 = (Tpasscontext342002*)0; LOC1 = (Tcgen530027*)0; LOC1 = newmodule_564045_839829468(module0); result0 = &LOC1->Sup; { NIM_BOOL LOC4; NimStringDesc* f0; NimStringDesc* LOC13; NimStringDesc* LOC14; LOC4 = (NIM_BOOL)0; LOC4 = ((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 27))&63U)))!=0); if (!(LOC4)) goto LA5; LOC4 = (generatedheader_533201_839829468 == NIM_NIL); LA5: ; if (!LOC4) goto LA6; { if (!(((NI) 0) < (headerfile_170138_2607990831 ? 
headerfile_170138_2607990831->Sup.len : 0))) goto LA10; f0 = headerfile_170138_2607990831; } goto LA8; LA10: ; { f0 = gprojectfull_170211_2607990831; } LA8: ; LOC13 = (NimStringDesc*)0; LOC13 = completecfilepath_274854_2528170400(f0, NIM_TRUE); LOC14 = (NimStringDesc*)0; LOC14 = noschangeFileExt(LOC13, ((NimStringDesc*) &T839829468_14)); asgnRef((void**) (&generatedheader_533201_839829468), rawnewmodule_563663_839829468(module0, LOC14)); (*generatedheader_533201_839829468).flags |= ((NU8)1)<<((((Codegenflag530025) 3))%(sizeof(NU8)*8)); } LA6: ; return result0; } N_NIMCALL(NimStringDesc*, getcfile_564204_839829468)(Tcgen530027* m0) { NimStringDesc* result0; NimStringDesc* ext0; NimStringDesc* LOC13; NimStringDesc* LOC14; result0 = (NimStringDesc*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; ext0 = copyString(((NimStringDesc*) &T839829468_15)); } goto LA1; LA5: ; { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = (gcmd_170132_2607990831 == ((Tcommands170076) 3)); if (LOC8) goto LA9; LOC8 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 28))&31U)))!=0); LA9: ; if (!LOC8) goto LA10; ext0 = copyString(((NimStringDesc*) &T839829468_16)); } goto LA1; LA10: ; { ext0 = copyString(((NimStringDesc*) &T839829468_17)); } LA1: ; LOC13 = (NimStringDesc*)0; LOC13 = withpackagename_171073_2607990831((*m0).cfilename); LOC14 = (NimStringDesc*)0; LOC14 = completecfilepath_274854_2528170400(LOC13, NIM_TRUE); result0 = noschangeFileExt(LOC14, ext0); return result0; } N_NIMCALL(Tpasscontext342002*, myopencached_564249_839829468)(Tsym293834* module0, Trodreader333021* rd0) { Tpasscontext342002* result0; Tcgen530027* m0; NimStringDesc* LOC1; result0 = (Tpasscontext342002*)0; m0 = newmodule_564045_839829468(module0); LOC1 = (NimStringDesc*)0; LOC1 = getcfile_564204_839829468(m0); 
readmergeinfo_531613_2760143328(LOC1, m0); result0 = &m0->Sup; return result0; } static N_INLINE(NIM_BOOL, skipcodegen_342085_2355241294)(Tnode293802* n0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = (((NI) 0) < gerrorcounter_193072_155036129); return result0; } N_NIMCALL(void, fillloc_533282_839829468)(Tloc293816* a0, Tlockind293808 k0, Ttype293840* typ0, Ropeobj179006* r0, Tstorageloc293812 s0) { { if (!((*a0).k == ((Tlockind293808) 0))) goto LA3; (*a0).k = k0; unsureAsgnRef((void**) (&(*a0).t), typ0); (*a0).s = s0; { if (!((*a0).r == NIM_NIL)) goto LA7; unsureAsgnRef((void**) (&(*a0).r), r0); } LA7: ; } LA3: ; } N_NIMCALL(NIM_BOOL, iskeyword_533960_839829468)(Tident200010* w0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; switch ((*w0).Sup.id) { case ((NI) 200) ... ((NI) 262): case ((NI) 4) ... ((NI) 70): case ((NI) 138): { result0 = NIM_TRUE; goto BeforeRet; } break; default: { result0 = NIM_FALSE; goto BeforeRet; } break; } }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj179006*, manglename_534205_839829468)(Tsym293834* s0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = (*s0).loc.r; { NIM_BOOL keeporigname0; NIM_BOOL LOC5; NIM_BOOL LOC6; NIM_BOOL LOC9; NimStringDesc* LOC10; if (!(result0 == NIM_NIL)) goto LA3; LOC5 = (NIM_BOOL)0; LOC6 = (NIM_BOOL)0; LOC6 = ((2824 &(1U<<((NU)((*s0).kind)&31U)))!=0); if (!(LOC6)) goto LA7; LOC6 = ((IL64(2149580812) & (*s0).flags) == 0); LA7: ; LOC5 = LOC6; if (!(LOC5)) goto LA8; LOC9 = (NIM_BOOL)0; LOC9 = iskeyword_533960_839829468((*s0).name); LOC5 = !(LOC9); LA8: ; keeporigname0 = LOC5; LOC10 = (NimStringDesc*)0; LOC10 = mangle_529847_2036603609((*(*s0).name).s); result0 = rope_179277_2381377266(LOC10); { if (!keeporigname0) goto LA13; add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_18)); } goto LA11; LA13: ; { TY534289 LOC16; Ropeobj179006* LOC17; Ropeobj179006* LOC18; TY534289 LOC19; Ropeobj179006* LOC20; NU32 LOC21; Ropeobj179006* LOC22; memset((void*)LOC16, 0, sizeof(LOC16)); LOC17 = 
(Ropeobj179006*)0; LOC17 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_12), LOC16, 0); add_179482_2381377266(&result0, LOC17); LOC18 = (Ropeobj179006*)0; LOC18 = rope_179401_2381377266(((NI64) ((*s0).Sup.id))); add_179482_2381377266(&result0, LOC18); memset((void*)LOC19, 0, sizeof(LOC19)); LOC20 = (Ropeobj179006*)0; LOC20 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_12), LOC19, 0); add_179482_2381377266(&result0, LOC20); LOC21 = (NU32)0; LOC21 = hashowner_533977_839829468(s0); LOC22 = (Ropeobj179006*)0; LOC22 = rope_179401_2381377266(((NI64) (LOC21))); add_179482_2381377266(&result0, LOC22); } LA11: ; asgnRefNoCycle((void**) (&(*s0).loc.r), result0); } LA3: ; return result0; } N_NIMCALL(void, fillprocloc_540201_839829468)(Tsym293834* sym0) { { Ropeobj179006* LOC5; if (!((*sym0).loc.k == ((Tlockind293808) 0))) goto LA3; LOC5 = (Ropeobj179006*)0; LOC5 = manglename_534205_839829468(sym0); fillloc_533282_839829468((&(*sym0).loc), ((Tlockind293808) 7), (*sym0).typ, LOC5, ((Tstorageloc293812) 2)); } LA3: ; } N_NIMCALL(void, useheader_533369_839829468)(Tcgen530027* m0, Tsym293834* sym0) { { NimStringDesc* LOC5; NIM_BOOL LOC6; if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag293810) 6))&15U)))!=0)) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = getstr_298230_850551059((*(*sym0).annex).path); LOC6 = (NIM_BOOL)0; LOC6 = includestr_147249_3771138726((&(*m0).headerfiles), LOC5); } LA3: ; } static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0) { (*dest0).data[((*dest0).Sup.len)- 0] = c0; (*dest0).data[((NI)((*dest0).Sup.len + ((NI) 1)))- 0] = 0; (*dest0).Sup.len += ((NI) 1); } N_NIMCALL(NIM_BOOL, isactivated_562431_839829468)(Tsym293834* prc0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = !(((*prc0).typ == NIM_NIL)); return result0; } N_NIMCALL(void, addforwardedproc_533203_839829468)(Tcgen530027* m0, Tsym293834* prc0) { (*m0).forwardedprocs = (Tsymseq293804*) incrSeqV2(&((*m0).forwardedprocs)->Sup, sizeof(Tsym293834*)); 
asgnRefNoCycle((void**) (&(*m0).forwardedprocs->data[(*m0).forwardedprocs->Sup.len]), prc0); ++(*m0).forwardedprocs->Sup.len; gforwardedprocscounter_530171_3723162438 += ((NI) 1); } N_NIMCALL(void, genclinedir_533725_839829468)(Ropeobj179006** r0, NimStringDesc* filename0, NI line0) { { TY533811 LOC5; NimStringDesc* LOC6; if (!((goptions_170128_2607990831 &(1U<<((NU)(((Toption170009) 10))&31U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC6 = (NimStringDesc*)0; LOC6 = makesinglelinecstring_529835_2036603609(filename0); LOC5[0] = rope_179277_2381377266(LOC6); LOC5[1] = rope_179401_2381377266(((NI64) (line0))); addf_180205_2381377266(r0, ((NimStringDesc*) &T839829468_21), LOC5, 2); } LA3: ; } static N_INLINE(NI, tolinenumber_193415_155036129)(Tlineinfo192336 info0) { NI result0; result0 = (NI)0; result0 = ((NI) (info0.line)); return result0; } N_NIMCALL(NI, safelinenm_533721_839829468)(Tlineinfo192336 info0) { NI result0; result0 = (NI)0; result0 = tolinenumber_193415_155036129(info0); { if (!(result0 < ((NI) 0))) goto LA3; result0 = ((NI) 0); } LA3: ; return result0; } N_NIMCALL(void, genclinedir_533813_839829468)(Ropeobj179006** r0, Tlineinfo192336 info0) { NimStringDesc* LOC1; NI LOC2; LOC1 = (NimStringDesc*)0; LOC1 = tofullpath_193264_155036129(info0.fileindex); LOC2 = (NI)0; LOC2 = safelinenm_533721_839829468(info0); genclinedir_533725_839829468(r0, LOC1, LOC2); } N_NIMCALL(Tctypekind530007, mapsettype_534389_839829468)(Ttype293840* typ0) { Tctypekind530007 result0; NI64 LOC1; result0 = (Tctypekind530007)0; LOC1 = (NI64)0; LOC1 = getsize_321135_3876443242(typ0); switch (((NI) (LOC1))) { case ((NI) 1): { result0 = ((Tctypekind530007) 4); } break; case ((NI) 2): { result0 = ((Tctypekind530007) 5); } break; case ((NI) 4): { result0 = ((Tctypekind530007) 6); } break; case ((NI) 8): { result0 = ((Tctypekind530007) 7); } break; default: { result0 = ((Tctypekind530007) 17); } break; } return result0; } N_NIMCALL(Tctypekind530007, 
maptype_534393_839829468)(Ttype293840* typ0) { Tctypekind530007 result0; result0 = (Tctypekind530007)0; switch ((*typ0).kind) { case ((Ttypekind293244) 0): case ((Ttypekind293244) 7): { result0 = ((Tctypekind530007) 0); } break; case ((Ttypekind293244) 1): { result0 = ((Tctypekind530007) 2); } break; case ((Ttypekind293244) 2): { result0 = ((Tctypekind530007) 1); } break; case ((Ttypekind293244) 19): { result0 = mapsettype_534389_839829468(typ0); } break; case ((Ttypekind293244) 27): case ((Ttypekind293244) 4): case ((Ttypekind293244) 16): case ((Ttypekind293244) 48): { result0 = ((Tctypekind530007) 17); } break; case ((Ttypekind293244) 17): case ((Ttypekind293244) 18): { result0 = ((Tctypekind530007) 19); } break; case ((Ttypekind293244) 10): case ((Ttypekind293244) 11): case ((Ttypekind293244) 12): case ((Ttypekind293244) 13): case ((Ttypekind293244) 15): case ((Ttypekind293244) 46): case ((Ttypekind293244) 47): case ((Ttypekind293244) 49): case ((Ttypekind293244) 8): { Ttype293840* LOC8; LOC8 = (Ttype293840*)0; LOC8 = lastson_296377_850551059(typ0); result0 = maptype_534393_839829468(LOC8); } break; case ((Ttypekind293244) 14): { { NI64 LOC12; LOC12 = (NI64)0; LOC12 = firstord_321001_3876443242(typ0); if (!(LOC12 < IL64(0))) goto LA13; result0 = ((Tctypekind530007) 6); } goto LA10; LA13: ; { NI64 LOC16; LOC16 = (NI64)0; LOC16 = getsize_321135_3876443242(typ0); switch (((NI) (LOC16))) { case ((NI) 1): { result0 = ((Tctypekind530007) 13); } break; case ((NI) 2): { result0 = ((Tctypekind530007) 14); } break; case ((NI) 4): { result0 = ((Tctypekind530007) 6); } break; case ((NI) 8): { result0 = ((Tctypekind530007) 7); } break; default: { internalerror_197113_155036129(((NimStringDesc*) &T839829468_25)); } break; } } LA10: ; } break; case ((Ttypekind293244) 20): { result0 = maptype_534393_839829468((*typ0).sons->data[((NI) 0)]); } break; case ((Ttypekind293244) 21): case ((Ttypekind293244) 23): case ((Ttypekind293244) 22): { Ttype293840* base0; Ttype293840* LOC24; 
LOC24 = (Ttype293840*)0; LOC24 = lastson_296377_850551059(typ0); base0 = skiptypes_297099_850551059(LOC24, IL64(211106232576256)); switch ((*base0).kind) { case ((Ttypekind293244) 27): case ((Ttypekind293244) 4): case ((Ttypekind293244) 16): case ((Ttypekind293244) 48): { result0 = ((Tctypekind530007) 18); } break; default: { result0 = ((Tctypekind530007) 20); } break; } } break; case ((Ttypekind293244) 26): { result0 = ((Tctypekind530007) 20); } break; case ((Ttypekind293244) 24): { result0 = ((Tctypekind530007) 22); } break; case ((Ttypekind293244) 25): { { if (!!(((*typ0).callconv == ((Tcallingconvention293002) 8)))) goto LA32; result0 = ((Tctypekind530007) 23); } goto LA30; LA32: ; { result0 = ((Tctypekind530007) 19); } LA30: ; } break; case ((Ttypekind293244) 28): { result0 = ((Tctypekind530007) 21); } break; case ((Ttypekind293244) 29): { result0 = ((Tctypekind530007) 24); } break; case ((Ttypekind293244) 31) ... ((Ttypekind293244) 44): { result0 = ((Tctypekind530007) ((NI)(((NI) ((NI)(((NI) ((*typ0).kind)) - ((NI) 31)))) + ((NI) 3)))); } break; case ((Ttypekind293244) 59): { { Ttype293840* LOC43; if (!!(((*typ0).n == NIM_NIL))) goto LA41; LOC43 = (Ttype293840*)0; LOC43 = lastson_296377_850551059(typ0); result0 = maptype_534393_839829468(LOC43); } goto LA39; LA41: ; { internalerror_197113_155036129(((NimStringDesc*) &T839829468_25)); } LA39: ; } break; default: { internalerror_197113_155036129(((NimStringDesc*) &T839829468_25)); } break; } return result0; } N_NIMCALL(NIM_BOOL, isimportedcpptype_534476_839829468)(Ttype293840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = !(((*t0).sym == NIM_NIL)); if (!(LOC1)) goto LA2; LOC1 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(NIM_BOOL, needscomplexassignment_534509_839829468)(Ttype293840* typ0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = containsgarbagecollectedref_321117_3876443242(typ0); 
return result0; } static N_INLINE(NIM_BOOL, isobjlackingtypefield_534513_839829468)(Ttype293840* typ0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC3; NIM_BOOL LOC4; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = ((*typ0).kind == ((Ttypekind293244) 17)); if (!(LOC1)) goto LA2; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag293431) 2))&31U)))!=0); if (!(LOC4)) goto LA5; LOC4 = ((*typ0).sons->data[((NI) 0)] == NIM_NIL); LA5: ; LOC3 = LOC4; if (LOC3) goto LA6; LOC3 = ispureobject_321138_3876443242(typ0); LA6: ; LOC1 = LOC3; LA2: ; result0 = LOC1; return result0; } N_NIMCALL(NIM_BOOL, isinvalidreturntype_534548_839829468)(Ttype293840* rettype0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { if (!(rettype0 == NIM_NIL)) goto LA3; result0 = NIM_TRUE; } goto LA1; LA3: ; { Tctypekind530007 LOC6; LOC6 = (Tctypekind530007)0; LOC6 = maptype_534393_839829468(rettype0); switch (LOC6) { case ((Tctypekind530007) 17): { Ttype293840* LOC8; LOC8 = (Ttype293840*)0; LOC8 = skiptypes_297099_850551059(rettype0, IL64(211106232576256)); result0 = !(((*LOC8).kind == ((Ttypekind293244) 23) || (*LOC8).kind == ((Ttypekind293244) 22) || (*LOC8).kind == ((Ttypekind293244) 21))); } break; case ((Tctypekind530007) 19): { Ttype293840* t0; NIM_BOOL LOC16; NIM_BOOL LOC18; NIM_BOOL LOC20; t0 = skiptypes_297099_850551059(rettype0, IL64(211106232576256)); { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = isimportedcpptype_534476_839829468(rettype0); if (LOC12) goto LA13; LOC12 = isimportedcpptype_534476_839829468(t0); LA13: ; if (!LOC12) goto LA14; result0 = NIM_FALSE; goto BeforeRet; } LA14: ; LOC16 = (NIM_BOOL)0; LOC16 = needscomplexassignment_534509_839829468(t0); if (LOC16) goto LA17; LOC18 = (NIM_BOOL)0; LOC18 = ((*t0).kind == ((Ttypekind293244) 17)); if (!(LOC18)) goto LA19; LOC20 = (NIM_BOOL)0; LOC20 = isobjlackingtypefield_534513_839829468(t0); LOC18 = !(LOC20); LA19: ; LOC16 = LOC18; LA17: ; result0 = LOC16; } break; default: { result0 = NIM_FALSE; } 
break; } } LA1: ; }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj179006*, typename_534292_839829468)(Ttype293840* typ0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { NimStringDesc* LOC5; if (!!(((*typ0).sym == NIM_NIL))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = mangle_529847_2036603609((*(*(*typ0).sym).name).s); result0 = rope_179277_2381377266(LOC5); } goto LA1; LA3: ; { TY534289 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_28), LOC7, 0); } LA1: ; return result0; } N_NIMCALL(Ropeobj179006*, gettypename_534313_839829468)(Ttype293840* typ0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = !(((*typ0).sym == NIM_NIL)); if (!(LOC3)) goto LA4; LOC3 = !(((96 & (*(*typ0).sym).flags) == 0)); LA4: ; if (!LOC3) goto LA5; result0 = (*(*typ0).sym).loc.r; } goto LA1; LA5: ; { { Ropeobj179006* LOC12; Ropeobj179006* LOC13; if (!((*typ0).loc.r == NIM_NIL)) goto LA10; LOC12 = (Ropeobj179006*)0; LOC12 = typename_534292_839829468(typ0); LOC13 = (Ropeobj179006*)0; LOC13 = rope_179401_2381377266(((NI64) ((*typ0).Sup.id))); asgnRefNoCycle((void**) (&(*typ0).loc.r), HEX26_179418_2381377266(LOC12, LOC13)); } LA10: ; result0 = (*typ0).loc.r; } LA1: ; { NimStringDesc* LOC18; if (!(result0 == NIM_NIL)) goto LA16; LOC18 = (NimStringDesc*)0; LOC18 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI293244))->Sup.len + 13); appendString(LOC18, ((NimStringDesc*) &T839829468_29)); appendString(LOC18, reprEnum((NI)(*typ0).kind, (&NTI293244))); internalerror_197113_155036129(LOC18); } LA16: ; return result0; } N_NIMCALL(Ropeobj179006*, typenameorliteral_534898_839829468)(Ttype293840* t0, NimStringDesc* literal0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = !(((*t0).sym == NIM_NIL)); if (!(LOC4)) goto LA5; LOC4 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag293184) 5))&31U)))!=0); LA5: ; LOC3 
= LOC4; if (!(LOC3)) goto LA6; LOC3 = ((*(*t0).sym).magic == ((Tmagic293524) 0)); LA6: ; if (!LOC3) goto LA7; result0 = gettypename_534313_839829468(t0); } goto LA1; LA7: ; { result0 = rope_179277_2381377266(literal0); } LA1: ; return result0; } N_NIMCALL(Ropeobj179006*, getsimpletypedesc_534936_839829468)(Tcgen530027* m0, Ttype293840* typ0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; switch ((*typ0).kind) { case ((Ttypekind293244) 26): { result0 = typenameorliteral_534898_839829468(typ0, ((NimStringDesc*) &T839829468_30)); } break; case ((Ttypekind293244) 28): { Ropeobj179006* LOC3; LOC3 = (Ropeobj179006*)0; LOC3 = cgsym_533403_839829468(m0, ((NimStringDesc*) &T839829468_31)); result0 = typenameorliteral_534898_839829468(typ0, ((NimStringDesc*) &T839829468_32)); } break; case ((Ttypekind293244) 29): { result0 = typenameorliteral_534898_839829468(typ0, ((NimStringDesc*) &T839829468_33)); } break; case ((Ttypekind293244) 1): { result0 = typenameorliteral_534898_839829468(typ0, ((NimStringDesc*) &T839829468_34)); } break; case ((Ttypekind293244) 2): { result0 = typenameorliteral_534898_839829468(typ0, ((NimStringDesc*) &T839829468_35)); } break; case ((Ttypekind293244) 5): { result0 = typenameorliteral_534898_839829468(typ0, ((NimStringDesc*) &T839829468_18)); } break; case ((Ttypekind293244) 31) ... 
((Ttypekind293244) 44): { result0 = typenameorliteral_534898_839829468(typ0, Numericaltypetostr_534941_839829468[((*typ0).kind)- 31]); } break; case ((Ttypekind293244) 13): case ((Ttypekind293244) 20): case ((Ttypekind293244) 15): { result0 = getsimpletypedesc_534936_839829468(m0, (*typ0).sons->data[((NI) 0)]); } break; case ((Ttypekind293244) 59): { { Ttype293840* LOC15; if (!!(((*typ0).n == NIM_NIL))) goto LA13; LOC15 = (Ttype293840*)0; LOC15 = lastson_296377_850551059(typ0); result0 = getsimpletypedesc_534936_839829468(m0, LOC15); } goto LA11; LA13: ; { internalerror_197113_155036129(((NimStringDesc*) &T839829468_50)); } LA11: ; } break; case ((Ttypekind293244) 11): { Ttype293840* LOC18; LOC18 = (Ttype293840*)0; LOC18 = lastson_296377_850551059(typ0); result0 = getsimpletypedesc_534936_839829468(m0, LOC18); } break; default: { result0 = NIM_NIL; } break; } return result0; } N_NIMCALL(Ropeobj179006*, cachegettype_534591_839829468)(Tidtable293850 tab0, Ttype293840* key0) { Ropeobj179006* result0; Tidobj200004* LOC1; TNimObject* LOC2; result0 = (Ropeobj179006*)0; LOC1 = (Tidobj200004*)0; LOC1 = &key0->Sup; LOC2 = (TNimObject*)0; LOC2 = idtableget_300086_2984716966(tab0, LOC1); result0 = ((Ropeobj179006*) (LOC2)); return result0; } N_NIMCALL(Ropeobj179006*, gettypepre_534972_839829468)(Tcgen530027* m0, Ttype293840* typ0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { if (!(typ0 == NIM_NIL)) goto LA3; result0 = rope_179277_2381377266(((NimStringDesc*) &T839829468_26)); } goto LA1; LA3: ; { result0 = getsimpletypedesc_534936_839829468(m0, typ0); { if (!(result0 == NIM_NIL)) goto LA8; result0 = cachegettype_534591_839829468((*m0).typecache, typ0); } LA8: ; } LA1: ; return result0; } N_NIMCALL(NIM_BOOL, isimportedtype_534449_839829468)(Ttype293840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = !(((*t0).sym == NIM_NIL)); if (!(LOC1)) goto LA2; LOC1 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag293184) 5))&31U)))!=0); 
LA2: ; result0 = LOC1; return result0; } N_NIMCALL(NimStringDesc*, getforwardstructformat_535015_839829468)(Tcgen530027* m0) { NimStringDesc* result0; result0 = (NimStringDesc*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; result0 = copyString(((NimStringDesc*) &T839829468_54)); } goto LA1; LA5: ; { result0 = copyString(((NimStringDesc*) &T839829468_55)); } LA1: ; return result0; } N_NIMCALL(Ropeobj179006*, structorunion_535001_839829468)(Ttype293840* t0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag293431) 1))&31U)))!=0)) goto LA3; result0 = rope_179277_2381377266(((NimStringDesc*) &T839829468_56)); } goto LA1; LA3: ; { result0 = rope_179277_2381377266(((NimStringDesc*) &T839829468_57)); } LA1: ; return result0; } N_NIMCALL(Ropeobj179006*, gettypeforward_535039_839829468)(Tcgen530027* m0, Ttype293840* typ0) { Ropeobj179006* result0; { result0 = (Ropeobj179006*)0; result0 = cachegettype_534591_839829468((*m0).forwtypecache, typ0); { if (!!((result0 == NIM_NIL))) goto LA3; goto BeforeRet; } LA3: ; result0 = gettypepre_534972_839829468(m0, typ0); { if (!!((result0 == NIM_NIL))) goto LA7; goto BeforeRet; } LA7: ; switch ((*typ0).kind) { case ((Ttypekind293244) 24): case ((Ttypekind293244) 18): case ((Ttypekind293244) 17): { Tidobj200004* LOC17; TNimObject* LOC18; result0 = gettypename_534313_839829468(typ0); { NIM_BOOL LOC12; NimStringDesc* LOC15; TY533811 LOC16; LOC12 = (NIM_BOOL)0; LOC12 = isimportedtype_534449_839829468(typ0); if (!!(LOC12)) goto LA13; LOC15 = (NimStringDesc*)0; LOC15 = getforwardstructformat_535015_839829468(m0); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = structorunion_535001_839829468(typ0); LOC16[1] = result0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 2))- 0], LOC15, LOC16, 2); } LA13: ; LOC17 = 
/* ...continuation of gettypeforward: cache the forward name in
 * m0->forwtypecache keyed by the type's id object; the default branch raises
 * an internal error naming the unexpected type kind. */
(Tidobj200004*)0; LOC17 = &typ0->Sup; LOC18 = (TNimObject*)0; LOC18 = &result0->Sup; idtableput_300094_2984716966((&(*m0).forwtypecache), LOC17, LOC18); } break; default: { NimStringDesc* LOC20; LOC20 = (NimStringDesc*)0; LOC20 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI293244))->Sup.len + 16); appendString(LOC20, ((NimStringDesc*) &T839829468_58)); appendString(LOC20, reprEnum((NI)(*typ0).kind, (&NTI293244))); appendChar(LOC20, 41); internalerror_197113_155036129(LOC20); } break; } }BeforeRet: ; return result0; }
/* pushtype: appends typ0 to m0->typestack — grows the Nim seq by one element
 * (incrSeqV2), stores the ref with the GC write barrier (asgnRefNoCycle),
 * then bumps the length. */
N_NIMCALL(void, pushtype_534958_839829468)(Tcgen530027* m0, Ttype293840* typ0) { (*m0).typestack = (Ttypeseq293836*) incrSeqV2(&((*m0).typestack)->Sup, sizeof(Ttype293840*)); asgnRefNoCycle((void**) (&(*m0).typestack->data[(*m0).typestack->Sup.len]), typ0); ++(*m0).typestack->Sup.len; }
/* gettypedescweak: "weak" type descriptor for t0 — avoids forcing a full
 * definition where a forward name suffices.  After skipping abstraction
 * layers (skiptypes with a kind mask): for kinds 17/18 either the full
 * descriptor (imported C++ type instantiated via kind 11 — presumably a
 * generic instance; TODO confirm) or a forward name plus pushtype so the full
 * definition is generated later; for kind 24 a forward name with a "*"
 * suffix (&T..._53 appears to be "*"); otherwise the full descriptor. */
N_NIMCALL(Ropeobj179006*, gettypedescweak_535079_839829468)(Tcgen530027* m0, Ttype293840* t0, Intset269030* check0) { Ropeobj179006* result0; Ttype293840* etb0; result0 = (Ropeobj179006*)0; etb0 = skiptypes_297099_850551059(t0, IL64(211106232576256)); switch ((*etb0).kind) { case ((Ttypekind293244) 17): case ((Ttypekind293244) 18): { { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = isimportedcpptype_534476_839829468(etb0); if (!(LOC4)) goto LA5; LOC4 = ((*t0).kind == ((Ttypekind293244) 11)); LA5: ; if (!LOC4) goto LA6; result0 = gettypedescaux_534503_839829468(m0, t0, check0); } goto LA2; LA6: ; { Ttype293840* x0; x0 = getuniquetype_529640_2036603609(etb0); result0 = gettypeforward_535039_839829468(m0, x0); pushtype_534958_839829468(m0, x0); } LA2: ; } break; case ((Ttypekind293244) 24): { Ttype293840* x0; Ropeobj179006* LOC10; x0 = getuniquetype_529640_2036603609(etb0); LOC10 = (Ropeobj179006*)0; LOC10 = gettypeforward_535039_839829468(m0, x0); result0 = HEX26_179447_2381377266(LOC10, ((NimStringDesc*) &T839829468_53)); pushtype_534958_839829468(m0, x0); } break; default: { result0 = gettypedescaux_534503_839829468(m0, t0, check0); } break; } return result0; } static 
/* len (inline): nil-safe length of a node's sons sequence — 0 when sons is
 * nil, otherwise sons->Sup.len (the inner ternary re-checks nil). */
N_INLINE(NI, len_294081_850551059)(Tnode293802* n0) { NI result0; result0 = (NI)0; { if (!(*n0).kindU.S6.sons == 0) goto LA3; result0 = ((NI) 0); } goto LA1; LA3: ; { result0 = ((*n0).kindU.S6.sons ? (*n0).kindU.S6.sons->Sup.len : 0); } LA1: ; return result0; }
/* appcg: formats frmt0 with args0 through ropecg and appends the resulting
 * rope to *c0. */
N_NIMCALL(void, appcg_533632_839829468)(Tcgen530027* m0, Ropeobj179006** c0, NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0) { Ropeobj179006* LOC1; LOC1 = (Ropeobj179006*)0; LOC1 = ropecg_533407_839829468(m0, frmt0, args0, args0Len0); add_179482_2381377266(c0, LOC1); }
/* scancppgenericslot: scans an imported-C++ type pattern string pat0 starting
 * just past *cursor0.  Skips a run of '*' (ASCII 42), then expects a single
 * decimal digit: on success writes digit value to *outidx0, the star count to
 * *outstars0, advances *cursor0 past the digit and returns true; otherwise
 * returns false.  (Pattern syntax appears to be the Nim "'N" / "'*N" generic
 * slot notation — TODO confirm.) */
N_NIMCALL(NIM_BOOL, scancppgenericslot_535827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0) { NIM_BOOL result0; NI begin0; { result0 = (NIM_BOOL)0; (*cursor0) += ((NI) 1); begin0 = (*cursor0); { while (1) { if (!((NU8)(pat0->data[(*cursor0)]) == (NU8)(42))) goto LA2; (*cursor0) += ((NI) 1); } LA2: ; } { if (!(((NU8)(pat0->data[(*cursor0)])) >= ((NU8)(48)) && ((NU8)(pat0->data[(*cursor0)])) <= ((NU8)(57)))) goto LA5; (*outidx0) = ((NI) ((NI)(((NI) (((NU8)(pat0->data[(*cursor0)])))) - ((NI) 48)))); (*outstars0) = (NI)((*cursor0) - begin0); (*cursor0) += ((NI) 1); result0 = NIM_TRUE; goto BeforeRet; } goto LA3; LA5: ; { result0 = NIM_FALSE; goto BeforeRet; } LA3: ; }BeforeRet: ; return result0; }
/* resolvestarsincpptype: resolves the type occupying generic slot idx0 of
 * typ0, then dereferences it stars0 times (kind 11 — pointer-like, presumably
 * tyPtr/tyRef; TODO confirm — takes sons[1], otherwise elemtype).  Raises an
 * internal error if idx0 is out of range.  Continues on next line. */
N_NIMCALL(Ttype293840*, resolvestarsincpptype_535891_839829468)(Ttype293840* typ0, NI idx0, NI stars0) { Ttype293840* result0; result0 = (Ttype293840*)0; { NI LOC3; LOC3 = (NI)0; LOC3 = len_296339_850551059(typ0); if (!(LOC3 <= idx0)) goto LA4; internalerror_197113_155036129(((NimStringDesc*) &T839829468_81)); } LA4: ; result0 = (*typ0).sons->data[idx0]; { NI i_535906_839829468; NI res_535931_839829468; i_535906_839829468 = (NI)0; res_535931_839829468 = ((NI) 1); { while (1) { if (!(res_535931_839829468 <= stars0)) goto LA8; i_535906_839829468 = res_535931_839829468; { NIM_BOOL LOC11; NI LOC13; LOC11 = (NIM_BOOL)0; LOC11 = !((result0 == NIM_NIL)); if (!(LOC11)) goto LA12; LOC13 = (NI)0; LOC13 = 
/* ...continuation of resolvestarsincpptype: per star, step into sons[1] for
 * kind 11 or elemtype otherwise, guarding against nil/empty types. */
len_296339_850551059(result0); LOC11 = (((NI) 0) < LOC13); LA12: ; if (!LOC11) goto LA14; { if (!((*result0).kind == ((Ttypekind293244) 11))) goto LA18; result0 = (*result0).sons->data[((NI) 1)]; } goto LA16; LA18: ; { result0 = elemtype_321394_3876443242(result0); } LA16: ; } LA14: ; res_535931_839829468 += ((NI) 1); } LA8: ; } } return result0; }
/* manglefield: mangles a Nim identifier into a C-safe field name; if the
 * mangled name is a C keyword, uppercases its first character to avoid the
 * collision. */
N_NIMCALL(NimStringDesc*, manglefield_533973_839829468)(Tident200010* name0) { NimStringDesc* result0; result0 = (NimStringDesc*)0; result0 = mangle_529847_2036603609((*name0).s); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = iskeyword_533960_839829468(name0); if (!LOC3) goto LA4; result0->data[((NI) 0)] = nsuToUpperAsciiChar(result0->data[((NI) 0)]); } LA4: ; return result0; }
/* manglerecfieldname: C name for a record field.  If the record's symbol has
 * one of the flag bits in mask 96 set (presumably importc/header-related —
 * TODO confirm bit meanings), the field's pre-filled loc.r is used verbatim;
 * otherwise the identifier is mangled.  Internal error if the result is nil. */
N_NIMCALL(Ropeobj179006*, manglerecfieldname_535361_839829468)(Tsym293834* field0, Ttype293840* rectype0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = !(((*rectype0).sym == NIM_NIL)); if (!(LOC3)) goto LA4; LOC3 = !(((96 & (*(*rectype0).sym).flags) == 0)); LA4: ; if (!LOC3) goto LA5; result0 = (*field0).loc.r; } goto LA1; LA5: ; { NimStringDesc* LOC8; LOC8 = (NimStringDesc*)0; LOC8 = manglefield_533973_839829468((*field0).name); result0 = rope_179277_2381377266(LOC8); } LA1: ; { if (!(result0 == NIM_NIL)) goto LA11; internalerror_197100_155036129((*field0).info, ((NimStringDesc*) &T839829468_96)); } LA11: ; return result0; }
/* genrecordfieldsaux: recursively walks a record AST node n0 and generates
 * the C field declarations.  Node kind 138 is a child list (recurse over all
 * sons), 139 a case/variant section (emits an anonymous union, below), 3 a
 * field symbol — kind names presumed from behavior, TODO confirm against
 * the Nim node-kind enum.  accessexpr0 is the C access path prefix used to
 * fill each field's loc.  Continues on following lines. */
N_NIMCALL(Ropeobj179006*, genrecordfieldsaux_535421_839829468)(Tcgen530027* m0, Tnode293802* n0, Ropeobj179006* accessexpr0, Ttype293840* rectype0, Intset269030* check0) { Ropeobj179006* result0; Ropeobj179006* ae0; Ropeobj179006* uname0; Ropeobj179006* sname0; Ropeobj179006* a0; Tnode293802* k0; Tsym293834* field0; { result0 = (Ropeobj179006*)0; ae0 = (Ropeobj179006*)0; uname0 = (Ropeobj179006*)0; sname0 = (Ropeobj179006*)0; a0 = (Ropeobj179006*)0; k0 = (Tnode293802*)0; field0 = (Tsym293834*)0; result0 = NIM_NIL; switch ((*n0).kind) { case ((Tnodekind293020) 138): { { NI 
/* ...interior of genrecordfieldsaux (see its header comment): the kind-138
 * branch iterates all sons and concatenates their generated fields; the
 * kind-139 branch emits an anonymous union (discriminator name suffixed 'U',
 * ASCII 85) whose members are per-branch structs named S<i>; the kind-3
 * branch emits one field, handling bitfields, flexible-array members and
 * weak (forward) type references. */
i_535447_839829468; NI HEX3Atmp_535620_839829468; NI LOC3; NI res_535623_839829468; i_535447_839829468 = (NI)0; HEX3Atmp_535620_839829468 = (NI)0; LOC3 = (NI)0; LOC3 = sonslen_296351_850551059(n0); HEX3Atmp_535620_839829468 = (NI)(LOC3 - ((NI) 1)); res_535623_839829468 = ((NI) 0); { while (1) { Ropeobj179006* LOC6; if (!(res_535623_839829468 <= HEX3Atmp_535620_839829468)) goto LA5; i_535447_839829468 = res_535623_839829468; LOC6 = (Ropeobj179006*)0; LOC6 = genrecordfieldsaux_535421_839829468(m0, (*n0).kindU.S6.sons->data[i_535447_839829468], accessexpr0, rectype0, check0); add_179482_2381377266(&result0, LOC6); res_535623_839829468 += ((NI) 1); } LA5: ; } } } break; case ((Tnodekind293020) 139): { Ropeobj179006* LOC12; NimStringDesc* LOC13; NimStringDesc* LOC14; Ropeobj179006* unionbody0; { if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 3)))) goto LA10; internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_89)); } LA10: ; LOC12 = (Ropeobj179006*)0; LOC12 = genrecordfieldsaux_535421_839829468(m0, (*n0).kindU.S6.sons->data[((NI) 0)], accessexpr0, rectype0, check0); add_179482_2381377266(&result0, LOC12); LOC13 = (NimStringDesc*)0; LOC14 = (NimStringDesc*)0; LOC14 = mangle_529847_2036603609((*(*(*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s); LOC13 = rawNewString(LOC14->Sup.len + 1); appendString(LOC13, LOC14); appendChar(LOC13, 85); uname0 = rope_179277_2381377266(LOC13); { TY533811 LOC19; if (!!((accessexpr0 == NIM_NIL))) goto LA17; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = accessexpr0; LOC19[1] = uname0; ae0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_90), LOC19, 2); } goto LA15; LA17: ; { ae0 = uname0; } LA15: ; unionbody0 = NIM_NIL; { NI i_535491_839829468; NI HEX3Atmp_535629_839829468; NI LOC22; NI res_535632_839829468; i_535491_839829468 = (NI)0; HEX3Atmp_535629_839829468 = (NI)0; LOC22 = (NI)0; LOC22 = sonslen_296351_850551059(n0); HEX3Atmp_535629_839829468 = (NI)(LOC22 - ((NI) 
1)); res_535632_839829468 = ((NI) 1); { while (1) { if (!(res_535632_839829468 <= HEX3Atmp_535629_839829468)) goto LA24; i_535491_839829468 = res_535632_839829468; switch ((*(*n0).kindU.S6.sons->data[i_535491_839829468]).kind) { case ((Tnodekind293020) 85): case ((Tnodekind293020) 88): { k0 = lastson_296364_850551059((*n0).kindU.S6.sons->data[i_535491_839829468]); { Ropeobj179006* LOC30; TY533811 LOC31; Ropeobj179006* LOC32; if (!!(((*k0).kind == ((Tnodekind293020) 3)))) goto LA28; LOC30 = (Ropeobj179006*)0; LOC30 = rope_179401_2381377266(((NI64) (i_535491_839829468))); sname0 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_91), LOC30); memset((void*)LOC31, 0, sizeof(LOC31)); LOC31[0] = ae0; LOC31[1] = sname0; LOC32 = (Ropeobj179006*)0; LOC32 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_90), LOC31, 2); a0 = genrecordfieldsaux_535421_839829468(m0, k0, LOC32, rectype0, check0); { TY179507 LOC37; if (!!((a0 == NIM_NIL))) goto LA35; add_179487_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_92)); add_179482_2381377266(&unionbody0, a0); memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = sname0; addf_180205_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_93), LOC37, 1); } LA35: ; } goto LA26; LA28: ; { Ropeobj179006* LOC39; LOC39 = (Ropeobj179006*)0; LOC39 = genrecordfieldsaux_535421_839829468(m0, k0, ae0, rectype0, check0); add_179482_2381377266(&unionbody0, LOC39); } LA26: ; } break; default: { internalerror_197113_155036129(((NimStringDesc*) &T839829468_94)); } break; } res_535632_839829468 += ((NI) 1); } LA24: ; } } { TY533811 LOC45; if (!!((unionbody0 == NIM_NIL))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC45[0] = unionbody0; LOC45[1] = uname0; addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_95), LOC45, 2); } LA43: ; } break; case ((Tnodekind293020) 3): { field0 = (*n0).kindU.S4.sym; { if (!((*(*field0).typ).kind == ((Ttypekind293244) 62))) goto LA49; goto BeforeRet; } LA49: ; sname0 = 
manglerecfieldname_535361_839829468(field0, rectype0); { TY533811 LOC55; if (!!((accessexpr0 == NIM_NIL))) goto LA53; memset((void*)LOC55, 0, sizeof(LOC55)); LOC55[0] = accessexpr0; LOC55[1] = sname0; ae0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_90), LOC55, 2); } goto LA51; LA53: ; { ae0 = sname0; } LA51: ; fillloc_533282_839829468((&(*field0).loc), ((Tlockind293808) 5), (*field0).typ, ae0, ((Tstorageloc293812) 0)); { NIM_BOOL LOC59; Ttype293840* fieldtype0; LOC59 = (NIM_BOOL)0; LOC59 = isimportedcpptype_534476_839829468(rectype0); if (!!(LOC59)) goto LA60; fieldtype0 = skiptypes_297099_850551059((*field0).loc.t, IL64(211106232576256)); { NIM_BOOL LOC64; TY533811 LOC68; Ttype293840* LOC69; LOC64 = (NIM_BOOL)0; LOC64 = ((*fieldtype0).kind == ((Ttypekind293244) 16)); if (!(LOC64)) goto LA65; LOC64 = (((*fieldtype0).flags &(1U<<((NU)(((Ttypeflag293431) 0))&31U)))!=0); LA65: ; if (!LOC64) goto LA66; memset((void*)LOC68, 0, sizeof(LOC68)); LOC69 = (Ttype293840*)0; LOC69 = elemtype_321394_3876443242(fieldtype0); LOC68[0] = gettypedescaux_534503_839829468(m0, LOC69, check0); LOC68[1] = sname0; addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_97), LOC68, 2); } goto LA62; LA66: ; { TY533811 LOC73; if (!((*fieldtype0).kind == ((Ttypekind293244) 24))) goto LA71; memset((void*)LOC73, 0, sizeof(LOC73)); LOC73[0] = gettypedescweak_535079_839829468(m0, (*field0).loc.t, check0); LOC73[1] = sname0; addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC73, 2); } goto LA62; LA71: ; { TY536238 LOC77; NimStringDesc* LOC78; if (!!(((*field0).kindU.S4.bitsize == ((NI) 0)))) goto LA75; memset((void*)LOC77, 0, sizeof(LOC77)); LOC77[0] = gettypedescaux_534503_839829468(m0, (*field0).loc.t, check0); LOC77[1] = sname0; LOC78 = (NimStringDesc*)0; LOC78 = nimIntToStr((*field0).kindU.S4.bitsize); LOC77[2] = rope_179277_2381377266(LOC78); addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_98), LOC77, 3); } goto LA62; LA75: ; { TY533811 
LOC80; memset((void*)LOC80, 0, sizeof(LOC80)); LOC80[0] = gettypedescaux_534503_839829468(m0, (*field0).loc.t, check0); LOC80[1] = sname0; addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC80, 2); } LA62: ; } LA60: ; } break; default: { internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_99)); } break; } }BeforeRet: ; return result0; }
/* getrecordfields: thin wrapper — generates all field declarations of typ0
 * by walking its record AST (typ0->n) with no access-path prefix. */
N_NIMCALL(Ropeobj179006*, getrecordfields_535636_839829468)(Tcgen530027* m0, Ttype293840* typ0, Intset269030* check0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = genrecordfieldsaux_535421_839829468(m0, (*typ0).n, NIM_NIL, typ0, check0); return result0; }
/* getrecorddesc: emits the full C struct/union definition for typ0 under the
 * C name name0.  Type flag bit 21 selects a compiler-specific attribute from
 * the Cc table (presumably the packed-struct attribute — TODO confirm).  For
 * object types (kind 17) with no base type it emits either nothing special
 * (pure/incomplete-struct case) or an embedded RTTI field; with a base type
 * it embeds the supertype as the first member (variant depends on the same
 * gcmd==2 / module-flag-27 condition used elsewhere).  Continues on the
 * following lines. */
N_NIMCALL(Ropeobj179006*, getrecorddesc_535643_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ropeobj179006* name0, Intset269030* check0) { Ropeobj179006* result0; NIM_BOOL hasfield0; Ropeobj179006* attribute0; TY536238 LOC6; Ropeobj179006* desc0; NimStringDesc* LOC46; result0 = (Ropeobj179006*)0; hasfield0 = NIM_FALSE; { if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag293431) 21))&31U)))!=0)) goto LA3; attribute0 = rope_179277_2381377266(Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field19); } goto LA1; LA3: ; { attribute0 = NIM_NIL; } LA1: ; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = structorunion_535001_839829468(typ0); LOC6[1] = name0; LOC6[2] = attribute0; result0 = ropecg_533407_839829468(m0, Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field18, LOC6, 3); { if (!((*typ0).kind == ((Ttypekind293244) 17))) goto LA9; { if (!((*typ0).sons->data[((NI) 0)] == NIM_NIL)) goto LA13; { NIM_BOOL LOC17; NIM_BOOL LOC18; TY534289 LOC23; LOC17 = (NIM_BOOL)0; LOC18 = (NIM_BOOL)0; LOC18 = !(((*typ0).sym == NIM_NIL)); if (!(LOC18)) goto LA19; LOC18 = (((*(*typ0).sym).flags &(1U<<((NU)(((Tsymflag293184) 9))&31U)))!=0); LA19: ; LOC17 = LOC18; if (LOC17) goto LA20; LOC17 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag293431) 2))&31U)))!=0); LA20: ; if (!LOC17) goto LA21; memset((void*)LOC23, 0, sizeof(LOC23)); 
/* ...continuation of getrecorddesc: emit base/RTTI member, then the fields
 * from getrecordfields; an empty struct (no desc, no implicit field) gets a
 * dummy member so the C struct is non-empty; finally the closing brace plus
 * platform newline (tnl). */
appcg_533632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_85), LOC23, 0); } goto LA15; LA21: ; { TY533811 LOC25; memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = name0; LOC25[1] = attribute0; appcg_533632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_86), LOC25, 2); hasfield0 = NIM_TRUE; } LA15: ; } goto LA11; LA13: ; { NIM_BOOL LOC27; TY179507 LOC31; Ttype293840* LOC32; LOC27 = (NIM_BOOL)0; LOC27 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC27) goto LA28; LOC27 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA28: ; if (!LOC27) goto LA29; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ttype293840*)0; LOC32 = skiptypes_297099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360)); LOC31[0] = gettypedescaux_534503_839829468(m0, LOC32, check0); appcg_533632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_87), LOC31, 1); hasfield0 = NIM_TRUE; } goto LA11; LA29: ; { TY179507 LOC34; Ttype293840* LOC35; memset((void*)LOC34, 0, sizeof(LOC34)); LOC35 = (Ttype293840*)0; LOC35 = skiptypes_297099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360)); LOC34[0] = gettypedescaux_534503_839829468(m0, LOC35, check0); appcg_533632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_88), LOC34, 1); hasfield0 = NIM_TRUE; } LA11: ; } goto LA7; LA9: ; { TY179507 LOC37; memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = name0; addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_85), LOC37, 1); } LA7: ; desc0 = getrecordfields_535636_839829468(m0, typ0, check0); { NIM_BOOL LOC40; TY534289 LOC44; LOC40 = (NIM_BOOL)0; LOC40 = (desc0 == NIM_NIL); if (!(LOC40)) goto LA41; LOC40 = !(hasfield0); LA41: ; if (!LOC40) goto LA42; memset((void*)LOC44, 0, sizeof(LOC44)); addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_100), LOC44, 0); } goto LA38; LA42: ; { add_179482_2381377266(&result0, desc0); } LA38: ; LOC46 = (NimStringDesc*)0; LOC46 = rawNewString(tnl_177644_4151366050->Sup.len + 2); 
appendString(LOC46, ((NimStringDesc*) &T839829468_101)); appendString(LOC46, tnl_177644_4151366050); add_179487_2381377266(&result0, LOC46); return result0; }
/* gettupledesc: emits the C struct for a Nim tuple type — header via the
 * struct/union keyword and name0, then one member per element (names appear
 * to follow the Field<i> convention, driven by format string &T..._103), a
 * dummy member when the tuple is empty, and the closing brace + newline. */
N_NIMCALL(Ropeobj179006*, gettupledesc_535777_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ropeobj179006* name0, Intset269030* check0) { Ropeobj179006* result0; TY533811 LOC1; Ropeobj179006* desc0; NimStringDesc* LOC13; result0 = (Ropeobj179006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = structorunion_535001_839829468(typ0); LOC1[1] = name0; result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_102), LOC1, 2); desc0 = NIM_NIL; { NI i_535799_839829468; NI HEX3Atmp_535820_839829468; NI LOC3; NI res_535823_839829468; i_535799_839829468 = (NI)0; HEX3Atmp_535820_839829468 = (NI)0; LOC3 = (NI)0; LOC3 = sonslen_296327_850551059(typ0); HEX3Atmp_535820_839829468 = (NI)(LOC3 - ((NI) 1)); res_535823_839829468 = ((NI) 0); { while (1) { TY533811 LOC6; if (!(res_535823_839829468 <= HEX3Atmp_535820_839829468)) goto LA5; i_535799_839829468 = res_535823_839829468; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = gettypedescaux_534503_839829468(m0, (*typ0).sons->data[i_535799_839829468], check0); LOC6[1] = rope_179401_2381377266(((NI64) (i_535799_839829468))); addf_180205_2381377266(&desc0, ((NimStringDesc*) &T839829468_103), LOC6, 2); res_535823_839829468 += ((NI) 1); } LA5: ; } } { NimStringDesc* LOC11; if (!(desc0 == NIM_NIL)) goto LA9; LOC11 = (NimStringDesc*)0; LOC11 = rawNewString(tnl_177644_4151366050->Sup.len + 11); appendString(LOC11, ((NimStringDesc*) &T839829468_104)); appendString(LOC11, tnl_177644_4151366050); add_179487_2381377266(&result0, LOC11); } goto LA7; LA9: ; { add_179482_2381377266(&result0, desc0); } LA7: ; LOC13 = (NimStringDesc*)0; LOC13 = rawNewString(tnl_177644_4151366050->Sup.len + 2); appendString(LOC13, ((NimStringDesc*) &T839829468_101)); appendString(LOC13, tnl_177644_4151366050); add_179487_2381377266(&result0, LOC13); return result0; }
/* Opening of gettypedescaux (the central type-descriptor generator); its body
 * continues beyond this chunk. */
N_NIMCALL(Ropeobj179006*, 
gettypedescaux_534503_839829468)(Tcgen530027* m0, Ttype293840* typ0, Intset269030* check0) { Ropeobj179006* result0; Ttype293840* t_535942_839829468; { result0 = (Ropeobj179006*)0; t_535942_839829468 = getuniquetype_529640_2036603609(typ0); { if (!(t_535942_839829468 == NIM_NIL)) goto LA3; internalerror_197113_155036129(((NimStringDesc*) &T839829468_27)); } LA3: ; { if (!!(((*t_535942_839829468).sym == NIM_NIL))) goto LA7; useheader_533369_839829468(m0, (*t_535942_839829468).sym); } LA7: ; result0 = gettypepre_534972_839829468(m0, t_535942_839829468); { if (!!((result0 == NIM_NIL))) goto LA11; goto BeforeRet; } LA11: ; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = containsorincl_269862_2627731572(check0, (*t_535942_839829468).Sup.id); if (!LOC15) goto LA16; { NIM_BOOL LOC20; NimStringDesc* LOC24; NimStringDesc* LOC25; LOC20 = (NIM_BOOL)0; LOC20 = isimportedcpptype_534476_839829468(typ0); if (LOC20) goto LA21; LOC20 = isimportedcpptype_534476_839829468(t_535942_839829468); LA21: ; if (!!(LOC20)) goto LA22; LOC24 = (NimStringDesc*)0; LOC25 = (NimStringDesc*)0; LOC25 = typetostring_321017_3876443242(typ0, ((Tprefereddesc321011) 0)); LOC24 = rawNewString(LOC25->Sup.len + 28); appendString(LOC24, ((NimStringDesc*) &T839829468_51)); appendString(LOC24, LOC25); internalerror_197113_155036129(LOC24); } LA22: ; } LA16: ; switch ((*t_535942_839829468).kind) { case ((Ttypekind293244) 22): case ((Ttypekind293244) 21): case ((Ttypekind293244) 23): { NimStringDesc* star0; Ttype293840* et0; Ttype293840* LOC38; Ttype293840* etb0; { NIM_BOOL LOC29; NIM_BOOL LOC30; NIM_BOOL LOC33; LOC29 = (NIM_BOOL)0; LOC30 = (NIM_BOOL)0; LOC30 = ((*t_535942_839829468).kind == ((Ttypekind293244) 23)); if (!(LOC30)) goto LA31; LOC30 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag293431) 18))&31U)))!=0)); LA31: ; LOC29 = LOC30; if (!(LOC29)) goto LA32; LOC33 = (NIM_BOOL)0; LOC33 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC33) goto LA34; LOC33 = (((*(*m0).module).flags 
&(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA34: ; LOC29 = LOC33; LA32: ; if (!LOC29) goto LA35; star0 = copyString(((NimStringDesc*) &T839829468_52)); } goto LA27; LA35: ; { star0 = copyString(((NimStringDesc*) &T839829468_53)); } LA27: ; LOC38 = (Ttype293840*)0; LOC38 = skiptypes_297099_850551059(typ0, IL64(211106232576256)); et0 = lastson_296377_850551059(LOC38); etb0 = skiptypes_297099_850551059(et0, IL64(211106232576256)); { if (!((*etb0).kind == ((Ttypekind293244) 4) || (*etb0).kind == ((Ttypekind293244) 16) || (*etb0).kind == ((Ttypekind293244) 27) || (*etb0).kind == ((Ttypekind293244) 48))) goto LA41; et0 = elemtype_321394_3876443242(etb0); etb0 = skiptypes_297099_850551059(et0, IL64(211106232576256)); star0->data[((NI) 0)] = 42; } LA41: ; switch ((*etb0).kind) { case ((Ttypekind293244) 17): case ((Ttypekind293244) 18): { { NIM_BOOL LOC46; Ropeobj179006* LOC50; LOC46 = (NIM_BOOL)0; LOC46 = isimportedcpptype_534476_839829468(etb0); if (!(LOC46)) goto LA47; LOC46 = ((*et0).kind == ((Ttypekind293244) 11)); LA47: ; if (!LOC46) goto LA48; LOC50 = (Ropeobj179006*)0; LOC50 = gettypedescaux_534503_839829468(m0, et0, check0); result0 = HEX26_179447_2381377266(LOC50, star0); } goto LA44; LA48: ; { Ttype293840* x0; Ropeobj179006* name0; Tidobj200004* LOC52; TNimObject* LOC53; x0 = getuniquetype_529640_2036603609(etb0); name0 = gettypeforward_535039_839829468(m0, x0); result0 = HEX26_179447_2381377266(name0, star0); LOC52 = (Tidobj200004*)0; LOC52 = &t_535942_839829468->Sup; LOC53 = (TNimObject*)0; LOC53 = &result0->Sup; idtableput_300094_2984716966((&(*m0).typecache), LOC52, LOC53); pushtype_534958_839829468(m0, x0); } LA44: ; } break; case ((Ttypekind293244) 24): { Ttype293840* x0; Ropeobj179006* name0; Ropeobj179006* LOC55; Tidobj200004* LOC56; TNimObject* LOC57; x0 = getuniquetype_529640_2036603609(etb0); name0 = gettypeforward_535039_839829468(m0, x0); LOC55 = (Ropeobj179006*)0; LOC55 = HEX26_179447_2381377266(name0, ((NimStringDesc*) &T839829468_53)); result0 
= HEX26_179447_2381377266(LOC55, star0); LOC56 = (Tidobj200004*)0; LOC56 = &t_535942_839829468->Sup; LOC57 = (TNimObject*)0; LOC57 = &result0->Sup; idtableput_300094_2984716966((&(*m0).typecache), LOC56, LOC57); pushtype_534958_839829468(m0, x0); } break; default: { Ropeobj179006* LOC59; Tidobj200004* LOC60; TNimObject* LOC61; LOC59 = (Ropeobj179006*)0; LOC59 = gettypedescaux_534503_839829468(m0, et0, check0); result0 = HEX26_179447_2381377266(LOC59, star0); LOC60 = (Tidobj200004*)0; LOC60 = &t_535942_839829468->Sup; LOC61 = (TNimObject*)0; LOC61 = &result0->Sup; idtableput_300094_2984716966((&(*m0).typecache), LOC60, LOC61); } break; } } break; case ((Ttypekind293244) 27): case ((Ttypekind293244) 48): { Ropeobj179006* LOC63; Tidobj200004* LOC64; TNimObject* LOC65; LOC63 = (Ropeobj179006*)0; LOC63 = gettypedescweak_535079_839829468(m0, (*t_535942_839829468).sons->data[((NI) 0)], check0); result0 = HEX26_179447_2381377266(LOC63, ((NimStringDesc*) &T839829468_53)); LOC64 = (Tidobj200004*)0; LOC64 = &t_535942_839829468->Sup; LOC65 = (TNimObject*)0; LOC65 = &result0->Sup; idtableput_300094_2984716966((&(*m0).typecache), LOC64, LOC65); } break; case ((Ttypekind293244) 20): case ((Ttypekind293244) 14): { Ttype293840* t0; { if (!((*t_535942_839829468).kind == ((Ttypekind293244) 20))) goto LA69; t0 = lastson_296377_850551059(t_535942_839829468); } goto LA67; LA69: ; { t0 = t_535942_839829468; } LA67: ; result0 = cachegettype_534591_839829468((*m0).typecache, t0); { if (!(result0 == NIM_NIL)) goto LA74; result0 = gettypename_534313_839829468(t0); { NIM_BOOL LOC78; NIM_BOOL LOC80; Tidobj200004* LOC84; TNimObject* LOC85; NI size0; NU32 owner0; LOC78 = (NIM_BOOL)0; LOC78 = isimportedcpptype_534476_839829468(t0); if (LOC78) goto LA79; LOC80 = (NIM_BOOL)0; LOC80 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag293184) 5))&31U)))!=0); if (!(LOC80)) goto LA81; LOC80 = ((*(*t0).sym).magic == ((Tmagic293524) 0)); LA81: ; LOC78 = LOC80; LA79: ; if (!!(LOC78)) goto LA82; LOC84 = 
(Tidobj200004*)0; LOC84 = &t0->Sup; LOC85 = (TNimObject*)0; LOC85 = &result0->Sup; idtableput_300094_2984716966((&(*m0).typecache), LOC84, LOC85); size0 = (NI)0; { NI64 LOC88; TY179507 LOC91; LOC88 = (NI64)0; LOC88 = firstord_321001_3876443242(t0); if (!(LOC88 < IL64(0))) goto LA89; memset((void*)LOC91, 0, sizeof(LOC91)); LOC91[0] = result0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC91, 1); size0 = ((NI) 4); } goto LA86; LA89: ; { NI64 LOC93; LOC93 = (NI64)0; LOC93 = getsize_321135_3876443242(t0); size0 = ((NI) (LOC93)); switch (size0) { case ((NI) 1): { TY179507 LOC95; memset((void*)LOC95, 0, sizeof(LOC95)); LOC95[0] = result0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], ((NimStringDesc*) &T839829468_60), LOC95, 1); } break; case ((NI) 2): { TY179507 LOC97; memset((void*)LOC97, 0, sizeof(LOC97)); LOC97[0] = result0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], ((NimStringDesc*) &T839829468_61), LOC97, 1); } break; case ((NI) 4): { TY179507 LOC99; memset((void*)LOC99, 0, sizeof(LOC99)); LOC99[0] = result0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC99, 1); } break; case ((NI) 8): { TY179507 LOC101; memset((void*)LOC101, 0, sizeof(LOC101)); LOC101[0] = result0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], ((NimStringDesc*) &T839829468_62), LOC101, 1); } break; default: { internalerror_197100_155036129((*(*t0).sym).info, ((NimStringDesc*) &T839829468_63)); } break; } } LA86: ; owner0 = hashowner_533977_839829468((*t0).sym); { NIM_BOOL LOC105; TY204017* vals0; Enumdesc204007 LOC114; LOC105 = (NIM_BOOL)0; LOC105 = hasenum_204230_1926258066(gdebuginfo_204470_1926258066, (*(*(*t0).sym).name).s, ((NI) ((*(*t0).sym).info.line)), owner0); if (!!(LOC105)) goto LA106; vals0 = (TY204017*) newSeq((&NTI204017), 0); { NI i_536144_839829468; NI HEX3Atmp_536648_839829468; NI LOC109; NI 
res_536651_839829468; i_536144_839829468 = (NI)0; HEX3Atmp_536648_839829468 = (NI)0; LOC109 = (NI)0; LOC109 = len_294081_850551059((*t0).n); HEX3Atmp_536648_839829468 = (NI)(LOC109 - ((NI) 1)); res_536651_839829468 = ((NI) 0); { while (1) { Tsym293834* field0; TY204018 LOC112; NimStringDesc* LOC113; if (!(res_536651_839829468 <= HEX3Atmp_536648_839829468)) goto LA111; i_536144_839829468 = res_536651_839829468; field0 = (*(*(*t0).n).kindU.S6.sons->data[i_536144_839829468]).kindU.S4.sym; memset((void*)(&LOC112), 0, sizeof(LOC112)); LOC112.Field0 = copyString((*(*field0).name).s); LOC112.Field1 = (*field0).position; vals0 = (TY204017*) incrSeqV2(&(vals0)->Sup, sizeof(TY204018)); LOC113 = (NimStringDesc*)0; LOC113 = vals0->data[vals0->Sup.len].Field0; vals0->data[vals0->Sup.len].Field0 = copyStringRC1(LOC112.Field0); if (LOC113) nimGCunrefNoCycle(LOC113); vals0->data[vals0->Sup.len].Field1 = LOC112.Field1; ++vals0->Sup.len; res_536651_839829468 += ((NI) 1); } LA111: ; } } memset((void*)(&LOC114), 0, sizeof(LOC114)); memset((void*)(&LOC114), 0, sizeof(LOC114)); LOC114.size = size0; LOC114.owner = owner0; LOC114.id = (*(*t0).sym).Sup.id; LOC114.name = copyString((*(*(*t0).sym).name).s); genericSeqAssign((&LOC114.values), vals0, (&NTI204017)); registerenum_204419_1926258066((&gdebuginfo_204470_1926258066), (&LOC114)); } LA106: ; } LA82: ; } LA74: ; } break; case ((Ttypekind293244) 25): { Tidobj200004* LOC116; TNimObject* LOC117; Ropeobj179006* rettype0; Ropeobj179006* desc0; result0 = gettypename_534313_839829468(t_535942_839829468); LOC116 = (Tidobj200004*)0; LOC116 = &t_535942_839829468->Sup; LOC117 = (TNimObject*)0; LOC117 = &result0->Sup; idtableput_300094_2984716966((&(*m0).typecache), LOC116, LOC117); rettype0 = (Ropeobj179006*)0; desc0 = (Ropeobj179006*)0; genprocparams_535115_839829468(m0, t_535942_839829468, &rettype0, &desc0, check0, NIM_TRUE, NIM_TRUE); { NIM_BOOL LOC120; LOC120 = (NIM_BOOL)0; LOC120 = isimportedtype_534449_839829468(t_535942_839829468); if 
(!!(LOC120)) goto LA121; { TY536235 LOC127; if (!!(((*t_535942_839829468).callconv == ((Tcallingconvention293002) 8)))) goto LA125; memset((void*)LOC127, 0, sizeof(LOC127)); LOC127[0] = rope_179277_2381377266(Callingconvtostr_534585_839829468[((*t_535942_839829468).callconv)- 0]); LOC127[1] = rettype0; LOC127[2] = result0; LOC127[3] = desc0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC127, 4); } goto LA123; LA125: ; { TY536238 LOC129; memset((void*)LOC129, 0, sizeof(LOC129)); LOC129[0] = result0; LOC129[1] = rettype0; LOC129[2] = desc0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC129, 3); } LA123: ; } LA121: ; } break; case ((Ttypekind293244) 24): { Tidobj200004* LOC144; Ropeobj179006* LOC145; TNimObject* LOC146; result0 = cachegettype_534591_839829468((*m0).forwtypecache, t_535942_839829468); { Tidobj200004* LOC142; TNimObject* LOC143; if (!(result0 == NIM_NIL)) goto LA133; result0 = gettypename_534313_839829468(t_535942_839829468); { NIM_BOOL LOC137; NimStringDesc* LOC140; TY533811 LOC141; LOC137 = (NIM_BOOL)0; LOC137 = isimportedtype_534449_839829468(t_535942_839829468); if (!!(LOC137)) goto LA138; LOC140 = (NimStringDesc*)0; LOC140 = getforwardstructformat_535015_839829468(m0); memset((void*)LOC141, 0, sizeof(LOC141)); LOC141[0] = structorunion_535001_839829468(t_535942_839829468); LOC141[1] = result0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 2))- 0], LOC140, LOC141, 2); } LA138: ; LOC142 = (Tidobj200004*)0; LOC142 = &t_535942_839829468->Sup; LOC143 = (TNimObject*)0; LOC143 = &result0->Sup; idtableput_300094_2984716966((&(*m0).forwtypecache), LOC142, LOC143); } LA133: ; LOC144 = (Tidobj200004*)0; LOC144 = &t_535942_839829468->Sup; LOC145 = (Ropeobj179006*)0; LOC145 = HEX26_179447_2381377266(result0, ((NimStringDesc*) &T839829468_53)); LOC146 = (TNimObject*)0; LOC146 = &LOC145->Sup; idtableput_300094_2984716966((&(*m0).typecache), 
LOC144, LOC146); { NIM_BOOL LOC149; LOC149 = (NIM_BOOL)0; LOC149 = isimportedtype_534449_839829468(t_535942_839829468); if (!!(LOC149)) goto LA150; { Ttype293840* LOC154; NimStringDesc* LOC157; NimStringDesc* LOC158; TY533811 LOC166; LOC154 = (Ttype293840*)0; LOC154 = skiptypes_297099_850551059((*t_535942_839829468).sons->data[((NI) 0)], IL64(211106232576256)); if (!!(((*LOC154).kind == ((Ttypekind293244) 3)))) goto LA155; LOC157 = (NimStringDesc*)0; LOC158 = (NimStringDesc*)0; { NIM_BOOL LOC161; LOC161 = (NIM_BOOL)0; LOC161 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC161) goto LA162; LOC161 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA162: ; if (!LOC161) goto LA163; LOC158 = copyString(((NimStringDesc*) &T839829468_76)); } goto LA159; LA163: ; { LOC158 = copyString(((NimStringDesc*) &T839829468_77)); } LA159: ; LOC157 = rawNewString(LOC158->Sup.len + 31); appendString(LOC157, LOC158); appendString(LOC157, ((NimStringDesc*) &T839829468_78)); memset((void*)LOC166, 0, sizeof(LOC166)); LOC166[0] = gettypedescaux_534503_839829468(m0, (*t_535942_839829468).sons->data[((NI) 0)], check0); LOC166[1] = result0; appcg_533632_839829468(m0, &(*m0).s[(((Tcfilesection530005) 4))- 0], LOC157, LOC166, 2); } goto LA152; LA155: ; { result0 = rope_179277_2381377266(((NimStringDesc*) &T839829468_79)); } LA152: ; } LA150: ; add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_53)); } break; case ((Ttypekind293244) 4): case ((Ttypekind293244) 16): { NI64 n0; Tidobj200004* LOC173; TNimObject* LOC174; n0 = lengthord_321007_3876443242(t_535942_839829468); { if (!(n0 <= IL64(0))) goto LA171; n0 = IL64(1); } LA171: ; result0 = gettypename_534313_839829468(t_535942_839829468); LOC173 = (Tidobj200004*)0; LOC173 = &t_535942_839829468->Sup; LOC174 = (TNimObject*)0; LOC174 = &result0->Sup; idtableput_300094_2984716966((&(*m0).typecache), LOC173, LOC174); { NIM_BOOL LOC177; Ropeobj179006* foo0; TY536238 LOC180; LOC177 = (NIM_BOOL)0; LOC177 = 
isimportedtype_534449_839829468(t_535942_839829468); if (!!(LOC177)) goto LA178; foo0 = gettypedescaux_534503_839829468(m0, (*t_535942_839829468).sons->data[((NI) 1)], check0); memset((void*)LOC180, 0, sizeof(LOC180)); LOC180[0] = foo0; LOC180[1] = result0; LOC180[2] = rope_179401_2381377266(n0); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], ((NimStringDesc*) &T839829468_80), LOC180, 3); } LA178: ; } break; case ((Ttypekind293244) 17): case ((Ttypekind293244) 18): { { NIM_BOOL LOC184; Ropeobj179006* cppname0; NI i0; NI chunkstart0; Ropeobj179006* LOC226; LOC184 = (NIM_BOOL)0; LOC184 = isimportedcpptype_534476_839829468(t_535942_839829468); if (!(LOC184)) goto LA185; LOC184 = ((*typ0).kind == ((Ttypekind293244) 11)); LA185: ; if (!LOC184) goto LA186; cppname0 = gettypename_534313_839829468(t_535942_839829468); i0 = ((NI) 0); chunkstart0 = ((NI) 0); { while (1) { if (!(i0 < ((*cppname0).data ? (*cppname0).data->Sup.len : 0))) goto LA189; { NI chunkend0; NI idx0; NI stars0; if (!((NU8)((*cppname0).data->data[i0]) == (NU8)(39))) goto LA192; chunkend0 = (i0 - 1); idx0 = (NI)0; stars0 = (NI)0; { NIM_BOOL LOC196; NimStringDesc* LOC199; Ttype293840* typeinslot0; LOC196 = (NIM_BOOL)0; LOC196 = scancppgenericslot_535827_839829468((*cppname0).data, (&i0), (&idx0), (&stars0)); if (!LOC196) goto LA197; LOC199 = (NimStringDesc*)0; LOC199 = copyStrLast((*cppname0).data, chunkstart0, chunkend0); add_179487_2381377266(&result0, LOC199); chunkstart0 = i0; typeinslot0 = resolvestarsincpptype_535891_839829468(typ0, (NI)(idx0 + ((NI) 1)), stars0); { NIM_BOOL LOC202; TY534289 LOC206; Ropeobj179006* LOC207; LOC202 = (NIM_BOOL)0; LOC202 = (typeinslot0 == NIM_NIL); if (LOC202) goto LA203; LOC202 = ((*typeinslot0).kind == ((Ttypekind293244) 62)); LA203: ; if (!LOC202) goto LA204; memset((void*)LOC206, 0, sizeof(LOC206)); LOC207 = (Ropeobj179006*)0; LOC207 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_26), LOC206, 0); add_179482_2381377266(&result0, LOC207); } 
goto LA200; LA204: ; { Ropeobj179006* LOC209; LOC209 = (Ropeobj179006*)0; LOC209 = gettypedescaux_534503_839829468(m0, typeinslot0, check0); add_179482_2381377266(&result0, LOC209); } LA200: ; } LA197: ; } goto LA190; LA192: ; { i0 += ((NI) 1); } LA190: ; } LA189: ; } { NimStringDesc* LOC215; if (!!((chunkstart0 == ((NI) 0)))) goto LA213; LOC215 = (NimStringDesc*)0; LOC215 = copyStr((*cppname0).data, chunkstart0); add_179487_2381377266(&result0, LOC215); } goto LA211; LA213: ; { result0 = HEX26_179447_2381377266(cppname0, ((NimStringDesc*) &T839829468_82)); { NI i_536516_839829468; NI HEX3Atmp_536664_839829468; NI LOC218; NI res_536667_839829468; i_536516_839829468 = (NI)0; HEX3Atmp_536664_839829468 = (NI)0; LOC218 = (NI)0; LOC218 = len_296339_850551059(typ0); HEX3Atmp_536664_839829468 = (NI)(LOC218 - ((NI) 2)); res_536667_839829468 = ((NI) 1); { while (1) { Ropeobj179006* LOC225; if (!(res_536667_839829468 <= HEX3Atmp_536664_839829468)) goto LA220; i_536516_839829468 = res_536667_839829468; { if (!(((NI) 1) < i_536516_839829468)) goto LA223; add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_83)); } LA223: ; LOC225 = (Ropeobj179006*)0; LOC225 = gettypedescaux_534503_839829468(m0, (*typ0).sons->data[i_536516_839829468], check0); add_179482_2381377266(&result0, LOC225); res_536667_839829468 += ((NI) 1); } LA220: ; } } add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_84)); } LA211: ; LOC226 = (Ropeobj179006*)0; LOC226 = getrecorddesc_535643_839829468(m0, t_535942_839829468, result0, check0); } goto LA182; LA186: ; { Tidobj200004* LOC241; TNimObject* LOC242; Ropeobj179006* recdesc0; result0 = cachegettype_534591_839829468((*m0).forwtypecache, t_535942_839829468); { Tidobj200004* LOC239; TNimObject* LOC240; if (!(result0 == NIM_NIL)) goto LA230; result0 = gettypename_534313_839829468(t_535942_839829468); { NIM_BOOL LOC234; NimStringDesc* LOC237; TY533811 LOC238; LOC234 = (NIM_BOOL)0; LOC234 = isimportedtype_534449_839829468(t_535942_839829468); 
if (!!(LOC234)) goto LA235; LOC237 = (NimStringDesc*)0; LOC237 = getforwardstructformat_535015_839829468(m0); memset((void*)LOC238, 0, sizeof(LOC238)); LOC238[0] = structorunion_535001_839829468(t_535942_839829468); LOC238[1] = result0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 2))- 0], LOC237, LOC238, 2); } LA235: ; LOC239 = (Tidobj200004*)0; LOC239 = &t_535942_839829468->Sup; LOC240 = (TNimObject*)0; LOC240 = &result0->Sup; idtableput_300094_2984716966((&(*m0).forwtypecache), LOC239, LOC240); } LA230: ; LOC241 = (Tidobj200004*)0; LOC241 = &t_535942_839829468->Sup; LOC242 = (TNimObject*)0; LOC242 = &result0->Sup; idtableput_300094_2984716966((&(*m0).typecache), LOC241, LOC242); { if (!!(((*t_535942_839829468).kind == ((Ttypekind293244) 18)))) goto LA245; recdesc0 = getrecorddesc_535643_839829468(m0, t_535942_839829468, result0, check0); } goto LA243; LA245: ; { recdesc0 = gettupledesc_535777_839829468(m0, t_535942_839829468, result0, check0); } LA243: ; { NIM_BOOL LOC250; LOC250 = (NIM_BOOL)0; LOC250 = isimportedtype_534449_839829468(t_535942_839829468); if (!!(LOC250)) goto LA251; add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], recdesc0); } LA251: ; } LA182: ; } break; case ((Ttypekind293244) 19): { Ttype293840* LOC254; Ropeobj179006* LOC255; Tidobj200004* LOC256; TNimObject* LOC257; LOC254 = (Ttype293840*)0; LOC254 = lastson_296377_850551059(t_535942_839829468); LOC255 = (Ropeobj179006*)0; LOC255 = gettypename_534313_839829468(LOC254); result0 = HEX26_179447_2381377266(LOC255, ((NimStringDesc*) &T839829468_105)); LOC256 = (Tidobj200004*)0; LOC256 = &t_535942_839829468->Sup; LOC257 = (TNimObject*)0; LOC257 = &result0->Sup; idtableput_300094_2984716966((&(*m0).typecache), LOC256, LOC257); { NIM_BOOL LOC260; NI s0; NI64 LOC263; LOC260 = (NIM_BOOL)0; LOC260 = isimportedtype_534449_839829468(t_535942_839829468); if (!!(LOC260)) goto LA261; LOC263 = (NI64)0; LOC263 = getsize_321135_3876443242(t_535942_839829468); s0 = ((NI) (LOC263)); 
switch (s0) { case ((NI) 1): case ((NI) 2): case ((NI) 4): case ((NI) 8): { TY533811 LOC265; memset((void*)LOC265, 0, sizeof(LOC265)); LOC265[0] = result0; LOC265[1] = rope_179401_2381377266(((NI64) ((NI)(s0 * ((NI) 8))))); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], ((NimStringDesc*) &T839829468_106), LOC265, 2); } break; default: { TY533811 LOC267; NI64 LOC268; memset((void*)LOC267, 0, sizeof(LOC267)); LOC267[0] = result0; LOC268 = (NI64)0; LOC268 = getsize_321135_3876443242(t_535942_839829468); LOC267[1] = rope_179401_2381377266(LOC268); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], ((NimStringDesc*) &T839829468_107), LOC267, 2); } break; } } LA261: ; } break; case ((Ttypekind293244) 11): case ((Ttypekind293244) 13): case ((Ttypekind293244) 15): case ((Ttypekind293244) 46): case ((Ttypekind293244) 47): case ((Ttypekind293244) 49): case ((Ttypekind293244) 8): { Ttype293840* LOC270; LOC270 = (Ttype293840*)0; LOC270 = lastson_296377_850551059(t_535942_839829468); result0 = gettypedescaux_534503_839829468(m0, LOC270, check0); } break; default: { NimStringDesc* LOC272; LOC272 = (NimStringDesc*)0; LOC272 = rawNewString(reprEnum((NI)(*t_535942_839829468).kind, (&NTI293244))->Sup.len + 16); appendString(LOC272, ((NimStringDesc*) &T839829468_108)); appendString(LOC272, reprEnum((NI)(*t_535942_839829468).kind, (&NTI293244))); appendChar(LOC272, 41); internalerror_197113_155036129(LOC272); result0 = NIM_NIL; } break; } excl_269841_2627731572(check0, (*t_535942_839829468).Sup.id); }BeforeRet: ; return result0; } static N_INLINE(NIM_BOOL, iscompiletimeonly_329706_3876443242)(Ttype293840* t0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = ((*t0).kind == ((Ttypekind293244) 8) || (*t0).kind == ((Ttypekind293244) 59)); return result0; } N_NIMCALL(Tstorageloc293812, paramstorageloc_535098_839829468)(Tsym293834* param0) { Tstorageloc293812 result0; result0 = (Tstorageloc293812)0; { Ttype293840* LOC3; LOC3 = (Ttype293840*)0; LOC3 = 
skiptypes_297099_850551059((*param0).typ, 8388864); if (!!(((*LOC3).kind == ((Ttypekind293244) 16) || (*LOC3).kind == ((Ttypekind293244) 27) || (*LOC3).kind == ((Ttypekind293244) 48) || (*LOC3).kind == ((Ttypekind293244) 4)))) goto LA4; result0 = ((Tstorageloc293812) 2); } goto LA1; LA4: ; { result0 = ((Tstorageloc293812) 0); } LA1: ; return result0; } N_NIMCALL(NIM_BOOL, ccgintroducedptr_534609_839829468)(Tsym293834* s0) { NIM_BOOL result0; Ttype293840* pt0; { result0 = (NIM_BOOL)0; pt0 = skiptypes_297099_850551059((*s0).typ, IL64(211106232576256)); { if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag293431) 13))&31U)))!=0)) goto LA3; result0 = NIM_TRUE; goto BeforeRet; } goto LA1; LA3: ; { if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag293431) 12))&31U)))!=0)) goto LA6; result0 = NIM_FALSE; goto BeforeRet; } goto LA1; LA6: ; LA1: ; switch ((*pt0).kind) { case ((Ttypekind293244) 17): { { NIM_BOOL LOC11; NI64 LOC13; LOC11 = (NIM_BOOL)0; LOC11 = (((*s0).options &(1U<<((NU)(((Toption170009) 18))&31U)))!=0); if (LOC11) goto LA12; LOC13 = (NI64)0; LOC13 = getsize_321135_3876443242(pt0); LOC11 = (((NI64) ((NI)(floatsize_177642_4151366050 * ((NI) 2)))) < LOC13); LA12: ; if (!LOC11) goto LA14; result0 = NIM_TRUE; } goto LA9; LA14: ; { NIM_BOOL LOC17; LOC17 = (NIM_BOOL)0; LOC17 = (((*pt0).flags &(1U<<((NU)(((Ttypeflag293431) 2))&31U)))!=0); if (!(LOC17)) goto LA18; LOC17 = ((*pt0).sons->data[((NI) 0)] == NIM_NIL); LA18: ; if (!LOC17) goto LA19; result0 = NIM_FALSE; } goto LA9; LA19: ; { result0 = NIM_TRUE; } LA9: ; } break; case ((Ttypekind293244) 18): { NIM_BOOL LOC23; NI64 LOC24; LOC23 = (NIM_BOOL)0; LOC24 = (NI64)0; LOC24 = getsize_321135_3876443242(pt0); LOC23 = (((NI64) ((NI)(floatsize_177642_4151366050 * ((NI) 2)))) < LOC24); if (LOC23) goto LA25; LOC23 = (((*s0).options &(1U<<((NU)(((Toption170009) 18))&31U)))!=0); LA25: ; result0 = LOC23; } break; default: { result0 = NIM_FALSE; } break; } }BeforeRet: ; return result0; } N_NIMCALL(Tctypekind530007, 
mapreturntype_534445_839829468)(Ttype293840* typ0) { Tctypekind530007 result0; result0 = (Tctypekind530007)0; result0 = maptype_534393_839829468(typ0); return result0; } N_NIMCALL(void, genprocparams_535115_839829468)(Tcgen530027* m0, Ttype293840* t0, Ropeobj179006** rettype0, Ropeobj179006** params0, Intset269030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0) { unsureAsgnRef((void**) (&(*params0)), NIM_NIL); { NIM_BOOL LOC3; TY534289 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).sons->data[((NI) 0)] == NIM_NIL); if (LOC3) goto LA4; LOC3 = isinvalidreturntype_534548_839829468((*t0).sons->data[((NI) 0)]); LA4: ; if (!LOC3) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); unsureAsgnRef((void**) (&(*rettype0)), HEX25_179905_2381377266(((NimStringDesc*) &T839829468_26), LOC7, 0)); } goto LA1; LA5: ; { unsureAsgnRef((void**) (&(*rettype0)), gettypedescaux_534503_839829468(m0, (*t0).sons->data[((NI) 0)], check0)); } LA1: ; { NI i_535152_839829468; NI HEX3Atmp_535353_839829468; NI LOC10; NI res_535356_839829468; i_535152_839829468 = (NI)0; HEX3Atmp_535353_839829468 = (NI)0; LOC10 = (NI)0; LOC10 = sonslen_296351_850551059((*t0).n); HEX3Atmp_535353_839829468 = (NI)(LOC10 - ((NI) 1)); res_535356_839829468 = ((NI) 1); { while (1) { if (!(res_535356_839829468 <= HEX3Atmp_535353_839829468)) goto LA12; i_535152_839829468 = res_535356_839829468; { Tsym293834* param0; Ropeobj179006* LOC29; Tstorageloc293812 LOC30; TY534289 LOC45; Ropeobj179006* LOC46; Ttype293840* arr0; NI j0; { if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_535152_839829468]).kind == ((Tnodekind293020) 3)))) goto LA16; internalerror_197100_155036129((*(*t0).n).info, ((NimStringDesc*) &T839829468_109)); } LA16: ; param0 = (*(*(*t0).n).kindU.S6.sons->data[i_535152_839829468]).kindU.S4.sym; { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = iscompiletimeonly_329706_3876443242((*param0).typ); if (!LOC20) goto LA21; goto LA13; } LA21: ; { TY534289 LOC27; Ropeobj179006* LOC28; if (!!(((*params0) == NIM_NIL))) goto LA25; 
memset((void*)LOC27, 0, sizeof(LOC27)); LOC28 = (Ropeobj179006*)0; LOC28 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_110), LOC27, 0); add_179482_2381377266(params0, LOC28); } LA25: ; LOC29 = (Ropeobj179006*)0; LOC29 = manglename_534205_839829468(param0); LOC30 = (Tstorageloc293812)0; LOC30 = paramstorageloc_535098_839829468(param0); fillloc_533282_839829468((&(*param0).loc), ((Tlockind293808) 4), (*param0).typ, LOC29, LOC30); { NIM_BOOL LOC33; Ropeobj179006* LOC36; TY534289 LOC37; Ropeobj179006* LOC38; LOC33 = (NIM_BOOL)0; LOC33 = ccgintroducedptr_534609_839829468(param0); if (!LOC33) goto LA34; LOC36 = (Ropeobj179006*)0; LOC36 = gettypedescweak_535079_839829468(m0, (*param0).typ, check0); add_179482_2381377266(params0, LOC36); memset((void*)LOC37, 0, sizeof(LOC37)); LOC38 = (Ropeobj179006*)0; LOC38 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_53), LOC37, 0); add_179482_2381377266(params0, LOC38); (*param0).loc.flags |= ((NU16)1)<<((((Tlocflag293810) 0))%(sizeof(NU16)*8)); (*param0).loc.s = ((Tstorageloc293812) 0); } goto LA31; LA34: ; { Ropeobj179006* LOC42; if (!weakdep0) goto LA40; LOC42 = (Ropeobj179006*)0; LOC42 = gettypedescweak_535079_839829468(m0, (*param0).typ, check0); add_179482_2381377266(params0, LOC42); } goto LA31; LA40: ; { Ropeobj179006* LOC44; LOC44 = (Ropeobj179006*)0; LOC44 = gettypedescaux_534503_839829468(m0, (*param0).typ, check0); add_179482_2381377266(params0, LOC44); } LA31: ; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (Ropeobj179006*)0; LOC46 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_111), LOC45, 0); add_179482_2381377266(params0, LOC46); add_179482_2381377266(params0, (*param0).loc.r); arr0 = (*param0).typ; { if (!((*arr0).kind == ((Ttypekind293244) 23))) goto LA49; arr0 = (*arr0).sons->data[((NI) 0)]; } LA49: ; j0 = ((NI) 0); { while (1) { TY533811 LOC57; if (!((*arr0).kind == ((Ttypekind293244) 27) || (*arr0).kind == ((Ttypekind293244) 48))) goto LA52; { if (!((*(*param0).typ).kind == 
((Ttypekind293244) 23))) goto LA55; (*param0).loc.s = ((Tstorageloc293812) 0); } LA55: ; memset((void*)LOC57, 0, sizeof(LOC57)); LOC57[0] = (*param0).loc.r; LOC57[1] = rope_179401_2381377266(((NI64) (j0))); addf_180205_2381377266(params0, ((NimStringDesc*) &T839829468_112), LOC57, 2); j0 += ((NI) 1); arr0 = (*arr0).sons->data[((NI) 0)]; } LA52: ; } } LA13: ; res_535356_839829468 += ((NI) 1); } LA12: ; } } { NIM_BOOL LOC60; Ttype293840* arr0; TY534289 LOC76; LOC60 = (NIM_BOOL)0; LOC60 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL)); if (!(LOC60)) goto LA61; LOC60 = isinvalidreturntype_534548_839829468((*t0).sons->data[((NI) 0)]); LA61: ; if (!LOC60) goto LA62; arr0 = (*t0).sons->data[((NI) 0)]; { if (!!(((*params0) == NIM_NIL))) goto LA66; add_179487_2381377266(params0, ((NimStringDesc*) &T839829468_110)); } LA66: ; { Tctypekind530007 LOC70; Ropeobj179006* LOC73; LOC70 = (Tctypekind530007)0; LOC70 = mapreturntype_534445_839829468((*t0).sons->data[((NI) 0)]); if (!!((LOC70 == ((Tctypekind530007) 17)))) goto LA71; LOC73 = (Ropeobj179006*)0; LOC73 = gettypedescweak_535079_839829468(m0, arr0, check0); add_179482_2381377266(params0, LOC73); add_179487_2381377266(params0, ((NimStringDesc*) &T839829468_53)); } goto LA68; LA71: ; { Ropeobj179006* LOC75; LOC75 = (Ropeobj179006*)0; LOC75 = gettypedescaux_534503_839829468(m0, arr0, check0); add_179482_2381377266(params0, LOC75); } LA68: ; memset((void*)LOC76, 0, sizeof(LOC76)); addf_180205_2381377266(params0, ((NimStringDesc*) &T839829468_113), LOC76, 0); } LA62: ; { NIM_BOOL LOC79; LOC79 = (NIM_BOOL)0; LOC79 = ((*t0).callconv == ((Tcallingconvention293002) 8)); if (!(LOC79)) goto LA80; LOC79 = declareenvironment0; LA80: ; if (!LOC79) goto LA81; { if (!!(((*params0) == NIM_NIL))) goto LA85; add_179487_2381377266(params0, ((NimStringDesc*) &T839829468_110)); } LA85: ; add_179487_2381377266(params0, ((NimStringDesc*) &T839829468_114)); } LA81: ; { if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag293431) 0))&31U)))!=0)) goto LA89; { if 
(!!(((*params0) == NIM_NIL))) goto LA93; add_179487_2381377266(params0, ((NimStringDesc*) &T839829468_110)); } LA93: ; add_179487_2381377266(params0, ((NimStringDesc*) &T839829468_115)); } LA89: ; { if (!((*params0) == NIM_NIL)) goto LA97; add_179487_2381377266(params0, ((NimStringDesc*) &T839829468_116)); } goto LA95; LA97: ; { add_179487_2381377266(params0, ((NimStringDesc*) &T839829468_117)); } LA95: ; unsureAsgnRef((void**) (&(*params0)), HEX26_179452_2381377266(((NimStringDesc*) &T839829468_118), (*params0))); } N_NIMCALL(Ropeobj179006*, genprocheader_536867_839829468)(Tcgen530027* m0, Tsym293834* prc0) { Ropeobj179006* result0; Ropeobj179006* rettype0; Ropeobj179006* params0; Intset269030 check0; Ropeobj179006* LOC13; result0 = (Ropeobj179006*)0; rettype0 = (Ropeobj179006*)0; params0 = (Ropeobj179006*)0; genclinedir_533813_839829468(&result0, (*prc0).info); { if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag293810) 5))&15U)))!=0)) goto LA3; { if (!(((*m0).flags &(1U<<((NU)(((Codegenflag530025) 3))&7U)))!=0)) goto LA7; add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_22)); } goto LA5; LA7: ; { add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_23)); } LA5: ; } goto LA1; LA3: ; { if (!((*(*prc0).typ).callconv == ((Tcallingconvention293002) 5))) goto LA11; add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_24)); } goto LA1; LA11: ; LA1: ; memset((void*)(&check0), 0, sizeof(check0)); chckNil((void*)(&check0)); memset((void*)(&check0), 0, sizeof(check0)); initintset_269885_2627731572((&check0)); LOC13 = (Ropeobj179006*)0; LOC13 = manglename_534205_839829468(prc0); fillloc_533282_839829468((&(*prc0).loc), ((Tlockind293808) 7), (*prc0).typ, LOC13, ((Tstorageloc293812) 0)); genprocparams_535115_839829468(m0, (*prc0).typ, &rettype0, &params0, (&check0), NIM_TRUE, NIM_FALSE); { TY536235 LOC18; if (!(*prc0).constraint == 0) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = 
rope_179277_2381377266(Callingconvtostr_534585_839829468[((*(*prc0).typ).callconv)- 0]); LOC18[1] = rettype0; LOC18[2] = (*prc0).loc.r; LOC18[3] = params0; addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_119), LOC18, 4); } goto LA14; LA16: ; { TY536238 LOC20; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = rettype0; LOC20[1] = (*prc0).loc.r; LOC20[2] = params0; result0 = HEX25_179905_2381377266((*(*prc0).constraint).kindU.S3.strval, LOC20, 3); } LA14: ; return result0; } static N_INLINE(Tnode293802*, HEX5BHEX5D_294238_850551059)(Tnode293802* n0, NI i0) { Tnode293802* result0; result0 = (Tnode293802*)0; result0 = (*n0).kindU.S6.sons->data[i0]; return result0; } N_NIMCALL(Tnode293802*, easyresultasgn_561191_839829468)(Tnode293802* n0) { Tnode293802* result0; { result0 = (Tnode293802*)0; switch ((*n0).kind) { case ((Tnodekind293020) 115): case ((Tnodekind293020) 126): { NI i0; i0 = ((NI) 0); { while (1) { NIM_BOOL LOC4; NI LOC5; Tnode293802* LOC7; LOC4 = (NIM_BOOL)0; LOC5 = (NI)0; LOC5 = len_294081_850551059(n0); LOC4 = (i0 < LOC5); if (!(LOC4)) goto LA6; LOC7 = (Tnode293802*)0; LOC7 = HEX5BHEX5D_294238_850551059(n0, i0); LOC4 = ((*LOC7).kind == ((Tnodekind293020) 1) || (*LOC7).kind >= ((Tnodekind293020) 79) && (*LOC7).kind <= ((Tnodekind293020) 81) || (*LOC7).kind == ((Tnodekind293020) 84) || (*LOC7).kind == ((Tnodekind293020) 98) || (*LOC7).kind == ((Tnodekind293020) 101) || (*LOC7).kind == ((Tnodekind293020) 125)); LA6: ; if (!LOC4) goto LA3; i0 += ((NI) 1); } LA3: ; } { NI LOC10; Tnode293802* LOC13; LOC10 = (NI)0; LOC10 = len_294081_850551059(n0); if (!(i0 < LOC10)) goto LA11; LOC13 = (Tnode293802*)0; LOC13 = HEX5BHEX5D_294238_850551059(n0, i0); result0 = easyresultasgn_561191_839829468(LOC13); } LA11: ; } break; case ((Tnodekind293020) 73): case ((Tnodekind293020) 74): { { NIM_BOOL LOC17; Tnode293802* LOC18; Tnode293802* LOC20; LOC17 = (NIM_BOOL)0; LOC18 = (Tnode293802*)0; LOC18 = HEX5BHEX5D_294238_850551059(n0, ((NI) 0)); LOC17 = ((*LOC18).kind 
== ((Tnodekind293020) 3)); if (!(LOC17)) goto LA19; LOC20 = (Tnode293802*)0; LOC20 = HEX5BHEX5D_294238_850551059(n0, ((NI) 0)); LOC17 = (((Tsymkind293435) 11) == (*(*LOC20).kindU.S4.sym).kind); LA19: ; if (!LOC17) goto LA21; (*n0).flags |= ((NU16)1)<<((((Tnodeflag293427) 14))%(sizeof(NU16)*8)); result0 = HEX5BHEX5D_294238_850551059(n0, ((NI) 1)); goto BeforeRet; } LA21: ; } break; case ((Tnodekind293020) 109): { { NI LOC26; Tnode293802* LOC29; LOC26 = (NI)0; LOC26 = len_294081_850551059(n0); if (!(((NI) 0) < LOC26)) goto LA27; LOC29 = (Tnode293802*)0; LOC29 = HEX5BHEX5D_294238_850551059(n0, ((NI) 0)); result0 = easyresultasgn_561191_839829468(LOC29); { if (!!((result0 == NIM_NIL))) goto LA32; (*n0).flags |= ((NU16)1)<<((((Tnodeflag293427) 14))%(sizeof(NU16)*8)); } LA32: ; } LA27: ; } break; default: { } break; } }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj179006*, gettypedesc_536671_839829468)(Tcgen530027* m0, Ttype293840* typ0) { Ropeobj179006* result0; Intset269030 check0; result0 = (Ropeobj179006*)0; memset((void*)(&check0), 0, sizeof(check0)); chckNil((void*)(&check0)); memset((void*)(&check0), 0, sizeof(check0)); initintset_269885_2627731572((&check0)); result0 = gettypedescaux_534503_839829468(m0, typ0, (&check0)); return result0; } N_NIMCALL(Ropeobj179006*, localvardecl_539532_839829468)(Tcproc530021* p0, Tsym293834* s0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { Ropeobj179006* LOC5; if (!((*s0).loc.k == ((Tlockind293808) 0))) goto LA3; LOC5 = (Ropeobj179006*)0; LOC5 = manglename_534205_839829468(s0); fillloc_533282_839829468((&(*s0).loc), ((Tlockind293808) 2), (*s0).typ, LOC5, ((Tstorageloc293812) 2)); { if (!((*s0).kind == ((Tsymkind293435) 9))) goto LA8; (*s0).loc.flags |= ((NU16)1)<<((((Tlocflag293810) 2))%(sizeof(NU16)*8)); } LA8: ; } LA3: ; result0 = gettypedesc_536671_839829468((*p0).module, (*s0).loc.t); { if (!(*s0).constraint == 0) goto LA12; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag293184) 8))&31U)))!=0)) goto LA16; 
add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_121)); } LA16: ; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag293184) 7))&31U)))!=0)) goto LA20; add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_122)); } LA20: ; add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_111)); add_179482_2381377266(&result0, (*s0).loc.r); } goto LA10; LA12: ; { TY533811 LOC23; memset((void*)LOC23, 0, sizeof(LOC23)); LOC23[0] = result0; LOC23[1] = (*s0).loc.r; result0 = HEX25_179905_2381377266((*(*s0).constraint).kindU.S3.strval, LOC23, 2); } LA10: ; return result0; } N_NIMCALL(void, initloc_533273_839829468)(Tloc293816* result0, Tlockind293808 k0, Ttype293840* typ0, Tstorageloc293812 s0) { (*result0).k = k0; (*result0).s = s0; unsureAsgnRef((void**) (&(*result0).t), typ0); unsureAsgnRef((void**) (&(*result0).r), NIM_NIL); (*result0).flags = 0; } N_NIMCALL(void, initlocexprsingleuse_540289_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* result0) { initloc_533273_839829468(result0, ((Tlockind293808) 0), (*e0).typ, ((Tstorageloc293812) 0)); (*result0).flags |= ((NU16)1)<<((((Tlocflag293810) 8))%(sizeof(NU16)*8)); expr_540248_839829468(p0, e0, result0); } static N_INLINE(Ropeobj179006**, s_530179_3723162438)(Tcproc530021* p0, Tcprocsection530011 s0) { Ropeobj179006** result0; result0 = (Ropeobj179006**)0; result0 = &(*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].sections[(s0)- 0]; return result0; } N_NIMCALL(Ropeobj179006*, indentline_533656_839829468)(Tcproc530021* p0, Ropeobj179006* r0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = r0; { NI i_533680_839829468; NI HEX3Atmp_533683_839829468; NI res_533686_839829468; i_533680_839829468 = (NI)0; HEX3Atmp_533683_839829468 = (NI)0; HEX3Atmp_533683_839829468 = (NI)(((*p0).blocks ? 
(*p0).blocks->Sup.len : 0) - ((NI) 1)); res_533686_839829468 = ((NI) 0); { while (1) { if (!(res_533686_839829468 <= HEX3Atmp_533683_839829468)) goto LA3; i_533680_839829468 = res_533686_839829468; prepend_179893_2381377266(&result0, indent_533655_839829468); res_533686_839829468 += ((NI) 1); } LA3: ; } } return result0; } N_NIMCALL(void, linefmt_533714_839829468)(Tcproc530021* p0, Tcprocsection530011 s0, NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0) { Ropeobj179006** LOC1; Ropeobj179006* LOC2; Ropeobj179006* LOC3; LOC1 = (Ropeobj179006**)0; LOC1 = s_530179_3723162438(p0, s0); LOC2 = (Ropeobj179006*)0; LOC2 = ropecg_533407_839829468((*p0).module, frmt0, args0, args0Len0); LOC3 = (Ropeobj179006*)0; LOC3 = indentline_533656_839829468(p0, LOC2); add_179482_2381377266(LOC1, LOC3); } N_NIMCALL(Ropeobj179006*, rdloc_539188_839829468)(Tloc293816 a0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = a0.r; { TY179507 LOC5; if (!((a0.flags &(1U<<((NU)(((Tlocflag293810) 0))&15U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = result0; result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_124), LOC5, 1); } LA3: ; return result0; } N_NIMCALL(void, line_533690_839829468)(Tcproc530021* p0, Tcprocsection530011 s0, Ropeobj179006* r0) { Ropeobj179006** LOC1; Ropeobj179006* LOC2; LOC1 = (Ropeobj179006**)0; LOC1 = s_530179_3723162438(p0, s0); LOC2 = (Ropeobj179006*)0; LOC2 = indentline_533656_839829468(p0, r0); add_179482_2381377266(LOC1, LOC2); } N_NIMCALL(void, linef_533700_839829468)(Tcproc530021* p0, Tcprocsection530011 s0, NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0) { Ropeobj179006** LOC1; Ropeobj179006* LOC2; Ropeobj179006* LOC3; LOC1 = (Ropeobj179006**)0; LOC1 = s_530179_3723162438(p0, s0); LOC2 = (Ropeobj179006*)0; LOC2 = HEX25_179905_2381377266(frmt0, args0, args0Len0); LOC3 = (Ropeobj179006*)0; LOC3 = indentline_533656_839829468(p0, LOC2); add_179482_2381377266(LOC1, LOC3); } N_NIMCALL(void, 
gentypeinfoauxbase_536960_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ttype293840* origtype0, Ropeobj179006* name0, Ropeobj179006* base0) { NI nimtypekind0; Ropeobj179006* size0; TY536235 LOC17; NI flags0; Ropeobj179006* LOC33; TY533811 LOC34; NimStringDesc* LOC35; nimtypekind0 = (NI)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = isobjlackingtypefield_534513_839829468(typ0); if (!LOC3) goto LA4; nimtypekind0 = ((NI) 18); } goto LA1; LA4: ; { nimtypekind0 = ((NI) ((*typ0).kind)); } LA1: ; size0 = (Ropeobj179006*)0; { if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag293431) 0))&31U)))!=0)) goto LA9; size0 = rope_179277_2381377266(((NimStringDesc*) &T839829468_133)); } goto LA7; LA9: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC12) goto LA13; LOC12 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA13: ; if (!LOC12) goto LA14; size0 = gettypedesc_536671_839829468(m0, origtype0); } goto LA7; LA14: ; { size0 = gettypedesc_536671_839829468(m0, typ0); } LA7: ; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = name0; LOC17[1] = size0; LOC17[2] = rope_179401_2381377266(((NI64) (nimtypekind0))); LOC17[3] = base0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_134), LOC17, 4); flags0 = ((NI) 0); { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = containsgarbagecollectedref_321117_3876443242(typ0); if (!!(LOC20)) goto LA21; flags0 = (NI)(flags0 | ((NI) 1)); } LA21: ; { NIM_BOOL LOC25; LOC25 = (NIM_BOOL)0; LOC25 = canformacycle_321123_3876443242(typ0); if (!!(LOC25)) goto LA26; flags0 = (NI)(flags0 | ((NI) 2)); } LA26: ; { TY533811 LOC32; if (!!((flags0 == ((NI) 0)))) goto LA30; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = name0; LOC32[1] = rope_179401_2381377266(((NI64) (flags0))); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_135), LOC32, 2); } LA30: ; LOC33 = (Ropeobj179006*)0; LOC33 = 
cgsym_533403_839829468(m0, ((NimStringDesc*) &T839829468_129)); memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = name0; LOC35 = (NimStringDesc*)0; LOC35 = typetostring_321017_3876443242(typ0, ((Tprefereddesc321011) 0)); LOC34[1] = rope_179277_2381377266(LOC35); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_136), LOC34, 2); } N_NIMCALL(Ropeobj179006*, getnimnode_536945_839829468)(Tcgen530027* m0) { Ropeobj179006* result0; TY533811 LOC1; result0 = (Ropeobj179006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = (*m0).typenodesname; LOC1[1] = rope_179401_2381377266(((NI64) ((*m0).typenodes))); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_138), LOC1, 2); (*m0).typenodes += ((NI) 1); return result0; } N_NIMCALL(void, gentupleinfo_537549_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ropeobj179006* name0) { Ropeobj179006* LOC1; Ropeobj179006* expr0; NI length0; TY533811 LOC15; LOC1 = (Ropeobj179006*)0; LOC1 = rope_179277_2381377266(((NimStringDesc*) &T839829468_18)); gentypeinfoauxbase_536960_839829468(m0, typ0, typ0, name0, LOC1); expr0 = getnimnode_536945_839829468(m0); length0 = sonslen_296327_850551059(typ0); { Ropeobj179006* tmp0; TY533811 LOC6; TY536238 LOC12; if (!(((NI) 0) < length0)) goto LA4; tmp0 = gettempname_534596_839829468(m0); memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = tmp0; LOC6[1] = rope_179401_2381377266(((NI64) (length0))); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC6, 2); { NI i_537571_839829468; NI HEX3Atmp_537590_839829468; NI res_537593_839829468; i_537571_839829468 = (NI)0; HEX3Atmp_537590_839829468 = (NI)0; HEX3Atmp_537590_839829468 = (NI)(length0 - ((NI) 1)); res_537593_839829468 = ((NI) 0); { while (1) { Ttype293840* a0; Ropeobj179006* tmp20; TY536238 LOC10; TY536235 LOC11; if (!(res_537593_839829468 <= HEX3Atmp_537590_839829468)) goto LA9; i_537571_839829468 = res_537593_839829468; a0 = 
(*typ0).sons->data[i_537571_839829468]; tmp20 = getnimnode_536945_839829468(m0); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = tmp0; LOC10[1] = rope_179401_2381377266(((NI64) (i_537571_839829468))); LOC10[2] = tmp20; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC10, 3); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = tmp20; LOC11[1] = gettypedesc_536671_839829468(m0, typ0); LOC11[2] = rope_179401_2381377266(((NI64) (i_537571_839829468))); LOC11[3] = gentypeinfo_536941_839829468(m0, a0); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_141), LOC11, 4); res_537593_839829468 += ((NI) 1); } LA9: ; } } memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = expr0; LOC12[1] = rope_179401_2381377266(((NI64) (length0))); LOC12[2] = tmp0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC12, 3); } goto LA2; LA4: ; { TY533811 LOC14; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = expr0; LOC14[1] = rope_179401_2381377266(((NI64) (length0))); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC14, 2); } LA2: ; memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = name0; LOC15[1] = expr0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC15, 2); } N_NIMCALL(Ttype293840*, fakeclosuretype_538010_839829468)(Tsym293834* owner0) { Ttype293840* result0; Ttype293840* LOC1; Ttype293840* r0; Ttype293840* LOC2; result0 = (Ttype293840*)0; result0 = newtype_296107_850551059(((Ttypekind293244) 18), owner0); LOC1 = (Ttype293840*)0; LOC1 = newtype_296107_850551059(((Ttypekind293244) 26), owner0); rawaddson_297394_850551059(result0, LOC1); r0 = newtype_296107_850551059(((Ttypekind293244) 22), owner0); LOC2 = (Ttype293840*)0; LOC2 = newtype_296107_850551059(((Ttypekind293244) 18), owner0); rawaddson_297394_850551059(r0, LOC2); 
/* NOTE(review): machine-generated C emitted by the Nim compiler's C backend
 * ("cgen"); mangled names encode Nim symbol ids. Do not hand-edit — change the
 * Nim sources and regenerate instead. The first statements below are the tail
 * of a helper whose definition starts before this chunk. */
rawaddson_297394_850551059(result0, r0); return result0; }
/* gentypeinfoaux: emits run-time type info for `typ0`. If the type has a
 * non-nil sons[0] it is treated as the base type (skipping wrapper kinds for
 * objects, kind 17 — presumably tyObject; TODO confirm against Nim's
 * TTypeKind) and its type info is generated recursively; otherwise a literal
 * rope (T..._18) is used as the base. */
N_NIMCALL(void, gentypeinfoaux_537027_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ttype293840* origtype0, Ropeobj179006* name0) { Ropeobj179006* base0; base0 = (Ropeobj179006*)0; { NIM_BOOL LOC3; NI LOC4; Ttype293840* x0; LOC3 = (NIM_BOOL)0; LOC4 = (NI)0; LOC4 = sonslen_296327_850551059(typ0); LOC3 = (((NI) 0) < LOC4); if (!(LOC3)) goto LA5; LOC3 = !(((*typ0).sons->data[((NI) 0)] == NIM_NIL)); LA5: ; if (!LOC3) goto LA6; x0 = (*typ0).sons->data[((NI) 0)]; { if (!((*typ0).kind == ((Ttypekind293244) 17))) goto LA10; x0 = skiptypes_297099_850551059(x0, IL64(211106247215360)); } LA10: ; base0 = gentypeinfo_536941_839829468(m0, x0); } goto LA1; LA6: ; { base0 = rope_179277_2381377266(((NimStringDesc*) &T839829468_18)); } LA1: ; gentypeinfoauxbase_536960_839829468(m0, typ0, origtype0, name0, base0); }
/* iscomplexvaluetype: true for type kinds 16, 4, 19, 18, 17 or for a proc
 * type (25) whose calling convention is 8 (presumably the closure convention
 * — TODO confirm); such values need generated init/assign helpers. */
static N_INLINE(NIM_BOOL, iscomplexvaluetype_539317_839829468)(Ttype293840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC3; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = ((*t0).kind == ((Ttypekind293244) 16) || (*t0).kind == ((Ttypekind293244) 4) || (*t0).kind == ((Ttypekind293244) 19) || (*t0).kind == ((Ttypekind293244) 18) || (*t0).kind == ((Ttypekind293244) 17)); if (LOC1) goto LA2; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).kind == ((Ttypekind293244) 25)); if (!(LOC3)) goto LA4; LOC3 = ((*t0).callconv == ((Tcallingconvention293002) 8)); LA4: ; LOC1 = LOC3; LA2: ; result0 = LOC1; return result0; }
/* usestringh: once per module (guarded by module flag bit 4), records an
 * extra header (T..._151) in the module's header-file set. */
N_NIMCALL(void, usestringh_533345_839829468)(Tcgen530027* m0) { { NIM_BOOL LOC5; if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag530025) 4))&7U)))!=0))) goto LA3; (*m0).flags |= ((NU8)1)<<((((Codegenflag530025) 4))%(sizeof(NU8)*8)); LOC5 = (NIM_BOOL)0; LOC5 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_151)); } LA3: ; }
/* addrloc: renders the C "address of" expression for a location rope; wraps
 * the rope (HEX26 = Nim's `&` rope concat) unless the loc is flagged as
 * already indirect (flag bit 0) with a mapped C type of kind 17. */
N_NIMCALL(Ropeobj179006*, addrloc_539204_839829468)(Tloc293816 a0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = a0.r; { NIM_BOOL LOC3;
Tctypekind530007 LOC5; Ropeobj179006* LOC8; LOC3 = (NIM_BOOL)0; LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag293810) 0))&15U)))!=0)); if (!(LOC3)) goto LA4; LOC5 = (Tctypekind530007)0; LOC5 = maptype_534393_839829468(a0.t); LOC3 = !((LOC5 == ((Tctypekind530007) 17))); LA4: ; if (!LOC3) goto LA6; LOC8 = (Ropeobj179006*)0; LOC8 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_128), result0); result0 = HEX26_179447_2381377266(LOC8, ((NimStringDesc*) &T839829468_117)); } LA6: ; return result0; }
/* genobjectinit: emits initialization of an object's hidden type field,
 * dispatching on analyseobjectwithtypefield: 0 = nothing to do; 1 = object
 * itself carries the field — walk base types (kind 17 with non-nil sons[0])
 * appending a deref/base accessor per level unless compiling for command
 * mode 2 or a module flagged with symbol flag 27; 2 = embedded somewhere —
 * take address or read the loc and emit a call via format string T..._155. */
N_NIMCALL(void, genobjectinit_539242_839829468)(Tcproc530021* p0, Tcprocsection530011 section0, Ttype293840* t0, Tloc293816 a0, NIM_BOOL takeaddr0) { Ttypefieldresult321145 LOC1; LOC1 = (Ttypefieldresult321145)0; LOC1 = analyseobjectwithtypefield_321149_3876443242(t0); switch (LOC1) { case ((Ttypefieldresult321145) 0): { } break; case ((Ttypefieldresult321145) 1): { Ropeobj179006* r0; Ttype293840* s0; TY533811 LOC19; r0 = rdloc_539188_839829468(a0); { TY179507 LOC8; if (!!(takeaddr0)) goto LA6; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = r0; r0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_124), LOC8, 1); } LA6: ; s0 = skiptypes_297099_850551059(t0, IL64(211106232576256)); { NIM_BOOL LOC11; LOC11 = (NIM_BOOL)0; LOC11 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC11) goto LA12; LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA12: ; if (!!(LOC11)) goto LA13; { while (1) { NIM_BOOL LOC17; LOC17 = (NIM_BOOL)0; LOC17 = ((*s0).kind == ((Ttypekind293244) 17)); if (!(LOC17)) goto LA18; LOC17 = !(((*s0).sons->data[((NI) 0)] == NIM_NIL)); LA18: ; if (!LOC17) goto LA16; add_179487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); s0 = skiptypes_297099_850551059((*s0).sons->data[((NI) 0)], IL64(211106247215360)); } LA16: ; } } LA13: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = r0; LOC19[1] = gentypeinfo_536941_839829468((*p0).module, t0); linefmt_533714_839829468(p0, section0, ((NimStringDesc*)
&T839829468_154), LOC19, 2); } break; case ((Ttypefieldresult321145) 2): { Ropeobj179006* r0; TY533811 LOC26; { if (!takeaddr0) goto LA23; r0 = addrloc_539204_839829468(a0); } goto LA21; LA23: ; { r0 = rdloc_539188_839829468(a0); } LA21: ; memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = r0; LOC26[1] = gentypeinfo_536941_839829468((*p0).module, t0); linefmt_533714_839829468(p0, section0, ((NimStringDesc*) &T839829468_155), LOC26, 2); } break; } }
/* constructloc: emits default-construction code for a location. Simple value
 * types get a plain assignment (format T..._150); complex value types get a
 * memset-style zeroing (format T..._152, via usestringh) when not a temp or
 * when the type contains GC'd refs, followed by type-field init. */
N_NIMCALL(void, constructloc_539388_839829468)(Tcproc530021* p0, Tloc293816 loc0, NIM_BOOL istemp0) { Ttype293840* typ0; typ0 = skiptypes_297099_850551059(loc0.t, IL64(211106233624832)); { NIM_BOOL LOC3; TY533811 LOC6; LOC3 = (NIM_BOOL)0; LOC3 = iscomplexvaluetype_539317_839829468(typ0); if (!!(LOC3)) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rdloc_539188_839829468(loc0); LOC6[1] = gettypedesc_536671_839829468((*p0).module, typ0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_150), LOC6, 2); } goto LA1; LA4: ; { { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; LOC10 = !(istemp0); if (LOC10) goto LA11; LOC10 = containsgarbagecollectedref_321117_3876443242(loc0.t); LA11: ; if (!LOC10) goto LA12; { NIM_BOOL LOC16; TY533811 LOC19; LOC16 = (NIM_BOOL)0; LOC16 = isimportedcpptype_534476_839829468(typ0); if (!!(LOC16)) goto LA17; usestringh_533345_839829468((*p0).module); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = addrloc_539204_839829468(loc0); LOC19[1] = rdloc_539188_839829468(loc0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_152), LOC19, 2); } LA17: ; } LA12: ; genobjectinit_539242_839829468(p0, ((Tcprocsection530011) 2), loc0.t, loc0, NIM_TRUE); } LA1: ; }
/* gettemp: allocates a fresh temporary of type t0 in the current proc: bumps
 * the label counter for a unique name, declares it in section 0, fills in
 * the Tloc fields and default-constructs it. Continues on the next line. */
N_NIMCALL(void, gettemp_538032_839829468)(Tcproc530021* p0, Ttype293840* t0, Tloc293816* result0, NIM_BOOL needsinit0) { Ropeobj179006* LOC1; TY533811 LOC2; (*p0).labels += ((NI) 1); LOC1 = (Ropeobj179006*)0; LOC1 = rope_179401_2381377266(((NI64) ((*p0).labels)));
/* NOTE(review): generated Nim cgen output; do not hand-edit. The opening
 * statements finish gettemp (started on the previous line): the temp's rope
 * name is stored with a GC write barrier (unsureAsgnRef), declared, and the
 * loc record is populated (k=1, storage=2, flags=0) before construction. */
unsureAsgnRef((void**) (&(*result0).r), HEX26_179452_2381377266(((NimStringDesc*) &T839829468_149), LOC1)); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = gettypedesc_536671_839829468((*p0).module, t0); LOC2[1] = (*result0).r; linefmt_533714_839829468(p0, ((Tcprocsection530011) 0), ((NimStringDesc*) &T839829468_54), LOC2, 2); (*result0).k = ((Tlockind293808) 1); unsureAsgnRef((void**) (&(*result0).t), t0); (*result0).s = ((Tstorageloc293812) 2); (*result0).flags = 0; constructloc_539388_839829468(p0, (*result0), !(needsinit0)); }
/* parentobj: wraps an accessor rope with a base-object selector (format
 * T..._161) unless compiling for command mode 2 or a module with symbol
 * flag 27 set, in which case the accessor is returned unchanged. */
static N_INLINE(Ropeobj179006*, parentobj_538257_839829468)(Ropeobj179006* accessor0, Tcgen530027* m0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { NIM_BOOL LOC3; TY179507 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA4: ; if (!!(LOC3)) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = accessor0; result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_161), LOC7, 1); } goto LA1; LA5: ; { result0 = accessor0; } LA1: ; return result0; }
/* intliteral: renders an integer literal as C text. Plain decimal when it
 * fits in int32 (exclusive of INT32_MIN, which gets the special format
 * T..._166 to avoid C's "-2147483648 is unary minus on a too-big literal"
 * pitfall); values above INT64_MIN use format T..._167, and INT64_MIN
 * itself gets format T..._168. */
N_NIMCALL(Ropeobj179006*, intliteral_540270_839829468)(NI64 i0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (IL64(-2147483648) < i0); if (!(LOC3)) goto LA4; LOC3 = (i0 <= IL64(2147483647)); LA4: ; if (!LOC3) goto LA5; result0 = rope_179401_2381377266(i0); } goto LA1; LA5: ; { TY534289 LOC10; if (!(i0 == IL64(-2147483648))) goto LA8; memset((void*)LOC10, 0, sizeof(LOC10)); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_166), LOC10, 0); } goto LA1; LA8: ; { TY179507 LOC14; if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rope_179401_2381377266(i0); result0 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC14, 1); } goto LA1; LA12: ; { TY534289 LOC16; memset((void*)LOC16, 0, sizeof(LOC16));
result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_168), LOC16, 0); } LA1: ; return result0; }
/* int64literal: like intliteral but always uses the 64-bit literal format
 * (T..._167), with the same INT64_MIN special case (T..._168). */
N_NIMCALL(Ropeobj179006*, int64literal_550430_839829468)(NI64 i0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { TY179507 LOC5; if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_179401_2381377266(i0); result0 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC5, 1); } goto LA1; LA3: ; { TY534289 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_168), LOC7, 0); } LA1: ; return result0; }
/* uint64literal: stringifies the unsigned value (HEX24 = Nim's `$`) and
 * appends a 3-char suffix string (T..._171 — presumably "ULL"; TODO
 * confirm), allocating len+3 up front. */
N_NIMCALL(Ropeobj179006*, uint64literal_550442_839829468)(NU64 i0) { Ropeobj179006* result0; NimStringDesc* LOC1; NimStringDesc* LOC2; result0 = (Ropeobj179006*)0; LOC1 = (NimStringDesc*)0; LOC2 = (NimStringDesc*)0; LOC2 = HEX24_8401_1689653243(i0); LOC1 = rawNewString(LOC2->Sup.len + 3); appendString(LOC1, LOC2); appendString(LOC1, ((NimStringDesc*) &T839829468_171)); result0 = rope_179277_2381377266(LOC1); return result0; }
/* getstrlit: emits a named Nim string literal constant into file section 8
 * (name, escaped C string, length — note the nil-safe `s0 ? len : 0`) and
 * returns the generated temp name; cgsym pulls in the required runtime
 * symbol first. Continues on the next line. */
N_NIMCALL(Ropeobj179006*, getstrlit_550468_839829468)(Tcgen530027* m0, NimStringDesc* s0) { Ropeobj179006* result0; Ropeobj179006* LOC1; TY536238 LOC2; result0 = (Ropeobj179006*)0; LOC1 = (Ropeobj179006*)0; LOC1 = cgsym_533403_839829468(m0, ((NimStringDesc*) &T839829468_79)); result0 = gettempname_534596_839829468(m0); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = result0; LOC2[1] = makecstring_192638_155036129(s0); LOC2[2] = rope_179401_2381377266(((NI64) ((s0 ?
s0->Sup.len : 0)))); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 8))- 0], ((NimStringDesc*) &T839829468_177), LOC2, 3); return result0; }
/* genliteral (3-arg form): renders an AST literal node as C, dispatched on
 * node kind: int-like kinds 5..15 further dispatch on the skipped type kind
 * (bool via T..._169/_170, int64 kind 35, uint64 kind 44, otherwise a cast
 * wrapper T..._172); nil node kind 23 emits a cached closure/ptr constant;
 * string kinds 20..22 emit nil, a cached Nim string constant, or a plain
 * cstring; float kinds 16..18 use tostrmaxprecision; anything else is an
 * internal error. Continues across the following lines. */
N_NIMCALL(Ropeobj179006*, genliteral_550476_839829468)(Tcproc530021* p0, Tnode293802* n0, Ttype293840* ty0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { if (!(ty0 == NIM_NIL)) goto LA3; internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_165)); } LA3: ; switch ((*n0).kind) { case ((Tnodekind293020) 5) ... ((Tnodekind293020) 15): { Ttype293840* LOC6; LOC6 = (Ttype293840*)0; LOC6 = skiptypes_297099_850551059(ty0, IL64(211106242013440)); switch ((*LOC6).kind) { case ((Ttypekind293244) 2): case ((Ttypekind293244) 5): { result0 = intliteral_540270_839829468((*n0).kindU.S1.intval); } break; case ((Ttypekind293244) 1): { { TY534289 LOC13; if (!!(((*n0).kindU.S1.intval == IL64(0)))) goto LA11; memset((void*)LOC13, 0, sizeof(LOC13)); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_169), LOC13, 0); } goto LA9; LA11: ; { TY534289 LOC15; memset((void*)LOC15, 0, sizeof(LOC15)); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_170), LOC15, 0); } LA9: ; } break; case ((Ttypekind293244) 35): { result0 = int64literal_550430_839829468((*n0).kindU.S1.intval); } break; case ((Ttypekind293244) 44): { result0 = uint64literal_550442_839829468(((NU64) ((*n0).kindU.S1.intval))); } break; default: { TY533811 LOC19; Ttype293840* LOC20; memset((void*)LOC19, 0, sizeof(LOC19)); LOC20 = (Ttype293840*)0; LOC20 = skiptypes_297099_850551059(ty0, IL64(211106242013440)); LOC19[0] = gettypedesc_536671_839829468((*p0).module, LOC20); LOC19[1] = intliteral_540270_839829468((*n0).kindU.S1.intval); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_172), LOC19, 2); } break; } } break; case ((Tnodekind293020) 23): { Ttype293840* t0; t0 = skiptypes_297099_850551059(ty0, IL64(211106242013440)); { NIM_BOOL LOC24; NI id0; Ropeobj179006* LOC28; LOC24 = (NIM_BOOL)0; LOC24 = ((*t0).kind ==
/* NOTE(review): generated Nim cgen output; do not hand-edit. This section
 * continues genliteral's nil-node (kind 23) case: a closure-typed nil gets a
 * module-cached data constant (nodetabletestorset dedupes by node; the
 * declaration is only emitted when the id is fresh), others emit the plain
 * nil literal T..._174. */
((Ttypekind293244) 25)); if (!(LOC24)) goto LA25; LOC24 = ((*t0).callconv == ((Tcallingconvention293002) 8)); LA25: ; if (!LOC24) goto LA26; id0 = nodetabletestorset_343682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC28 = (Ropeobj179006*)0; LOC28 = rope_179401_2381377266(((NI64) (id0))); result0 = HEX26_179418_2381377266((*(*p0).module).tmpbase, LOC28); { TY533811 LOC33; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA31; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = gettypedesc_536671_839829468((*p0).module, t0); LOC33[1] = result0; addf_180205_2381377266(&(*(*p0).module).s[(((Tcfilesection530005) 8))- 0], ((NimStringDesc*) &T839829468_173), LOC33, 2); } LA31: ; } goto LA22; LA26: ; { result0 = rope_179277_2381377266(((NimStringDesc*) &T839829468_174)); } LA22: ; } break; case ((Tnodekind293020) 20) ... ((Tnodekind293020) 22): {
/* string-literal cases: nil string -> T..._175; Nim string type (kind 28)
 * -> cached named constant wrapped via T..._176/_178; else raw cstring.
 * NOTE(review): `if (!(*n0).kindU.S3.strval == 0)` parses as
 * `(!strval) == 0`, i.e. "strval != NIL" — generated code relies on this
 * precedence; leave as-is. */
{ TY534289 LOC40; if (!(*n0).kindU.S3.strval == 0) goto LA38; memset((void*)LOC40, 0, sizeof(LOC40)); result0 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_175), LOC40, 0); } goto LA36; LA38: ; { Ttype293840* LOC42; NI id0; LOC42 = (Ttype293840*)0; LOC42 = skiptypes_297099_850551059(ty0, IL64(211106242013440)); if (!((*LOC42).kind == ((Ttypekind293244) 28))) goto LA43; id0 = nodetabletestorset_343682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); { TY179507 LOC49; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA47; memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = getstrlit_550468_839829468((*p0).module, (*n0).kindU.S3.strval); result0 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_176), LOC49, 1); } goto LA45; LA47: ; { TY533811 LOC51; memset((void*)LOC51, 0, sizeof(LOC51)); LOC51[0] = (*(*p0).module).tmpbase; LOC51[1] = rope_179401_2381377266(((NI64) (id0))); result0 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_178), LOC51, 2); } LA45: ; } goto
LA36; LA43: ; { result0 = makecstring_192638_155036129((*n0).kindU.S3.strval); } LA36: ; } break; case ((Tnodekind293020) 16) ... ((Tnodekind293020) 18): { NimStringDesc* LOC54; LOC54 = (NimStringDesc*)0; LOC54 = tostrmaxprecision_299007_3471544153((*n0).kindU.S2.floatval); result0 = rope_179277_2381377266(LOC54); } break; default: { NimStringDesc* LOC56; LOC56 = (NimStringDesc*)0; LOC56 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI293020))->Sup.len + 12); appendString(LOC56, ((NimStringDesc*) &T839829468_179)); appendString(LOC56, reprEnum((NI)(*n0).kind, (&NTI293020))); appendChar(LOC56, 41); internalerror_197100_155036129((*n0).info, LOC56); result0 = NIM_NIL; } break; } return result0; }
/* genliteral (2-arg wrapper): delegates using the node's own type. */
N_NIMCALL(Ropeobj179006*, genliteral_540273_839829468)(Tcproc530021* p0, Tnode293802* n0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = genliteral_550476_839829468(p0, n0, (*n0).typ); return result0; }
/* gencaserange: emits C `case` labels for one branch of a case statement
 * (all sons except the last, which is the body). Range nodes (kind 44) use
 * the compiler's "case a...b" GNU extension when the target C compiler
 * advertises it (Cc[...].Field20 bit 0); otherwise the range is unrolled
 * into one `case` per value by incrementing a copied node's intval.
 * Single values emit one `case` via format T..._180. */
N_NIMCALL(void, gencaserange_538028_839829468)(Tcproc530021* p0, Tnode293802* branch0) { NI length0; length0 = len_294081_850551059(branch0); { NI j_548676_839829468; NI HEX3Atmp_548717_839829468; NI res_548720_839829468; j_548676_839829468 = (NI)0; HEX3Atmp_548717_839829468 = (NI)0; HEX3Atmp_548717_839829468 = (NI)(length0 - ((NI) 2)); res_548720_839829468 = ((NI) 0); { while (1) { if (!(res_548720_839829468 <= HEX3Atmp_548717_839829468)) goto LA3; j_548676_839829468 = res_548720_839829468; { Tnode293802* LOC6; LOC6 = (Tnode293802*)0; LOC6 = HEX5BHEX5D_294238_850551059(branch0, j_548676_839829468); if (!((*LOC6).kind == ((Tnodekind293020) 44))) goto LA7; { TY533811 LOC13; Tnode293802* LOC14; Tnode293802* LOC15; Tnode293802* LOC16; Tnode293802* LOC17; if (!((Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop274004) 0))&7U)))!=0)) goto LA11; memset((void*)LOC13, 0, sizeof(LOC13)); LOC14 = (Tnode293802*)0; LOC14 = HEX5BHEX5D_294238_850551059(branch0, j_548676_839829468); LOC15 = (Tnode293802*)0; LOC15 =
HEX5BHEX5D_294238_850551059(LOC14, ((NI) 0)); LOC13[0] = genliteral_540273_839829468(p0, LOC15); LOC16 = (Tnode293802*)0; LOC16 = HEX5BHEX5D_294238_850551059(branch0, j_548676_839829468); LOC17 = (Tnode293802*)0; LOC17 = HEX5BHEX5D_294238_850551059(LOC16, ((NI) 1)); LOC13[1] = genliteral_540273_839829468(p0, LOC17); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_164), LOC13, 2); } goto LA9; LA11: ; { Tnode293802* v0; Tnode293802* LOC19; Tnode293802* LOC20; LOC19 = (Tnode293802*)0; LOC19 = HEX5BHEX5D_294238_850551059(branch0, j_548676_839829468); LOC20 = (Tnode293802*)0; LOC20 = HEX5BHEX5D_294238_850551059(LOC19, ((NI) 0)); v0 = copynode_297528_850551059(LOC20); { while (1) { Tnode293802* LOC23; Tnode293802* LOC24; TY179507 LOC25; LOC23 = (Tnode293802*)0; LOC23 = HEX5BHEX5D_294238_850551059(branch0, j_548676_839829468); LOC24 = (Tnode293802*)0; LOC24 = HEX5BHEX5D_294238_850551059(LOC23, ((NI) 1)); if (!((*v0).kindU.S1.intval <= (*LOC24).kindU.S1.intval)) goto LA22; memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = genliteral_540273_839829468(p0, v0); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_180), LOC25, 1); (*v0).kindU.S1.intval += ((NI) 1); } LA22: ; } } LA9: ; } goto LA4; LA7: ; { TY179507 LOC27; Tnode293802* LOC28; memset((void*)LOC27, 0, sizeof(LOC27)); LOC28 = (Tnode293802*)0; LOC28 = HEX5BHEX5D_294238_850551059(branch0, j_548676_839829468); LOC27[0] = genliteral_540273_839829468(p0, LOC28); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_180), LOC27, 1); } LA4: ; res_548720_839829468 += ((NI) 1); } LA3: ; } } }
/* gentraverseproc (record-node walker): recursively emits GC-traversal code
 * for an object's field tree: kind 138 (record list — presumably nkRecList;
 * TODO confirm against TNodeKind) recurses over all sons. Continues on the
 * following lines. */
N_NIMCALL(void, gentraverseproc_538039_839829468)(Ttraversalclosure538019* c0, Ropeobj179006* accessor0, Tnode293802* n0) { { { if (!(n0 == NIM_NIL)) goto LA3; goto BeforeRet; } LA3: ; switch ((*n0).kind) { case ((Tnodekind293020) 138): { { NI i_538068_839829468; NI HEX3Atmp_538239_839829468; NI LOC7; NI res_538242_839829468;
/* NOTE(review): generated Nim cgen output; do not hand-edit. Continuation of
 * the record-node traversal walker: the kind-138 loop body, then kind 139
 * (record case — variant object): emits a C switch on the discriminator
 * field (format T..._163), generating case labels per branch via
 * gencaserange (or `default:` T..._181 for kind != 85), recursing into each
 * branch body and closing with T..._182/_183. */
i_538068_839829468 = (NI)0; HEX3Atmp_538239_839829468 = (NI)0; LOC7 = (NI)0; LOC7 = sonslen_296351_850551059(n0); HEX3Atmp_538239_839829468 = (NI)(LOC7 - ((NI) 1)); res_538242_839829468 = ((NI) 0); { while (1) { if (!(res_538242_839829468 <= HEX3Atmp_538239_839829468)) goto LA9; i_538068_839829468 = res_538242_839829468; gentraverseproc_538039_839829468(c0, accessor0, (*n0).kindU.S6.sons->data[i_538068_839829468]); res_538242_839829468 += ((NI) 1); } LA9: ; } } } break; case ((Tnodekind293020) 139): { Tcproc530021* p0; Tsym293834* disc0; TY533811 LOC15; TY534289 LOC28; { if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 3)))) goto LA13; internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_162)); } LA13: ; p0 = (*c0).p; disc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = accessor0; LOC15[1] = (*disc0).loc.r; linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_163), LOC15, 2); { NI i_538098_839829468; NI HEX3Atmp_538249_839829468; NI LOC17; NI res_538252_839829468; i_538098_839829468 = (NI)0; HEX3Atmp_538249_839829468 = (NI)0; LOC17 = (NI)0; LOC17 = sonslen_296351_850551059(n0); HEX3Atmp_538249_839829468 = (NI)(LOC17 - ((NI) 1)); res_538252_839829468 = ((NI) 1); { while (1) { Tnode293802* branch0; Tnode293802* LOC26; TY534289 LOC27; if (!(res_538252_839829468 <= HEX3Atmp_538249_839829468)) goto LA19; i_538098_839829468 = res_538252_839829468; branch0 = (*n0).kindU.S6.sons->data[i_538098_839829468]; { if (!((*branch0).kind == ((Tnodekind293020) 85))) goto LA22; gencaserange_538028_839829468((*c0).p, branch0); } goto LA20; LA22: ; { TY534289 LOC25; memset((void*)LOC25, 0, sizeof(LOC25)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_181), LOC25, 0); } LA20: ; LOC26 = (Tnode293802*)0; LOC26 = lastson_296364_850551059(branch0); gentraverseproc_538039_839829468(c0, accessor0, LOC26);
memset((void*)LOC27, 0, sizeof(LOC27)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_182), LOC27, 0); res_538252_839829468 += ((NI) 1); } LA19: ; } } memset((void*)LOC28, 0, sizeof(LOC28)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_183), LOC28, 0); } break; case ((Tnodekind293020) 3): {
/* kind 3 (field symbol): builds the `accessor.field` rope (HEX25 = rope `%`
 * formatting via T..._90) and recurses into the type-level walker for the
 * field's type; nil loc.t is an internal error. */
Tsym293834* field0; TY533811 LOC34; Ropeobj179006* LOC35; field0 = (*n0).kindU.S4.sym; { if (!((*field0).loc.t == NIM_NIL)) goto LA32; internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184)); } LA32: ; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = accessor0; LOC34[1] = (*field0).loc.r; LOC35 = (Ropeobj179006*)0; LOC35 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_90), LOC34, 2); gentraverseproc_538022_839829468(c0, LOC35, (*field0).loc.t); } break; default: { internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184)); } break; } }BeforeRet: ; }
/* linecg: formats args via ropecg, indents the result for the current proc
 * and appends it to the requested proc section. */
N_NIMCALL(void, linecg_533707_839829468)(Tcproc530021* p0, Tcprocsection530011 s0, NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0) { Ropeobj179006** LOC1; Ropeobj179006* LOC2; Ropeobj179006* LOC3; LOC1 = (Ropeobj179006**)0; LOC1 = s_530179_3723162438(p0, s0); LOC2 = (Ropeobj179006*)0; LOC2 = ropecg_533407_839829468((*p0).module, frmt0, args0, args0Len0); LOC3 = (Ropeobj179006*)0; LOC3 = indentline_533656_839829468(p0, LOC2); add_179482_2381377266(LOC1, LOC3); }
/* gentraverseproc (type walker): recursively emits GC-traversal code for a
 * value of the given (uniquified) type: kinds 11/10/8 recurse into the last
 * son; nil types bail out. Continues on the following lines. */
N_NIMCALL(void, gentraverseproc_538022_839829468)(Ttraversalclosure538019* c0, Ropeobj179006* accessor0, Ttype293840* typ_538027_839829468) { Ttype293840* typ_538302_839829468; Tcproc530021* p0; { { if (!(typ_538027_839829468 == NIM_NIL)) goto LA3; goto BeforeRet; } LA3: ; typ_538302_839829468 = getuniquetype_529640_2036603609(typ_538027_839829468); p0 = (*c0).p; switch ((*typ_538302_839829468).kind) { case ((Ttypekind293244) 11): case ((Ttypekind293244) 10): case ((Ttypekind293244) 8): { Ttype293840* LOC6; LOC6 = (Ttype293840*)0; LOC6 =
lastson_296377_850551059(typ_538302_839829468); gentraverseproc_538022_839829468(c0, accessor0, LOC6); } break; case ((Ttypekind293244) 4): case ((Ttypekind293244) 16): {
/* array-like kinds 4/16: allocates an index temp (system type kind 31 —
 * presumably int), emits a for-loop header (T..._159), recurses on the
 * element type with an indexed accessor (T..._138), closes with T..._160. */
NI64 arraysize0; Tloc293816 i0; Ttype293840* LOC8; TY533811 LOC9; TY533811 LOC10; Ropeobj179006* LOC11; TY534289 LOC12; arraysize0 = lengthord_321007_3876443242((*typ_538302_839829468).sons->data[((NI) 0)]); memset((void*)(&i0), 0, sizeof(i0)); LOC8 = (Ttype293840*)0; LOC8 = getsystype_339150_3937434831(((Ttypekind293244) 31)); gettemp_538032_839829468(p0, LOC8, (&i0), NIM_FALSE); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = i0.r; LOC9[1] = rope_179401_2381377266(arraysize0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_159), LOC9, 2); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = accessor0; LOC10[1] = i0.r; LOC11 = (Ropeobj179006*)0; LOC11 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC10, 2); gentraverseproc_538022_839829468(c0, LOC11, (*typ_538302_839829468).sons->data[((NI) 1)]); memset((void*)LOC12, 0, sizeof(LOC12)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_160), LOC12, 0); } break; case ((Ttypekind293244) 17): {
/* object kind 17: recurses over base types (with parentobj-adjusted
 * accessor) and then walks the record body n, if present. */
{ NI i_538325_839829468; NI HEX3Atmp_538384_839829468; NI LOC15; NI res_538387_839829468; i_538325_839829468 = (NI)0; HEX3Atmp_538384_839829468 = (NI)0; LOC15 = (NI)0; LOC15 = sonslen_296327_850551059(typ_538302_839829468); HEX3Atmp_538384_839829468 = (NI)(LOC15 - ((NI) 1)); res_538387_839829468 = ((NI) 0); { while (1) { Ttype293840* x0; Ropeobj179006* LOC22; if (!(res_538387_839829468 <= HEX3Atmp_538384_839829468)) goto LA17; i_538325_839829468 = res_538387_839829468; x0 = (*typ_538302_839829468).sons->data[i_538325_839829468]; { if (!!((x0 == NIM_NIL))) goto LA20; x0 = skiptypes_297099_850551059(x0, IL64(211106247215360)); } LA20: ; LOC22 = (Ropeobj179006*)0; LOC22 = parentobj_538257_839829468(accessor0, (*(*c0).p).module); gentraverseproc_538022_839829468(c0,
/* NOTE(review): generated Nim cgen output; do not hand-edit. Continuation of
 * the type-level traversal walker: finishes the object (kind 17) case, then
 * kind 18 (tuple — fields addressed by index via T..._185), kinds 22/28/24
 * (ref-like/string/seq — emit the closure's visitor call on the accessor),
 * and kind 25 (proc) which visits only the environment pointer (T..._186)
 * when the calling convention is 8 (closure). All other kinds are no-ops. */
LOC22, x0); res_538387_839829468 += ((NI) 1); } LA17: ; } } { if (!!(((*typ_538302_839829468).n == NIM_NIL))) goto LA25; gentraverseproc_538039_839829468(c0, accessor0, (*typ_538302_839829468).n); } LA25: ; } break; case ((Ttypekind293244) 18): { Ttype293840* typ0; typ0 = getuniquetype_529640_2036603609(typ_538302_839829468); { NI i_538363_839829468; NI HEX3Atmp_538392_839829468; NI LOC29; NI res_538395_839829468; i_538363_839829468 = (NI)0; HEX3Atmp_538392_839829468 = (NI)0; LOC29 = (NI)0; LOC29 = sonslen_296327_850551059(typ0); HEX3Atmp_538392_839829468 = (NI)(LOC29 - ((NI) 1)); res_538395_839829468 = ((NI) 0); { while (1) { TY533811 LOC32; Ropeobj179006* LOC33; if (!(res_538395_839829468 <= HEX3Atmp_538392_839829468)) goto LA31; i_538363_839829468 = res_538395_839829468; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = accessor0; LOC32[1] = rope_179401_2381377266(((NI64) (i_538363_839829468))); LOC33 = (Ropeobj179006*)0; LOC33 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_185), LOC32, 2); gentraverseproc_538022_839829468(c0, LOC33, (*typ0).sons->data[i_538363_839829468]); res_538395_839829468 += ((NI) 1); } LA31: ; } } } break; case ((Ttypekind293244) 22): case ((Ttypekind293244) 28): case ((Ttypekind293244) 24): { TY179507 LOC35; memset((void*)LOC35, 0, sizeof(LOC35)); LOC35[0] = accessor0; linecg_533707_839829468(p0, ((Tcprocsection530011) 2), (*c0).visitorfrmt, LOC35, 1); } break; case ((Ttypekind293244) 25): { { TY179507 LOC41; TY179507 LOC42; if (!((*typ_538302_839829468).callconv == ((Tcallingconvention293002) 8))) goto LA39; memset((void*)LOC41, 0, sizeof(LOC41)); memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = accessor0; LOC41[0] = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_186), LOC42, 1); linecg_533707_839829468(p0, ((Tcprocsection530011) 2), (*c0).visitorfrmt, LOC41, 1); } LA39: ; } break; default: { } break; } }BeforeRet: ; }
/* gentraverseprocseq: emits traversal code for a seq: allocates an index
 * temp, emits a for-loop over the seq length (format T..._156, with a
 * length accessor that depends on command mode 2 / symbol flag 27 —
 * T..._157 vs T..._158), recurses on the element type with an indexed
 * accessor (T..._187) and closes the loop (T..._160). */
N_NIMCALL(void, gentraverseprocseq_538399_839829468)(Ttraversalclosure538019* c0,
Ropeobj179006* accessor0, Ttype293840* typ0) { Tcproc530021* p0; Tloc293816 i0; Ttype293840* LOC1; TY536238 LOC2; NimStringDesc* LOC3; TY533811 LOC11; Ropeobj179006* LOC12; TY534289 LOC13; p0 = (*c0).p; memset((void*)(&i0), 0, sizeof(i0)); LOC1 = (Ttype293840*)0; LOC1 = getsystype_339150_3937434831(((Ttypekind293244) 31)); gettemp_538032_839829468(p0, LOC1, (&i0), NIM_FALSE); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = i0.r; LOC2[1] = accessor0; LOC3 = (NimStringDesc*)0; { NIM_BOOL LOC6; LOC6 = (NIM_BOOL)0; LOC6 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC6) goto LA7; LOC6 = (((*(*(*(*c0).p).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA7: ; if (!LOC6) goto LA8; LOC3 = copyString(((NimStringDesc*) &T839829468_157)); } goto LA4; LA8: ; { LOC3 = copyString(((NimStringDesc*) &T839829468_158)); } LA4: ; LOC2[2] = rope_179277_2381377266(LOC3); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_156), LOC2, 3); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = accessor0; LOC11[1] = i0.r; LOC12 = (Ropeobj179006*)0; LOC12 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_187), LOC11, 2); gentraverseproc_538022_839829468(c0, LOC12, (*typ0).sons->data[((NI) 0)]); memset((void*)LOC13, 0, sizeof(LOC13)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_160), LOC13, 0); }
/* gentraverseproc (top-level generator): builds a complete C traversal proc
 * for the type and returns its generated name. Sets up a fresh Tcproc,
 * picks the visitor format for reason 0, emits the header/prelude (formats
 * T..._146/_147/_148), then dispatches: seq types (kind 24) via
 * gentraverseprocseq, ref-to-array/object via the type walker with the
 * appropriate deref accessor (T..._188/_189). Continues on the next lines. */
N_NIMCALL(Ropeobj179006*, gentraverseproc_538632_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ttypeinforeason538016 reason0) { Ropeobj179006* result0; Ttraversalclosure538019 c0; Tcproc530021* p0; Ropeobj179006* header0; TY179507 LOC3; Ropeobj179006* t0; TY179507 LOC4; TY179507 LOC5; Ropeobj179006* generatedproc0; TY536235 LOC20; Ropeobj179006** LOC21; Ropeobj179006** LOC22; Ropeobj179006** LOC23; TY179507 LOC24; result0 = (Ropeobj179006*)0; memset((void*)(&c0), 0, sizeof(c0)); p0 = newproc_530206_3723162438(NIM_NIL, m0); result0 = gettempname_534596_839829468(m0); switch (reason0) { case
((Ttypeinforeason538016) 0): { c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_145)); } break; default: { } break; } memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = result0; header0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_146), LOC3, 1); t0 = gettypedesc_536671_839829468(m0, typ0); memset((void*)LOC4, 0, sizeof(LOC4)); LOC4[0] = t0; linef_533700_839829468(p0, ((Tcprocsection530011) 0), ((NimStringDesc*) &T839829468_147), LOC4, 1); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = t0; linef_533700_839829468(p0, ((Tcprocsection530011) 1), ((NimStringDesc*) &T839829468_148), LOC5, 1); c0.p = p0; { Ropeobj179006* LOC10; if (!((*typ0).kind == ((Ttypekind293244) 24))) goto LA8; LOC10 = (Ropeobj179006*)0; LOC10 = rope_179277_2381377266(((NimStringDesc*) &T839829468_188)); gentraverseprocseq_538399_839829468((&c0), LOC10, typ0); } goto LA6; LA8: ; { { Ttype293840* LOC14; Ropeobj179006* LOC17; LOC14 = (Ttype293840*)0; LOC14 = skiptypes_297099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106232576256)); if (!((*LOC14).kind == ((Ttypekind293244) 4) || (*LOC14).kind == ((Ttypekind293244) 16))) goto LA15; LOC17 = (Ropeobj179006*)0; LOC17 = rope_179277_2381377266(((NimStringDesc*) &T839829468_188)); gentraverseproc_538022_839829468((&c0), LOC17, (*typ0).sons->data[((NI) 0)]); } goto LA12; LA15: ; { Ropeobj179006* LOC19; LOC19 = (Ropeobj179006*)0; LOC19 = rope_179277_2381377266(((NimStringDesc*) &T839829468_189)); gentraverseproc_538022_839829468((&c0), LOC19, (*typ0).sons->data[((NI) 0)]); } LA12: ; } LA6: ;
/* assembles the proc from its three accumulated sections into format
 * T..._190 (completed on the next line). */
memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = header0; LOC21 = (Ropeobj179006**)0; LOC21 = s_530179_3723162438(p0, ((Tcprocsection530011) 0)); LOC20[1] = (*LOC21); LOC22 = (Ropeobj179006**)0; LOC22 = s_530179_3723162438(p0, ((Tcprocsection530011) 1)); LOC20[2] = (*LOC22); LOC23 = (Ropeobj179006**)0; LOC23 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); LOC20[3] = (*LOC23); generatedproc0 = HEX25_179905_2381377266(((NimStringDesc*)
/* NOTE(review): generated Nim cgen output; do not hand-edit. Closes
 * gentraverseproc's top-level generator: writes the forward declaration
 * (T..._191) into file section 7 and the full proc body into section 10,
 * returning the generated name. */
&T839829468_190), LOC20, 4); memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = header0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC24, 1); add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 10))- 0], generatedproc0); return result0; }
/* genarrayinfo: RTTI for an array type — the "base" slot holds the element
 * type's (sons[1]) type info. */
N_NIMCALL(void, genarrayinfo_538005_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ropeobj179006* name0) { Ropeobj179006* LOC1; LOC1 = (Ropeobj179006*)0; LOC1 = gentypeinfo_536941_839829468(m0, (*typ0).sons->data[((NI) 1)]); gentypeinfoauxbase_536960_839829468(m0, typ0, typ0, name0, LOC1); }
/* gensetinfo: RTTI for a set type — attaches a node recording the set's
 * first ordinal value (format T..._193) in file section 14. */
N_NIMCALL(void, gensetinfo_537867_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ropeobj179006* name0) { Ropeobj179006* tmp0; TY536238 LOC1; NI64 LOC2; gentypeinfoaux_537027_839829468(m0, typ0, typ0, name0); tmp0 = getnimnode_536945_839829468(m0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = tmp0; LOC2 = (NI64)0; LOC2 = firstord_321001_3876443242(typ0); LOC1[1] = rope_179401_2381377266(LOC2); LOC1[2] = name0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_193), LOC1, 3); }
/* genenuminfo: RTTI for an enum type. Emits a node-pointer array sized to
 * the field count (T..._139 in section 12), then walks the enum fields
 * collecting display names (the field's ast string overrides the symbol
 * name), joining them with a separator + tnl newline string; fields whose
 * position differs from their index (or when type flag bit 5 is set) get an
 * explicit position record (T..._194) and mark the enum as having holes.
 * Continues on the following lines. */
N_NIMCALL(void, genenuminfo_537597_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ropeobj179006* name0) { Ropeobj179006* nodeptrs0; NI length0; TY533811 LOC1; Ropeobj179006* enumnames0; Ropeobj179006* specialcases0; NI firstnimnode0; NIM_BOOL hasholes0; Ropeobj179006* enumarray0; Ropeobj179006* counter0; TY179507 LOC24; TY536238 LOC25; TY537847 LOC26; TY536235 LOC27; gentypeinfoaux_537027_839829468(m0, typ0, typ0, name0); nodeptrs0 = gettempname_534596_839829468(m0); length0 = sonslen_296351_850551059((*typ0).n); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = nodeptrs0; LOC1[1] = rope_179401_2381377266(((NI64) (length0))); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC1, 2); enumnames0 = (Ropeobj179006*)0; specialcases0 = (Ropeobj179006*)0; firstnimnode0 = (*m0).typenodes; hasholes0 = NIM_FALSE; { NI
i_537622_839829468; NI HEX3Atmp_537860_839829468; NI res_537863_839829468; i_537622_839829468 = (NI)0; HEX3Atmp_537860_839829468 = (NI)0; HEX3Atmp_537860_839829468 = (NI)(length0 - ((NI) 1)); res_537863_839829468 = ((NI) 0); { while (1) { Tsym293834* field0; Ropeobj179006* elemnode0; if (!(res_537863_839829468 <= HEX3Atmp_537860_839829468)) goto LA4; i_537622_839829468 = res_537863_839829468; field0 = (*(*(*typ0).n).kindU.S6.sons->data[i_537622_839829468]).kindU.S4.sym; elemnode0 = getnimnode_536945_839829468(m0); { Ropeobj179006* LOC9; if (!((*field0).ast == NIM_NIL)) goto LA7; LOC9 = (Ropeobj179006*)0; LOC9 = makecstring_192638_155036129((*(*field0).name).s); add_179482_2381377266(&enumnames0, LOC9); } goto LA5; LA7: ; { Ropeobj179006* LOC11; LOC11 = (Ropeobj179006*)0; LOC11 = makecstring_192638_155036129((*(*field0).ast).kindU.S3.strval); add_179482_2381377266(&enumnames0, LOC11); } LA5: ; { NimStringDesc* LOC16; if (!(i_537622_839829468 < (NI)(length0 - ((NI) 1)))) goto LA14; LOC16 = (NimStringDesc*)0; LOC16 = rawNewString(tnl_177644_4151366050->Sup.len + 2); appendString(LOC16, ((NimStringDesc*) &T839829468_110)); appendString(LOC16, tnl_177644_4151366050); add_179487_2381377266(&enumnames0, LOC16); } LA14: ; { NIM_BOOL LOC19; TY533811 LOC23; LOC19 = (NIM_BOOL)0; LOC19 = !(((*field0).position == i_537622_839829468)); if (LOC19) goto LA20; LOC19 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag293431) 5))&31U)))!=0); LA20: ; if (!LOC19) goto LA21; memset((void*)LOC23, 0, sizeof(LOC23)); LOC23[0] = elemnode0; LOC23[1] = rope_179401_2381377266(((NI64) ((*field0).position))); addf_180205_2381377266(&specialcases0, ((NimStringDesc*) &T839829468_194), LOC23, 2); hasholes0 = NIM_TRUE; } LA21: ; res_537863_839829468 += ((NI) 1); } LA4: ; } }
/* emits the name array (T..._196), the node-initialization loop
 * (T..._197, six args), the special-case fixups, the top enum node
 * (T..._198) and — for enums with holes — the flag record T..._199. */
enumarray0 = gettempname_534596_839829468(m0); counter0 = gettempname_534596_839829468(m0); memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = counter0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 12))- 0], ((NimStringDesc*)
&T839829468_195), LOC24, 1); memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = enumarray0; LOC25[1] = rope_179401_2381377266(((NI64) (length0))); LOC25[2] = enumnames0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 12))- 0], ((NimStringDesc*) &T839829468_196), LOC25, 3); memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = counter0; LOC26[1] = rope_179401_2381377266(((NI64) (length0))); LOC26[2] = (*m0).typenodesname; LOC26[3] = rope_179401_2381377266(((NI64) (firstnimnode0))); LOC26[4] = enumarray0; LOC26[5] = nodeptrs0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_197), LOC26, 6); add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], specialcases0); memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = getnimnode_536945_839829468(m0); LOC27[1] = rope_179401_2381377266(((NI64) (length0))); LOC27[2] = nodeptrs0; LOC27[3] = name0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_198), LOC27, 4); { TY179507 LOC32; if (!hasholes0) goto LA30; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = name0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_199), LOC32, 1); } LA30: ; }
/* discriminatortablename: derives a stable C identifier for a variant
 * object's discriminator lookup table. Climbs the base-type chain
 * (sons[0]) until the record owning the discriminator field is found
 * (lookupinrecord), then formats "<type id>_<mangled field name>" via
 * T..._201; a type without a symbol is an internal error. Continues on the
 * next line. */
N_NIMCALL(Ropeobj179006*, discriminatortablename_537057_839829468)(Tcgen530027* m0, Ttype293840* objtype_537060_839829468, Tsym293834* d0) { Ropeobj179006* result0; Ttype293840* objtype0; TY533811 LOC8; NimStringDesc* LOC9; result0 = (Ropeobj179006*)0; objtype0 = objtype_537060_839829468; { while (1) { Tsym293834* LOC3; LOC3 = (Tsym293834*)0; LOC3 = lookupinrecord_300119_2984716966((*objtype0).n, (*d0).name); if (!(LOC3 == NIM_NIL)) goto LA2; objtype0 = (*objtype0).sons->data[((NI) 0)]; } LA2: ; } { if (!((*objtype0).sym == NIM_NIL)) goto LA6; internalerror_197100_155036129((*d0).info, ((NimStringDesc*) &T839829468_200)); } LA6: ; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rope_179401_2381377266(((NI64) ((*objtype0).Sup.id))); LOC9 =
/* NOTE(review): generated Nim cgen output; do not hand-edit. First
 * statements close discriminatortablename (mangled field name appended,
 * formatted with T..._201). */
(NimStringDesc*)0; LOC9 = mangle_529847_2036603609((*(*d0).name).s); LOC8[1] = rope_179277_2381377266(LOC9); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_201), LOC8, 2); return result0; }
/* genobjectfields: emits the RTTI node tree for an object's record body.
 * kind 138 (record list): a single son collapses into a direct recursion;
 * multiple sons allocate a node-pointer array (T..._139), fill one RTTI
 * node per son (T..._140) and link them under `expr0` (T..._142); an empty
 * list emits T..._143. kind 139 (record case / variant, starting further
 * down): emits the discriminator field record (T..._202, seven args) plus
 * a branch lookup table sized L0+1 (T..._203), then per branch fills table
 * slots for every covered ordinal — expanding kind-44 ranges value by
 * value — before the `default`-style branch (kind 88). This function is
 * truncated at the end of the visible chunk; its remainder lies past this
 * view. */
N_NIMCALL(void, genobjectfields_537104_839829468)(Tcgen530027* m0, Ttype293840* typ0, Tnode293802* n0, Ropeobj179006* expr0) { switch ((*n0).kind) { case ((Tnodekind293020) 138): { NI L0; L0 = sonslen_296351_850551059(n0); { if (!(L0 == ((NI) 1))) goto LA4; genobjectfields_537104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[((NI) 0)], expr0); } goto LA2; LA4: ; { Ropeobj179006* tmp0; TY533811 LOC9; TY536238 LOC14; if (!(((NI) 0) < L0)) goto LA7; tmp0 = gettempname_534596_839829468(m0); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = tmp0; LOC9[1] = rope_179401_2381377266(((NI64) (L0))); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC9, 2); { NI i_537127_839829468; NI HEX3Atmp_537482_839829468; NI res_537485_839829468; i_537127_839829468 = (NI)0; HEX3Atmp_537482_839829468 = (NI)0; HEX3Atmp_537482_839829468 = (NI)(L0 - ((NI) 1)); res_537485_839829468 = ((NI) 0); { while (1) { Ropeobj179006* tmp20; TY536238 LOC13; if (!(res_537485_839829468 <= HEX3Atmp_537482_839829468)) goto LA12; i_537127_839829468 = res_537485_839829468; tmp20 = getnimnode_536945_839829468(m0); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = tmp0; LOC13[1] = rope_179401_2381377266(((NI64) (i_537127_839829468))); LOC13[2] = tmp20; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC13, 3); genobjectfields_537104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[i_537127_839829468], tmp20); res_537485_839829468 += ((NI) 1); } LA12: ; } } memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = expr0; LOC14[1] = rope_179401_2381377266(((NI64) (L0))); LOC14[2] = tmp0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC14, 3); } goto LA2; LA7:
; { TY533811 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = expr0; LOC16[1] = rope_179401_2381377266(((NI64) (L0))); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC16, 2); } LA2: ; } break; case ((Tnodekind293020) 139): { Tsym293834* field0; Ropeobj179006* tmp0; NI64 L0; TY537401 LOC18; TY533811 LOC19; field0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; tmp0 = discriminatortablename_537057_839829468(m0, typ0, field0); L0 = lengthord_321007_3876443242((*field0).typ); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = expr0; LOC18[1] = gettypedesc_536671_839829468(m0, typ0); LOC18[2] = (*field0).loc.r; LOC18[3] = gentypeinfo_536941_839829468(m0, (*field0).typ); LOC18[4] = makecstring_192638_155036129((*(*field0).name).s); LOC18[5] = tmp0; LOC18[6] = rope_179401_2381377266(L0); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_202), LOC18, 7); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = tmp0; LOC19[1] = rope_179401_2381377266((NI64)(L0 + IL64(1))); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 8))- 0], ((NimStringDesc*) &T839829468_203), LOC19, 2); { NI i_537421_839829468; NI HEX3Atmp_537499_839829468; NI LOC21; NI res_537502_839829468; i_537421_839829468 = (NI)0; HEX3Atmp_537499_839829468 = (NI)0; LOC21 = (NI)0; LOC21 = sonslen_296351_850551059(n0); HEX3Atmp_537499_839829468 = (NI)(LOC21 - ((NI) 1)); res_537502_839829468 = ((NI) 1); { while (1) { Tnode293802* b0; Ropeobj179006* tmp20; Tnode293802* LOC24; if (!(res_537502_839829468 <= HEX3Atmp_537499_839829468)) goto LA23; i_537421_839829468 = res_537502_839829468; b0 = (*n0).kindU.S6.sons->data[i_537421_839829468]; tmp20 = getnimnode_536945_839829468(m0); LOC24 = (Tnode293802*)0; LOC24 = lastson_296364_850551059(b0); genobjectfields_537104_839829468(m0, typ0, LOC24, tmp20); switch ((*b0).kind) { case ((Tnodekind293020) 85): { { NI LOC28; LOC28 = (NI)0; LOC28 =
sonslen_296351_850551059(b0); if (!(LOC28 < ((NI) 2))) goto LA29; internalerror_197100_155036129((*b0).info, ((NimStringDesc*) &T839829468_204)); } LA29: ; { NI j_537436_839829468; NI HEX3Atmp_537492_839829468; NI LOC32; NI res_537495_839829468; j_537436_839829468 = (NI)0; HEX3Atmp_537492_839829468 = (NI)0; LOC32 = (NI)0; LOC32 = sonslen_296351_850551059(b0); HEX3Atmp_537492_839829468 = (NI)(LOC32 - ((NI) 2)); res_537495_839829468 = ((NI) 0); { while (1) { if (!(res_537495_839829468 <= HEX3Atmp_537492_839829468)) goto LA34; j_537436_839829468 = res_537495_839829468; { NI x0; NI64 LOC39; NI y0; NI64 LOC40; if (!((*(*b0).kindU.S6.sons->data[j_537436_839829468]).kind == ((Tnodekind293020) 44))) goto LA37; LOC39 = (NI64)0; LOC39 = getordvalue_321129_3876443242((*(*b0).kindU.S6.sons->data[j_537436_839829468]).kindU.S6.sons->data[((NI) 0)]); x0 = ((NI) (LOC39)); LOC40 = (NI64)0; LOC40 = getordvalue_321129_3876443242((*(*b0).kindU.S6.sons->data[j_537436_839829468]).kindU.S6.sons->data[((NI) 1)]); y0 = ((NI) (LOC40)); { while (1) { TY536238 LOC43; if (!(x0 <= y0)) goto LA42; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = tmp0; LOC43[1] = rope_179401_2381377266(((NI64) (x0))); LOC43[2] = tmp20; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC43, 3); x0 += ((NI) 1); } LA42: ; } } goto LA35; LA37: ; { TY536238 LOC45; NI64 LOC46; memset((void*)LOC45, 0, sizeof(LOC45)); LOC45[0] = tmp0; LOC46 = (NI64)0; LOC46 = getordvalue_321129_3876443242((*b0).kindU.S6.sons->data[j_537436_839829468]); LOC45[1] = rope_179401_2381377266(LOC46); LOC45[2] = tmp20; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC45, 3); } LA35: ; res_537495_839829468 += ((NI) 1); } LA34: ; } } } break; case ((Tnodekind293020) 88): { TY536238 LOC48; memset((void*)LOC48, 0, sizeof(LOC48)); LOC48[0] = tmp0; LOC48[1] = rope_179401_2381377266(L0); LOC48[2] = tmp20;
addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC48, 3); } break; default: { internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_205)); } break; } res_537502_839829468 += ((NI) 1); } LA23: ; } } } break; case ((Tnodekind293020) 3): { Tsym293834* field0; field0 = (*n0).kindU.S4.sym; { TY537475 LOC55; if (!((*field0).kindU.S4.bitsize == ((NI) 0))) goto LA53; memset((void*)LOC55, 0, sizeof(LOC55)); LOC55[0] = expr0; LOC55[1] = gettypedesc_536671_839829468(m0, typ0); LOC55[2] = (*field0).loc.r; LOC55[3] = gentypeinfo_536941_839829468(m0, (*field0).typ); LOC55[4] = makecstring_192638_155036129((*(*field0).name).s); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_206), LOC55, 5); } LA53: ; } break; default: { internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_207)); } break; } } N_NIMCALL(void, genobjectinfo_537506_839829468)(Tcgen530027* m0, Ttype293840* typ0, Ttype293840* origtype0, Ropeobj179006* name0) { Ropeobj179006* tmp0; TY533811 LOC12; Ttype293840* t0; { if (!((*typ0).kind == ((Ttypekind293244) 17))) goto LA3; gentypeinfoaux_537027_839829468(m0, typ0, origtype0, name0); } goto LA1; LA3: ; { Ropeobj179006* LOC6; LOC6 = (Ropeobj179006*)0; LOC6 = rope_179277_2381377266(((NimStringDesc*) &T839829468_18)); gentypeinfoauxbase_536960_839829468(m0, typ0, origtype0, name0, LOC6); } LA1: ; tmp0 = getnimnode_536945_839829468(m0); { NIM_BOOL LOC9; LOC9 = (NIM_BOOL)0; LOC9 = isimportedcpptype_534476_839829468(typ0); if (!!(LOC9)) goto LA10; genobjectfields_537104_839829468(m0, typ0, (*typ0).n, tmp0); } LA10: ; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = name0; LOC12[1] = tmp0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC12, 2); t0 = (*typ0).sons->data[((NI) 0)]; { while (1) { if (!!((t0 == NIM_NIL))) goto LA14; t0 = skiptypes_297099_850551059(t0, 
IL64(211106247215360)); (*t0).flags |= ((NU32)1)<<((((Ttypeflag293431) 5))%(sizeof(NU32)*8)); t0 = (*t0).sons->data[((NI) 0)]; } LA14: ; } } N_NIMCALL(void, gendeepcopyproc_539066_839829468)(Tcgen530027* m0, Tsym293834* s0, Ropeobj179006* result0) { TY533811 LOC1; genproc_533951_839829468(m0, s0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = result0; LOC1[1] = (*s0).loc.r; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_208), LOC1, 2); } N_NIMCALL(Ropeobj179006*, gentypeinfo_536941_839829468)(Tcgen530027* m0, Ttype293840* t_536944_839829468) { Ropeobj179006* result0; Ttype293840* origtype0; Ttype293840* t0; TY179507 LOC1; Tsym293834* owner0; Ttype293840* LOC12; Ropeobj179006* LOC66; Ropeobj179006* LOC67; Ropeobj179006* LOC68; { result0 = (Ropeobj179006*)0; origtype0 = t_536944_839829468; t0 = getuniquetype_529640_2036603609(t_536944_839829468); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rope_179401_2381377266(((NI64) ((*t0).Sup.id))); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_127), LOC1, 1); { NIM_BOOL LOC4; Ropeobj179006* LOC7; Ropeobj179006* LOC8; Ropeobj179006* LOC9; LOC4 = (NIM_BOOL)0; LOC4 = containsorincl_269862_2627731572((&(*m0).typeinfomarker), (*t0).Sup.id); if (!LOC4) goto LA5; LOC7 = (Ropeobj179006*)0; LOC7 = rope_179277_2381377266(((NimStringDesc*) &T839829468_128)); LOC8 = (Ropeobj179006*)0; LOC8 = HEX26_179418_2381377266(LOC7, result0); LOC9 = (Ropeobj179006*)0; LOC9 = rope_179277_2381377266(((NimStringDesc*) &T839829468_117)); result0 = HEX26_179418_2381377266(LOC8, LOC9); goto BeforeRet; } LA5: ; { while (1) { if (!((*t0).kind == ((Ttypekind293244) 13))) goto LA11; t0 = lastson_296377_850551059(t0); } LA11: ; } LOC12 = (Ttype293840*)0; LOC12 = skiptypes_297099_850551059(t0, IL64(211106247256320)); owner0 = getmodule_300123_2984716966((*LOC12).owner); { Tcgen530027* LOC17; Ropeobj179006* LOC18; Ropeobj179006* LOC19; Ropeobj179006* LOC20; TY533811 LOC21; NimStringDesc* 
LOC22; Ropeobj179006* LOC23; Ropeobj179006* LOC24; Ropeobj179006* LOC25; if (!!((owner0 == (*m0).module))) goto LA15; LOC17 = (Tcgen530027*)0; LOC17 = bmod_530201_3723162438(owner0); LOC18 = (Ropeobj179006*)0; LOC18 = gentypeinfo_536941_839829468(LOC17, t0); LOC19 = (Ropeobj179006*)0; LOC19 = cgsym_533403_839829468(m0, ((NimStringDesc*) &T839829468_129)); LOC20 = (Ropeobj179006*)0; LOC20 = cgsym_533403_839829468(m0, ((NimStringDesc*) &T839829468_130)); memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = result0; LOC22 = (NimStringDesc*)0; LOC22 = typetostring_321017_3876443242(t0, ((Tprefereddesc321011) 0)); LOC21[1] = rope_179277_2381377266(LOC22); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_131), LOC21, 2); LOC23 = (Ropeobj179006*)0; LOC23 = rope_179277_2381377266(((NimStringDesc*) &T839829468_128)); LOC24 = (Ropeobj179006*)0; LOC24 = HEX26_179418_2381377266(LOC23, result0); LOC25 = (Ropeobj179006*)0; LOC25 = rope_179277_2381377266(((NimStringDesc*) &T839829468_117)); result0 = HEX26_179418_2381377266(LOC24, LOC25); goto BeforeRet; } LA15: ; switch ((*t0).kind) { case ((Ttypekind293244) 3): case ((Ttypekind293244) 62): { result0 = rope_179277_2381377266(((NimStringDesc*) &T839829468_132)); } break; case ((Ttypekind293244) 26): case ((Ttypekind293244) 1): case ((Ttypekind293244) 2): case ((Ttypekind293244) 29): case ((Ttypekind293244) 28): case ((Ttypekind293244) 31) ... 
((Ttypekind293244) 44): case ((Ttypekind293244) 23): { Ropeobj179006* LOC28; LOC28 = (Ropeobj179006*)0; LOC28 = rope_179277_2381377266(((NimStringDesc*) &T839829468_132)); gentypeinfoauxbase_536960_839829468(m0, t0, t0, result0, LOC28); } break; case ((Ttypekind293244) 59): { { Ttype293840* LOC34; if (!!(((*t0).n == NIM_NIL))) goto LA32; LOC34 = (Ttype293840*)0; LOC34 = lastson_296377_850551059(t0); result0 = gentypeinfo_536941_839829468(m0, LOC34); } goto LA30; LA32: ; { NimStringDesc* LOC36; LOC36 = (NimStringDesc*)0; LOC36 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI293244))->Sup.len + 13); appendString(LOC36, ((NimStringDesc*) &T839829468_137)); appendString(LOC36, reprEnum((NI)(*t0).kind, (&NTI293244))); appendChar(LOC36, 41); internalerror_197113_155036129(LOC36); } LA30: ; } break; case ((Ttypekind293244) 25): { { Ropeobj179006* LOC42; if (!!(((*t0).callconv == ((Tcallingconvention293002) 8)))) goto LA40; LOC42 = (Ropeobj179006*)0; LOC42 = rope_179277_2381377266(((NimStringDesc*) &T839829468_132)); gentypeinfoauxbase_536960_839829468(m0, t0, t0, result0, LOC42); } goto LA38; LA40: ; { Ttype293840* LOC44; LOC44 = (Ttype293840*)0; LOC44 = fakeclosuretype_538010_839829468((*t0).owner); gentupleinfo_537549_839829468(m0, LOC44, result0); } LA38: ; } break; case ((Ttypekind293244) 24): case ((Ttypekind293244) 22): { gentypeinfoaux_537027_839829468(m0, t0, t0, result0); { Ropeobj179006* markerproc0; TY533811 LOC50; if (!(((Tgcmode170080) 4) <= gselectedgc_170133_2607990831)) goto LA48; markerproc0 = gentraverseproc_538632_839829468(m0, t0, ((Ttypeinforeason538016) 0)); memset((void*)LOC50, 0, sizeof(LOC50)); LOC50[0] = result0; LOC50[1] = markerproc0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_192), LOC50, 2); } LA48: ; } break; case ((Ttypekind293244) 21): case ((Ttypekind293244) 20): { gentypeinfoaux_537027_839829468(m0, t0, t0, result0); } break; case ((Ttypekind293244) 4): case ((Ttypekind293244) 16): { 
genarrayinfo_538005_839829468(m0, t0, result0); } break; case ((Ttypekind293244) 19): { gensetinfo_537867_839829468(m0, t0, result0); } break; case ((Ttypekind293244) 14): { genenuminfo_537597_839829468(m0, t0, result0); } break; case ((Ttypekind293244) 17): { genobjectinfo_537506_839829468(m0, t0, origtype0, result0); } break; case ((Ttypekind293244) 18): { gentupleinfo_537549_839829468(m0, t0, result0); } break; default: { NimStringDesc* LOC58; LOC58 = (NimStringDesc*)0; LOC58 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI293244))->Sup.len + 13); appendString(LOC58, ((NimStringDesc*) &T839829468_137)); appendString(LOC58, reprEnum((NI)(*t0).kind, (&NTI293244))); appendChar(LOC58, 41); internalerror_197113_155036129(LOC58); } break; } { if (!!(((*t0).deepcopy == NIM_NIL))) goto LA61; gendeepcopyproc_539066_839829468(m0, (*t0).deepcopy, result0); } goto LA59; LA61: ; { if (!!(((*origtype0).deepcopy == NIM_NIL))) goto LA64; gendeepcopyproc_539066_839829468(m0, (*origtype0).deepcopy, result0); } goto LA59; LA64: ; LA59: ; LOC66 = (Ropeobj179006*)0; LOC66 = rope_179277_2381377266(((NimStringDesc*) &T839829468_128)); LOC67 = (Ropeobj179006*)0; LOC67 = HEX26_179418_2381377266(LOC66, result0); LOC68 = (Ropeobj179006*)0; LOC68 = rope_179277_2381377266(((NimStringDesc*) &T839829468_117)); result0 = HEX26_179418_2381377266(LOC67, LOC68); }BeforeRet: ; return result0; } N_NIMCALL(void, localdebuginfo_539449_839829468)(Tcproc530021* p0, Tsym293834* s0) { Ropeobj179006* a0; TY536235 LOC16; NimStringDesc* LOC17; { { if (!!(((163840 & (*p0).options) == 163840))) goto LA3; goto BeforeRet; } LA3: ; { Ttype293840* LOC7; LOC7 = (Ttype293840*)0; LOC7 = skiptypes_297099_850551059((*s0).typ, IL64(211106240964864)); if (!((*LOC7).kind == ((Ttypekind293244) 27) || (*LOC7).kind == ((Ttypekind293244) 48))) goto LA8; goto BeforeRet; } LA8: ; a0 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_52), (*s0).loc.r); { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = ((*s0).kind == 
((Tsymkind293435) 3)); if (!(LOC12)) goto LA13; LOC12 = ccgintroducedptr_534609_839829468(s0); LA13: ; if (!LOC12) goto LA14; a0 = (*s0).loc.r; } LA14: ; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rope_179401_2381377266(((NI64) ((*p0).maxframelen))); LOC17 = (NimStringDesc*)0; LOC17 = nsuNormalize((*(*s0).name).s); LOC16[1] = makecstring_192638_155036129(LOC17); LOC16[2] = a0; LOC16[3] = gentypeinfo_536941_839829468((*p0).module, (*s0).loc.t); linef_533700_839829468(p0, ((Tcprocsection530011) 1), ((NimStringDesc*) &T839829468_126), LOC16, 4); (*p0).maxframelen += ((NI) 1); (*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].framelen += ((NI) 1); }BeforeRet: ; } N_NIMCALL(void, assignlocalvar_539614_839829468)(Tcproc530021* p0, Tsym293834* s0) { Ropeobj179006* decl0; Ropeobj179006* LOC1; Ropeobj179006* LOC2; LOC1 = (Ropeobj179006*)0; LOC1 = localvardecl_539532_839829468(p0, s0); LOC2 = (Ropeobj179006*)0; LOC2 = HEX26_179447_2381377266(LOC1, ((NimStringDesc*) &T839829468_125)); decl0 = HEX26_179447_2381377266(LOC2, tnl_177644_4151366050); line_533690_839829468(p0, ((Tcprocsection530011) 0), decl0); localdebuginfo_539449_839829468(p0, s0); } N_NIMCALL(void, initlocalvar_539398_839829468)(Tcproc530021* p0, Tsym293834* v0, NIM_BOOL immediateasgn0) { { if (!!((((*v0).flags &(1U<<((NU)(((Tsymflag293184) 12))&31U)))!=0))) goto LA3; { if (!!(immediateasgn0)) goto LA7; constructloc_539388_839829468(p0, (*v0).loc, NIM_FALSE); } LA7: ; } LA3: ; } N_NIMCALL(void, fillresult_534865_839829468)(Tsym293834* param0) { TY534289 LOC1; Ropeobj179006* LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (Ropeobj179006*)0; LOC2 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_210), LOC1, 0); fillloc_533282_839829468((&(*param0).loc), ((Tlockind293808) 4), (*param0).typ, LOC2, ((Tstorageloc293812) 2)); { NIM_BOOL LOC5; Tctypekind530007 LOC6; LOC5 = (NIM_BOOL)0; LOC6 = (Tctypekind530007)0; LOC6 = mapreturntype_534445_839829468((*param0).typ); LOC5 = 
!((LOC6 == ((Tctypekind530007) 17))); if (!(LOC5)) goto LA7; LOC5 = isinvalidreturntype_534548_839829468((*param0).typ); LA7: ; if (!LOC5) goto LA8; (*param0).loc.flags |= ((NU16)1)<<((((Tlocflag293810) 0))%(sizeof(NU16)*8)); (*param0).loc.s = ((Tstorageloc293812) 0); } LA8: ; } N_NIMCALL(void, assignparam_539994_839829468)(Tcproc530021* p0, Tsym293834* s0) { localdebuginfo_539449_839829468(p0, s0); } N_NIMCALL(void, closuresetup_561158_839829468)(Tcproc530021* p0, Tsym293834* prc0) { Tnode293802* ls0; Tnode293802* LOC5; Tsym293834* env0; TY533811 LOC10; { { if (!!((((*(*prc0).typ).flags &(1U<<((NU)(((Ttypeflag293431) 11))&31U)))!=0))) goto LA3; goto BeforeRet; } LA3: ; LOC5 = (Tnode293802*)0; LOC5 = HEX5BHEX5D_294238_850551059((*prc0).ast, ((NI) 3)); ls0 = lastson_296364_850551059(LOC5); { if (!!(((*ls0).kind == ((Tnodekind293020) 3)))) goto LA8; internalerror_197100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_211)); } LA8: ; env0 = (*ls0).kindU.S4.sym; assignlocalvar_539614_839829468(p0, env0); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdloc_539188_839829468((*env0).loc); LOC10[1] = gettypedesc_536671_839829468((*p0).module, (*env0).typ); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_212), LOC10, 2); }BeforeRet: ; } N_NIMCALL(Ropeobj179006*, initgcframe_539435_839829468)(Tcproc530021* p0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { TY179507 LOC5; if (!(((NI) 0) < ((NI) ((*p0).gcframeid)))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = (*p0).gcframetype; result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_217), LOC5, 1); } LA3: ; return result0; } N_NIMCALL(Ropeobj179006*, initframe_561140_839829468)(Tcproc530021* p0, Ropeobj179006* procname0, Ropeobj179006* filename0) { Ropeobj179006* result0; Ropeobj179006* LOC1; result0 = (Ropeobj179006*)0; LOC1 = (Ropeobj179006*)0; LOC1 = cgsym_533403_839829468((*p0).module, ((NimStringDesc*) &T839829468_218)); { Ropeobj179006* 
LOC6; TY536235 LOC7; if (!(((NI) 0) < (*p0).maxframelen)) goto LA4; LOC6 = (Ropeobj179006*)0; LOC6 = cgsym_533403_839829468((*p0).module, ((NimStringDesc*) &T839829468_219)); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = procname0; LOC7[1] = filename0; LOC7[2] = rope_179401_2381377266(((NI64) ((*p0).maxframelen))); LOC7[3] = rope_179401_2381377266(((NI64) ((*p0).blocks->data[((NI) 0)].framelen))); result0 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_220), LOC7, 4); } goto LA2; LA4: ; { TY533811 LOC9; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = procname0; LOC9[1] = filename0; result0 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_221), LOC9, 2); } LA2: ; return result0; } N_NIMCALL(void, appcg_533648_839829468)(Tcproc530021* p0, Tcprocsection530011 s0, NimStringDesc* frmt0, Ropeobj179006** args0, NI args0Len0) { Ropeobj179006** LOC1; Ropeobj179006* LOC2; LOC1 = (Ropeobj179006**)0; LOC1 = s_530179_3723162438(p0, s0); LOC2 = (Ropeobj179006*)0; LOC2 = ropecg_533407_839829468((*p0).module, frmt0, args0, args0Len0); add_179482_2381377266(LOC1, LOC2); } N_NIMCALL(Ropeobj179006*, deinitgcframe_539441_839829468)(Tcproc530021* p0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { TY534289 LOC5; if (!(((NI) 0) < ((NI) ((*p0).gcframeid)))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); result0 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_225), LOC5, 0); } LA3: ; return result0; } N_NIMCALL(Ropeobj179006*, deinitframe_561150_839829468)(Tcproc530021* p0) { Ropeobj179006* result0; TY534289 LOC1; result0 = (Ropeobj179006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); result0 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_226), LOC1, 0); return result0; } N_NIMCALL(void, genprocaux_561284_839829468)(Tcgen530027* m0, Tsym293834* prc0) { Tcproc530021* p0; Ropeobj179006* header0; Ropeobj179006* returnstmt0; Tnode293802* LOC51; Ropeobj179006* generatedproc0; p0 = 
newproc_530206_3723162438(prc0, m0); header0 = genprocheader_536867_839829468(m0, prc0); returnstmt0 = NIM_NIL; { NIM_BOOL LOC3; Tsym293834* res0; LOC3 = (NIM_BOOL)0; LOC3 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag293184) 9))&31U)))!=0)); if (!(LOC3)) goto LA4; LOC3 = !(((*(*prc0).typ).sons->data[((NI) 0)] == NIM_NIL)); LA4: ; if (!LOC3) goto LA5; { NI LOC9; LOC9 = (NI)0; LOC9 = len_294081_850551059((*prc0).ast); if (!(LOC9 <= ((NI) 7))) goto LA10; internalerror_197100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_120)); } LA10: ; res0 = (*(*(*prc0).ast).kindU.S6.sons->data[((NI) 7)]).kindU.S4.sym; { NIM_BOOL LOC14; TY179507 LOC34; LOC14 = (NIM_BOOL)0; LOC14 = isinvalidreturntype_534548_839829468((*(*prc0).typ).sons->data[((NI) 0)]); if (!!(LOC14)) goto LA15; { if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag293184) 12))&31U)))!=0)) goto LA19; (*res0).flags |= ((NU32)1)<<((((Tsymflag293184) 12))%(sizeof(NU32)*8)); } LA19: ; { NIM_BOOL LOC23; NIM_BOOL LOC24; NIM_BOOL LOC26; Tnode293802* val0; Tnode293802* LOC29; Ropeobj179006* decl0; Tloc293816 a0; TY533811 LOC32; LOC23 = (NIM_BOOL)0; LOC24 = (NIM_BOOL)0; LOC24 = (((*prc0).flags &(1U<<((NU)(((Tsymflag293184) 12))&31U)))!=0); if (!(LOC24)) goto LA25; LOC26 = (NIM_BOOL)0; LOC26 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC26) goto LA27; LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA27: ; LOC24 = LOC26; LA25: ; LOC23 = LOC24; if (!(LOC23)) goto LA28; LOC29 = (Tnode293802*)0; LOC29 = getbody_336227_1724185294(prc0); val0 = easyresultasgn_561191_839829468(LOC29); LOC23 = !((val0 == NIM_NIL)); LA28: ; if (!LOC23) goto LA30; decl0 = localvardecl_539532_839829468(p0, res0); memset((void*)(&a0), 0, sizeof(a0)); initlocexprsingleuse_540289_839829468(p0, val0, (&a0)); memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = decl0; LOC32[1] = rdloc_539188_839829468(a0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC32, 2); 
} goto LA21; LA30: ; { assignlocalvar_539614_839829468(p0, res0); initlocalvar_539398_839829468(p0, res0, NIM_FALSE); } LA21: ; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = rdloc_539188_839829468((*res0).loc); returnstmt0 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_209), LOC34, 1); } goto LA12; LA15: ; { fillresult_534865_839829468(res0); assignparam_539994_839829468(p0, res0); { Ttype293840* LOC38; LOC38 = (Ttype293840*)0; LOC38 = skiptypes_297099_850551059((*res0).typ, IL64(211106232576256)); if (!((*LOC38).kind == ((Ttypekind293244) 16))) goto LA39; (*res0).loc.s = ((Tstorageloc293812) 0); } LA39: ; } LA12: ; } LA5: ; { NI i_561627_839829468; NI HEX3Atmp_561743_839829468; NI LOC42; NI res_561746_839829468; i_561627_839829468 = (NI)0; HEX3Atmp_561743_839829468 = (NI)0; LOC42 = (NI)0; LOC42 = sonslen_296351_850551059((*(*prc0).typ).n); HEX3Atmp_561743_839829468 = (NI)(LOC42 - ((NI) 1)); res_561746_839829468 = ((NI) 1); { while (1) { if (!(res_561746_839829468 <= HEX3Atmp_561743_839829468)) goto LA44; i_561627_839829468 = res_561746_839829468; { Tsym293834* param0; param0 = (*(*(*(*prc0).typ).n).kindU.S6.sons->data[i_561627_839829468]).kindU.S4.sym; { NIM_BOOL LOC48; LOC48 = (NIM_BOOL)0; LOC48 = iscompiletimeonly_329706_3876443242((*param0).typ); if (!LOC48) goto LA49; goto LA45; } LA49: ; assignparam_539994_839829468(p0, param0); } LA45: ; res_561746_839829468 += ((NI) 1); } LA44: ; } } closuresetup_561158_839829468(p0, prc0); LOC51 = (Tnode293802*)0; LOC51 = getbody_336227_1724185294(prc0); genstmts_540244_839829468(p0, LOC51); generatedproc0 = (Ropeobj179006*)0; { if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag293184) 14))&31U)))!=0)) goto LA54; { if (!((Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop274004) 6))&7U)))!=0)) goto LA58; header0 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_213), header0); } LA58: ; } LA54: ; { TY536235 LOC68; Ropeobj179006** LOC69; Ropeobj179006** LOC70; 
Ropeobj179006** LOC71; if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag293184) 9))&31U)))!=0)) goto LA62; { if (!((Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop274004) 6))&7U)))!=0)) goto LA66; header0 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_214), header0); } LA66: ; memset((void*)LOC68, 0, sizeof(LOC68)); LOC68[0] = header0; LOC69 = (Ropeobj179006**)0; LOC69 = s_530179_3723162438(p0, ((Tcprocsection530011) 0)); LOC68[1] = (*LOC69); LOC70 = (Ropeobj179006**)0; LOC70 = s_530179_3723162438(p0, ((Tcprocsection530011) 1)); LOC68[2] = (*LOC70); LOC71 = (Ropeobj179006**)0; LOC71 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); LOC68[3] = (*LOC71); generatedproc0 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_215), LOC68, 4); } goto LA60; LA62: ; { TY179507 LOC73; Ropeobj179006* LOC74; Ropeobj179006** LOC93; Ropeobj179006** LOC94; Ropeobj179006* LOC101; TY534289 LOC107; Ropeobj179006* LOC108; memset((void*)LOC73, 0, sizeof(LOC73)); LOC73[0] = header0; generatedproc0 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_216), LOC73, 1); LOC74 = (Ropeobj179006*)0; LOC74 = initgcframe_539435_839829468(p0); add_179482_2381377266(&generatedproc0, LOC74); { Ropeobj179006** LOC79; Ropeobj179006* procname0; Ropeobj179006* LOC80; Ropeobj179006* LOC81; if (!(((*prc0).options &(1U<<((NU)(((Toption170009) 15))&31U)))!=0)) goto LA77; LOC79 = (Ropeobj179006**)0; LOC79 = s_530179_3723162438(p0, ((Tcprocsection530011) 0)); add_179482_2381377266(&generatedproc0, (*LOC79)); procname0 = makecstring_192638_155036129((*(*prc0).name).s); LOC80 = (Ropeobj179006*)0; LOC80 = quotedfilename_197818_155036129((*prc0).info); LOC81 = (Ropeobj179006*)0; LOC81 = initframe_561140_839829468(p0, procname0, LOC80); add_179482_2381377266(&generatedproc0, LOC81); } goto LA75; LA77: ; { Ropeobj179006** LOC83; LOC83 = (Ropeobj179006**)0; LOC83 = s_530179_3723162438(p0, ((Tcprocsection530011) 0)); 
add_179482_2381377266(&generatedproc0, (*LOC83)); } LA75: ; { TY534289 LOC88; if (!(((*prc0).options &(1U<<((NU)(((Toption170009) 19))&31U)))!=0)) goto LA86; memset((void*)LOC88, 0, sizeof(LOC88)); appcg_533648_839829468(p0, ((Tcprocsection530011) 1), ((NimStringDesc*) &T839829468_222), LOC88, 0); } LA86: ; { if (!(*p0).beforeretneeded) goto LA91; add_179487_2381377266(&generatedproc0, ((NimStringDesc*) &T839829468_223)); } LA91: ; LOC93 = (Ropeobj179006**)0; LOC93 = s_530179_3723162438(p0, ((Tcprocsection530011) 1)); add_179482_2381377266(&generatedproc0, (*LOC93)); LOC94 = (Ropeobj179006**)0; LOC94 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); add_179482_2381377266(&generatedproc0, (*LOC94)); { TY534289 LOC99; Ropeobj179006* LOC100; if (!(*p0).beforeretneeded) goto LA97; memset((void*)LOC99, 0, sizeof(LOC99)); LOC100 = (Ropeobj179006*)0; LOC100 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_224), LOC99, 0); add_179482_2381377266(&generatedproc0, LOC100); } LA97: ; LOC101 = (Ropeobj179006*)0; LOC101 = deinitgcframe_539441_839829468(p0); add_179482_2381377266(&generatedproc0, LOC101); { Ropeobj179006* LOC106; if (!(((*prc0).options &(1U<<((NU)(((Toption170009) 15))&31U)))!=0)) goto LA104; LOC106 = (Ropeobj179006*)0; LOC106 = deinitframe_561150_839829468(p0); add_179482_2381377266(&generatedproc0, LOC106); } LA104: ; add_179482_2381377266(&generatedproc0, returnstmt0); memset((void*)LOC107, 0, sizeof(LOC107)); LOC108 = (Ropeobj179006*)0; LOC108 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_227), LOC107, 0); add_179482_2381377266(&generatedproc0, LOC108); } LA60: ; add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 10))- 0], generatedproc0); } N_NIMCALL(Tcgen530027*, findpendingmodule_533241_839829468)(Tcgen530027* m0, Tsym293834* s0) { Tcgen530027* result0; Tsym293834* ms0; result0 = (Tcgen530027*)0; ms0 = getmodule_300123_2984716966(s0); result0 = gmodules_530170_3723162438->data[(*ms0).position]; return result0; } N_NIMCALL(NIM_BOOL, 
isgetprocaddr_560442_839829468)(Tlib293820* lib0) { NIM_BOOL result0; Tnode293802* n0; NIM_BOOL LOC1; NIM_BOOL LOC2; result0 = (NIM_BOOL)0; n0 = (*lib0).path; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC2 = ((*n0).kind == ((Tnodekind293020) 27) || (*n0).kind == ((Tnodekind293020) 29) || (*n0).kind == ((Tnodekind293020) 30) || (*n0).kind == ((Tnodekind293020) 31) || (*n0).kind == ((Tnodekind293020) 26) || (*n0).kind == ((Tnodekind293020) 28) || (*n0).kind == ((Tnodekind293020) 32)); if (!(LOC2)) goto LA3; LOC2 = !(((*n0).typ == NIM_NIL)); LA3: ; LOC1 = LOC2; if (!(LOC1)) goto LA4; LOC1 = ((*(*n0).typ).kind == ((Ttypekind293244) 26) || (*(*n0).typ).kind == ((Ttypekind293244) 25)); LA4: ; result0 = LOC1; return result0; } N_NIMCALL(void, initlocexpr_540283_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* result0) { initloc_533273_839829468(result0, ((Tlockind293808) 0), (*e0).typ, ((Tstorageloc293812) 0)); expr_540248_839829468(p0, e0, result0); } N_NIMCALL(void, loaddynamiclib_560480_839829468)(Tcgen530027* m0, Tlib293820* lib0) { { Ropeobj179006* tmp0; TY179507 LOC5; if (!!((*lib0).generated)) goto LA3; (*lib0).generated = NIM_TRUE; tmp0 = gettempname_534596_839829468(m0); asgnRefNoCycle((void**) (&(*lib0).name), tmp0); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = tmp0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_228), LOC5, 1); { TY135002* s0; Ropeobj179006* loadlib0; TY533811 LOC18; if (!((*(*lib0).path).kind >= ((Tnodekind293020) 20) && (*(*lib0).path).kind <= ((Tnodekind293020) 22))) goto LA8; s0 = (TY135002*) newSeq((&NTI135002), 0); libcandidates_171605_2607990831((*(*lib0).path).kindU.S3.strval, (&s0)); rawmessage_195612_155036129(((Tmsgkind192002) 286), (*(*lib0).path).kindU.S3.strval); loadlib0 = NIM_NIL; { NI i_560847_839829468; NI HEX3Atmp_560902_839829468; NI res_560905_839829468; i_560847_839829468 = (NI)0; HEX3Atmp_560902_839829468 = (NI)0; HEX3Atmp_560902_839829468 = (s0 ? 
(s0->Sup.len-1) : -1); res_560905_839829468 = ((NI) 0); { while (1) { TY533811 LOC17; if (!(res_560905_839829468 <= HEX3Atmp_560902_839829468)) goto LA12; i_560847_839829468 = res_560905_839829468; (*m0).labels += ((NI) 1); { if (!(((NI) 0) < i_560847_839829468)) goto LA15; add_179487_2381377266(&loadlib0, ((NimStringDesc*) &T839829468_229)); } LA15: ; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = tmp0; LOC17[1] = getstrlit_550468_839829468(m0, s0->data[i_560847_839829468]); appcg_533632_839829468(m0, &loadlib0, ((NimStringDesc*) &T839829468_230), LOC17, 2); res_560905_839829468 += ((NI) 1); } LA12: ; } } memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = loadlib0; LOC18[1] = getstrlit_550468_839829468(m0, (*(*lib0).path).kindU.S3.strval); appcg_533632_839829468(m0, &(*m0).s[(((Tcfilesection530005) 16))- 0], ((NimStringDesc*) &T839829468_231), LOC18, 2); } goto LA6; LA8: ; { Tcproc530021* p0; Tloc293816 dest0; Ropeobj179006** LOC20; Ropeobj179006** LOC21; Ropeobj179006** LOC22; TY533811 LOC23; p0 = newproc_530206_3723162438(NIM_NIL, m0); (*p0).options = ((*p0).options & ~ 163840); memset((void*)(&dest0), 0, sizeof(dest0)); initlocexpr_540283_839829468(p0, (*lib0).path, (&dest0)); LOC20 = (Ropeobj179006**)0; LOC20 = s_530179_3723162438(p0, ((Tcprocsection530011) 0)); add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], (*LOC20)); LOC21 = (Ropeobj179006**)0; LOC21 = s_530179_3723162438(p0, ((Tcprocsection530011) 1)); add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 16))- 0], (*LOC21)); LOC22 = (Ropeobj179006**)0; LOC22 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 16))- 0], (*LOC22)); memset((void*)LOC23, 0, sizeof(LOC23)); LOC23[0] = tmp0; LOC23[1] = rdloc_539188_839829468(dest0); appcg_533632_839829468(m0, &(*m0).s[(((Tcfilesection530005) 16))- 0], ((NimStringDesc*) &T839829468_232), LOC23, 2); } LA6: ; } LA3: ; { if (!((*lib0).name == NIM_NIL)) goto LA26; 
/* ==========================================================================
 * NOTE(review): everything in this chunk is machine-generated C emitted by
 * the Nim compiler's C code generator.  Mangled names such as
 * `foo_539816_839829468` are Nim symbols suffixed with unique ids, and the
 * `LAnn:` labels/`goto`s encode Nim's structured control flow.  Do not edit
 * this code by hand -- regenerate from the Nim sources instead.  The
 * comments below are orientation aids only; code tokens are unchanged.
 * ======================================================================== */
/* Tail of a function that begins before this chunk -- left untouched. */
internalerror_197113_155036129(((NimStringDesc*) &T839829468_233)); } LA26: ; }
/* mangledynlibproc: returns the rope (string builder) naming the C-level
 * proc variable for a symbol bound from a dynamic library.  If flag bit 16
 * is set on the symbol it uses the plain Nim symbol name; otherwise it
 * formats a name from the symbol's unique id via template T839829468_234.
 * (Exact flag semantics live in the Nim sources -- TODO confirm there.) */
N_NIMCALL(Ropeobj179006*, mangledynlibproc_539816_839829468)(Tsym293834* sym0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag293184) 16))&31U)))!=0)) goto LA3; result0 = rope_179277_2381377266((*(*sym0).name).s); } goto LA1; LA3: ; { TY179507 LOC6; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rope_179401_2381377266(((NI64) ((*sym0).Sup.id))); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_234), LOC6, 1); } LA1: ; return result0; }
/* symindynamiclib: emits the C code that binds `sym0` to a symbol loaded
 * from a dynamic library.  Two paths are visible: when the lib uses a
 * "getProcAddr"-style call (iscall0) it builds a parameter list from the
 * lib path node and a load expression, optionally routed into per-digit
 * `extensionloaders` slots keyed by a one-char index suffix; otherwise it
 * registers the symbol via appcg into file section 16.  In both cases a
 * variable declaration is appended to file section 9.  Continues on the
 * next source line. */
N_NIMCALL(void, symindynamiclib_560929_839829468)(Tcgen530027* m0, Tsym293834* sym0) { Tlib293820* lib0; NIM_BOOL iscall0; Ropeobj179006* extname0; Ropeobj179006* tmp0; TY533811 LOC43; lib0 = (*sym0).annex; iscall0 = isgetprocaddr_560442_839829468(lib0); extname0 = (*sym0).loc.r; { if (!!(iscall0)) goto LA3; loaddynamiclib_560480_839829468(m0, lib0); } LA3: ; tmp0 = mangledynlibproc_539816_839829468(sym0); asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0); asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL); (*m0).labels += ((NI) 2); { Tnode293802* n0; Tloc293816 a0; Tnode293802* LOC9; Ropeobj179006* params0; Ropeobj179006* LOC10; Ropeobj179006* load0; TY536235 LOC17; NimStringDesc* LOC18; Tnode293802* last0; NimStringDesc* idx0; if (!iscall0) goto LA7; n0 = (*lib0).path; memset((void*)(&a0), 0, sizeof(a0)); LOC9 = (Tnode293802*)0; LOC9 = HEX5BHEX5D_294238_850551059(n0, ((NI) 0)); initlocexpr_540283_839829468((*m0).initproc, LOC9, (&a0)); LOC10 = (Ropeobj179006*)0; LOC10 = rdloc_539188_839829468(a0); params0 = HEX26_179447_2381377266(LOC10, ((NimStringDesc*) &T839829468_118)); { NI i_560964_839829468; NI HEX3Atmp_561025_839829468; NI LOC12; NI res_561028_839829468; i_560964_839829468 = (NI)0; HEX3Atmp_561025_839829468 = (NI)0; LOC12 = (NI)0; LOC12 = len_294081_850551059(n0); HEX3Atmp_561025_839829468 = (NI)(LOC12 - ((NI) 2)); res_561028_839829468 = ((NI) 1); { while (1) { Tnode293802* 
LOC15; Ropeobj179006* LOC16; if (!(res_561028_839829468 <= HEX3Atmp_561025_839829468)) goto LA14; i_560964_839829468 = res_561028_839829468; LOC15 = (Tnode293802*)0; LOC15 = HEX5BHEX5D_294238_850551059(n0, i_560964_839829468); initlocexpr_540283_839829468((*m0).initproc, LOC15, (&a0)); LOC16 = (Ropeobj179006*)0; LOC16 = rdloc_539188_839829468(a0); add_179482_2381377266(&params0, LOC16); add_179487_2381377266(&params0, ((NimStringDesc*) &T839829468_110)); res_561028_839829468 += ((NI) 1); } LA14: ; } } memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = tmp0; LOC17[1] = gettypedesc_536671_839829468(m0, (*sym0).typ); LOC17[2] = params0; LOC18 = (NimStringDesc*)0; LOC18 = HEX24_179856_2381377266(extname0); LOC17[3] = makecstring_192638_155036129(LOC18); load0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_235), LOC17, 4); last0 = lastson_296364_850551059(n0); { if (!((*last0).kind == ((Tnodekind293020) 58))) goto LA21; last0 = (*last0).kindU.S6.sons->data[((NI) 1)]; } LA21: ; { NimStringDesc* LOC27; if (!!(((*last0).kind == ((Tnodekind293020) 20)))) goto LA25; LOC27 = (NimStringDesc*)0; LOC27 = HEX24_197185_1689653243(T839829468_236); internalerror_197113_155036129(LOC27); } LA25: ; idx0 = (*last0).kindU.S3.strval; { Ropeobj179006** LOC32; if (!((idx0 ? idx0->Sup.len : 0) == ((NI) 0))) goto LA30; LOC32 = (Ropeobj179006**)0; LOC32 = s_530179_3723162438((*m0).initproc, ((Tcprocsection530011) 2)); add_179482_2381377266(LOC32, load0); } goto LA28; LA30: ; { NIM_BOOL LOC34; LOC34 = (NIM_BOOL)0; LOC34 = ((idx0 ? 
idx0->Sup.len : 0) == ((NI) 1)); if (!(LOC34)) goto LA35; LOC34 = (((NU8)(idx0->data[((NI) 0)])) >= ((NU8)(48)) && ((NU8)(idx0->data[((NI) 0)])) <= ((NU8)(57))); LA35: ; if (!LOC34) goto LA36; add_179482_2381377266(&(*m0).extensionloaders[(((NU8)(idx0->data[((NI) 0)])))- 48], load0); } goto LA28; LA36: ; { NimStringDesc* LOC39; LOC39 = (NimStringDesc*)0; LOC39 = rawNewString(idx0->Sup.len + 13); appendString(LOC39, ((NimStringDesc*) &T839829468_237)); appendString(LOC39, idx0); internalerror_197100_155036129((*sym0).info, LOC39); } LA28: ; } goto LA5; LA7: ; { TY536235 LOC41; NimStringDesc* LOC42; memset((void*)LOC41, 0, sizeof(LOC41)); LOC41[0] = tmp0; LOC41[1] = gettypedesc_536671_839829468(m0, (*sym0).typ); LOC41[2] = (*lib0).name; LOC42 = (NimStringDesc*)0; LOC42 = HEX24_179856_2381377266(extname0); LOC41[3] = makecstring_192638_155036129(LOC42); appcg_533632_839829468(m0, &(*m0).s[(((Tcfilesection530005) 16))- 0], ((NimStringDesc*) &T839829468_238), LOC41, 4); } LA5: ; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = (*sym0).loc.r; LOC43[1] = gettypedesc_536671_839829468(m0, (*sym0).loc.t); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_239), LOC43, 2); } N_NIMCALL(void, symindynamiclibpartial_561071_839829468)(Tcgen530027* m0, Tsym293834* sym0) { asgnRefNoCycle((void**) (&(*sym0).loc.r), mangledynlibproc_539816_839829468(sym0)); asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL); } N_NIMCALL(void, genprocnoforward_561906_839829468)(Tcgen530027* m0, Tsym293834* prc0) { { fillprocloc_540201_839829468(prc0); useheader_533369_839829468(m0, prc0); { Ropeobj179006* LOC5; if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag293810) 7))&15U)))!=0)) goto LA3; LOC5 = (Ropeobj179006*)0; LOC5 = cgsym_533403_839829468(m0, (*(*prc0).name).s); goto BeforeRet; } LA3: ; genprocprototype_540254_839829468(m0, prc0); { if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag293810) 3))&15U)))!=0)) goto LA8; } goto LA6; LA8: ; { if 
(!((*(*prc0).typ).callconv == ((Tcallingconvention293002) 5))) goto LA11; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = containsorincl_269862_2627731572((&(*m0).declaredthings), (*prc0).Sup.id); if (!!(LOC15)) goto LA16; genprocaux_561284_839829468(m0, prc0); } LA16: ; } goto LA6; LA11: ; { Tcgen530027* q0; if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag293810) 4))&15U)))!=0)) goto LA19; q0 = findpendingmodule_533241_839829468(m0, prc0); { NIM_BOOL LOC23; NIM_BOOL LOC25; LOC23 = (NIM_BOOL)0; LOC23 = !((q0 == NIM_NIL)); if (!(LOC23)) goto LA24; LOC25 = (NIM_BOOL)0; LOC25 = containsorincl_269862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id); LOC23 = !(LOC25); LA24: ; if (!LOC23) goto LA26; symindynamiclib_560929_839829468(q0, prc0); } goto LA21; LA26: ; { symindynamiclibpartial_561071_839829468(m0, prc0); } LA21: ; } goto LA6; LA19: ; { Tcgen530027* q0; if (!!((((*prc0).flags &(1U<<((NU)(((Tsymflag293184) 5))&31U)))!=0))) goto LA30; q0 = findpendingmodule_533241_839829468(m0, prc0); { NIM_BOOL LOC34; NIM_BOOL LOC36; LOC34 = (NIM_BOOL)0; LOC34 = !((q0 == NIM_NIL)); if (!(LOC34)) goto LA35; LOC36 = (NIM_BOOL)0; LOC36 = containsorincl_269862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id); LOC34 = !(LOC36); LA35: ; if (!LOC34) goto LA37; genprocaux_561284_839829468(q0, prc0); } LA37: ; } goto LA6; LA30: ; LA6: ; }BeforeRet: ; } N_NIMCALL(void, genproc_533951_839829468)(Tcgen530027* m0, Tsym293834* prc0) { { { NIM_BOOL LOC3; NIM_BOOL LOC5; LOC3 = (NIM_BOOL)0; LOC3 = (((*prc0).flags &(1U<<((NU)(((Tsymflag293184) 26))&31U)))!=0); if (LOC3) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = isactivated_562431_839829468(prc0); LOC3 = !(LOC5); LA4: ; if (!LOC3) goto LA6; goto BeforeRet; } LA6: ; fillprocloc_540201_839829468(prc0); { if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag293184) 4))&31U)))!=0)) goto LA10; addforwardedproc_533203_839829468(m0, prc0); } goto LA8; LA10: ; { genprocnoforward_561906_839829468(m0, prc0); { NIM_BOOL LOC15; NIM_BOOL LOC16; LOC15 = (NIM_BOOL)0; LOC16 
= (NIM_BOOL)0; LOC16 = ((65600 & (*prc0).flags) == 64); if (!(LOC16)) goto LA17; LOC16 = !((generatedheader_533201_839829468 == NIM_NIL)); LA17: ; LOC15 = LOC16; if (!(LOC15)) goto LA18; LOC15 = !((((*prc0).loc.flags &(1U<<((NU)(((Tlocflag293810) 3))&15U)))!=0)); LA18: ; if (!LOC15) goto LA19; genprocprototype_540254_839829468(generatedheader_533201_839829468, prc0); { if (!((*(*prc0).typ).callconv == ((Tcallingconvention293002) 5))) goto LA23; { NIM_BOOL LOC27; LOC27 = (NIM_BOOL)0; LOC27 = containsorincl_269862_2627731572((&(*generatedheader_533201_839829468).declaredthings), (*prc0).Sup.id); if (!!(LOC27)) goto LA28; genprocaux_561284_839829468(generatedheader_533201_839829468, prc0); } LA28: ; } LA23: ; } LA19: ; } LA8: ; }BeforeRet: ; } static N_INLINE(NIM_BOOL, emulatedthreadvars_533949_839829468)(void) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = ((71303168 & ~ gglobaloptions_170130_2607990831)==0); return result0; } N_NIMCALL(void, declarethreadvar_539676_839829468)(Tcgen530027* m0, Tsym293834* s0, NIM_BOOL isextern0) { { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = emulatedthreadvars_533949_839829468(); if (!LOC3) goto LA4; { NIM_BOOL LOC8; TY533811 LOC11; LOC8 = (NIM_BOOL)0; LOC8 = containsorincl_269862_2627731572((&nimtvdeclared_539675_839829468), (*s0).Sup.id); if (!!(LOC8)) goto LA9; nimtvdeps_539674_839829468 = (Ttypeseq293836*) incrSeqV2(&(nimtvdeps_539674_839829468)->Sup, sizeof(Ttype293840*)); asgnRefNoCycle((void**) (&nimtvdeps_539674_839829468->data[nimtvdeps_539674_839829468->Sup.len]), (*s0).loc.t); ++nimtvdeps_539674_839829468->Sup.len; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = gettypedesc_536671_839829468(m0, (*s0).loc.t); LOC11[1] = (*s0).loc.r; addf_180205_2381377266(&nimtv_539656_839829468, ((NimStringDesc*) &T839829468_54), LOC11, 2); } LA9: ; } goto LA1; LA4: ; { Ropeobj179006* LOC21; TY179507 LOC22; { if (!isextern0) goto LA15; add_179487_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) 
&T839829468_240)); } LA15: ; { if (!((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 22))&63U)))!=0)) goto LA19; add_179487_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_241)); } LA19: ; LOC21 = (Ropeobj179006*)0; LOC21 = gettypedesc_536671_839829468(m0, (*s0).loc.t); add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], LOC21); memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = (*s0).loc.r; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC22, 1); } LA1: ; } N_NIMCALL(void, genvarprototypeaux_545254_839829468)(Tcgen530027* m0, Tsym293834* sym0) { Ropeobj179006* LOC1; { useheader_533369_839829468(m0, sym0); LOC1 = (Ropeobj179006*)0; LOC1 = manglename_534205_839829468(sym0); fillloc_533282_839829468((&(*sym0).loc), ((Tlockind293808) 3), (*sym0).typ, LOC1, ((Tstorageloc293812) 3)); { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = (((*sym0).loc.flags &(1U<<((NU)(((Tlocflag293810) 3))&15U)))!=0); if (LOC4) goto LA5; LOC4 = containsorincl_269862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id); LA5: ; if (!LOC4) goto LA6; goto BeforeRet; } LA6: ; { if (!!(((*(*sym0).owner).Sup.id == (*(*m0).module).Sup.id))) goto LA10; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag293184) 22))&31U)))!=0)) goto LA14; declarethreadvar_539676_839829468(m0, sym0, NIM_TRUE); } goto LA12; LA14: ; { Ropeobj179006* LOC17; TY179507 LOC30; add_179487_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_240)); LOC17 = (Ropeobj179006*)0; LOC17 = gettypedesc_536671_839829468(m0, (*sym0).loc.t); add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], LOC17); { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag293810) 4))&15U)))!=0)) goto LA20; add_179487_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_53)); } LA20: ; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag293184) 8))&31U)))!=0)) goto LA24; 
add_179487_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_121)); } LA24: ; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag293184) 7))&31U)))!=0)) goto LA28; add_179487_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_122)); } LA28: ; memset((void*)LOC30, 0, sizeof(LOC30)); LOC30[0] = (*sym0).loc.r; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC30, 1); } LA12: ; } LA10: ; }BeforeRet: ; } N_NIMCALL(void, genvarprototype_540236_839829468)(Tcgen530027* m0, Tsym293834* sym0) { genvarprototypeaux_545254_839829468(m0, sym0); } N_NIMCALL(Ropeobj179006*, cgsym_533403_839829468)(Tcgen530027* m0, NimStringDesc* name0) { Ropeobj179006* result0; Tsym293834* sym0; result0 = (Ropeobj179006*)0; sym0 = getcompilerproc_339746_3937434831(name0); { if (!!((sym0 == NIM_NIL))) goto LA3; switch ((*sym0).kind) { case ((Tsymkind293435) 12): case ((Tsymkind293435) 13): case ((Tsymkind293435) 15): case ((Tsymkind293435) 14): { genproc_533951_839829468(m0, sym0); } break; case ((Tsymkind293435) 8): case ((Tsymkind293435) 11): case ((Tsymkind293435) 9): { genvarprototype_540236_839829468(m0, sym0); } break; case ((Tsymkind293435) 7): { Ropeobj179006* LOC8; LOC8 = (Ropeobj179006*)0; LOC8 = gettypedesc_536671_839829468(m0, (*sym0).typ); } break; default: { NimStringDesc* LOC10; LOC10 = (NimStringDesc*)0; LOC10 = rawNewString(name0->Sup.len + reprEnum((NI)(*sym0).kind, (&NTI293435))->Sup.len + 9); appendString(LOC10, ((NimStringDesc*) &T839829468_243)); appendString(LOC10, name0); appendString(LOC10, ((NimStringDesc*) &T839829468_244)); appendString(LOC10, reprEnum((NI)(*sym0).kind, (&NTI293435))); internalerror_197113_155036129(LOC10); } break; } } goto LA1; LA3: ; { rawmessage_195612_155036129(((Tmsgkind192002) 68), name0); } LA1: ; result0 = (*sym0).loc.r; return result0; } N_NIMCALL(Ropeobj179006*, ropecg_533407_839829468)(Tcgen530027* m0, NimStringDesc* frmt0, 
/* Body of ropecg (begun on the previous line).  Scans `frmt0` and expands:
 *   $$ -> literal '$';  $# -> next positional arg;  $1..$9 (multi-digit
 *   supported) -> numbered arg with bounds check (internal error past
 *   args0Len0);  $n -> newline unless option bit 10 set;  $N -> newline;
 *   #ident -> cgsym(ident) so the named compilerproc gets generated;
 *   #$<digits> -> cgsym of the stringified numbered arg;
 * any other char after '$' is an internal error.  Plain text between
 * directives is copied through verbatim. */
Ropeobj179006** args0, NI args0Len0) { Ropeobj179006* result0; NI i0; NI length0; NI num0; result0 = (Ropeobj179006*)0; i0 = ((NI) 0); length0 = (frmt0 ? frmt0->Sup.len : 0); result0 = NIM_NIL; num0 = ((NI) 0); { while (1) { NI start0; if (!(i0 < length0)) goto LA2; { if (!((NU8)(frmt0->data[i0]) == (NU8)(36))) goto LA5; i0 += ((NI) 1); switch (((NU8)(frmt0->data[i0]))) { case 36: { add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_19)); i0 += ((NI) 1); } break; case 35: { i0 += ((NI) 1); add_179482_2381377266(&result0, args0[num0]); num0 += ((NI) 1); } break; case 48 ... 57: { NI j0; j0 = ((NI) 0); { while (1) { j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48)); i0 += ((NI) 1); { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = (length0 <= i0); if (LOC14) goto LA15; LOC14 = !((((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))); LA15: ; if (!LOC14) goto LA16; goto LA10; } LA16: ; } } LA10: ; num0 = j0; { NimStringDesc* LOC22; NimStringDesc* LOC23; if (!((NI)((args0Len0-1) + ((NI) 1)) < j0)) goto LA20; LOC22 = (NimStringDesc*)0; LOC23 = (NimStringDesc*)0; LOC23 = nimIntToStr(j0); LOC22 = rawNewString(LOC23->Sup.len + 30); appendString(LOC22, ((NimStringDesc*) &T839829468_20)); appendString(LOC22, LOC23); internalerror_197113_155036129(LOC22); } LA20: ; add_179482_2381377266(&result0, args0[(NI)(j0 - ((NI) 1))]); } break; case 110: { { if (!!(((goptions_170128_2607990831 &(1U<<((NU)(((Toption170009) 10))&31U)))!=0))) goto LA27; add_179482_2381377266(&result0, rnl_179903_2381377266); } LA27: ; i0 += ((NI) 1); } break; case 78: { add_179482_2381377266(&result0, rnl_179903_2381377266); i0 += ((NI) 1); } break; default: { NimStringDesc* LOC31; LOC31 = (NimStringDesc*)0; LOC31 = rawNewString(31); appendString(LOC31, ((NimStringDesc*) &T839829468_20)); appendChar(LOC31, frmt0->data[i0]); internalerror_197113_155036129(LOC31); } break; } } goto LA3; LA5: ; { NIM_BOOL LOC33; NI j0; NimStringDesc* 
ident0; Ropeobj179006* LOC39; LOC33 = (NIM_BOOL)0; LOC33 = ((NU8)(frmt0->data[i0]) == (NU8)(35)); if (!(LOC33)) goto LA34; LOC33 = (((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(97)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(122)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(65)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(90)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(95))); LA34: ; if (!LOC33) goto LA35; i0 += ((NI) 1); j0 = i0; { while (1) { if (!(((NU8)(frmt0->data[j0])) >= ((NU8)(97)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(122)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(65)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(90)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(57)) || ((NU8)(frmt0->data[j0])) == ((NU8)(95)))) goto LA38; j0 += ((NI) 1); } LA38: ; } ident0 = copyStrLast(frmt0, i0, (NI)(j0 - ((NI) 1))); i0 = j0; LOC39 = (Ropeobj179006*)0; LOC39 = cgsym_533403_839829468(m0, ident0); add_179482_2381377266(&result0, LOC39); } goto LA3; LA35: ; { NIM_BOOL LOC41; NI j0; NimStringDesc* LOC47; Ropeobj179006* LOC48; LOC41 = (NIM_BOOL)0; LOC41 = ((NU8)(frmt0->data[i0]) == (NU8)(35)); if (!(LOC41)) goto LA42; LOC41 = ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(36)); LA42: ; if (!LOC41) goto LA43; i0 += ((NI) 2); j0 = ((NI) 0); { while (1) { if (!(((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))) goto LA46; j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48)); i0 += ((NI) 1); } LA46: ; } LOC47 = (NimStringDesc*)0; LOC47 = HEX24_179856_2381377266(args0[(NI)(j0 - ((NI) 1))]); LOC48 = (Ropeobj179006*)0; LOC48 = cgsym_533403_839829468(m0, LOC47); add_179482_2381377266(&result0, LOC48); } goto LA3; LA43: ; LA3: ; start0 = i0; { while (1) { if (!(i0 < length0)) goto LA50; { NIM_BOOL LOC53; LOC53 = (NIM_BOOL)0; LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(36))); if (!(LOC53)) goto LA54; LOC53 = !(((NU8)(frmt0->data[i0]) == 
(NU8)(35))); LA54: ; if (!LOC53) goto LA55; i0 += ((NI) 1); } goto LA51; LA55: ; { goto LA49; } LA51: ; } LA50: ; } LA49: ; { NimStringDesc* LOC62; if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA60; LOC62 = (NimStringDesc*)0; LOC62 = copyStrLast(frmt0, start0, (NI)(i0 - ((NI) 1))); add_179487_2381377266(&result0, LOC62); } LA60: ; } LA2: ; } return result0; } static N_INLINE(NIM_BOOL, crossescppboundary_561754_839829468)(Tcgen530027* m0, Tsym293834* sym0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC2; Tsym293834* LOC4; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC2 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); if (!(LOC2)) goto LA3; LOC4 = (Tsym293834*)0; LOC4 = getmodule_300123_2984716966(sym0); LOC2 = !((((*LOC4).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0)); LA3: ; LOC1 = LOC2; if (!(LOC1)) goto LA5; LOC1 = !((gcmd_170132_2607990831 == ((Tcommands170076) 2))); LA5: ; result0 = LOC1; return result0; } N_NIMCALL(void, genprocprototype_540254_839829468)(Tcgen530027* m0, Tsym293834* sym0) { { useheader_533369_839829468(m0, sym0); { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag293810) 3))&15U)))!=0)) goto LA3; goto BeforeRet; } LA3: ; { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag293810) 4))&15U)))!=0)) goto LA7; { NIM_BOOL LOC11; Tsym293834* LOC12; NIM_BOOL LOC14; TY533811 LOC17; Ropeobj179006* LOC18; LOC11 = (NIM_BOOL)0; LOC12 = (Tsym293834*)0; LOC12 = getmodule_300123_2984716966(sym0); LOC11 = !(((*LOC12).Sup.id == (*(*m0).module).Sup.id)); if (!(LOC11)) goto LA13; LOC14 = (NIM_BOOL)0; LOC14 = containsorincl_269862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id); LOC11 = !(LOC14); LA13: ; if (!LOC11) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_536671_839829468(m0, (*sym0).loc.t); LOC17[1] = mangledynlibproc_539816_839829468(sym0); LOC18 = (Ropeobj179006*)0; LOC18 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_245), LOC17, 2); 
add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], LOC18); } LA15: ; } goto LA5; LA7: ; { NIM_BOOL LOC20; Ropeobj179006* header0; TY179507 LOC47; Ropeobj179006* LOC48; LOC20 = (NIM_BOOL)0; LOC20 = containsorincl_269862_2627731572((&(*m0).declaredprotos), (*sym0).Sup.id); if (!!(LOC20)) goto LA21; header0 = genprocheader_536867_839829468(m0, sym0); { NIM_BOOL LOC25; LOC25 = (NIM_BOOL)0; LOC25 = (((*sym0).flags &(1U<<((NU)(((Tsymflag293184) 14))&31U)))!=0); if (!(LOC25)) goto LA26; LOC25 = ((Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop274004) 6))&7U)))!=0); LA26: ; if (!LOC25) goto LA27; header0 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_213), header0); } LA27: ; { NIM_BOOL LOC31; LOC31 = (NIM_BOOL)0; LOC31 = !(((*(*sym0).typ).callconv == ((Tcallingconvention293002) 5))); if (!(LOC31)) goto LA32; LOC31 = crossescppboundary_561754_839829468(m0, sym0); LA32: ; if (!LOC31) goto LA33; header0 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_246), header0); } LA33: ; { NIM_BOOL LOC37; LOC37 = (NIM_BOOL)0; LOC37 = (((*sym0).flags &(1U<<((NU)(((Tsymflag293184) 9))&31U)))!=0); if (!(LOC37)) goto LA38; LOC37 = ((Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop274004) 7))&7U)))!=0); LA38: ; if (!LOC37) goto LA39; add_179487_2381377266(&header0, ((NimStringDesc*) &T839829468_247)); } LA39: ; { NIM_BOOL LOC43; LOC43 = (NIM_BOOL)0; LOC43 = (((*sym0).flags &(1U<<((NU)(((Tsymflag293184) 14))&31U)))!=0); if (!(LOC43)) goto LA44; LOC43 = ((Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop274004) 7))&7U)))!=0); LA44: ; if (!LOC43) goto LA45; add_179487_2381377266(&header0, ((NimStringDesc*) &T839829468_248)); } LA45: ; memset((void*)LOC47, 0, sizeof(LOC47)); LOC47[0] = header0; LOC48 = (Ropeobj179006*)0; LOC48 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_191), LOC47, 1); 
add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 7))- 0], LOC48); } goto LA5; LA21: ; LA5: ; }BeforeRet: ; } static N_INLINE(NIM_BOOL, usesnativegc_170177_2607990831)(void) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = (((Tgcmode170080) 5) <= gselectedgc_170133_2607990831); return result0; } N_NIMCALL(void, genrefassign_539311_839829468)(Tcproc530021* p0, Tloc293816 dest0, Tloc293816 src0, Tassignmentflag539302Set flags0) { { NIM_BOOL LOC3; NIM_BOOL LOC5; TY533811 LOC8; LOC3 = (NIM_BOOL)0; LOC3 = (dest0.s == ((Tstorageloc293812) 2)); if (LOC3) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = usesnativegc_170177_2607990831(); LOC3 = !(LOC5); LA4: ; if (!LOC3) goto LA6; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_539188_839829468(dest0); LOC8[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC8, 2); } goto LA1; LA6: ; { if (!(dest0.s == ((Tstorageloc293812) 3))) goto LA10; { NIM_BOOL LOC14; TY533811 LOC17; LOC14 = (NIM_BOOL)0; LOC14 = canformacycle_321123_3876443242(dest0.t); if (!LOC14) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = addrloc_539204_839829468(dest0); LOC17[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_249), LOC17, 2); } goto LA12; LA15: ; { TY533811 LOC19; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = addrloc_539204_839829468(dest0); LOC19[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_250), LOC19, 2); } LA12: ; } goto LA1; LA10: ; { TY533811 LOC21; memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = addrloc_539204_839829468(dest0); LOC21[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_251), LOC21, 2); } LA1: ; } N_NIMCALL(void, optasgnloc_550788_839829468)(Tloc293816 a0, Ttype293840* t0, Ropeobj179006* field0, Tloc293816* 
Result) { Ropeobj179006* LOC1; Ropeobj179006* LOC2; (*Result).k = ((Tlockind293808) 5); (*Result).s = a0.s; unsureAsgnRef((void**) (&(*Result).t), t0); LOC1 = (Ropeobj179006*)0; LOC1 = rdloc_539188_839829468(a0); LOC2 = (Ropeobj179006*)0; LOC2 = HEX26_179447_2381377266(LOC1, ((NimStringDesc*) &T839829468_257)); unsureAsgnRef((void**) (&(*Result).r), HEX26_179418_2381377266(LOC2, field0)); } N_NIMCALL(void, genoptasgntuple_551001_839829468)(Tcproc530021* p0, Tloc293816 dest0, Tloc293816 src0, Tassignmentflag539302Set flags0) { Tassignmentflag539302Set newflags0; Ttype293840* t_551053_839829468; Ttype293840* LOC9; { if (!(src0.s == ((Tstorageloc293812) 1))) goto LA3; newflags0 = (flags0 | 1); } goto LA1; LA3: ; { if (!(((*dest0.t).flags &(1U<<((NU)(((Ttypeflag293431) 6))&31U)))!=0)) goto LA6; newflags0 = (flags0 & ~ 1); } goto LA1; LA6: ; { newflags0 = flags0; } LA1: ; LOC9 = (Ttype293840*)0; LOC9 = skiptypes_297099_850551059(dest0.t, IL64(211106232576256)); t_551053_839829468 = getuniquetype_529640_2036603609(LOC9); { NI i_551071_839829468; NI HEX3Atmp_551077_839829468; NI LOC11; NI res_551080_839829468; i_551071_839829468 = (NI)0; HEX3Atmp_551077_839829468 = (NI)0; LOC11 = (NI)0; LOC11 = len_296339_850551059(t_551053_839829468); HEX3Atmp_551077_839829468 = (LOC11 - 1); res_551080_839829468 = ((NI) 0); { while (1) { Ttype293840* t0; Ropeobj179006* field0; TY179507 LOC14; Tloc293816 LOC15; Tloc293816 LOC16; if (!(res_551080_839829468 <= HEX3Atmp_551077_839829468)) goto LA13; i_551071_839829468 = res_551080_839829468; t0 = (*t_551053_839829468).sons->data[i_551071_839829468]; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rope_179401_2381377266(((NI64) (i_551071_839829468))); field0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_260), LOC14, 1); memset((void*)(&LOC15), 0, sizeof(LOC15)); optasgnloc_550788_839829468(dest0, t0, field0, (&LOC15)); memset((void*)(&LOC16), 0, sizeof(LOC16)); optasgnloc_550788_839829468(src0, t0, field0, (&LOC16)); 
genassignment_540264_839829468(p0, LOC15, LOC16, newflags0); res_551080_839829468 += ((NI) 1); } LA13: ; } } } N_NIMCALL(void, gengenericasgn_551167_839829468)(Tcproc530021* p0, Tloc293816 dest0, Tloc293816 src0, Tassignmentflag539302Set flags0) { { NIM_BOOL LOC3; Ttype293840* LOC5; LOC3 = (NIM_BOOL)0; LOC3 = !(((flags0 &(1U<<((NU)(((Tassignmentflag539302) 0))&7U)))!=0)); if (LOC3) goto LA4; LOC5 = (Ttype293840*)0; LOC5 = skiptypes_297099_850551059(dest0.t, IL64(211106242013440)); LOC3 = (((*LOC5).flags &(1U<<((NU)(((Ttypeflag293431) 6))&31U)))!=0); LA4: ; if (!LOC3) goto LA6; { NIM_BOOL LOC10; NIM_BOOL LOC12; TY536238 LOC15; LOC10 = (NIM_BOOL)0; LOC10 = (dest0.s == ((Tstorageloc293812) 2)); if (LOC10) goto LA11; LOC12 = (NIM_BOOL)0; LOC12 = usesnativegc_170177_2607990831(); LOC10 = !(LOC12); LA11: ; if (!LOC10) goto LA13; usestringh_533345_839829468((*p0).module); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = addrloc_539204_839829468(dest0); LOC15[1] = addrloc_539204_839829468(src0); LOC15[2] = rdloc_539188_839829468(dest0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_261), LOC15, 3); } goto LA8; LA13: ; { TY536238 LOC17; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = addrloc_539204_839829468(dest0); LOC17[1] = addrloc_539204_839829468(src0); LOC17[2] = gentypeinfo_536941_839829468((*p0).module, dest0.t); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_262), LOC17, 3); } LA8: ; } goto LA1; LA6: ; { TY536238 LOC19; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = addrloc_539204_839829468(dest0); LOC19[1] = addrloc_539204_839829468(src0); LOC19[2] = gentypeinfo_536941_839829468((*p0).module, dest0.t); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_263), LOC19, 3); } LA1: ; } N_NIMCALL(NI, asgncomplexity_550750_839829468)(Tnode293802* n0) { NI result0; result0 = (NI)0; { if (!!((n0 == NIM_NIL))) goto LA3; switch ((*n0).kind) { case 
((Tnodekind293020) 3): { result0 = ((NI) 1); } break; case ((Tnodekind293020) 139): { result0 = ((NI) 100); } break; case ((Tnodekind293020) 138): { { Tnode293802* t_550767_839829468; t_550767_839829468 = (Tnode293802*)0; { NI i_550781_839829468; NI HEX3Atmp_550783_839829468; NI LOC10; NI res_550785_839829468; i_550781_839829468 = (NI)0; HEX3Atmp_550783_839829468 = (NI)0; LOC10 = (NI)0; LOC10 = len_294081_850551059(n0); HEX3Atmp_550783_839829468 = (LOC10 - 1); res_550785_839829468 = ((NI) 0); { while (1) { NI LOC13; if (!(res_550785_839829468 <= HEX3Atmp_550783_839829468)) goto LA12; i_550781_839829468 = res_550785_839829468; t_550767_839829468 = (*n0).kindU.S6.sons->data[i_550781_839829468]; LOC13 = (NI)0; LOC13 = asgncomplexity_550750_839829468(t_550767_839829468); result0 += LOC13; res_550785_839829468 += ((NI) 1); } LA12: ; } } } } break; default: { } break; } } LA3: ; return result0; } N_NIMCALL(void, genoptasgnobject_551084_839829468)(Tcproc530021* p0, Tloc293816 dest0, Tloc293816 src0, Tassignmentflag539302Set flags0, Tnode293802* t0) { Tassignmentflag539302Set newflags0; { { if (!(t0 == NIM_NIL)) goto LA3; goto BeforeRet; } LA3: ; { if (!(src0.s == ((Tstorageloc293812) 1))) goto LA7; newflags0 = (flags0 | 1); } goto LA5; LA7: ; { if (!(((*dest0.t).flags &(1U<<((NU)(((Ttypeflag293431) 6))&31U)))!=0)) goto LA10; newflags0 = (flags0 & ~ 1); } goto LA5; LA10: ; { newflags0 = flags0; } LA5: ; switch ((*t0).kind) { case ((Tnodekind293020) 3): { Tsym293834* field0; Tloc293816 LOC14; Tloc293816 LOC15; field0 = (*t0).kindU.S4.sym; memset((void*)(&LOC14), 0, sizeof(LOC14)); optasgnloc_550788_839829468(dest0, (*field0).typ, (*field0).loc.r, (&LOC14)); memset((void*)(&LOC15), 0, sizeof(LOC15)); optasgnloc_550788_839829468(src0, (*field0).typ, (*field0).loc.r, (&LOC15)); genassignment_540264_839829468(p0, LOC14, LOC15, newflags0); } break; case ((Tnodekind293020) 138): { { Tnode293802* child_551155_839829468; child_551155_839829468 = (Tnode293802*)0; { NI 
i_551160_839829468; NI HEX3Atmp_551162_839829468; NI LOC19; NI res_551164_839829468; i_551160_839829468 = (NI)0; HEX3Atmp_551162_839829468 = (NI)0; LOC19 = (NI)0; LOC19 = len_294081_850551059(t0); HEX3Atmp_551162_839829468 = (LOC19 - 1); res_551164_839829468 = ((NI) 0); { while (1) { if (!(res_551164_839829468 <= HEX3Atmp_551162_839829468)) goto LA21; i_551160_839829468 = res_551164_839829468; child_551155_839829468 = (*t0).kindU.S6.sons->data[i_551160_839829468]; genoptasgnobject_551084_839829468(p0, dest0, src0, newflags0, child_551155_839829468); res_551164_839829468 += ((NI) 1); } LA21: ; } } } } break; default: { } break; } }BeforeRet: ; } N_NIMCALL(void, genassignment_540264_839829468)(Tcproc530021* p0, Tloc293816 dest0, Tloc293816 src0, Tassignmentflag539302Set flags0) { Ttype293840* ty0; { { NIM_BOOL LOC3; TY533811 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = !((src0.t == NIM_NIL)); if (!(LOC3)) goto LA4; LOC3 = ((*src0.t).kind == ((Ttypekind293244) 21)); LA4: ; if (!LOC3) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_539188_839829468(dest0); LOC7[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC7, 2); goto BeforeRet; } LA5: ; ty0 = skiptypes_297099_850551059(dest0.t, IL64(211106233624832)); switch ((*ty0).kind) { case ((Ttypekind293244) 22): { genrefassign_539311_839829468(p0, dest0, src0, flags0); } break; case ((Ttypekind293244) 24): { { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = !(((flags0 &(1U<<((NU)(((Tassignmentflag539302) 0))&7U)))!=0)); if (!(LOC12)) goto LA13; LOC12 = !((src0.s == ((Tstorageloc293812) 1))); LA13: ; if (!LOC12) goto LA14; genrefassign_539311_839829468(p0, dest0, src0, flags0); } goto LA10; LA14: ; { TY536238 LOC17; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = addrloc_539204_839829468(dest0); LOC17[1] = rdloc_539188_839829468(src0); LOC17[2] = gentypeinfo_536941_839829468((*p0).module, dest0.t); linefmt_533714_839829468(p0, 
((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_252), LOC17, 3); } LA10: ; } break; case ((Ttypekind293244) 28): { { NIM_BOOL LOC21; LOC21 = (NIM_BOOL)0; LOC21 = !(((flags0 &(1U<<((NU)(((Tassignmentflag539302) 0))&7U)))!=0)); if (!(LOC21)) goto LA22; LOC21 = !((src0.s == ((Tstorageloc293812) 1))); LA22: ; if (!LOC21) goto LA23; genrefassign_539311_839829468(p0, dest0, src0, flags0); } goto LA19; LA23: ; { { NIM_BOOL LOC28; NIM_BOOL LOC30; TY533811 LOC33; LOC28 = (NIM_BOOL)0; LOC28 = (dest0.s == ((Tstorageloc293812) 2)); if (LOC28) goto LA29; LOC30 = (NIM_BOOL)0; LOC30 = usesnativegc_170177_2607990831(); LOC28 = !(LOC30); LA29: ; if (!LOC28) goto LA31; memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = rdloc_539188_839829468(dest0); LOC33[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_253), LOC33, 2); } goto LA26; LA31: ; { Tloc293816 tmp0; TY536238 LOC37; TY179507 LOC38; if (!(dest0.s == ((Tstorageloc293812) 3))) goto LA35; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_538032_839829468(p0, ty0, (&tmp0), NIM_FALSE); memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = rdloc_539188_839829468(dest0); LOC37[1] = rdloc_539188_839829468(src0); LOC37[2] = rdloc_539188_839829468(tmp0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_254), LOC37, 3); memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = rdloc_539188_839829468(tmp0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_255), LOC38, 1); } goto LA26; LA35: ; { TY533811 LOC40; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = addrloc_539204_839829468(dest0); LOC40[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_256), LOC40, 2); } LA26: ; } LA19: ; } break; case ((Ttypekind293244) 25): { { NIM_BOOL LOC44; Tloc293816 a0; Ropeobj179006* LOC47; Tloc293816 LOC48; Tloc293816 b0; Ropeobj179006* 
LOC49; Tloc293816 LOC50; TY533811 LOC51; LOC44 = (NIM_BOOL)0; LOC44 = needscomplexassignment_534509_839829468(dest0.t); if (!LOC44) goto LA45; memset((void*)(&a0), 0, sizeof(a0)); LOC47 = (Ropeobj179006*)0; LOC47 = rope_179277_2381377266(((NimStringDesc*) &T839829468_258)); memset((void*)(&LOC48), 0, sizeof(LOC48)); optasgnloc_550788_839829468(dest0, dest0.t, LOC47, (&LOC48)); memcpy((void*)(&a0), (NIM_CONST void*)(&LOC48), sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); LOC49 = (Ropeobj179006*)0; LOC49 = rope_179277_2381377266(((NimStringDesc*) &T839829468_258)); memset((void*)(&LOC50), 0, sizeof(LOC50)); optasgnloc_550788_839829468(src0, dest0.t, LOC49, (&LOC50)); memcpy((void*)(&b0), (NIM_CONST void*)(&LOC50), sizeof(b0)); genrefassign_539311_839829468(p0, a0, b0, flags0); memset((void*)LOC51, 0, sizeof(LOC51)); LOC51[0] = rdloc_539188_839829468(dest0); LOC51[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_259), LOC51, 2); } goto LA42; LA45: ; { TY533811 LOC53; memset((void*)LOC53, 0, sizeof(LOC53)); LOC53[0] = rdloc_539188_839829468(dest0); LOC53[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC53, 2); } LA42: ; } break; case ((Ttypekind293244) 18): { { NIM_BOOL LOC57; LOC57 = (NIM_BOOL)0; LOC57 = needscomplexassignment_534509_839829468(dest0.t); if (!LOC57) goto LA58; { NI LOC62; LOC62 = (NI)0; LOC62 = len_296339_850551059(dest0.t); if (!(LOC62 <= ((NI) 4))) goto LA63; genoptasgntuple_551001_839829468(p0, dest0, src0, flags0); } goto LA60; LA63: ; { gengenericasgn_551167_839829468(p0, dest0, src0, flags0); } LA60: ; } goto LA55; LA58: ; { TY533811 LOC67; memset((void*)LOC67, 0, sizeof(LOC67)); LOC67[0] = rdloc_539188_839829468(dest0); LOC67[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC67, 2); } LA55: ; } break; case 
((Ttypekind293244) 17): { { NIM_BOOL LOC71; TY533811 LOC74; LOC71 = (NIM_BOOL)0; LOC71 = isimportedcpptype_534476_839829468(ty0); if (!LOC71) goto LA72; memset((void*)LOC74, 0, sizeof(LOC74)); LOC74[0] = rdloc_539188_839829468(dest0); LOC74[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC74, 2); } goto LA69; LA72: ; { NIM_BOOL LOC76; LOC76 = (NIM_BOOL)0; LOC76 = isobjlackingtypefield_534513_839829468(ty0); if (!!(LOC76)) goto LA77; gengenericasgn_551167_839829468(p0, dest0, src0, flags0); } goto LA69; LA77: ; { NIM_BOOL LOC80; LOC80 = (NIM_BOOL)0; LOC80 = needscomplexassignment_534509_839829468(ty0); if (!LOC80) goto LA81; { NIM_BOOL LOC85; NI LOC87; Ropeobj179006* LOC90; LOC85 = (NIM_BOOL)0; LOC85 = (*ty0).sons->data[((NI) 0)] == 0; if (!(LOC85)) goto LA86; LOC87 = (NI)0; LOC87 = asgncomplexity_550750_839829468((*ty0).n); LOC85 = (LOC87 <= ((NI) 4)); LA86: ; if (!LOC85) goto LA88; LOC90 = (Ropeobj179006*)0; LOC90 = gettypedesc_536671_839829468((*p0).module, ty0); ty0 = getuniquetype_529640_2036603609(ty0); { NimStringDesc* LOC95; if (!!(!(((*ty0).n == NIM_NIL)))) goto LA93; LOC95 = (NimStringDesc*)0; LOC95 = HEX24_197185_1689653243(T839829468_264); internalerror_197113_155036129(LOC95); } LA93: ; genoptasgnobject_551084_839829468(p0, dest0, src0, flags0, (*ty0).n); } goto LA83; LA88: ; { gengenericasgn_551167_839829468(p0, dest0, src0, flags0); } LA83: ; } goto LA69; LA81: ; { TY533811 LOC98; memset((void*)LOC98, 0, sizeof(LOC98)); LOC98[0] = rdloc_539188_839829468(dest0); LOC98[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC98, 2); } LA69: ; } break; case ((Ttypekind293244) 16): case ((Ttypekind293244) 4): { { NIM_BOOL LOC102; LOC102 = (NIM_BOOL)0; LOC102 = needscomplexassignment_534509_839829468(dest0.t); if (!LOC102) goto LA103; gengenericasgn_551167_839829468(p0, dest0, src0, flags0); } goto LA100; 
LA103: ; { TY536238 LOC106; usestringh_533345_839829468((*p0).module); memset((void*)LOC106, 0, sizeof(LOC106)); LOC106[0] = rdloc_539188_839829468(dest0); LOC106[1] = rdloc_539188_839829468(src0); LOC106[2] = gettypedesc_536671_839829468((*p0).module, ty0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_261), LOC106, 3); } LA100: ; } break; case ((Ttypekind293244) 27): case ((Ttypekind293244) 48): { { NIM_BOOL LOC110; TY536238 LOC113; LOC110 = (NIM_BOOL)0; LOC110 = needscomplexassignment_534509_839829468(dest0.t); if (!LOC110) goto LA111; memset((void*)LOC113, 0, sizeof(LOC113)); LOC113[0] = addrloc_539204_839829468(dest0); LOC113[1] = addrloc_539204_839829468(src0); LOC113[2] = gentypeinfo_536941_839829468((*p0).module, dest0.t); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_266), LOC113, 3); } goto LA108; LA111: ; { TY533811 LOC115; usestringh_533345_839829468((*p0).module); memset((void*)LOC115, 0, sizeof(LOC115)); LOC115[0] = rdloc_539188_839829468(dest0); LOC115[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_267), LOC115, 2); } LA108: ; } break; case ((Ttypekind293244) 19): { { Tctypekind530007 LOC119; TY536238 LOC122; NI64 LOC123; LOC119 = (Tctypekind530007)0; LOC119 = maptype_534393_839829468(ty0); if (!(LOC119 == ((Tctypekind530007) 17))) goto LA120; usestringh_533345_839829468((*p0).module); memset((void*)LOC122, 0, sizeof(LOC122)); LOC122[0] = rdloc_539188_839829468(dest0); LOC122[1] = rdloc_539188_839829468(src0); LOC123 = (NI64)0; LOC123 = getsize_321135_3876443242(dest0.t); LOC122[2] = rope_179401_2381377266(LOC123); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_268), LOC122, 3); } goto LA117; LA120: ; { TY533811 LOC125; memset((void*)LOC125, 0, sizeof(LOC125)); LOC125[0] = rdloc_539188_839829468(dest0); LOC125[1] = rdloc_539188_839829468(src0); 
linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC125, 2); } LA117: ; } break; case ((Ttypekind293244) 21): case ((Ttypekind293244) 26): case ((Ttypekind293244) 2): case ((Ttypekind293244) 1): case ((Ttypekind293244) 14): case ((Ttypekind293244) 29): case ((Ttypekind293244) 31) ... ((Ttypekind293244) 44): case ((Ttypekind293244) 20): case ((Ttypekind293244) 23): { TY533811 LOC127; memset((void*)LOC127, 0, sizeof(LOC127)); LOC127[0] = rdloc_539188_839829468(dest0); LOC127[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC127, 2); } break; default: { NimStringDesc* LOC129; LOC129 = (NimStringDesc*)0; LOC129 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI293244))->Sup.len + 15); appendString(LOC129, ((NimStringDesc*) &T839829468_269)); appendString(LOC129, reprEnum((NI)(*ty0).kind, (&NTI293244))); internalerror_197113_155036129(LOC129); } break; } }BeforeRet: ; } N_NIMCALL(void, putlocintodest_540258_839829468)(Tcproc530021* p0, Tloc293816* d0, Tloc293816 s0) { { if (!!(((*d0).k == ((Tlockind293808) 0)))) goto LA3; { if (!(((*d0).flags &(1U<<((NU)(((Tlocflag293810) 2))&15U)))!=0)) goto LA7; genassignment_540264_839829468(p0, (*d0), s0, 0); } goto LA5; LA7: ; { genassignment_540264_839829468(p0, (*d0), s0, 1); } LA5: ; } goto LA1; LA3: ; { genericAssign((void*)(&(*d0)), (void*)(&s0), (&NTI293816)); } LA1: ; } N_NIMCALL(NIM_BOOL, issimpleconst_533311_839829468)(Ttype293840* typ0) { NIM_BOOL result0; Ttype293840* t0; NIM_BOOL LOC1; NIM_BOOL LOC3; result0 = (NIM_BOOL)0; t0 = skiptypes_297099_850551059(typ0, IL64(211106240964864)); LOC1 = (NIM_BOOL)0; LOC1 = !(((*t0).kind == ((Ttypekind293244) 18) || (*t0).kind == ((Ttypekind293244) 17) || (*t0).kind == ((Ttypekind293244) 16) || (*t0).kind == ((Ttypekind293244) 4) || (*t0).kind == ((Ttypekind293244) 19) || (*t0).kind == ((Ttypekind293244) 24))); if (!(LOC1)) goto LA2; LOC3 = (NIM_BOOL)0; LOC3 = 
((*t0).kind == ((Ttypekind293244) 25)); if (!(LOC3)) goto LA4; LOC3 = ((*t0).callconv == ((Tcallingconvention293002) 8)); LA4: ; LOC1 = !(LOC3); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, putintodest_551468_839829468)(Tcproc530021* p0, Tloc293816* d0, Ttype293840* t0, Ropeobj179006* r0, Tstorageloc293812 s0) { Tloc293816 a0; memset((void*)(&a0), 0, sizeof(a0)); { if (!!(((*d0).k == ((Tlockind293808) 0)))) goto LA3; initloc_533273_839829468((&a0), ((Tlockind293808) 6), t0, s0); a0.r = r0; { if (!(((*d0).flags &(1U<<((NU)(((Tlocflag293810) 2))&15U)))!=0)) goto LA7; genassignment_540264_839829468(p0, (*d0), a0, 0); } goto LA5; LA7: ; { genassignment_540264_839829468(p0, (*d0), a0, 1); } LA5: ; } goto LA1; LA3: ; { (*d0).k = ((Tlockind293808) 6); unsureAsgnRef((void**) (&(*d0).t), t0); unsureAsgnRef((void**) (&(*d0).r), r0); } LA1: ; } N_NIMCALL(NI64, bitsettoword_550578_839829468)(Tbitset340004* s0, NI size0) { NI64 result0; result0 = (NI64)0; result0 = IL64(0); { NI j_550612_839829468; NI HEX3Atmp_550622_839829468; NI res_550625_839829468; j_550612_839829468 = (NI)0; HEX3Atmp_550622_839829468 = (NI)0; HEX3Atmp_550622_839829468 = (NI)(size0 - ((NI) 1)); res_550625_839829468 = ((NI) 0); { while (1) { if (!(res_550625_839829468 <= HEX3Atmp_550622_839829468)) goto LA3; j_550612_839829468 = res_550625_839829468; { if (!(j_550612_839829468 < (s0 ? 
s0->Sup.len : 0))) goto LA6; result0 = (NI64)(result0 | (NI64)((NU64)(((NI64)(NU64)(NU8)(s0->data[j_550612_839829468]))) << (NU64)(((NI64) ((NI)(j_550612_839829468 * ((NI) 8))))))); } LA6: ; res_550625_839829468 += ((NI) 1); } LA3: ; } } return result0; } N_NIMCALL(Ropeobj179006*, genrawsetdata_550629_839829468)(Tbitset340004* cs0, NI size0) { Ropeobj179006* result0; NimStringDesc* frmt0; result0 = (Ropeobj179006*)0; frmt0 = (NimStringDesc*)0; { TY534289 LOC5; if (!(((NI) 8) < size0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_273), LOC5, 0); { NI i_550649_839829468; NI HEX3Atmp_550657_839829468; NI res_550660_839829468; i_550649_839829468 = (NI)0; HEX3Atmp_550657_839829468 = (NI)0; HEX3Atmp_550657_839829468 = (NI)(size0 - ((NI) 1)); res_550660_839829468 = ((NI) 0); { while (1) { TY179507 LOC19; NimStringDesc* LOC20; if (!(res_550660_839829468 <= HEX3Atmp_550657_839829468)) goto LA8; i_550649_839829468 = res_550660_839829468; { if (!(i_550649_839829468 < (NI)(size0 - ((NI) 1)))) goto LA11; { if (!(((NI) ((NI)((NI)(i_550649_839829468 + ((NI) 1)) % ((NI) 8)))) == ((NI) 0))) goto LA15; frmt0 = copyString(((NimStringDesc*) &T839829468_274)); } goto LA13; LA15: ; { frmt0 = copyString(((NimStringDesc*) &T839829468_275)); } LA13: ; } goto LA9; LA11: ; { frmt0 = copyString(((NimStringDesc*) &T839829468_276)); } LA9: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC20 = (NimStringDesc*)0; LOC20 = nsuToHex(((NI64)(NU64)(NU8)(cs0->data[i_550649_839829468])), ((NI) 2)); LOC19[0] = rope_179277_2381377266(LOC20); addf_180205_2381377266(&result0, frmt0, LOC19, 1); res_550660_839829468 += ((NI) 1); } LA8: ; } } } goto LA1; LA3: ; { NI64 LOC22; LOC22 = (NI64)0; LOC22 = bitsettoword_550578_839829468(cs0, size0); result0 = intliteral_540270_839829468(LOC22); } LA1: ; return result0; } N_NIMCALL(void, appcg_533640_839829468)(Tcgen530027* m0, Tcfilesection530005 s0, NimStringDesc* frmt0, Ropeobj179006** args0, NI 
args0Len0) { Ropeobj179006* LOC1; LOC1 = (Ropeobj179006*)0; LOC1 = ropecg_533407_839829468(m0, frmt0, args0, args0Len0); add_179482_2381377266(&(*m0).s[(s0)- 0], LOC1); } N_NIMCALL(Ropeobj179006*, genconstseq_560371_839829468)(Tcproc530021* p0, Tnode293802* n0, Ttype293840* t0) { Ropeobj179006* result0; Ropeobj179006* data0; TY179507 LOC1; NI LOC2; TY536235 LOC18; NI LOC19; TY533811 LOC20; result0 = (Ropeobj179006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (NI)0; LOC2 = len_294081_850551059(n0); LOC1[0] = rope_179401_2381377266(((NI64) (LOC2))); data0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_277), LOC1, 1); { NI LOC5; LOC5 = (NI)0; LOC5 = len_294081_850551059(n0); if (!(((NI) 0) < LOC5)) goto LA6; add_179487_2381377266(&data0, ((NimStringDesc*) &T839829468_278)); { NI i_560395_839829468; NI HEX3Atmp_560411_839829468; NI LOC9; NI res_560414_839829468; i_560395_839829468 = (NI)0; HEX3Atmp_560411_839829468 = (NI)0; LOC9 = (NI)0; LOC9 = len_294081_850551059(n0); HEX3Atmp_560411_839829468 = (NI)(LOC9 - ((NI) 1)); res_560414_839829468 = ((NI) 0); { while (1) { Ropeobj179006* LOC17; if (!(res_560414_839829468 <= HEX3Atmp_560411_839829468)) goto LA11; i_560395_839829468 = res_560414_839829468; { TY534289 LOC16; if (!(((NI) 0) < i_560395_839829468)) goto LA14; memset((void*)LOC16, 0, sizeof(LOC16)); addf_180205_2381377266(&data0, ((NimStringDesc*) &T839829468_279), LOC16, 0); } LA14: ; LOC17 = (Ropeobj179006*)0; LOC17 = genconstexpr_555849_839829468(p0, (*n0).kindU.S6.sons->data[i_560395_839829468]); add_179482_2381377266(&data0, LOC17); res_560414_839829468 += ((NI) 1); } LA11: ; } } add_179487_2381377266(&data0, ((NimStringDesc*) &T839829468_280)); } LA6: ; add_179487_2381377266(&data0, ((NimStringDesc*) &T839829468_280)); result0 = gettempname_534596_839829468((*p0).module); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = gettypedesc_536671_839829468((*p0).module, (*t0).sons->data[((NI) 0)]); LOC19 = (NI)0; LOC19 = len_294081_850551059(n0); 
LOC18[1] = rope_179401_2381377266(((NI64) (LOC19))); LOC18[2] = result0; LOC18[3] = data0; appcg_533640_839829468((*p0).module, ((Tcfilesection530005) 8), ((NimStringDesc*) &T839829468_281), LOC18, 4); memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = gettypedesc_536671_839829468((*p0).module, t0); LOC20[1] = result0; result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_282), LOC20, 2); return result0; } N_NIMCALL(Ropeobj179006*, gennamedconstexpr_560284_839829468)(Tcproc530021* p0, Tnode293802* n0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { if (!((*n0).kind == ((Tnodekind293020) 34))) goto LA3; result0 = genconstexpr_555849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]); } goto LA1; LA3: ; { result0 = genconstexpr_555849_839829468(p0, n0); } LA1: ; return result0; } N_NIMCALL(Ropeobj179006*, genconstsimplelist_560299_839829468)(Tcproc530021* p0, Tnode293802* n0) { Ropeobj179006* result0; NI length0; TY534289 LOC10; result0 = (Ropeobj179006*)0; length0 = sonslen_296351_850551059(n0); result0 = rope_179277_2381377266(((NimStringDesc*) &T839829468_223)); { NI i_560333_839829468; NI HEX3Atmp_560362_839829468; NI HEX3Atmp_560363_839829468; NI res_560366_839829468; i_560333_839829468 = (NI)0; HEX3Atmp_560362_839829468 = (NI)0; HEX3Atmp_560363_839829468 = (NI)0; HEX3Atmp_560362_839829468 = ((*n0).kind == ((Tnodekind293020) 38)); HEX3Atmp_560363_839829468 = (NI)(length0 - ((NI) 2)); res_560366_839829468 = ((NI) (HEX3Atmp_560362_839829468)); { while (1) { TY179507 LOC4; if (!(res_560366_839829468 <= HEX3Atmp_560363_839829468)) goto LA3; i_560333_839829468 = res_560366_839829468; memset((void*)LOC4, 0, sizeof(LOC4)); LOC4[0] = gennamedconstexpr_560284_839829468(p0, (*n0).kindU.S6.sons->data[i_560333_839829468]); addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_283), LOC4, 1); res_560366_839829468 += ((NI) 1); } LA3: ; } } { Ropeobj179006* LOC9; if (!(((NI) (((*n0).kind == ((Tnodekind293020) 38)))) < length0)) goto LA7; LOC9 = 
(Ropeobj179006*)0; LOC9 = gennamedconstexpr_560284_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))]); add_179482_2381377266(&result0, LOC9); } LA7: ; memset((void*)LOC10, 0, sizeof(LOC10)); addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_160), LOC10, 0); return result0; } N_NIMCALL(Ropeobj179006*, genconstexpr_555849_839829468)(Tcproc530021* p0, Tnode293802* n0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; switch ((*n0).kind) { case ((Tnodekind293020) 58): case ((Tnodekind293020) 59): { result0 = genconstexpr_555849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]); } break; case ((Tnodekind293020) 39): { Tbitset340004* cs0; NI64 LOC3; cs0 = (Tbitset340004*)0; tobitset_341001_452470228(n0, (&cs0)); LOC3 = (NI64)0; LOC3 = getsize_321135_3876443242((*n0).typ); result0 = genrawsetdata_550629_839829468(cs0, ((NI) (LOC3))); } break; case ((Tnodekind293020) 41): case ((Tnodekind293020) 37): case ((Tnodekind293020) 155): case ((Tnodekind293020) 38): { Ttype293840* t0; t0 = skiptypes_297099_850551059((*n0).typ, IL64(211106232576256)); { if (!((*t0).kind == ((Ttypekind293244) 24))) goto LA7; result0 = genconstseq_560371_839829468(p0, n0, t0); } goto LA5; LA7: ; { result0 = genconstsimplelist_560299_839829468(p0, n0); } LA5: ; } break; default: { Tloc293816 d0; memset((void*)(&d0), 0, sizeof(d0)); initlocexpr_540283_839829468(p0, n0, (&d0)); result0 = rdloc_539188_839829468(d0); } break; } return result0; } N_NIMCALL(void, requestconstimpl_540240_839829468)(Tcproc530021* p0, Tsym293834* sym0) { Tcgen530027* m0; Tcgen530027* q0; { m0 = (*p0).module; useheader_533369_839829468(m0, sym0); { Ropeobj179006* LOC5; if (!((*sym0).loc.k == ((Tlockind293808) 0))) goto LA3; LOC5 = (Ropeobj179006*)0; LOC5 = manglename_534205_839829468(sym0); fillloc_533282_839829468((&(*sym0).loc), ((Tlockind293808) 8), (*sym0).typ, LOC5, ((Tstorageloc293812) 1)); } LA3: ; { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag293810) 3))&15U)))!=0)) goto LA8; goto 
BeforeRet; } LA8: ; q0 = findpendingmodule_533241_839829468(m0, sym0); { NIM_BOOL LOC12; NIM_BOOL LOC14; TY536238 LOC17; LOC12 = (NIM_BOOL)0; LOC12 = !((q0 == NIM_NIL)); if (!(LOC12)) goto LA13; LOC14 = (NIM_BOOL)0; LOC14 = containsorincl_269862_2627731572((&(*q0).declaredthings), (*sym0).Sup.id); LOC12 = !(LOC14); LA13: ; if (!LOC12) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_536671_839829468(q0, (*sym0).typ); LOC17[1] = (*sym0).loc.r; LOC17[2] = genconstexpr_555849_839829468((*q0).initproc, (*sym0).ast); addf_180205_2381377266(&(*q0).s[(((Tcfilesection530005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3); } LA15: ; { NIM_BOOL LOC20; NIM_BOOL LOC22; Ropeobj179006* headerdecl0; TY533811 LOC25; LOC20 = (NIM_BOOL)0; LOC20 = !((q0 == m0)); if (!(LOC20)) goto LA21; LOC22 = (NIM_BOOL)0; LOC22 = containsorincl_269862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id); LOC20 = !(LOC22); LA21: ; if (!LOC20) goto LA23; memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = gettypedesc_536671_839829468(m0, (*sym0).loc.t); LOC25[1] = (*sym0).loc.r; headerdecl0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_284), LOC25, 2); add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 8))- 0], headerdecl0); { NIM_BOOL LOC28; LOC28 = (NIM_BOOL)0; LOC28 = (((*sym0).flags &(1U<<((NU)(((Tsymflag293184) 6))&31U)))!=0); if (!(LOC28)) goto LA29; LOC28 = !((generatedheader_533201_839829468 == NIM_NIL)); LA29: ; if (!LOC28) goto LA30; add_179482_2381377266(&(*generatedheader_533201_839829468).s[(((Tcfilesection530005) 8))- 0], headerdecl0); } LA30: ; } LA23: ; }BeforeRet: ; } N_NIMCALL(void, gencomplexconst_559249_839829468)(Tcproc530021* p0, Tsym293834* sym0, Tloc293816* d0) { requestconstimpl_540240_839829468(p0, sym0); putlocintodest_540258_839829468(p0, d0, (*sym0).loc); } static N_INLINE(Ropeobj179006**, procsec_530194_3723162438)(Tcproc530021* p0, Tcprocsection530011 s0) { Ropeobj179006** result0; result0 = (Ropeobj179006**)0; result0 = 
&(*p0).blocks->data[((NI) 0)].sections[(s0)- 0]; return result0; } N_NIMCALL(void, accessthreadlocalvar_533945_839829468)(Tcproc530021* p0, Tsym293834* s0) { { NIM_BOOL LOC3; Ropeobj179006** LOC7; TY534289 LOC8; Ropeobj179006** LOC9; TY534289 LOC10; Ropeobj179006* LOC11; LOC3 = (NIM_BOOL)0; LOC3 = emulatedthreadvars_533949_839829468(); if (!(LOC3)) goto LA4; LOC3 = !((*p0).threadvaraccessed); LA4: ; if (!LOC3) goto LA5; (*p0).threadvaraccessed = NIM_TRUE; (*(*p0).module).flags |= ((NU8)1)<<((((Codegenflag530025) 1))%(sizeof(NU8)*8)); LOC7 = (Ropeobj179006**)0; LOC7 = procsec_530194_3723162438(p0, ((Tcprocsection530011) 0)); memset((void*)LOC8, 0, sizeof(LOC8)); addf_180205_2381377266(LOC7, ((NimStringDesc*) &T839829468_286), LOC8, 0); LOC9 = (Ropeobj179006**)0; LOC9 = procsec_530194_3723162438(p0, ((Tcprocsection530011) 1)); memset((void*)LOC10, 0, sizeof(LOC10)); LOC11 = (Ropeobj179006*)0; LOC11 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_287), LOC10, 0); add_179482_2381377266(LOC9, LOC11); } LA5: ; } static N_INLINE(NIM_BOOL, isemptytype_298440_850551059)(Ttype293840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = (t0 == NIM_NIL); if (LOC1) goto LA2; LOC1 = ((*t0).kind == ((Ttypekind293244) 62) || (*t0).kind == ((Ttypekind293244) 7)); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, putdataintodest_551436_839829468)(Tcproc530021* p0, Tloc293816* d0, Ttype293840* t0, Ropeobj179006* r0) { Tloc293816 a0; memset((void*)(&a0), 0, sizeof(a0)); { if (!!(((*d0).k == ((Tlockind293808) 0)))) goto LA3; initloc_533273_839829468((&a0), ((Tlockind293808) 8), t0, ((Tstorageloc293812) 1)); a0.r = r0; { if (!(((*d0).flags &(1U<<((NU)(((Tlocflag293810) 2))&15U)))!=0)) goto LA7; genassignment_540264_839829468(p0, (*d0), a0, 0); } goto LA5; LA7: ; { genassignment_540264_839829468(p0, (*d0), a0, 1); } LA5: ; } goto LA1; LA3: ; { (*d0).k = ((Tlockind293808) 8); unsureAsgnRef((void**) (&(*d0).t), t0); 
unsureAsgnRef((void**) (&(*d0).r), r0); } LA1: ; } N_NIMCALL(NIM_BOOL, freshlineinfo_533818_839829468)(Tcproc530021* p0, Tlineinfo192336 info0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = !(((*p0).lastlineinfo.line == info0.line)); if (LOC3) goto LA4; LOC3 = !(((*p0).lastlineinfo.fileindex == info0.fileindex)); LA4: ; if (!LOC3) goto LA5; (*p0).lastlineinfo.line = info0.line; (*p0).lastlineinfo.fileindex = info0.fileindex; result0 = NIM_TRUE; } LA5: ; return result0; } N_NIMCALL(void, genlinedir_533823_839829468)(Tcproc530021* p0, Tnode293802* t0) { NI line0; Ropeobj179006** LOC11; NimStringDesc* LOC12; line0 = safelinenm_533721_839829468((*t0).info); { Ropeobj179006** LOC5; TY534289 LOC6; Ropeobj179006* LOC7; Ropeobj179006* LOC8; Ropeobj179006* LOC9; Ropeobj179006* LOC10; if (!((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 28))&63U)))!=0)) goto LA3; LOC5 = (Ropeobj179006**)0; LOC5 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); memset((void*)LOC6, 0, sizeof(LOC6)); LOC7 = (Ropeobj179006*)0; LOC7 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_293), LOC6, 0); LOC8 = (Ropeobj179006*)0; LOC8 = sourceline_193068_155036129((*t0).info); LOC9 = (Ropeobj179006*)0; LOC9 = HEX26_179418_2381377266(LOC7, LOC8); LOC10 = (Ropeobj179006*)0; LOC10 = HEX26_179418_2381377266(LOC9, rnl_179903_2381377266); add_179482_2381377266(LOC5, LOC10); } LA3: ; LOC11 = (Ropeobj179006**)0; LOC11 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); LOC12 = (NimStringDesc*)0; LOC12 = tofullpath_193264_155036129((*t0).info.fileindex); genclinedir_533725_839829468(LOC11, LOC12, line0); { NIM_BOOL LOC15; NIM_BOOL LOC17; LOC15 = (NIM_BOOL)0; LOC15 = ((163840 & (*p0).options) == 163840); if (!(LOC15)) goto LA16; LOC17 = (NIM_BOOL)0; LOC17 = ((*p0).prc == NIM_NIL); if (LOC17) goto LA18; LOC17 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag293184) 9))&31U)))!=0)); LA18: ; LOC15 = LOC17; LA16: ; if (!LOC15) goto LA19; { 
NIM_BOOL LOC23; TY533811 LOC26; NimStringDesc* LOC27; LOC23 = (NIM_BOOL)0; LOC23 = freshlineinfo_533818_839829468(p0, (*t0).info); if (!LOC23) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = rope_179401_2381377266(((NI64) (line0))); LOC27 = (NimStringDesc*)0; LOC27 = tofilename_193260_155036129((*t0).info.fileindex); LOC26[1] = makecstring_192638_155036129(LOC27); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_294), LOC26, 2); } LA24: ; } goto LA13; LA19: ; { NIM_BOOL LOC29; NIM_BOOL LOC30; NIM_BOOL LOC32; LOC29 = (NIM_BOOL)0; LOC30 = (NIM_BOOL)0; LOC30 = ((98304 & (*p0).options) == 98304); if (!(LOC30)) goto LA31; LOC32 = (NIM_BOOL)0; LOC32 = ((*p0).prc == NIM_NIL); if (LOC32) goto LA33; LOC32 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag293184) 9))&31U)))!=0)); LA33: ; LOC30 = LOC32; LA31: ; LOC29 = LOC30; if (!(LOC29)) goto LA34; LOC29 = (((NI32) 0) <= (*t0).info.fileindex); LA34: ; if (!LOC29) goto LA35; { NIM_BOOL LOC39; TY533811 LOC42; LOC39 = (NIM_BOOL)0; LOC39 = freshlineinfo_533818_839829468(p0, (*t0).info); if (!LOC39) goto LA40; memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = rope_179401_2381377266(((NI64) (line0))); LOC42[1] = quotedfilename_197818_155036129((*t0).info); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_295), LOC42, 2); } LA40: ; } goto LA13; LA35: ; LA13: ; } N_NIMCALL(Ropeobj179006*, getlabel_540217_839829468)(Tcproc530021* p0) { Ropeobj179006* result0; Ropeobj179006* LOC1; result0 = (Ropeobj179006*)0; (*p0).labels += ((NI) 1); LOC1 = (Ropeobj179006*)0; LOC1 = rope_179401_2381377266(((NI64) ((*p0).labels))); result0 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_296), LOC1); return result0; } N_NIMCALL(void, fixlabel_540230_839829468)(Tcproc530021* p0, Ropeobj179006* labl0) { TY179507 LOC1; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = labl0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_299), 
LOC1, 1); } N_NIMCALL(void, genandor_555311_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 m0) { Ropeobj179006* L0; Tloc293816 tmp0; L0 = (Ropeobj179006*)0; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_538032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE); (*p0).splitdecls += ((NI) 1); expr_540248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0)); L0 = getlabel_540217_839829468(p0); { TY533811 LOC5; if (!(m0 == ((Tmagic293524) 127))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_539188_839829468(tmp0); LOC5[1] = L0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_297), LOC5, 2); } goto LA1; LA3: ; { TY533811 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_539188_839829468(tmp0); LOC7[1] = L0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_298), LOC7, 2); } LA1: ; expr_540248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&tmp0)); fixlabel_540230_839829468(p0, L0); { if (!((*d0).k == ((Tlockind293808) 0))) goto LA10; genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI293816)); } goto LA8; LA10: ; { genassignment_540264_839829468(p0, (*d0), tmp0, 0); } LA8: ; (*p0).splitdecls -= ((NI) 1); } N_NIMCALL(void, unaryarith_553646_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 op0) { Tloc293816 a0; Ttype293840* t0; TY536238 LOC1; NI64 LOC2; Ropeobj179006* LOC3; memset((void*)(&a0), 0, sizeof(a0)); t0 = (Ttype293840*)0; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); t0 = skiptypes_297099_850551059((*e0).typ, IL64(211106233624832)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_539188_839829468(a0); LOC2 = (NI64)0; LOC2 = getsize_321135_3876443242(t0); LOC1[1] = rope_179401_2381377266((NI64)(LOC2 * IL64(8))); LOC1[2] = getsimpletypedesc_534936_839829468((*p0).module, (*e0).typ); LOC3 = (Ropeobj179006*)0; LOC3 = 
HEX25_179905_2381377266(unarithtab_553653_839829468[(op0)- 99], LOC1, 3); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC3, ((Tstorageloc293812) 0)); } N_NIMCALL(void, unaryarithoverflow_552633_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 m0) { Tloc293816 a0; Ttype293840* t0; TY533811 LOC7; NI64 LOC8; Ropeobj179006* LOC9; memset((void*)(&a0), 0, sizeof(a0)); t0 = (Ttype293840*)0; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); t0 = skiptypes_297099_850551059((*e0).typ, IL64(211106233624832)); { TY533811 LOC5; NI64 LOC6; if (!(((*p0).options &(1U<<((NU)(((Toption170009) 5))&31U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_539188_839829468(a0); LOC6 = (NI64)0; LOC6 = firstord_321001_3876443242(t0); LOC5[1] = intliteral_540270_839829468(LOC6); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_317), LOC5, 2); } LA3: ; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_539188_839829468(a0); LOC8 = (NI64)0; LOC8 = getsize_321135_3876443242(t0); LOC7[1] = rope_179401_2381377266((NI64)(LOC8 * IL64(8))); LOC9 = (Ropeobj179006*)0; LOC9 = HEX25_179905_2381377266(opr_552640_839829468[(m0)- 96], LOC7, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC9, ((Tstorageloc293812) 0)); } N_NIMCALL(void, binaryarith_552819_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 op0) { Tloc293816 a0; Tloc293816 b0; NI64 s0; NI64 LOC1; NI64 LOC2; TY536235 LOC3; Ropeobj179006* LOC4; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); s0 = (NI64)0; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); LOC1 = (NI64)0; LOC1 = getsize_321135_3876443242(a0.t); LOC2 = (NI64)0; LOC2 = getsize_321135_3876443242(b0.t); s0 = (NI64)(((LOC1 >= LOC2) ? 
LOC1 : LOC2) * IL64(8)); memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = rdloc_539188_839829468(a0); LOC3[1] = rdloc_539188_839829468(b0); LOC3[2] = rope_179401_2381377266(s0); LOC3[3] = getsimpletypedesc_534936_839829468((*p0).module, (*e0).typ); LOC4 = (Ropeobj179006*)0; LOC4 = HEX25_179905_2381377266(binarithtab_552826_839829468[(op0)- 52], LOC3, 4); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC4, ((Tstorageloc293812) 0)); } N_NIMCALL(void, binaryfloatarith_557728_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 m0) { { Tloc293816 a0; Tloc293816 b0; TY536235 LOC5; Tnode293802* LOC6; Ropeobj179006* LOC7; if (!!(((384 & (*p0).options) == 0))) goto LA3; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_179277_2381377266(opr_557762_839829468[(m0)- 52]); LOC5[1] = rdloc_539188_839829468(a0); LOC5[2] = rdloc_539188_839829468(b0); LOC6 = (Tnode293802*)0; LOC6 = HEX5BHEX5D_294238_850551059(e0, ((NI) 1)); LOC5[3] = getsimpletypedesc_534936_839829468((*p0).module, (*LOC6).typ); LOC7 = (Ropeobj179006*)0; LOC7 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_319), LOC5, 4); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc293812) 0)); { TY179507 LOC12; if (!(((*p0).options &(1U<<((NU)(((Toption170009) 7))&31U)))!=0)) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_539188_839829468((*d0)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_323), LOC12, 1); } LA10: ; { TY179507 LOC17; if (!(((*p0).options &(1U<<((NU)(((Toption170009) 8))&31U)))!=0)) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rdloc_539188_839829468((*d0)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) 
&T839829468_324), LOC17, 1); } LA15: ; } goto LA1; LA3: ; { binaryarith_552819_839829468(p0, e0, d0, m0); } LA1: ; } N_NIMCALL(void, geneqproc_553214_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 a0; Tloc293816 b0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); { Ttype293840* LOC3; TY533811 LOC6; Ropeobj179006* LOC7; LOC3 = (Ttype293840*)0; LOC3 = skiptypes_297099_850551059(a0.t, IL64(211106232576256)); if (!((*LOC3).callconv == ((Tcallingconvention293002) 8))) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rdloc_539188_839829468(a0); LOC6[1] = rdloc_539188_839829468(b0); LOC7 = (Ropeobj179006*)0; LOC7 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_352), LOC6, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc293812) 0)); } goto LA1; LA4: ; { TY533811 LOC9; Ropeobj179006* LOC10; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = rdloc_539188_839829468(a0); LOC9[1] = rdloc_539188_839829468(b0); LOC10 = (Ropeobj179006*)0; LOC10 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_341), LOC9, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC10, ((Tstorageloc293812) 0)); } LA1: ; } N_NIMCALL(Ropeobj179006*, rdcharloc_539227_839829468)(Tloc293816 a0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = rdloc_539188_839829468(a0); { Ttype293840* LOC3; TY179507 LOC6; LOC3 = (Ttype293840*)0; LOC3 = skiptypes_297099_850551059(a0.t, IL64(211106233624832)); if (!((*LOC3).kind == ((Ttypekind293244) 2))) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = result0; result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_358), LOC6, 1); } LA4: ; return result0; } N_NIMCALL(Ropeobj179006*, binaryarithoverflowraw_552235_839829468)(Tcproc530021* p0, Ttype293840* t0, Tloc293816 a0, Tloc293816 b0, 
NimStringDesc* frmt0) { Ropeobj179006* result0; NI64 size0; Ropeobj179006* storage0; TY533811 LOC6; TY536238 LOC7; result0 = (Ropeobj179006*)0; size0 = getsize_321135_3876443242(t0); { if (!(size0 < ((NI64) (intsize_177641_4151366050)))) goto LA3; storage0 = rope_179277_2381377266(((NimStringDesc*) &T839829468_36)); } goto LA1; LA3: ; { storage0 = gettypedesc_536671_839829468((*p0).module, t0); } LA1: ; result0 = gettempname_534596_839829468((*p0).module); memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = storage0; LOC6[1] = result0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 0), ((NimStringDesc*) &T839829468_54), LOC6, 2); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = result0; LOC7[1] = rdcharloc_539227_839829468(a0); LOC7[2] = rdcharloc_539227_839829468(b0); linecg_533707_839829468(p0, ((Tcprocsection530011) 2), frmt0, LOC7, 3); { NIM_BOOL LOC10; TY536238 LOC14; NI64 LOC15; NI64 LOC16; LOC10 = (NIM_BOOL)0; LOC10 = (size0 < ((NI64) (intsize_177641_4151366050))); if (LOC10) goto LA11; LOC10 = ((*t0).kind == ((Ttypekind293244) 20) || (*t0).kind == ((Ttypekind293244) 14)); LA11: ; if (!LOC10) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = result0; LOC15 = (NI64)0; LOC15 = firstord_321001_3876443242(t0); LOC14[1] = intliteral_540270_839829468(LOC15); LOC16 = (NI64)0; LOC16 = lastord_321004_3876443242(t0); LOC14[2] = intliteral_540270_839829468(LOC16); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_359), LOC14, 3); } LA12: ; return result0; } N_NIMCALL(void, binaryarithoverflow_552262_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 m0) { Tloc293816 a0; Tloc293816 b0; Ttype293840* t0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); t0 = skiptypes_297099_850551059((*e0).typ, IL64(211106233624832)); { 
Ropeobj179006* res0; TY536238 LOC5; if (!!((((*p0).options &(1U<<((NU)(((Toption170009) 5))&31U)))!=0))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = gettypedesc_536671_839829468((*p0).module, t0); LOC5[1] = rdloc_539188_839829468(a0); LOC5[2] = rdloc_539188_839829468(b0); res0 = HEX25_179905_2381377266(opr_552279_839829468[(m0)- 45], LOC5, 3); putintodest_551468_839829468(p0, d0, (*e0).typ, res0, ((Tstorageloc293812) 0)); } goto LA1; LA3: ; { Ropeobj179006* res0; NimStringDesc* LOC7; TY533811 LOC13; Ropeobj179006* LOC14; LOC7 = (NimStringDesc*)0; { if (!((*t0).kind == ((Ttypekind293244) 35))) goto LA10; LOC7 = copyString(prc64_552274_839829468[(m0)- 45]); } goto LA8; LA10: ; { LOC7 = copyString(prc_552269_839829468[(m0)- 45]); } LA8: ; res0 = binaryarithoverflowraw_552235_839829468(p0, t0, a0, b0, LOC7); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = gettypedesc_536671_839829468((*p0).module, t0); LOC13[1] = res0; LOC14 = (Ropeobj179006*)0; LOC14 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_370), LOC13, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC14, ((Tstorageloc293812) 0)); } LA1: ; } N_NIMCALL(Ropeobj179006*, lenfield_540305_839829468)(Tcproc530021* p0) { Ropeobj179006* result0; NimStringDesc* LOC1; result0 = (Ropeobj179006*)0; LOC1 = (NimStringDesc*)0; { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC4) goto LA5; LOC4 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA5: ; if (!LOC4) goto LA6; LOC1 = copyString(((NimStringDesc*) &T839829468_157)); } goto LA2; LA6: ; { LOC1 = copyString(((NimStringDesc*) &T839829468_158)); } LA2: ; result0 = rope_179277_2381377266(LOC1); return result0; } N_NIMCALL(void, gcusage_555439_839829468)(Tnode293802* n0) { { NimStringDesc* LOC5; if (!(gselectedgc_170133_2607990831 == ((Tgcmode170080) 0))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = rendertree_312044_382274130(n0, 0); 
message_197095_155036129((*n0).info, ((Tmsgkind192002) 263), LOC5); } LA3: ; } N_NIMCALL(void, genrepr_556339_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 a0; Ttype293840* t0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); t0 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); switch ((*t0).kind) { case ((Ttypekind293244) 31) ... ((Ttypekind293244) 35): case ((Ttypekind293244) 40) ... ((Ttypekind293244) 44): { TY179507 LOC2; Ropeobj179006* LOC3; memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rdloc_539188_839829468(a0); LOC3 = (Ropeobj179006*)0; LOC3 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_371), LOC2, 1); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC3, a0.s); } break; case ((Ttypekind293244) 36) ... ((Ttypekind293244) 39): { TY179507 LOC5; Ropeobj179006* LOC6; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_539188_839829468(a0); LOC6 = (Ropeobj179006*)0; LOC6 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_372), LOC5, 1); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC6, a0.s); } break; case ((Ttypekind293244) 1): { TY179507 LOC8; Ropeobj179006* LOC9; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_539188_839829468(a0); LOC9 = (Ropeobj179006*)0; LOC9 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_373), LOC8, 1); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC9, a0.s); } break; case ((Ttypekind293244) 2): { TY179507 LOC11; Ropeobj179006* LOC12; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rdloc_539188_839829468(a0); LOC12 = (Ropeobj179006*)0; LOC12 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_374), LOC11, 1); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC12, a0.s); } break; case ((Ttypekind293244) 14): case ((Ttypekind293244) 15): { TY533811 LOC14; Ropeobj179006* LOC15; memset((void*)LOC14, 
0, sizeof(LOC14)); LOC14[0] = rdloc_539188_839829468(a0); LOC14[1] = gentypeinfo_536941_839829468((*p0).module, t0); LOC15 = (Ropeobj179006*)0; LOC15 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_375), LOC14, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC15, a0.s); } break; case ((Ttypekind293244) 28): { TY179507 LOC17; Ropeobj179006* LOC18; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rdloc_539188_839829468(a0); LOC18 = (Ropeobj179006*)0; LOC18 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_376), LOC17, 1); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC18, a0.s); } break; case ((Ttypekind293244) 19): { TY533811 LOC20; Ropeobj179006* LOC21; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = addrloc_539204_839829468(a0); LOC20[1] = gentypeinfo_536941_839829468((*p0).module, t0); LOC21 = (Ropeobj179006*)0; LOC21 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_377), LOC20, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC21, a0.s); } break; case ((Ttypekind293244) 27): case ((Ttypekind293244) 48): { Tloc293816 b0; TY533811 LOC34; Ttype293840* LOC35; Ropeobj179006* LOC36; memset((void*)(&b0), 0, sizeof(b0)); switch ((*a0.t).kind) { case ((Ttypekind293244) 27): case ((Ttypekind293244) 48): { TY179507 LOC24; Ropeobj179006* LOC25; memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = rdloc_539188_839829468(a0); LOC25 = (Ropeobj179006*)0; LOC25 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_378), LOC24, 1); putintodest_551468_839829468(p0, (&b0), (*e0).typ, LOC25, a0.s); } break; case ((Ttypekind293244) 28): case ((Ttypekind293244) 24): { TY533811 LOC27; Ropeobj179006* LOC28; memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = rdloc_539188_839829468(a0); LOC27[1] = lenfield_540305_839829468(p0); LOC28 = (Ropeobj179006*)0; LOC28 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_379), LOC27, 2); putintodest_551468_839829468(p0, (&b0), (*e0).typ, LOC28, a0.s); } break; 
case ((Ttypekind293244) 16): case ((Ttypekind293244) 4): { TY533811 LOC30; NI64 LOC31; Ropeobj179006* LOC32; memset((void*)LOC30, 0, sizeof(LOC30)); LOC30[0] = rdloc_539188_839829468(a0); LOC31 = (NI64)0; LOC31 = lengthord_321007_3876443242(a0.t); LOC30[1] = rope_179401_2381377266(LOC31); LOC32 = (Ropeobj179006*)0; LOC32 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_380), LOC30, 2); putintodest_551468_839829468(p0, (&b0), (*e0).typ, LOC32, a0.s); } break; default: { internalerror_197100_155036129((*(*e0).kindU.S6.sons->data[((NI) 0)]).info, ((NimStringDesc*) &T839829468_381)); } break; } memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = rdloc_539188_839829468(b0); LOC35 = (Ttype293840*)0; LOC35 = elemtype_321394_3876443242(t0); LOC34[1] = gentypeinfo_536941_839829468((*p0).module, LOC35); LOC36 = (Ropeobj179006*)0; LOC36 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_382), LOC34, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC36, a0.s); } break; case ((Ttypekind293244) 29): case ((Ttypekind293244) 16): case ((Ttypekind293244) 4): case ((Ttypekind293244) 22): case ((Ttypekind293244) 21): case ((Ttypekind293244) 26): case ((Ttypekind293244) 5): case ((Ttypekind293244) 24): { TY533811 LOC38; Ropeobj179006* LOC39; memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = rdloc_539188_839829468(a0); LOC38[1] = gentypeinfo_536941_839829468((*p0).module, t0); LOC39 = (Ropeobj179006*)0; LOC39 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC38, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC39, a0.s); } break; case ((Ttypekind293244) 3): case ((Ttypekind293244) 62): { localerror_197085_155036129((*e0).info, ((NimStringDesc*) &T839829468_384)); } break; default: { TY533811 LOC42; Ropeobj179006* LOC43; memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = addrloc_539204_839829468(a0); LOC42[1] = gentypeinfo_536941_839829468((*p0).module, t0); LOC43 = (Ropeobj179006*)0; LOC43 = 
ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC42, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC43, a0.s); } break; } gcusage_555439_839829468(e0); } N_NIMCALL(void, gengettypeinfo_556383_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Ttype293840* t0; Ropeobj179006* LOC1; t0 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); LOC1 = (Ropeobj179006*)0; LOC1 = gentypeinfo_536941_839829468((*p0).module, t0); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC1, ((Tstorageloc293812) 0)); } N_NIMCALL(void, genswap_556638_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 a0; Tloc293816 b0; Tloc293816 tmp0; Ttype293840* LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&tmp0), 0, sizeof(tmp0)); LOC1 = (Ttype293840*)0; LOC1 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); gettemp_538032_839829468(p0, LOC1, (&tmp0), NIM_FALSE); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); genassignment_540264_839829468(p0, tmp0, a0, 0); genassignment_540264_839829468(p0, a0, b0, 0); genassignment_540264_839829468(p0, b0, tmp0, 0); } N_NIMCALL(void, unaryexpr_552209_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0) { Tloc293816 a0; TY179507 LOC1; Ropeobj179006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_539188_839829468(a0); LOC2 = (Ropeobj179006*)0; LOC2 = ropecg_533407_839829468((*p0).module, frmt0, LOC1, 1); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc293812) 0)); } N_NIMCALL(void, binarystmt_551501_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* 
frmt0) { Tloc293816 a0; Tloc293816 b0; TY533811 LOC5; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); { if (!!(((*d0).k == ((Tlockind293808) 0)))) goto LA3; internalerror_197100_155036129((*e0).info, ((NimStringDesc*) &T839829468_387)); } LA3: ; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_539188_839829468(a0); LOC5[1] = rdloc_539188_839829468(b0); linecg_533707_839829468(p0, ((Tcprocsection530011) 2), frmt0, LOC5, 2); } N_NIMCALL(void, genstrconcat_555452_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 a0; Tloc293816 tmp0; NI L0; Ropeobj179006* appends0; Ropeobj179006* lens0; TY536238 LOC21; Ropeobj179006** LOC22; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_538032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE); L0 = ((NI) 0); appends0 = NIM_NIL; lens0 = NIM_NIL; { NI i_555475_839829468; NI HEX3Atmp_555547_839829468; NI LOC2; NI res_555550_839829468; i_555475_839829468 = (NI)0; HEX3Atmp_555547_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_296351_850551059(e0); HEX3Atmp_555547_839829468 = (NI)(LOC2 - ((NI) 2)); res_555550_839829468 = ((NI) 0); { while (1) { if (!(res_555550_839829468 <= HEX3Atmp_555547_839829468)) goto LA4; i_555475_839829468 = res_555550_839829468; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_555475_839829468 + ((NI) 1))], (&a0)); { Ttype293840* LOC7; TY533811 LOC10; Ropeobj179006* LOC11; LOC7 = (Ttype293840*)0; LOC7 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_555475_839829468 + ((NI) 1))]).typ, IL64(211106242013440)); if (!((*LOC7).kind == ((Ttypekind293244) 2))) goto LA8; L0 += ((NI) 1); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = tmp0.r; LOC10[1] = rdloc_539188_839829468(a0); LOC11 = (Ropeobj179006*)0; LOC11 = 
ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2); add_179482_2381377266(&appends0, LOC11); } goto LA5; LA8: ; { TY533811 LOC19; Ropeobj179006* LOC20; { if (!((*(*e0).kindU.S6.sons->data[(NI)(i_555475_839829468 + ((NI) 1))]).kind >= ((Tnodekind293020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_555475_839829468 + ((NI) 1))]).kind <= ((Tnodekind293020) 22))) goto LA15; L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_555475_839829468 + ((NI) 1))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_555475_839829468 + ((NI) 1))]).kindU.S3.strval->Sup.len : 0); } goto LA13; LA15: ; { TY533811 LOC18; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_539188_839829468(a0); LOC18[1] = lenfield_540305_839829468(p0); addf_180205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2); } LA13: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = tmp0.r; LOC19[1] = rdloc_539188_839829468(a0); LOC20 = (Ropeobj179006*)0; LOC20 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2); add_179482_2381377266(&appends0, LOC20); } LA5: ; res_555550_839829468 += ((NI) 1); } LA4: ; } } memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = tmp0.r; LOC21[1] = lens0; LOC21[2] = rope_179401_2381377266(((NI64) (L0))); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_393), LOC21, 3); LOC22 = (Ropeobj179006**)0; LOC22 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); add_179482_2381377266(LOC22, appends0); { if (!((*d0).k == ((Tlockind293808) 0))) goto LA25; genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI293816)); } goto LA23; LA25: ; { genassignment_540264_839829468(p0, (*d0), tmp0, 0); } LA23: ; gcusage_555439_839829468(e0); } N_NIMCALL(void, genstrappend_555554_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 a0; Tloc293816 dest0; Ropeobj179006* appends0; Ropeobj179006* lens0; NI L0; TY536238 LOC21; Ropeobj179006** LOC22; memset((void*)(&a0), 0, 
sizeof(a0)); memset((void*)(&dest0), 0, sizeof(dest0)); appends0 = (Ropeobj179006*)0; lens0 = (Ropeobj179006*)0; L0 = ((NI) 0); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&dest0)); { NI i_555615_839829468; NI HEX3Atmp_555676_839829468; NI LOC2; NI res_555679_839829468; i_555615_839829468 = (NI)0; HEX3Atmp_555676_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_296351_850551059(e0); HEX3Atmp_555676_839829468 = (NI)(LOC2 - ((NI) 3)); res_555679_839829468 = ((NI) 0); { while (1) { if (!(res_555679_839829468 <= HEX3Atmp_555676_839829468)) goto LA4; i_555615_839829468 = res_555679_839829468; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_555615_839829468 + ((NI) 2))], (&a0)); { Ttype293840* LOC7; TY533811 LOC10; Ropeobj179006* LOC11; LOC7 = (Ttype293840*)0; LOC7 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_555615_839829468 + ((NI) 2))]).typ, IL64(211106242013440)); if (!((*LOC7).kind == ((Ttypekind293244) 2))) goto LA8; L0 += ((NI) 1); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdloc_539188_839829468(dest0); LOC10[1] = rdloc_539188_839829468(a0); LOC11 = (Ropeobj179006*)0; LOC11 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2); add_179482_2381377266(&appends0, LOC11); } goto LA5; LA8: ; { TY533811 LOC19; Ropeobj179006* LOC20; { if (!((*(*e0).kindU.S6.sons->data[(NI)(i_555615_839829468 + ((NI) 2))]).kind >= ((Tnodekind293020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_555615_839829468 + ((NI) 2))]).kind <= ((Tnodekind293020) 22))) goto LA15; L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_555615_839829468 + ((NI) 2))]).kindU.S3.strval ? 
(*(*e0).kindU.S6.sons->data[(NI)(i_555615_839829468 + ((NI) 2))]).kindU.S3.strval->Sup.len : 0); } goto LA13; LA15: ; { TY533811 LOC18; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_539188_839829468(a0); LOC18[1] = lenfield_540305_839829468(p0); addf_180205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2); } LA13: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_539188_839829468(dest0); LOC19[1] = rdloc_539188_839829468(a0); LOC20 = (Ropeobj179006*)0; LOC20 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2); add_179482_2381377266(&appends0, LOC20); } LA5: ; res_555679_839829468 += ((NI) 1); } LA4: ; } } memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = rdloc_539188_839829468(dest0); LOC21[1] = lens0; LOC21[2] = rope_179401_2381377266(((NI64) (L0))); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_395), LOC21, 3); LOC22 = (Ropeobj179006**)0; LOC22 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); add_179482_2381377266(LOC22, appends0); gcusage_555439_839829468(e0); } N_NIMCALL(void, genseqelemappend_555683_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { NimStringDesc* seqappendpattern0; Tloc293816 a0; Tloc293816 b0; Tloc293816 dest0; Ttype293840* bt0; TY536238 LOC8; Ttype293840* LOC9; TY533811 LOC10; TY533811 LOC11; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA4: ; if (!!(LOC3)) goto LA5; seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_396)); } goto LA1; LA5: ; { seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_397)); } LA1: ; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&dest0), 0, sizeof(dest0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, 
(*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); bt0 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 2)]).typ, IL64(211106240964864)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_539188_839829468(a0); LOC9 = (Ttype293840*)0; LOC9 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); LOC8[1] = gettypedesc_536671_839829468((*p0).module, LOC9); LOC8[2] = gettypedesc_536671_839829468((*p0).module, bt0); linecg_533707_839829468(p0, ((Tcprocsection530011) 2), seqappendpattern0, LOC8, 3); initloc_533273_839829468((&dest0), ((Tlockind293808) 6), bt0, ((Tstorageloc293812) 3)); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdloc_539188_839829468(a0); LOC10[1] = lenfield_540305_839829468(p0); dest0.r = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_398), LOC10, 2); genassignment_540264_839829468(p0, dest0, b0, 3); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rdloc_539188_839829468(a0); LOC11[1] = lenfield_540305_839829468(p0); linecg_533707_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_399), LOC11, 2); gcusage_555439_839829468(e0); } N_NIMCALL(void, binaryexpr_551549_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0) { Tloc293816 a0; Tloc293816 b0; TY533811 LOC1; Ropeobj179006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_539188_839829468(a0); LOC1[1] = rdloc_539188_839829468(b0); LOC2 = (Ropeobj179006*)0; LOC2 = ropecg_533407_839829468((*p0).module, frmt0, LOC1, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc293812) 0)); } N_NIMCALL(void, genstrequals_557666_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 x0; Tnode293802* a0; Tnode293802* 
b0; memset((void*)(&x0), 0, sizeof(x0)); a0 = (*e0).kindU.S6.sons->data[((NI) 1)]; b0 = (*e0).kindU.S6.sons->data[((NI) 2)]; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((*a0).kind == ((Tnodekind293020) 23)); if (LOC3) goto LA4; LOC3 = ((*b0).kind == ((Tnodekind293020) 23)); LA4: ; if (!LOC3) goto LA5; binaryexpr_551549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341)); } goto LA1; LA5: ; { NIM_BOOL LOC8; TY533811 LOC12; Ropeobj179006* LOC13; LOC8 = (NIM_BOOL)0; LOC8 = ((*a0).kind >= ((Tnodekind293020) 20) && (*a0).kind <= ((Tnodekind293020) 22)); if (!(LOC8)) goto LA9; LOC8 = (((*a0).kindU.S3.strval) && ((*a0).kindU.S3.strval)->Sup.len == 0); LA9: ; if (!LOC8) goto LA10; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&x0)); memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_539188_839829468(x0); LOC12[1] = lenfield_540305_839829468(p0); LOC13 = (Ropeobj179006*)0; LOC13 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC12, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC13, ((Tstorageloc293812) 0)); } goto LA1; LA10: ; { NIM_BOOL LOC15; TY533811 LOC19; Ropeobj179006* LOC20; LOC15 = (NIM_BOOL)0; LOC15 = ((*b0).kind >= ((Tnodekind293020) 20) && (*b0).kind <= ((Tnodekind293020) 22)); if (!(LOC15)) goto LA16; LOC15 = (((*b0).kindU.S3.strval) && ((*b0).kindU.S3.strval)->Sup.len == 0); LA16: ; if (!LOC15) goto LA17; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&x0)); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_539188_839829468(x0); LOC19[1] = lenfield_540305_839829468(p0); LOC20 = (Ropeobj179006*)0; LOC20 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC19, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC20, ((Tstorageloc293812) 0)); } goto LA1; LA17: ; { binaryexpr_551549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_401)); } LA1: ; } N_NIMCALL(void, genisnil_553620_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) 
{ Ttype293840* t0; t0 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).kind == ((Ttypekind293244) 25)); if (!(LOC3)) goto LA4; LOC3 = ((*t0).callconv == ((Tcallingconvention293002) 8)); LA4: ; if (!LOC3) goto LA5; unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_404)); } goto LA1; LA5: ; { unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_405)); } LA1: ; } N_NIMCALL(void, gendollar_556391_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0, NimStringDesc* frmt0) { Tloc293816 a0; TY179507 LOC1; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_539188_839829468(a0); a0.r = ropecg_533407_839829468((*p0).module, frmt0, LOC1, 1); { if (!((*d0).k == ((Tlockind293808) 0))) goto LA4; gettemp_538032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA4: ; genassignment_540264_839829468(p0, (*d0), a0, 0); gcusage_555439_839829468(n0); } N_NIMCALL(Ropeobj179006*, genofhelper_556139_839829468)(Tcproc530021* p0, Ttype293840* dest0, Ropeobj179006* a0) { Ropeobj179006* result0; Ropeobj179006* ti0; result0 = (Ropeobj179006*)0; ti0 = gentypeinfo_536941_839829468((*p0).module, dest0); { NIM_BOOL LOC3; NIM_BOOL LOC5; TY533811 LOC9; LOC3 = (NIM_BOOL)0; LOC3 = (((*dest0).flags &(1U<<((NU)(((Ttypeflag293431) 2))&31U)))!=0); if (LOC3) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = (((*(*p0).module).flags &(1U<<((NU)(((Codegenflag530025) 5))&7U)))!=0); if (!(LOC5)) goto LA6; LOC5 = !((((*dest0).flags &(1U<<((NU)(((Ttypeflag293431) 5))&31U)))!=0)); LA6: ; LOC3 = LOC5; LA4: ; if (!LOC3) goto LA7; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = a0; LOC9[1] = ti0; result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_414), LOC9, 2); } goto LA1; LA7: ; { Ropeobj179006* LOC11; Ropeobj179006* cache0; Ropeobj179006* LOC12; TY179507 LOC13; 
TY536238 LOC14; LOC11 = (Ropeobj179006*)0; LOC11 = cgsym_533403_839829468((*p0).module, ((NimStringDesc*) &T839829468_129)); (*(*p0).module).labels += ((NI) 1); LOC12 = (Ropeobj179006*)0; LOC12 = rope_179401_2381377266(((NI64) ((*(*p0).module).labels))); cache0 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_415), LOC12); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = cache0; addf_180205_2381377266(&(*(*p0).module).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_416), LOC13, 1); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = a0; LOC14[1] = ti0; LOC14[2] = cache0; result0 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_417), LOC14, 3); } LA1: ; return result0; } N_NIMCALL(void, genof_556201_839829468)(Tcproc530021* p0, Tnode293802* x0, Ttype293840* typ0, Tloc293816* d0) { Tloc293816 a0; Ttype293840* dest0; Ropeobj179006* r0; Ropeobj179006* nilcheck0; Ttype293840* t0; Ttype293840* LOC41; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, x0, (&a0)); dest0 = skiptypes_297099_850551059(typ0, IL64(211106247256320)); r0 = rdloc_539188_839829468(a0); nilcheck0 = NIM_NIL; t0 = skiptypes_297099_850551059(a0.t, IL64(211106232576256)); { while (1) { Ttype293840* LOC16; if (!((*t0).kind == ((Ttypekind293244) 23) || (*t0).kind == ((Ttypekind293244) 21) || (*t0).kind == ((Ttypekind293244) 22))) goto LA2; { if (!!(((*t0).kind == ((Ttypekind293244) 23)))) goto LA5; nilcheck0 = r0; } LA5: ; { NIM_BOOL LOC9; NIM_BOOL LOC11; TY179507 LOC15; LOC9 = (NIM_BOOL)0; LOC9 = !(((*t0).kind == ((Ttypekind293244) 23))); if (LOC9) goto LA10; LOC11 = (NIM_BOOL)0; LOC11 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC11) goto LA12; LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA12: ; LOC9 = !(LOC11); LA10: ; if (!LOC9) goto LA13; memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = r0; r0 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC15, 1); } 
LA13: ; LOC16 = (Ttype293840*)0; LOC16 = lastson_296377_850551059(t0); t0 = skiptypes_297099_850551059(LOC16, IL64(211106232576256)); } LA2: ; } { NIM_BOOL LOC19; LOC19 = (NIM_BOOL)0; LOC19 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC19) goto LA20; LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA20: ; if (!!(LOC19)) goto LA21; { while (1) { NIM_BOOL LOC25; TY534289 LOC27; Ropeobj179006* LOC28; LOC25 = (NIM_BOOL)0; LOC25 = ((*t0).kind == ((Ttypekind293244) 17)); if (!(LOC25)) goto LA26; LOC25 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL)); LA26: ; if (!LOC25) goto LA24; memset((void*)LOC27, 0, sizeof(LOC27)); LOC28 = (Ropeobj179006*)0; LOC28 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_153), LOC27, 0); add_179482_2381377266(&r0, LOC28); t0 = skiptypes_297099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360)); } LA24: ; } } LA21: ; { NIM_BOOL LOC31; LOC31 = (NIM_BOOL)0; LOC31 = isobjlackingtypefield_534513_839829468(t0); if (!LOC31) goto LA32; globalerror_197071_155036129((*x0).info, ((Tmsgkind192002) 4), ((NimStringDesc*) &T839829468_412)); } LA32: ; { TY533811 LOC38; if (!!((nilcheck0 == NIM_NIL))) goto LA36; memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = nilcheck0; LOC38[1] = genofhelper_556139_839829468(p0, dest0, r0); r0 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_413), LOC38, 2); } goto LA34; LA36: ; { TY179507 LOC40; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = genofhelper_556139_839829468(p0, dest0, r0); r0 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_418), LOC40, 1); } LA34: ; LOC41 = (Ttype293840*)0; LOC41 = getsystype_339150_3937434831(((Ttypekind293244) 1)); putintodest_551468_839829468(p0, d0, LOC41, r0, a0.s); } N_NIMCALL(void, genof_556331_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { genof_556201_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (*(*n0).kindU.S6.sons->data[((NI) 2)]).typ, d0); } 
N_NIMCALL(void, rawgennew_555741_839829468)(Tcproc530021* p0, Tloc293816 a0, Ropeobj179006* sizeexpr_555745_839829468) { Ropeobj179006* sizeexpr0; Ttype293840* reftype0; Tloc293816 b0; TY536238 args0; Ttype293840* bt0; sizeexpr0 = sizeexpr_555745_839829468; reftype0 = skiptypes_297099_850551059(a0.t, IL64(211106242013440)); memset((void*)(&b0), 0, sizeof(b0)); initloc_533273_839829468((&b0), ((Tlockind293808) 6), a0.t, ((Tstorageloc293812) 3)); { TY179507 LOC5; Ttype293840* LOC6; if (!sizeexpr0 == 0) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC6 = (Ttype293840*)0; LOC6 = skiptypes_297099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832)); LOC5[0] = gettypedesc_536671_839829468((*p0).module, LOC6); sizeexpr0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_419), LOC5, 1); } LA3: ; memset((void*)args0, 0, sizeof(args0)); args0[0] = gettypedesc_536671_839829468((*p0).module, reftype0); args0[1] = gentypeinfo_536941_839829468((*p0).module, reftype0); args0[2] = sizeexpr0; { NIM_BOOL LOC9; TY533811 LOC21; LOC9 = (NIM_BOOL)0; LOC9 = (a0.s == ((Tstorageloc293812) 3)); if (!(LOC9)) goto LA10; LOC9 = usesnativegc_170177_2607990831(); LA10: ; if (!LOC9) goto LA11; { NIM_BOOL LOC15; TY179507 LOC18; LOC15 = (NIM_BOOL)0; LOC15 = canformacycle_321123_3876443242(a0.t); if (!LOC15) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_539188_839829468(a0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_420), LOC18, 1); } goto LA13; LA16: ; { TY179507 LOC20; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = rdloc_539188_839829468(a0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_255), LOC20, 1); } LA13: ; b0.r = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_421), args0, 3); memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = rdloc_539188_839829468(a0); LOC21[1] = rdloc_539188_839829468(b0); linefmt_533714_839829468(p0, 
((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC21, 2); } goto LA7; LA11: ; { b0.r = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_422), args0, 3); genassignment_540264_839829468(p0, a0, b0, 0); } LA7: ; bt0 = skiptypes_297099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832)); genobjectinit_539242_839829468(p0, ((Tcprocsection530011) 2), bt0, a0, NIM_FALSE); } N_NIMCALL(void, gennew_555782_839829468)(Tcproc530021* p0, Tnode293802* e0) { Tloc293816 a0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); { NI LOC3; Tloc293816 se0; Ropeobj179006* LOC6; LOC3 = (NI)0; LOC3 = len_294081_850551059(e0); if (!(LOC3 == ((NI) 3))) goto LA4; memset((void*)(&se0), 0, sizeof(se0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&se0)); LOC6 = (Ropeobj179006*)0; LOC6 = rdloc_539188_839829468(se0); rawgennew_555741_839829468(p0, a0, LOC6); } goto LA1; LA4: ; { rawgennew_555741_839829468(p0, a0, NIM_NIL); } LA1: ; gcusage_555439_839829468(e0); } N_NIMCALL(void, gennewfinalize_556110_839829468)(Tcproc530021* p0, Tnode293802* e0) { Tloc293816 a0; Tloc293816 b0; Tloc293816 f0; Ttype293840* reftype0; Ttype293840* bt0; Ropeobj179006* ti0; TY533811 LOC1; TY536238 LOC2; Ttype293840* LOC3; Ttype293840* LOC4; Ttype293840* LOC5; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&f0), 0, sizeof(f0)); reftype0 = (Ttype293840*)0; bt0 = (Ttype293840*)0; ti0 = (Ropeobj179006*)0; reftype0 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&f0)); initloc_533273_839829468((&b0), ((Tlockind293808) 6), a0.t, ((Tstorageloc293812) 3)); ti0 = gentypeinfo_536941_839829468((*p0).module, reftype0); memset((void*)LOC1, 0, sizeof(LOC1)); 
LOC1[0] = ti0; LOC1[1] = rdloc_539188_839829468(f0); addf_180205_2381377266(&(*(*p0).module).s[(((Tcfilesection530005) 14))- 0], ((NimStringDesc*) &T839829468_423), LOC1, 2); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = gettypedesc_536671_839829468((*p0).module, reftype0); LOC2[1] = ti0; LOC3 = (Ttype293840*)0; LOC3 = lastson_296377_850551059(reftype0); LOC4 = (Ttype293840*)0; LOC4 = skiptypes_297099_850551059(LOC3, IL64(211106233624832)); LOC2[2] = gettypedesc_536671_839829468((*p0).module, LOC4); b0.r = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_424), LOC2, 3); genassignment_540264_839829468(p0, a0, b0, 0); LOC5 = (Ttype293840*)0; LOC5 = lastson_296377_850551059(reftype0); bt0 = skiptypes_297099_850551059(LOC5, IL64(211106233624832)); genobjectinit_539242_839829468(p0, ((Tcprocsection530011) 2), bt0, a0, NIM_FALSE); gcusage_555439_839829468(e0); } N_NIMCALL(void, gennewseqaux_555795_839829468)(Tcproc530021* p0, Tloc293816 dest0, Ropeobj179006* length0) { Ttype293840* seqtype0; TY536238 args0; Tloc293816 call0; seqtype0 = skiptypes_297099_850551059(dest0.t, IL64(211106242013440)); memset((void*)args0, 0, sizeof(args0)); args0[0] = gettypedesc_536671_839829468((*p0).module, seqtype0); args0[1] = gentypeinfo_536941_839829468((*p0).module, seqtype0); args0[2] = length0; memset((void*)(&call0), 0, sizeof(call0)); initloc_533273_839829468((&call0), ((Tlockind293808) 6), dest0.t, ((Tstorageloc293812) 3)); { NIM_BOOL LOC3; TY533811 LOC15; LOC3 = (NIM_BOOL)0; LOC3 = (dest0.s == ((Tstorageloc293812) 3)); if (!(LOC3)) goto LA4; LOC3 = usesnativegc_170177_2607990831(); LA4: ; if (!LOC3) goto LA5; { NIM_BOOL LOC9; TY179507 LOC12; LOC9 = (NIM_BOOL)0; LOC9 = canformacycle_321123_3876443242(dest0.t); if (!LOC9) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_539188_839829468(dest0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_420), LOC12, 1); } goto LA7; LA10: ; { TY179507 LOC14; 
memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_539188_839829468(dest0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_255), LOC14, 1); } LA7: ; call0.r = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_425), args0, 3); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = rdloc_539188_839829468(dest0); LOC15[1] = rdloc_539188_839829468(call0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC15, 2); } goto LA1; LA5: ; { call0.r = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_426), args0, 3); genassignment_540264_839829468(p0, dest0, call0, 0); } LA1: ; } N_NIMCALL(void, gennewseq_555824_839829468)(Tcproc530021* p0, Tnode293802* e0) { Tloc293816 a0; Tloc293816 b0; Ropeobj179006* LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); LOC1 = (Ropeobj179006*)0; LOC1 = rdloc_539188_839829468(b0); gennewseqaux_555795_839829468(p0, a0, LOC1); gcusage_555439_839829468(e0); } N_NIMCALL(void, gennewseqofcap_555836_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Ttype293840* seqtype0; Tloc293816 a0; TY536238 LOC1; Ropeobj179006* LOC2; seqtype0 = skiptypes_297099_850551059((*e0).typ, IL64(211106242013440)); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = gettypedesc_536671_839829468((*p0).module, seqtype0); LOC1[1] = gentypeinfo_536941_839829468((*p0).module, seqtype0); LOC1[2] = rdloc_539188_839829468(a0); LOC2 = (Ropeobj179006*)0; LOC2 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_427), LOC1, 3); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc293812) 0)); gcusage_555439_839829468(e0); } 
N_NIMCALL(Ropeobj179006*, getclosuretype_536683_839829468)(Tcgen530027* m0, Ttype293840* t0, Tclosuretypekind536679 kind0) { Ropeobj179006* result0; Intset269030 check0; Ropeobj179006* rettype0; Ropeobj179006* desc0; result0 = (Ropeobj179006*)0; memset((void*)(&check0), 0, sizeof(check0)); chckNil((void*)(&check0)); memset((void*)(&check0), 0, sizeof(check0)); initintset_269885_2627731572((&check0)); result0 = gettempname_534596_839829468(m0); rettype0 = (Ropeobj179006*)0; desc0 = (Ropeobj179006*)0; genprocparams_535115_839829468(m0, t0, &rettype0, &desc0, (&check0), !((kind0 == ((Tclosuretypekind536679) 0))), NIM_FALSE); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = isimportedtype_534449_839829468(t0); if (!!(LOC3)) goto LA4; { NIM_BOOL LOC8; TY536235 LOC12; LOC8 = (NIM_BOOL)0; LOC8 = !(((*t0).callconv == ((Tcallingconvention293002) 8))); if (LOC8) goto LA9; LOC8 = !((kind0 == ((Tclosuretypekind536679) 2))); LA9: ; if (!LOC8) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rope_179277_2381377266(Callingconvtostr_534585_839829468[((*t0).callconv)- 0]); LOC12[1] = rettype0; LOC12[2] = result0; LOC12[3] = desc0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC12, 4); } goto LA6; LA10: ; { TY536238 LOC14; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = result0; LOC14[1] = rettype0; LOC14[2] = desc0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC14, 3); } LA6: ; } LA4: ; return result0; } N_NIMCALL(void, gensomecast_557480_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 a0; Ttype293840* etyp0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); etyp0 = skiptypes_297099_850551059((*e0).typ, IL64(211106233624832)); { NIM_BOOL LOC3; TY533811 LOC7; Ropeobj179006* LOC8; LOC3 = (NIM_BOOL)0; LOC3 = ((*etyp0).kind == ((Ttypekind293244) 18) || (*etyp0).kind == 
((Ttypekind293244) 17) || (*etyp0).kind == ((Ttypekind293244) 16) || (*etyp0).kind == ((Ttypekind293244) 27) || (*etyp0).kind == ((Ttypekind293244) 48) || (*etyp0).kind == ((Ttypekind293244) 4)); if (!(LOC3)) goto LA4; LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag293810) 0))&15U)))!=0)); LA4: ; if (!LOC3) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = gettypedesc_536671_839829468((*p0).module, (*e0).typ); LOC7[1] = addrloc_539204_839829468(a0); LOC8 = (Ropeobj179006*)0; LOC8 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_429), LOC7, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC8, a0.s); } goto LA1; LA5: ; { NIM_BOOL LOC10; TY533811 LOC14; Ropeobj179006* LOC15; LOC10 = (NIM_BOOL)0; LOC10 = ((*etyp0).kind == ((Ttypekind293244) 25)); if (!(LOC10)) goto LA11; LOC10 = ((*etyp0).callconv == ((Tcallingconvention293002) 8)); LA11: ; if (!LOC10) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = getclosuretype_536683_839829468((*p0).module, etyp0, ((Tclosuretypekind536679) 1)); LOC14[1] = rdcharloc_539227_839829468(a0); LOC15 = (Ropeobj179006*)0; LOC15 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_430), LOC14, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC15, a0.s); } goto LA1; LA12: ; { TY533811 LOC17; Ropeobj179006* LOC18; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_536671_839829468((*p0).module, (*e0).typ); LOC17[1] = rdcharloc_539227_839829468(a0); LOC18 = (Ropeobj179006*)0; LOC18 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_430), LOC17, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC18, a0.s); } LA1: ; } N_NIMCALL(void, unaryexprchar_552222_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0) { Tloc293816 a0; TY179507 LOC1; Ropeobj179006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdcharloc_539227_839829468(a0); LOC2 = 
(Ropeobj179006*)0; LOC2 = ropecg_533407_839829468((*p0).module, frmt0, LOC1, 1); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc293812) 0)); } N_NIMCALL(void, genord_557474_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { unaryexprchar_552222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_301)); } N_NIMCALL(void, genarraylen_556415_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 op0) { Tnode293802* a0; Ttype293840* typ0; a0 = (*e0).kindU.S6.sons->data[((NI) 1)]; { if (!((*a0).kind == ((Tnodekind293020) 64))) goto LA3; a0 = (*a0).kindU.S6.sons->data[((NI) 0)]; } LA3: ; typ0 = skiptypes_297099_850551059((*a0).typ, IL64(211106240964864)); switch ((*typ0).kind) { case ((Ttypekind293244) 27): case ((Ttypekind293244) 48): { { if (!(op0 == ((Tmagic293524) 8))) goto LA8; unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_431)); } goto LA6; LA8: ; { unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_432)); } LA6: ; } break; case ((Ttypekind293244) 29): { usestringh_533345_839829468((*p0).module); { if (!(op0 == ((Tmagic293524) 8))) goto LA14; unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_433)); } goto LA12; LA14: ; { unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_434)); } LA12: ; } break; case ((Ttypekind293244) 28): case ((Ttypekind293244) 24): { { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC20) goto LA21; LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA21: ; if (!!(LOC20)) goto LA22; { if (!(op0 == ((Tmagic293524) 8))) goto LA26; unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_435)); } goto LA24; LA26: ; { unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_436)); } LA24: ; } goto LA18; LA22: ; { { if (!(op0 == ((Tmagic293524) 8))) goto LA32; unaryexpr_552209_839829468(p0, e0, d0, 
((NimStringDesc*) &T839829468_437)); } goto LA30; LA32: ; { unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_438)); } LA30: ; } LA18: ; } break; case ((Ttypekind293244) 16): case ((Ttypekind293244) 4): { { NI64 LOC40; Ropeobj179006* LOC41; if (!(op0 == ((Tmagic293524) 8))) goto LA38; LOC40 = (NI64)0; LOC40 = lastord_321004_3876443242(typ0); LOC41 = (Ropeobj179006*)0; LOC41 = rope_179401_2381377266(LOC40); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC41, ((Tstorageloc293812) 0)); } goto LA36; LA38: ; { NI64 LOC43; Ropeobj179006* LOC44; LOC43 = (NI64)0; LOC43 = lengthord_321007_3876443242(typ0); LOC44 = (Ropeobj179006*)0; LOC44 = rope_179401_2381377266(LOC43); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC44, ((Tstorageloc293812) 0)); } LA36: ; } break; default: { internalerror_197100_155036129((*e0).info, ((NimStringDesc*) &T839829468_439)); } break; } } N_NIMCALL(void, unarystmt_551527_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0) { Tloc293816 a0; TY179507 LOC5; memset((void*)(&a0), 0, sizeof(a0)); { if (!!(((*d0).k == ((Tlockind293808) 0)))) goto LA3; internalerror_197100_155036129((*e0).info, ((NimStringDesc*) &T839829468_442)); } LA3: ; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_539188_839829468(a0); linecg_533707_839829468(p0, ((Tcprocsection530011) 2), frmt0, LOC5, 1); } N_NIMCALL(void, gensetlengthstr_556632_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { binarystmt_551501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_445)); gcusage_555439_839829468(e0); } N_NIMCALL(void, gensetlengthseq_556500_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 a0; Tloc293816 b0; Ttype293840* t0; NimStringDesc* setlenpattern0; TY536235 LOC8; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, 
(*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); t0 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA4: ; if (!!(LOC3)) goto LA5; setlenpattern0 = copyString(((NimStringDesc*) &T839829468_446)); } goto LA1; LA5: ; { setlenpattern0 = copyString(((NimStringDesc*) &T839829468_447)); } LA1: ; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_539188_839829468(a0); LOC8[1] = rdloc_539188_839829468(b0); LOC8[2] = gettypedesc_536671_839829468((*p0).module, t0); LOC8[3] = gettypedesc_536671_839829468((*p0).module, (*t0).sons->data[((NI) 0)]); linecg_533707_839829468(p0, ((Tcprocsection530011) 2), setlenpattern0, LOC8, 4); gcusage_555439_839829468(e0); } N_NIMCALL(Ropeobj179006*, rdsetelemloc_556662_839829468)(Tloc293816 a0, Ttype293840* settype0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = rdcharloc_539227_839829468(a0); { NI64 LOC3; TY533811 LOC6; NI64 LOC7; LOC3 = (NI64)0; LOC3 = firstord_321001_3876443242(settype0); if (!!((LOC3 == IL64(0)))) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = result0; LOC7 = (NI64)0; LOC7 = firstord_321001_3876443242(settype0); LOC6[1] = rope_179401_2381377266(LOC7); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_448), LOC6, 2); } LA4: ; return result0; } N_NIMCALL(void, binarystmtinexcl_556857_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0) { Tloc293816 a0; Tloc293816 b0; TY533811 LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC1, 
0, sizeof(LOC1)); LOC1[0] = rdloc_539188_839829468(a0); LOC1[1] = rdsetelemloc_556662_839829468(b0, a0.t); linef_533700_839829468(p0, ((Tcprocsection530011) 2), frmt0, LOC1, 2); } N_NIMCALL(void, binaryexprchar_551809_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NimStringDesc* frmt0) { Tloc293816 a0; Tloc293816 b0; TY533811 LOC1; Ropeobj179006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdcharloc_539227_839829468(a0); LOC1[1] = rdcharloc_539227_839829468(b0); LOC2 = (Ropeobj179006*)0; LOC2 = ropecg_533407_839829468((*p0).module, frmt0, LOC1, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc293812) 0)); } N_NIMCALL(NIM_BOOL, fewcmps_556803_839829468)(Tnode293802* s0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { if (!!(((*s0).kind == ((Tnodekind293020) 39)))) goto LA3; internalerror_197100_155036129((*s0).info, ((NimStringDesc*) &T839829468_463)); } LA3: ; { NIM_BOOL LOC7; NI64 LOC8; LOC7 = (NIM_BOOL)0; LOC8 = (NI64)0; LOC8 = getsize_321135_3876443242((*s0).typ); LOC7 = (LOC8 <= ((NI64) (intsize_177641_4151366050))); if (!(LOC7)) goto LA9; LOC7 = (((*s0).flags &(1U<<((NU)(((Tnodeflag293427) 4))&15U)))!=0); LA9: ; if (!LOC7) goto LA10; result0 = NIM_FALSE; } goto LA5; LA10: ; { Ttype293840* LOC13; LOC13 = (Ttype293840*)0; LOC13 = elemtype_321394_3876443242((*s0).typ); if (!((*LOC13).kind == ((Ttypekind293244) 31) || (*LOC13).kind >= ((Ttypekind293244) 33) && (*LOC13).kind <= ((Ttypekind293244) 35))) goto LA14; result0 = NIM_TRUE; } goto LA5; LA14: ; { NI LOC17; LOC17 = (NI)0; LOC17 = sonslen_296351_850551059(s0); result0 = (LOC17 <= ((NI) 8)); } LA5: ; return result0; } N_NIMCALL(void, binaryexprin_556837_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* a0, Tloc293816* b0, Tloc293816* 
d0, NimStringDesc* frmt0) { TY533811 LOC1; Ropeobj179006* LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_539188_839829468((*a0)); LOC1[1] = rdsetelemloc_556662_839829468((*b0), (*a0).t); LOC2 = (Ropeobj179006*)0; LOC2 = HEX25_179905_2381377266(frmt0, LOC1, 2); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc293812) 0)); } N_NIMCALL(void, geninexpraux_554496_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* a0, Tloc293816* b0, Tloc293816* d0) { Ttype293840* LOC1; NI64 LOC2; LOC1 = (Ttype293840*)0; LOC1 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); LOC2 = (NI64)0; LOC2 = getsize_321135_3876443242(LOC1); switch (((NI) (LOC2))) { case ((NI) 1): { binaryexprin_556837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_467)); } break; case ((NI) 2): { binaryexprin_556837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_468)); } break; case ((NI) 4): { binaryexprin_556837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_469)); } break; case ((NI) 8): { binaryexprin_556837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_470)); } break; default: { binaryexprin_556837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_471)); } break; } } N_NIMCALL(void, geninop_557009_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 a0; Tloc293816 b0; Tloc293816 x0; Tloc293816 y0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&x0), 0, sizeof(x0)); memset((void*)(&y0), 0, sizeof(y0)); { NIM_BOOL LOC3; Tnode293802* ea0; NI length0; LOC3 = (NIM_BOOL)0; LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind293020) 39)); if (!(LOC3)) goto LA4; LOC3 = fewcmps_556803_839829468((*e0).kindU.S6.sons->data[((NI) 1)]); LA4: ; if (!LOC3) goto LA5; { if (!((*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind293020) 70) || (*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == 
((Tnodekind293020) 69))) goto LA9; ea0 = (*(*e0).kindU.S6.sons->data[((NI) 2)]).kindU.S6.sons->data[((NI) 0)]; } goto LA7; LA9: ; { ea0 = (*e0).kindU.S6.sons->data[((NI) 2)]; } LA7: ; initlocexpr_540283_839829468(p0, ea0, (&a0)); initloc_533273_839829468((&b0), ((Tlockind293808) 6), (*e0).typ, ((Tstorageloc293812) 0)); b0.r = rope_179277_2381377266(((NimStringDesc*) &T839829468_118)); length0 = sonslen_296351_850551059((*e0).kindU.S6.sons->data[((NI) 1)]); { NI i_557061_839829468; NI HEX3Atmp_557412_839829468; NI res_557415_839829468; i_557061_839829468 = (NI)0; HEX3Atmp_557412_839829468 = (NI)0; HEX3Atmp_557412_839829468 = (NI)(length0 - ((NI) 1)); res_557415_839829468 = ((NI) 0); { while (1) { if (!(res_557415_839829468 <= HEX3Atmp_557412_839829468)) goto LA14; i_557061_839829468 = res_557415_839829468; { TY536238 LOC19; if (!((*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_557061_839829468]).kind == ((Tnodekind293020) 44))) goto LA17; initlocexpr_540283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_557061_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0)); initlocexpr_540283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_557061_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0)); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdcharloc_539227_839829468(a0); LOC19[1] = rdcharloc_539227_839829468(x0); LOC19[2] = rdcharloc_539227_839829468(y0); addf_180205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_464), LOC19, 3); } goto LA15; LA17: ; { TY533811 LOC21; initlocexpr_540283_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_557061_839829468], (&x0)); memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = rdcharloc_539227_839829468(a0); LOC21[1] = rdcharloc_539227_839829468(x0); addf_180205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_465), LOC21, 2); } LA15: ; { if (!(i_557061_839829468 < (NI)(length0 - ((NI) 1)))) goto LA24; add_179487_2381377266(&b0.r, 
((NimStringDesc*) &T839829468_466)); } LA24: ; res_557415_839829468 += ((NI) 1); } LA14: ; } } add_179487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_117)); putintodest_551468_839829468(p0, d0, (*e0).typ, b0.r, ((Tstorageloc293812) 0)); } goto LA1; LA5: ; { initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); geninexpraux_554496_839829468(p0, e0, (&a0), (&b0), d0); } LA1: ; } N_NIMCALL(void, gensetop_557419_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 op0) { Tloc293816 a0; Tloc293816 b0; Tloc293816 i0; Ttype293840* settype0; NI size0; NI64 LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&i0), 0, sizeof(i0)); settype0 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); LOC1 = (NI64)0; LOC1 = getsize_321135_3876443242(settype0); size0 = ((NI) (LOC1)); switch (size0) { case ((NI) 1): case ((NI) 2): case ((NI) 4): case ((NI) 8): { switch (op0) { case ((Tmagic293524) 39): { NimStringDesc* ts0; NimStringDesc* LOC4; NimStringDesc* LOC5; NimStringDesc* LOC6; LOC4 = (NimStringDesc*)0; LOC5 = (NimStringDesc*)0; LOC5 = nimIntToStr((NI)(size0 * ((NI) 8))); LOC4 = rawNewString(LOC5->Sup.len + 2); appendString(LOC4, ((NimStringDesc*) &T839829468_45)); appendString(LOC4, LOC5); ts0 = LOC4; LOC6 = (NimStringDesc*)0; LOC6 = rawNewString(ts0->Sup.len + ts0->Sup.len + 35); appendString(LOC6, ((NimStringDesc*) &T839829468_449)); appendString(LOC6, ts0); appendString(LOC6, ((NimStringDesc*) &T839829468_450)); appendString(LOC6, ts0); appendString(LOC6, ((NimStringDesc*) &T839829468_451)); binarystmtinexcl_556857_839829468(p0, e0, d0, LOC6); } break; case ((Tmagic293524) 40): { NimStringDesc* ts0; NimStringDesc* LOC8; NimStringDesc* LOC9; NimStringDesc* LOC10; LOC8 = (NimStringDesc*)0; LOC9 = (NimStringDesc*)0; LOC9 = nimIntToStr((NI)(size0 * ((NI) 8))); LOC8 = 
rawNewString(LOC9->Sup.len + 2); appendString(LOC8, ((NimStringDesc*) &T839829468_45)); appendString(LOC8, LOC9); ts0 = LOC8; LOC10 = (NimStringDesc*)0; LOC10 = rawNewString(ts0->Sup.len + ts0->Sup.len + 42); appendString(LOC10, ((NimStringDesc*) &T839829468_452)); appendString(LOC10, ts0); appendString(LOC10, ((NimStringDesc*) &T839829468_453)); appendString(LOC10, ts0); appendString(LOC10, ((NimStringDesc*) &T839829468_454)); binarystmtinexcl_556857_839829468(p0, e0, d0, LOC10); } break; case ((Tmagic293524) 41): { { if (!(size0 <= ((NI) 4))) goto LA14; unaryexprchar_552222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_455)); } goto LA12; LA14: ; { unaryexprchar_552222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_456)); } LA12: ; } break; case ((Tmagic293524) 133): { binaryexprchar_551809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_457)); } break; case ((Tmagic293524) 132): { binaryexprchar_551809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_458)); } break; case ((Tmagic293524) 131): { binaryexpr_551549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341)); } break; case ((Tmagic293524) 134): { binaryexpr_551549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_459)); } break; case ((Tmagic293524) 135): { binaryexpr_551549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_460)); } break; case ((Tmagic293524) 136): { binaryexpr_551549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_461)); } break; case ((Tmagic293524) 137): { binaryexpr_551549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_462)); } break; case ((Tmagic293524) 148): { geninop_557009_839829468(p0, e0, d0); } break; default: { internalerror_197100_155036129((*e0).info, ((NimStringDesc*) &T839829468_472)); } break; } } break; default: { switch (op0) { case ((Tmagic293524) 39): { binarystmtinexcl_556857_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_473)); } break; case ((Tmagic293524) 40): { binarystmtinexcl_556857_839829468(p0, e0, d0, 
((NimStringDesc*) &T839829468_474)); } break; case ((Tmagic293524) 41): { NimStringDesc* LOC30; NimStringDesc* LOC31; LOC30 = (NimStringDesc*)0; LOC31 = (NimStringDesc*)0; LOC31 = nimIntToStr(size0); LOC30 = rawNewString(LOC31->Sup.len + 14); appendString(LOC30, ((NimStringDesc*) &T839829468_475)); appendString(LOC30, LOC31); appendChar(LOC30, 41); unaryexprchar_552222_839829468(p0, e0, d0, LOC30); } break; case ((Tmagic293524) 133): case ((Tmagic293524) 132): { Ttype293840* LOC33; TY537475 LOC39; LOC33 = (Ttype293840*)0; LOC33 = getsystype_339150_3937434831(((Ttypekind293244) 31)); gettemp_538032_839829468(p0, LOC33, (&i0), NIM_FALSE); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); { Ttype293840* LOC38; if (!((*d0).k == ((Tlockind293808) 0))) goto LA36; LOC38 = (Ttype293840*)0; LOC38 = getsystype_339150_3937434831(((Ttypekind293244) 1)); gettemp_538032_839829468(p0, LOC38, d0, NIM_FALSE); } LA36: ; memset((void*)LOC39, 0, sizeof(LOC39)); LOC39[0] = rdloc_539188_839829468(i0); LOC39[1] = rope_179401_2381377266(((NI64) (size0))); LOC39[2] = rdloc_539188_839829468((*d0)); LOC39[3] = rdloc_539188_839829468(a0); LOC39[4] = rdloc_539188_839829468(b0); linef_533700_839829468(p0, ((Tcprocsection530011) 2), lookupopr_557426_839829468[(op0)- 132], LOC39, 5); } break; case ((Tmagic293524) 131): { NimStringDesc* LOC41; NimStringDesc* LOC42; usestringh_533345_839829468((*p0).module); LOC41 = (NimStringDesc*)0; LOC42 = (NimStringDesc*)0; LOC42 = nimIntToStr(size0); LOC41 = rawNewString(LOC42->Sup.len + 21); appendString(LOC41, ((NimStringDesc*) &T839829468_481)); appendString(LOC41, LOC42); appendString(LOC41, ((NimStringDesc*) &T839829468_482)); binaryexprchar_551809_839829468(p0, e0, d0, LOC41); } break; case ((Tmagic293524) 134): case ((Tmagic293524) 135): case ((Tmagic293524) 136): case ((Tmagic293524) 137): { Ttype293840* LOC44; TY537847 LOC49; LOC44 = 
(Ttype293840*)0; LOC44 = getsystype_339150_3937434831(((Ttypekind293244) 31)); gettemp_538032_839829468(p0, LOC44, (&i0), NIM_FALSE); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); { if (!((*d0).k == ((Tlockind293808) 0))) goto LA47; gettemp_538032_839829468(p0, a0.t, d0, NIM_FALSE); } LA47: ; memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = rdloc_539188_839829468(i0); LOC49[1] = rope_179401_2381377266(((NI64) (size0))); LOC49[2] = rdloc_539188_839829468((*d0)); LOC49[3] = rdloc_539188_839829468(a0); LOC49[4] = rdloc_539188_839829468(b0); LOC49[5] = rope_179277_2381377266(lookupopr_557426_839829468[(op0)- 132]); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_483), LOC49, 6); } break; case ((Tmagic293524) 148): { geninop_557009_839829468(p0, e0, d0); } break; default: { internalerror_197100_155036129((*e0).info, ((NimStringDesc*) &T839829468_484)); } break; } } break; } } static N_INLINE(Ropeobj179006*, genargstringtocstring_540776_839829468)(Tcproc530021* p0, Tnode293802* n0) { Ropeobj179006* result0; Tloc293816 a0; TY179507 LOC1; result0 = (Ropeobj179006*)0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_539188_839829468(a0); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_485), LOC1, 1); return result0; } N_NIMCALL(Ropeobj179006*, openarrayloc_540665_839829468)(Tcproc530021* p0, Tnode293802* n0) { Ropeobj179006* result0; Tloc293816 a0; Tnode293802* q0; result0 = (Ropeobj179006*)0; memset((void*)(&a0), 0, sizeof(a0)); q0 = skipconv_329882_3876443242(n0); { Tmagic293524 LOC3; Tloc293816 b0; Tloc293816 c0; Tnode293802* LOC6; Tnode293802* LOC7; Tnode293802* LOC8; NimStringDesc* fmt0; Ttype293840* LOC9; TY536238 LOC25; LOC3 = (Tmagic293524)0; LOC3 = getmagic_319502_2616423590(q0); if 
(!(LOC3 == ((Tmagic293524) 139))) goto LA4; memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&c0), 0, sizeof(c0)); LOC6 = (Tnode293802*)0; LOC6 = HEX5BHEX5D_294238_850551059(q0, ((NI) 1)); initlocexpr_540283_839829468(p0, LOC6, (&a0)); LOC7 = (Tnode293802*)0; LOC7 = HEX5BHEX5D_294238_850551059(q0, ((NI) 2)); initlocexpr_540283_839829468(p0, LOC7, (&b0)); LOC8 = (Tnode293802*)0; LOC8 = HEX5BHEX5D_294238_850551059(q0, ((NI) 3)); initlocexpr_540283_839829468(p0, LOC8, (&c0)); LOC9 = (Ttype293840*)0; LOC9 = skiptypes_297099_850551059(a0.t, IL64(211106243062016)); switch ((*LOC9).kind) { case ((Ttypekind293244) 27): case ((Ttypekind293244) 48): case ((Ttypekind293244) 16): case ((Ttypekind293244) 4): { fmt0 = copyString(((NimStringDesc*) &T839829468_486)); } break; case ((Ttypekind293244) 28): case ((Ttypekind293244) 24): { { NIM_BOOL LOC14; Ttype293840* LOC15; NIM_BOOL LOC17; LOC14 = (NIM_BOOL)0; LOC15 = (Ttype293840*)0; LOC15 = skiptypes_297099_850551059((*n0).typ, IL64(211106232576256)); LOC14 = ((*LOC15).kind == ((Ttypekind293244) 23)); if (!(LOC14)) goto LA16; LOC17 = (NIM_BOOL)0; LOC17 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC17) goto LA18; LOC17 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA18: ; LOC14 = !(LOC17); LA16: ; if (!LOC14) goto LA19; fmt0 = copyString(((NimStringDesc*) &T839829468_487)); } goto LA12; LA19: ; { fmt0 = copyString(((NimStringDesc*) &T839829468_488)); } LA12: ; } break; default: { NimStringDesc* LOC23; NimStringDesc* LOC24; LOC23 = (NimStringDesc*)0; LOC24 = (NimStringDesc*)0; LOC24 = typetostring_321017_3876443242(a0.t, ((Tprefereddesc321011) 0)); LOC23 = rawNewString(LOC24->Sup.len + 14); appendString(LOC23, ((NimStringDesc*) &T839829468_489)); appendString(LOC23, LOC24); internalerror_197113_155036129(LOC23); fmt0 = copyString(((NimStringDesc*) &T839829468_490)); } break; } memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = rdloc_539188_839829468(a0); LOC25[1] = 
rdloc_539188_839829468(b0); LOC25[2] = rdloc_539188_839829468(c0); result0 = HEX25_179905_2381377266(fmt0, LOC25, 3); } goto LA1; LA4: ; { Ttype293840* LOC27; initlocexpr_540283_839829468(p0, n0, (&a0)); LOC27 = (Ttype293840*)0; LOC27 = skiptypes_297099_850551059(a0.t, IL64(211106240964864)); switch ((*LOC27).kind) { case ((Ttypekind293244) 27): case ((Ttypekind293244) 48): { TY179507 LOC29; memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = rdloc_539188_839829468(a0); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_378), LOC29, 1); } break; case ((Ttypekind293244) 28): case ((Ttypekind293244) 24): { { NIM_BOOL LOC33; Ttype293840* LOC34; NIM_BOOL LOC36; TY533811 LOC40; LOC33 = (NIM_BOOL)0; LOC34 = (Ttype293840*)0; LOC34 = skiptypes_297099_850551059((*n0).typ, IL64(211106232576256)); LOC33 = ((*LOC34).kind == ((Ttypekind293244) 23)); if (!(LOC33)) goto LA35; LOC36 = (NIM_BOOL)0; LOC36 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC36) goto LA37; LOC36 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA37: ; LOC33 = !(LOC36); LA35: ; if (!LOC33) goto LA38; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = rdloc_539188_839829468(a0); LOC40[1] = lenfield_540305_839829468(p0); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_491), LOC40, 2); } goto LA31; LA38: ; { TY533811 LOC42; memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = rdloc_539188_839829468(a0); LOC42[1] = lenfield_540305_839829468(p0); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_379), LOC42, 2); } LA31: ; } break; case ((Ttypekind293244) 16): case ((Ttypekind293244) 4): { TY533811 LOC44; NI64 LOC45; memset((void*)LOC44, 0, sizeof(LOC44)); LOC44[0] = rdloc_539188_839829468(a0); LOC45 = (NI64)0; LOC45 = lengthord_321007_3876443242(a0.t); LOC44[1] = rope_179401_2381377266(LOC45); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_380), LOC44, 2); } break; case ((Ttypekind293244) 21): case 
((Ttypekind293244) 22): { Ttype293840* LOC47; LOC47 = (Ttype293840*)0; LOC47 = lastson_296377_850551059(a0.t); switch ((*LOC47).kind) { case ((Ttypekind293244) 28): case ((Ttypekind293244) 24): { TY533811 LOC49; memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = rdloc_539188_839829468(a0); LOC49[1] = lenfield_540305_839829468(p0); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_491), LOC49, 2); } break; case ((Ttypekind293244) 16): case ((Ttypekind293244) 4): { TY533811 LOC51; Ttype293840* LOC52; NI64 LOC53; memset((void*)LOC51, 0, sizeof(LOC51)); LOC51[0] = rdloc_539188_839829468(a0); LOC52 = (Ttype293840*)0; LOC52 = lastson_296377_850551059(a0.t); LOC53 = (NI64)0; LOC53 = lengthord_321007_3876443242(LOC52); LOC51[1] = rope_179401_2381377266(LOC53); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_380), LOC51, 2); } break; default: { NimStringDesc* LOC55; NimStringDesc* LOC56; LOC55 = (NimStringDesc*)0; LOC56 = (NimStringDesc*)0; LOC56 = typetostring_321017_3876443242(a0.t, ((Tprefereddesc321011) 0)); LOC55 = rawNewString(LOC56->Sup.len + 14); appendString(LOC55, ((NimStringDesc*) &T839829468_489)); appendString(LOC55, LOC56); internalerror_197113_155036129(LOC55); } break; } } break; default: { NimStringDesc* LOC58; NimStringDesc* LOC59; LOC58 = (NimStringDesc*)0; LOC59 = (NimStringDesc*)0; LOC59 = typetostring_321017_3876443242(a0.t, ((Tprefereddesc321011) 0)); LOC58 = rawNewString(LOC59->Sup.len + 14); appendString(LOC58, ((NimStringDesc*) &T839829468_489)); appendString(LOC58, LOC59); internalerror_197113_155036129(LOC58); } break; } } LA1: ; return result0; } N_NIMCALL(Ropeobj179006*, genarg_540787_839829468)(Tcproc530021* p0, Tnode293802* n_540790_839829468, Tsym293834* param0, Tnode293802* call0) { Ropeobj179006* result0; Tloc293816 a0; result0 = (Ropeobj179006*)0; memset((void*)(&a0), 0, sizeof(a0)); { if (!((*n_540790_839829468).kind == ((Tnodekind293020) 71))) goto LA3; result0 = genargstringtocstring_540776_839829468(p0, 
n_540790_839829468); } goto LA1; LA3: ; { Ttype293840* LOC6; Tnode293802* n0; LOC6 = (Ttype293840*)0; LOC6 = skiptypes_297099_850551059((*param0).typ, IL64(211106240964864)); if (!((*LOC6).kind == ((Ttypekind293244) 27) || (*LOC6).kind == ((Ttypekind293244) 48))) goto LA7; { if (!!(((*n_540790_839829468).kind == ((Tnodekind293020) 64)))) goto LA11; n0 = n_540790_839829468; } goto LA9; LA11: ; { n0 = (*n_540790_839829468).kindU.S6.sons->data[((NI) 0)]; } LA9: ; result0 = openarrayloc_540665_839829468(p0, n0); } goto LA1; LA7: ; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = ccgintroducedptr_534609_839829468(param0); if (!LOC15) goto LA16; initlocexpr_540283_839829468(p0, n_540790_839829468, (&a0)); result0 = addrloc_539204_839829468(a0); } goto LA1; LA16: ; { NIM_BOOL LOC19; NIM_BOOL LOC20; NIM_BOOL LOC21; Tnode293802* callee0; LOC19 = (NIM_BOOL)0; LOC20 = (NIM_BOOL)0; LOC21 = (NIM_BOOL)0; LOC21 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC21) goto LA22; LOC21 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA22: ; LOC20 = LOC21; if (!(LOC20)) goto LA23; LOC20 = ((*(*param0).typ).kind == ((Ttypekind293244) 23)); LA23: ; LOC19 = LOC20; if (!(LOC19)) goto LA24; LOC19 = ((*n_540790_839829468).kind == ((Tnodekind293020) 64)); LA24: ; if (!LOC19) goto LA25; initlocexprsingleuse_540289_839829468(p0, (*n_540790_839829468).kindU.S6.sons->data[((NI) 0)], (&a0)); callee0 = (*call0).kindU.S6.sons->data[((NI) 0)]; { NIM_BOOL LOC29; NIM_BOOL LOC30; LOC29 = (NIM_BOOL)0; LOC30 = (NIM_BOOL)0; LOC30 = ((*callee0).kind == ((Tnodekind293020) 3)); if (!(LOC30)) goto LA31; LOC30 = ((134283296 & (*(*callee0).kindU.S4.sym).flags) == 32); LA31: ; LOC29 = LOC30; if (!(LOC29)) goto LA32; LOC29 = !(((72 & (*(*callee0).kindU.S4.sym).loc.flags) == 0)); LA32: ; if (!LOC29) goto LA33; result0 = addrloc_539204_839829468(a0); } goto LA27; LA33: ; { result0 = rdloc_539188_839829468(a0); } LA27: ; } goto LA1; LA25: ; { 
initlocexprsingleuse_540289_839829468(p0, n_540790_839829468, (&a0)); result0 = rdloc_539188_839829468(a0); } LA1: ; return result0; } N_NIMCALL(Ropeobj179006*, genargnoparam_540938_839829468)(Tcproc530021* p0, Tnode293802* n0) { Ropeobj179006* result0; Tloc293816 a0; result0 = (Ropeobj179006*)0; memset((void*)(&a0), 0, sizeof(a0)); { if (!((*n0).kind == ((Tnodekind293020) 71))) goto LA3; result0 = genargstringtocstring_540776_839829468(p0, n0); } goto LA1; LA3: ; { initlocexprsingleuse_540289_839829468(p0, n0, (&a0)); result0 = rdloc_539188_839829468(a0); } LA1: ; return result0; } N_NIMCALL(Ropeobj179006*, getrawproctype_541459_839829468)(Tcproc530021* p0, Ttype293840* t0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = getclosuretype_536683_839829468((*p0).module, t0, ((Tclosuretypekind536679) 0)); return result0; } N_NIMCALL(NIM_BOOL, leftappearsonrightside_540329_839829468)(Tnode293802* le0, Tnode293802* ri0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { if (!!((le0 == NIM_NIL))) goto LA3; { NI i_540364_839829468; NI HEX3Atmp_540376_839829468; NI LOC6; NI res_540379_839829468; i_540364_839829468 = (NI)0; HEX3Atmp_540376_839829468 = (NI)0; LOC6 = (NI)0; LOC6 = len_294081_850551059(ri0); HEX3Atmp_540376_839829468 = (LOC6 - 1); res_540379_839829468 = ((NI) 1); { while (1) { Tnode293802* r0; if (!(res_540379_839829468 <= HEX3Atmp_540376_839829468)) goto LA8; i_540364_839829468 = res_540379_839829468; r0 = HEX5BHEX5D_294238_850551059(ri0, i_540364_839829468); { Tanalysisresult474003 LOC11; LOC11 = (Tanalysisresult474003)0; LOC11 = ispartof_474340_788060399(le0, r0); if (!!((LOC11 == ((Tanalysisresult474003) 0)))) goto LA12; result0 = NIM_TRUE; goto BeforeRet; } LA12: ; res_540379_839829468 += ((NI) 1); } LA8: ; } } } LA3: ; }BeforeRet: ; return result0; } static N_INLINE(NIM_BOOL, hasnoinit_540383_839829468)(Tnode293802* call0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = 
((*(*call0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 3)); if (!(LOC1)) goto LA2; LOC1 = (((*(*(*call0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag293184) 12))&31U)))!=0); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, resetloc_539350_839829468)(Tcproc530021* p0, Tloc293816* loc0) { NIM_BOOL containsgcref0; Ttype293840* typ0; { containsgcref0 = containsgarbagecollectedref_321117_3876443242((*loc0).t); typ0 = skiptypes_297099_850551059((*loc0).t, IL64(211106242013440)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = isimportedcpptype_534476_839829468(typ0); if (!LOC3) goto LA4; goto BeforeRet; } LA4: ; { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = iscomplexvaluetype_539317_839829468(typ0); if (!!(LOC8)) goto LA9; { Tloc293816 nilloc0; if (!containsgcref0) goto LA13; memset((void*)(&nilloc0), 0, sizeof(nilloc0)); initloc_533273_839829468((&nilloc0), ((Tlockind293808) 1), (*loc0).t, ((Tstorageloc293812) 2)); nilloc0.r = rope_179277_2381377266(((NimStringDesc*) &T839829468_174)); genrefassign_539311_839829468(p0, (*loc0), nilloc0, 8); } goto LA11; LA13: ; { TY179507 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_539188_839829468((*loc0)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_494), LOC16, 1); } LA11: ; } goto LA6; LA9: ; { { TY179507 LOC22; if (!(((*p0).options &(1U<<((NU)(((Toption170009) 6))&31U)))!=0)) goto LA20; memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = addrloc_539204_839829468((*loc0)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_495), LOC22, 1); } LA20: ; { TY533811 LOC27; if (!!(((*loc0).s == ((Tstorageloc293812) 2)))) goto LA25; memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = addrloc_539204_839829468((*loc0)); LOC27[1] = gentypeinfo_536941_839829468((*p0).module, (*loc0).t); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_496), LOC27, 2); 
genobjectinit_539242_839829468(p0, ((Tcprocsection530011) 2), (*loc0).t, (*loc0), NIM_TRUE); } goto LA23; LA25: ; { TY533811 LOC29; usestringh_533345_839829468((*p0).module); memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = addrloc_539204_839829468((*loc0)); LOC29[1] = rdloc_539188_839829468((*loc0)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_152), LOC29, 2); genobjectinit_539242_839829468(p0, ((Tcprocsection530011) 2), (*loc0).t, (*loc0), NIM_TRUE); } LA23: ; } LA6: ; }BeforeRet: ; } N_NIMCALL(Ropeobj179006*, addcomma_541464_839829468)(Ropeobj179006* r0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { if (!(r0 == NIM_NIL)) goto LA3; result0 = r0; } goto LA1; LA3: ; { TY534289 LOC6; Ropeobj179006* LOC7; memset((void*)LOC6, 0, sizeof(LOC6)); LOC7 = (Ropeobj179006*)0; LOC7 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_110), LOC6, 0); result0 = HEX26_179418_2381377266(r0, LOC7); } LA1: ; return result0; } N_NIMCALL(void, genclosurecall_541452_839829468)(Tcproc530021* p0, Tnode293802* le0, Tnode293802* ri0, Tloc293816* d0) { Tloc293816 op0; Ropeobj179006* pl0; Ttype293840* typ0; NI length0; Ropeobj179006* rawproc0; NimStringDesc* callpattern0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_540283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); pl0 = (Ropeobj179006*)0; typ0 = skiptypes_297099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_296351_850551059(ri0); { NI i_541613_839829468; NI HEX3Atmp_542214_839829468; NI res_542217_839829468; i_541613_839829468 = (NI)0; HEX3Atmp_542214_839829468 = (NI)0; HEX3Atmp_542214_839829468 = (NI)(length0 - ((NI) 1)); res_542217_839829468 = ((NI) 1); { while (1) { if (!(res_542217_839829468 <= HEX3Atmp_542214_839829468)) goto LA3; i_541613_839829468 = res_542217_839829468; { NI LOC6; Tnode293802* paramtype0; LOC6 = (NI)0; LOC6 = sonslen_296327_850551059(typ0); if (!(i_541613_839829468 < LOC6)) goto LA7; 
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_541613_839829468]; { NIM_BOOL LOC11; Ropeobj179006* LOC20; LOC11 = (NIM_BOOL)0; LOC11 = iscompiletimeonly_329706_3876443242((*paramtype0).typ); if (!!(LOC11)) goto LA12; { TY534289 LOC18; Ropeobj179006* LOC19; if (!!((pl0 == NIM_NIL))) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC19 = (Ropeobj179006*)0; LOC19 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0); add_179482_2381377266(&pl0, LOC19); } LA16: ; LOC20 = (Ropeobj179006*)0; LOC20 = genarg_540787_839829468(p0, (*ri0).kindU.S6.sons->data[i_541613_839829468], (*paramtype0).kindU.S4.sym, ri0); add_179482_2381377266(&pl0, LOC20); } LA12: ; } goto LA4; LA7: ; { Ropeobj179006* LOC28; { TY534289 LOC26; Ropeobj179006* LOC27; if (!!((pl0 == NIM_NIL))) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC27 = (Ropeobj179006*)0; LOC27 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0); add_179482_2381377266(&pl0, LOC27); } LA24: ; LOC28 = (Ropeobj179006*)0; LOC28 = genargnoparam_540938_839829468(p0, (*ri0).kindU.S6.sons->data[i_541613_839829468]); add_179482_2381377266(&pl0, LOC28); } LA4: ; res_542217_839829468 += ((NI) 1); } LA3: ; } } rawproc0 = getrawproctype_541459_839829468(p0, typ0); { if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag293431) 14))&31U)))!=0)) goto LA31; callpattern0 = copyString(((NimStringDesc*) &T839829468_492)); } goto LA29; LA31: ; { callpattern0 = copyString(((NimStringDesc*) &T839829468_493)); } LA29: ; { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA36; { NIM_BOOL LOC40; LOC40 = (NIM_BOOL)0; LOC40 = isinvalidreturntype_534548_839829468((*typ0).sons->data[((NI) 0)]); if (!LOC40) goto LA41; { NI LOC45; TY534289 LOC48; Ropeobj179006* LOC49; LOC45 = (NI)0; LOC45 = sonslen_296351_850551059(ri0); if (!(((NI) 1) < LOC45)) goto LA46; memset((void*)LOC48, 0, sizeof(LOC48)); LOC49 = (Ropeobj179006*)0; LOC49 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_110), LOC48, 0); 
add_179482_2381377266(&pl0, LOC49); } LA46: ; { NIM_BOOL LOC52; NIM_BOOL LOC54; Ropeobj179006* LOC67; NimStringDesc* LOC68; TY536235 LOC69; LOC52 = (NIM_BOOL)0; LOC52 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0); if (LOC52) goto LA53; LOC54 = (NIM_BOOL)0; LOC54 = leftappearsonrightside_540329_839829468(le0, ri0); LOC52 = !(LOC54); LA53: ; if (!LOC52) goto LA55; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA59; gettemp_538032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE); } goto LA57; LA59: ; { NIM_BOOL LOC62; NIM_BOOL LOC64; LOC62 = (NIM_BOOL)0; LOC62 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0)); if (!(LOC62)) goto LA63; LOC64 = (NIM_BOOL)0; LOC64 = hasnoinit_540383_839829468(ri0); LOC62 = !(LOC64); LA63: ; if (!LOC62) goto LA65; resetloc_539350_839829468(p0, d0); } goto LA57; LA65: ; LA57: ; LOC67 = (Ropeobj179006*)0; LOC67 = addrloc_539204_839829468((*d0)); add_179482_2381377266(&pl0, LOC67); LOC68 = (NimStringDesc*)0; LOC68 = rawNewString(callpattern0->Sup.len + 3); appendString(LOC68, callpattern0); appendString(LOC68, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC69, 0, sizeof(LOC69)); LOC69[0] = op0.r; LOC69[1] = pl0; LOC69[2] = addcomma_541464_839829468(pl0); LOC69[3] = rawproc0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), LOC68, LOC69, 4); } goto LA50; LA55: ; { Tloc293816 tmp0; Ropeobj179006* LOC71; NimStringDesc* LOC72; TY536235 LOC73; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_538032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE); LOC71 = (Ropeobj179006*)0; LOC71 = addrloc_539204_839829468(tmp0); add_179482_2381377266(&pl0, LOC71); LOC72 = (NimStringDesc*)0; LOC72 = rawNewString(callpattern0->Sup.len + 3); appendString(LOC72, callpattern0); appendString(LOC72, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC73, 0, sizeof(LOC73)); LOC73[0] = op0.r; LOC73[1] = pl0; LOC73[2] = addcomma_541464_839829468(pl0); LOC73[3] = rawproc0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), LOC72, LOC73, 4); 
genassignment_540264_839829468(p0, (*d0), tmp0, 0); } LA50: ; } goto LA38; LA41: ; { Tloc293816 list0; TY536235 LOC79; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA77; gettemp_538032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA77: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_533273_839829468((&list0), ((Tlockind293808) 9), (*d0).t, ((Tstorageloc293812) 0)); memset((void*)LOC79, 0, sizeof(LOC79)); LOC79[0] = op0.r; LOC79[1] = pl0; LOC79[2] = addcomma_541464_839829468(pl0); LOC79[3] = rawproc0; list0.r = HEX25_179905_2381377266(callpattern0, LOC79, 4); genassignment_540264_839829468(p0, (*d0), list0, 0); } LA38: ; } goto LA34; LA36: ; { NimStringDesc* LOC81; TY536235 LOC82; LOC81 = (NimStringDesc*)0; LOC81 = rawNewString(callpattern0->Sup.len + 3); appendString(LOC81, callpattern0); appendString(LOC81, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC82, 0, sizeof(LOC82)); LOC82[0] = op0.r; LOC82[1] = pl0; LOC82[2] = addcomma_541464_839829468(pl0); LOC82[3] = rawproc0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), LOC81, LOC82, 4); } LA34: ; } N_NIMCALL(Ropeobj179006*, genotherarg_540277_839829468)(Tcproc530021* p0, Tnode293802* ri0, NI i0, Ttype293840* typ0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { NI LOC3; Tnode293802* paramtype0; LOC3 = (NI)0; LOC3 = sonslen_296327_850551059(typ0); if (!(i0 < LOC3)) goto LA4; paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i0]; { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = iscompiletimeonly_329706_3876443242((*paramtype0).typ); if (!LOC8) goto LA9; result0 = NIM_NIL; } goto LA6; LA9: ; { NIM_BOOL LOC12; Tnode293802* LOC16; LOC12 = (NIM_BOOL)0; LOC12 = ((*(*typ0).sons->data[i0]).kind == ((Ttypekind293244) 23)); if (!(LOC12)) goto LA13; LOC12 = ((*(*ri0).kindU.S6.sons->data[i0]).kind == ((Tnodekind293020) 64)); LA13: ; if (!LOC12) goto LA14; LOC16 = (Tnode293802*)0; LOC16 = HEX5BHEX5D_294238_850551059((*ri0).kindU.S6.sons->data[i0], ((NI) 0)); result0 = 
genargnoparam_540938_839829468(p0, LOC16); } goto LA6; LA14: ; { result0 = genargnoparam_540938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]); } LA6: ; } goto LA1; LA4: ; { { if (!!((((*typ0).flags &(1U<<((NU)(((Ttypeflag293431) 0))&31U)))!=0))) goto LA21; localerror_197085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_501)); result0 = NIM_NIL; } goto LA19; LA21: ; { result0 = genargnoparam_540938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]); } LA19: ; } LA1: ; return result0; } N_NIMCALL(Tnode293802*, skipaddrderef_542433_839829468)(Tnode293802* node0) { Tnode293802* result0; Tnode293802* n0; NIM_BOOL isaddr0; { result0 = (Tnode293802*)0; n0 = node0; isaddr0 = NIM_FALSE; switch ((*n0).kind) { case ((Tnodekind293020) 63): case ((Tnodekind293020) 64): { n0 = (*n0).kindU.S6.sons->data[((NI) 0)]; isaddr0 = NIM_TRUE; } break; case ((Tnodekind293020) 47): case ((Tnodekind293020) 65): { n0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } break; default: { result0 = n0; goto BeforeRet; } break; } { if (!((*n0).kind == ((Tnodekind293020) 66))) goto LA6; n0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } LA6: ; { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; LOC10 = isaddr0; if (!(LOC10)) goto LA11; LOC10 = ((*n0).kind == ((Tnodekind293020) 47) || (*n0).kind == ((Tnodekind293020) 65)); LA11: ; if (!LOC10) goto LA12; result0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } goto LA8; LA12: ; { if (!((*n0).kind == ((Tnodekind293020) 63) || (*n0).kind == ((Tnodekind293020) 64))) goto LA15; result0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } goto LA8; LA15: ; { result0 = node0; } LA8: ; }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj179006*, genthisarg_542475_839829468)(Tcproc530021* p0, Tnode293802* ri_542478_839829468, NI i0, Ttype293840* typ0) { Ropeobj179006* result0; Tnode293802* ri0; Ttype293840* t0; result0 = (Ropeobj179006*)0; { NI LOC3; NimStringDesc* LOC6; LOC3 = (NI)0; LOC3 = sonslen_296327_850551059(typ0); if (!!((i0 < LOC3))) goto LA4; LOC6 = (NimStringDesc*)0; LOC6 = 
HEX24_197185_1689653243(T839829468_503); internalerror_197113_155036129(LOC6); } LA4: ; ri0 = HEX5BHEX5D_294238_850551059(ri_542478_839829468, i0); { while (1) { if (!((*ri0).kind == ((Tnodekind293020) 66))) goto LA8; ri0 = HEX5BHEX5D_294238_850551059(ri0, ((NI) 0)); } LA8: ; } t0 = skiptypes_297099_850551059((*typ0).sons->data[i0], 2048); { Tnode293802* x0; if (!((*t0).kind == ((Ttypekind293244) 23))) goto LA11; { if (!((*ri0).kind == ((Tnodekind293020) 64))) goto LA15; x0 = HEX5BHEX5D_294238_850551059(ri0, ((NI) 0)); } goto LA13; LA15: ; { x0 = ri0; } LA13: ; { if (!((*(*x0).typ).kind == ((Ttypekind293244) 21))) goto LA20; result0 = genargnoparam_540938_839829468(p0, x0); add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_504)); } goto LA18; LA20: ; { NIM_BOOL LOC23; Tnode293802* LOC25; Tnode293802* LOC28; LOC23 = (NIM_BOOL)0; LOC23 = ((*x0).kind == ((Tnodekind293020) 65) || (*x0).kind == ((Tnodekind293020) 47)); if (!(LOC23)) goto LA24; LOC25 = (Tnode293802*)0; LOC25 = HEX5BHEX5D_294238_850551059(x0, ((NI) 0)); LOC23 = ((*(*LOC25).typ).kind == ((Ttypekind293244) 21)); LA24: ; if (!LOC23) goto LA26; LOC28 = (Tnode293802*)0; LOC28 = HEX5BHEX5D_294238_850551059(x0, ((NI) 0)); result0 = genargnoparam_540938_839829468(p0, LOC28); add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_504)); } goto LA18; LA26: ; { result0 = genargnoparam_540938_839829468(p0, x0); add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_257)); } LA18: ; } goto LA9; LA11: ; { if (!((*t0).kind == ((Ttypekind293244) 21))) goto LA31; { Tnode293802* LOC37; if (!((*ri0).kind == ((Tnodekind293020) 63) || (*ri0).kind == ((Tnodekind293020) 64))) goto LA35; LOC37 = (Tnode293802*)0; LOC37 = HEX5BHEX5D_294238_850551059(ri0, ((NI) 0)); result0 = genargnoparam_540938_839829468(p0, LOC37); add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_257)); } goto LA33; LA35: ; { result0 = genargnoparam_540938_839829468(p0, ri0); add_179487_2381377266(&result0, 
((NimStringDesc*) &T839829468_504)); } LA33: ; } goto LA9; LA31: ; { ri0 = skipaddrderef_542433_839829468(ri0); { if (!((*ri0).kind == ((Tnodekind293020) 63) || (*ri0).kind == ((Tnodekind293020) 64))) goto LA42; ri0 = HEX5BHEX5D_294238_850551059(ri0, ((NI) 0)); } LA42: ; result0 = genargnoparam_540938_839829468(p0, ri0); add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_257)); } LA9: ; return result0; } N_NIMCALL(Ropeobj179006*, genpatterncall_542699_839829468)(Tcproc530021* p0, Tnode293802* ri_542702_839829468, NimStringDesc* pat0, Ttype293840* typ_542704_839829468) { Ropeobj179006* result0; NI i0; NI j0; result0 = (Ropeobj179006*)0; i0 = ((NI) 0); j0 = ((NI) 1); { while (1) { if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA2; switch (((NU8)(pat0->data[i0]))) { case 64: { { NI LOC6; Ropeobj179006* LOC9; LOC6 = (NI)0; LOC6 = len_294081_850551059(ri_542702_839829468); if (!(j0 < LOC6)) goto LA7; LOC9 = (Ropeobj179006*)0; LOC9 = genotherarg_540277_839829468(p0, ri_542702_839829468, j0, typ_542704_839829468); add_179482_2381377266(&result0, LOC9); { NI k_542728_839829468; NI HEX3Atmp_542904_839829468; NI HEX3Atmp_542905_839829468; NI LOC11; NI res_542908_839829468; k_542728_839829468 = (NI)0; HEX3Atmp_542904_839829468 = (NI)0; HEX3Atmp_542905_839829468 = (NI)0; HEX3Atmp_542904_839829468 = (NI)(j0 + ((NI) 1)); LOC11 = (NI)0; LOC11 = len_294081_850551059(ri_542702_839829468); HEX3Atmp_542905_839829468 = (LOC11 - 1); res_542908_839829468 = HEX3Atmp_542904_839829468; { while (1) { TY534289 LOC14; Ropeobj179006* LOC15; Ropeobj179006* LOC16; if (!(res_542908_839829468 <= HEX3Atmp_542905_839829468)) goto LA13; k_542728_839829468 = res_542908_839829468; memset((void*)LOC14, 0, sizeof(LOC14)); LOC15 = (Ropeobj179006*)0; LOC15 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_110), LOC14, 0); add_179482_2381377266(&result0, LOC15); LOC16 = (Ropeobj179006*)0; LOC16 = genotherarg_540277_839829468(p0, ri_542702_839829468, k_542728_839829468, typ_542704_839829468); 
add_179482_2381377266(&result0, LOC16); res_542908_839829468 += ((NI) 1); } LA13: ; } } } LA7: ; i0 += ((NI) 1); } break; case 35: { { Tnode293802* ri0; if (!(((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(43)) || ((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(64)))) goto LA20; ri0 = HEX5BHEX5D_294238_850551059(ri_542702_839829468, j0); { Ttype293840* typ0; TY534289 LOC31; Ropeobj179006* LOC32; TY534289 LOC46; Ropeobj179006* LOC47; if (!((*ri0).kind == ((Tnodekind293020) 27) || (*ri0).kind == ((Tnodekind293020) 29) || (*ri0).kind == ((Tnodekind293020) 30) || (*ri0).kind == ((Tnodekind293020) 31) || (*ri0).kind == ((Tnodekind293020) 26) || (*ri0).kind == ((Tnodekind293020) 28) || (*ri0).kind == ((Tnodekind293020) 32))) goto LA24; typ0 = skiptypes_297099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { Ropeobj179006* LOC30; if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(43))) goto LA28; LOC30 = (Ropeobj179006*)0; LOC30 = genargnoparam_540938_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)]); add_179482_2381377266(&result0, LOC30); } LA28: ; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ropeobj179006*)0; LOC32 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_118), LOC31, 0); add_179482_2381377266(&result0, LOC32); { NI LOC35; Ropeobj179006* LOC38; LOC35 = (NI)0; LOC35 = len_294081_850551059(ri0); if (!(((NI) 1) < LOC35)) goto LA36; LOC38 = (Ropeobj179006*)0; LOC38 = genotherarg_540277_839829468(p0, ri0, ((NI) 1), typ0); add_179482_2381377266(&result0, LOC38); } LA36: ; { NI k_542793_839829468; NI HEX3Atmp_542915_839829468; NI HEX3Atmp_542916_839829468; NI LOC40; NI res_542919_839829468; k_542793_839829468 = (NI)0; HEX3Atmp_542915_839829468 = (NI)0; HEX3Atmp_542916_839829468 = (NI)0; HEX3Atmp_542915_839829468 = (NI)(j0 + ((NI) 1)); LOC40 = (NI)0; LOC40 = len_294081_850551059(ri0); HEX3Atmp_542916_839829468 = (LOC40 - 1); res_542919_839829468 = HEX3Atmp_542915_839829468; { while (1) { TY534289 LOC43; 
Ropeobj179006* LOC44; Ropeobj179006* LOC45; if (!(res_542919_839829468 <= HEX3Atmp_542916_839829468)) goto LA42; k_542793_839829468 = res_542919_839829468; memset((void*)LOC43, 0, sizeof(LOC43)); LOC44 = (Ropeobj179006*)0; LOC44 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_110), LOC43, 0); add_179482_2381377266(&result0, LOC44); LOC45 = (Ropeobj179006*)0; LOC45 = genotherarg_540277_839829468(p0, ri0, k_542793_839829468, typ0); add_179482_2381377266(&result0, LOC45); res_542919_839829468 += ((NI) 1); } LA42: ; } } memset((void*)LOC46, 0, sizeof(LOC46)); LOC47 = (Ropeobj179006*)0; LOC47 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_117), LOC46, 0); add_179482_2381377266(&result0, LOC47); } goto LA22; LA24: ; { localerror_197085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_502)); } LA22: ; i0 += ((NI) 1); } goto LA18; LA20: ; { Ropeobj179006* LOC52; if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(46))) goto LA50; LOC52 = (Ropeobj179006*)0; LOC52 = genthisarg_542475_839829468(p0, ri_542702_839829468, j0, typ_542704_839829468); add_179482_2381377266(&result0, LOC52); i0 += ((NI) 1); } goto LA18; LA50: ; { Tnode293802* arg0; Ropeobj179006* LOC58; if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(91))) goto LA54; arg0 = skipaddrderef_542433_839829468((*ri_542702_839829468).kindU.S6.sons->data[j0]); { while (1) { if (!((*arg0).kind == ((Tnodekind293020) 63) || (*arg0).kind == ((Tnodekind293020) 64) || (*arg0).kind == ((Tnodekind293020) 66))) goto LA57; arg0 = HEX5BHEX5D_294238_850551059(arg0, ((NI) 0)); } LA57: ; } LOC58 = (Ropeobj179006*)0; LOC58 = genargnoparam_540938_839829468(p0, arg0); add_179482_2381377266(&result0, LOC58); } goto LA18; LA54: ; { Ropeobj179006* LOC60; LOC60 = (Ropeobj179006*)0; LOC60 = genotherarg_540277_839829468(p0, ri_542702_839829468, j0, typ_542704_839829468); add_179482_2381377266(&result0, LOC60); } LA18: ; j0 += ((NI) 1); i0 += ((NI) 1); } break; case 39: { NI idx0; NI stars0; idx0 = (NI)0; stars0 = 
(NI)0; { NIM_BOOL LOC64; Ttype293840* t0; LOC64 = (NIM_BOOL)0; LOC64 = scancppgenericslot_535827_839829468(pat0, (&i0), (&idx0), (&stars0)); if (!LOC64) goto LA65; t0 = resolvestarsincpptype_535891_839829468(typ_542704_839829468, idx0, stars0); { TY534289 LOC71; Ropeobj179006* LOC72; if (!(t0 == NIM_NIL)) goto LA69; memset((void*)LOC71, 0, sizeof(LOC71)); LOC72 = (Ropeobj179006*)0; LOC72 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_26), LOC71, 0); add_179482_2381377266(&result0, LOC72); } goto LA67; LA69: ; { Ropeobj179006* LOC74; LOC74 = (Ropeobj179006*)0; LOC74 = gettypedesc_536671_839829468((*p0).module, t0); add_179482_2381377266(&result0, LOC74); } LA67: ; } LA65: ; } break; default: { NI start0; start0 = i0; { while (1) { if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA77; { if (!!((((NU8)(pat0->data[i0])) == ((NU8)(64)) || ((NU8)(pat0->data[i0])) == ((NU8)(35)) || ((NU8)(pat0->data[i0])) == ((NU8)(39))))) goto LA80; i0 += ((NI) 1); } goto LA78; LA80: ; { goto LA76; } LA78: ; } LA77: ; } LA76: ; { NimStringDesc* LOC87; if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA85; LOC87 = (NimStringDesc*)0; LOC87 = copyStrLast(pat0, start0, (NI)(i0 - ((NI) 1))); add_179487_2381377266(&result0, LOC87); } LA85: ; } break; } } LA2: ; } return result0; } N_NIMCALL(void, fixupcall_540410_839829468)(Tcproc530021* p0, Tnode293802* le0, Tnode293802* ri0, Tloc293816* d0, Ropeobj179006* callee0, Ropeobj179006* params0) { Ropeobj179006* pl0; TY534289 LOC1; Ropeobj179006* LOC2; Ropeobj179006* LOC3; Ttype293840* typ0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (Ropeobj179006*)0; LOC2 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_118), LOC1, 0); LOC3 = (Ropeobj179006*)0; LOC3 = HEX26_179418_2381377266(callee0, LOC2); pl0 = HEX26_179418_2381377266(LOC3, params0); typ0 = skiptypes_297099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA6; { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; 
LOC10 = isinvalidreturntype_534548_839829468((*typ0).sons->data[((NI) 0)]); if (!LOC10) goto LA11; { TY534289 LOC17; Ropeobj179006* LOC18; if (!!((params0 == NIM_NIL))) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC18 = (Ropeobj179006*)0; LOC18 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_110), LOC17, 0); add_179482_2381377266(&pl0, LOC18); } LA15: ; { NIM_BOOL LOC21; NIM_BOOL LOC23; Ropeobj179006* LOC36; TY534289 LOC37; Ropeobj179006* LOC38; LOC21 = (NIM_BOOL)0; LOC21 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0); if (LOC21) goto LA22; LOC23 = (NIM_BOOL)0; LOC23 = leftappearsonrightside_540329_839829468(le0, ri0); LOC21 = !(LOC23); LA22: ; if (!LOC21) goto LA24; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA28; gettemp_538032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE); } goto LA26; LA28: ; { NIM_BOOL LOC31; NIM_BOOL LOC33; LOC31 = (NIM_BOOL)0; LOC31 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0)); if (!(LOC31)) goto LA32; LOC33 = (NIM_BOOL)0; LOC33 = hasnoinit_540383_839829468(ri0); LOC31 = !(LOC33); LA32: ; if (!LOC31) goto LA34; resetloc_539350_839829468(p0, d0); } goto LA26; LA34: ; LA26: ; LOC36 = (Ropeobj179006*)0; LOC36 = addrloc_539204_839829468((*d0)); add_179482_2381377266(&pl0, LOC36); memset((void*)LOC37, 0, sizeof(LOC37)); LOC38 = (Ropeobj179006*)0; LOC38 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_505), LOC37, 0); add_179482_2381377266(&pl0, LOC38); line_533690_839829468(p0, ((Tcprocsection530011) 2), pl0); } goto LA19; LA24: ; { Tloc293816 tmp0; Ropeobj179006* LOC40; TY534289 LOC41; Ropeobj179006* LOC42; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_538032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE); LOC40 = (Ropeobj179006*)0; LOC40 = addrloc_539204_839829468(tmp0); add_179482_2381377266(&pl0, LOC40); memset((void*)LOC41, 0, sizeof(LOC41)); LOC42 = (Ropeobj179006*)0; LOC42 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_505), LOC41, 0); add_179482_2381377266(&pl0, LOC42); 
line_533690_839829468(p0, ((Tcprocsection530011) 2), pl0); genassignment_540264_839829468(p0, (*d0), tmp0, 0); } LA19: ; } goto LA8; LA11: ; { TY534289 LOC44; Ropeobj179006* LOC45; memset((void*)LOC44, 0, sizeof(LOC44)); LOC45 = (Ropeobj179006*)0; LOC45 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_117), LOC44, 0); add_179482_2381377266(&pl0, LOC45); { NIM_BOOL LOC48; NIM_BOOL LOC49; LOC48 = (NIM_BOOL)0; LOC49 = (NIM_BOOL)0; LOC49 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC49) goto LA50; LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA50: ; LOC48 = LOC49; if (!(LOC48)) goto LA51; LOC48 = (((*d0).flags &(1U<<((NU)(((Tlocflag293810) 8))&15U)))!=0); LA51: ; if (!LOC48) goto LA52; (*d0).k = ((Tlockind293808) 9); unsureAsgnRef((void**) (&(*d0).r), pl0); (*d0).flags &= ~(((NU16)1) << ((((Tlocflag293810) 8)) % (sizeof(NU16)*8))); } goto LA46; LA52: ; { Tloc293816 list0; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA57; gettemp_538032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA57: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_533273_839829468((&list0), ((Tlockind293808) 9), (*d0).t, ((Tstorageloc293812) 0)); list0.r = pl0; genassignment_540264_839829468(p0, (*d0), list0, 0); } LA46: ; } LA8: ; } goto LA4; LA6: ; { TY534289 LOC60; Ropeobj179006* LOC61; memset((void*)LOC60, 0, sizeof(LOC60)); LOC61 = (Ropeobj179006*)0; LOC61 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_505), LOC60, 0); add_179482_2381377266(&pl0, LOC61); line_533690_839829468(p0, ((Tcprocsection530011) 2), pl0); } LA4: ; } N_NIMCALL(void, geninfixcall_542929_839829468)(Tcproc530021* p0, Tnode293802* le0, Tnode293802* ri0, Tloc293816* d0) { Tloc293816 op0; Ttype293840* typ_542940_839829468; NI length0; NimStringDesc* pat0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_540283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); typ_542940_839829468 = 
skiptypes_297099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_296351_850551059(ri0); pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data; { NimStringDesc* LOC5; if (!!(!((pat0 == NIM_NIL)))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = HEX24_197185_1689653243(T839829468_498); internalerror_197113_155036129(LOC5); } LA3: ; { NIM_BOOL LOC8; Ropeobj179006* pl0; Ttype293840* typ0; LOC8 = (NIM_BOOL)0; LOC8 = contains_110056_4286263276(pat0, T839829468_500); if (!LOC8) goto LA9; pl0 = genpatterncall_542699_839829468(p0, ri0, pat0, typ_542940_839829468); typ0 = skiptypes_297099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA13; { NIM_BOOL LOC17; NIM_BOOL LOC18; LOC17 = (NIM_BOOL)0; LOC18 = (NIM_BOOL)0; LOC18 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC18) goto LA19; LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA19: ; LOC17 = LOC18; if (!(LOC17)) goto LA20; LOC17 = (((*d0).flags &(1U<<((NU)(((Tlocflag293810) 8))&15U)))!=0); LA20: ; if (!LOC17) goto LA21; (*d0).k = ((Tlockind293808) 9); unsureAsgnRef((void**) (&(*d0).r), pl0); (*d0).flags &= ~(((NU16)1) << ((((Tlocflag293810) 8)) % (sizeof(NU16)*8))); } goto LA15; LA21: ; { Tloc293816 list0; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA26; gettemp_538032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA26: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_533273_839829468((&list0), ((Tlockind293808) 9), (*d0).t, ((Tstorageloc293812) 0)); list0.r = pl0; genassignment_540264_839829468(p0, (*d0), list0, 0); } LA15: ; } goto LA11; LA13: ; { TY534289 LOC29; Ropeobj179006* LOC30; memset((void*)LOC29, 0, sizeof(LOC29)); LOC30 = (Ropeobj179006*)0; LOC30 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_497), LOC29, 0); add_179482_2381377266(&pl0, LOC30); line_533690_839829468(p0, 
((Tcprocsection530011) 2), pl0); } LA11: ; } goto LA6; LA9: ; { Ropeobj179006* pl0; Ropeobj179006* params0; pl0 = NIM_NIL; { NI LOC34; Ropeobj179006* LOC37; LOC34 = (NI)0; LOC34 = len_294081_850551059(ri0); if (!(((NI) 1) < LOC34)) goto LA35; LOC37 = (Ropeobj179006*)0; LOC37 = genthisarg_542475_839829468(p0, ri0, ((NI) 1), typ_542940_839829468); add_179482_2381377266(&pl0, LOC37); } LA35: ; add_179482_2381377266(&pl0, op0.r); params0 = (Ropeobj179006*)0; { NI i_543425_839829468; NI HEX3Atmp_543609_839829468; NI res_543612_839829468; i_543425_839829468 = (NI)0; HEX3Atmp_543609_839829468 = (NI)0; HEX3Atmp_543609_839829468 = (NI)(length0 - ((NI) 1)); res_543612_839829468 = ((NI) 2); { while (1) { Ropeobj179006* LOC47; if (!(res_543612_839829468 <= HEX3Atmp_543609_839829468)) goto LA40; i_543425_839829468 = res_543612_839829468; { TY534289 LOC45; Ropeobj179006* LOC46; if (!!((params0 == NIM_NIL))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (Ropeobj179006*)0; LOC46 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_110), LOC45, 0); add_179482_2381377266(&params0, LOC46); } LA43: ; LOC47 = (Ropeobj179006*)0; LOC47 = genotherarg_540277_839829468(p0, ri0, i_543425_839829468, typ_542940_839829468); add_179482_2381377266(&params0, LOC47); res_543612_839829468 += ((NI) 1); } LA40: ; } } fixupcall_540410_839829468(p0, le0, ri0, d0, pl0, params0); } LA6: ; } N_NIMCALL(void, gennamedparamcall_543616_839829468)(Tcproc530021* p0, Tnode293802* ri0, Tloc293816* d0) { Tloc293816 op0; Ropeobj179006* pl0; TY534289 LOC1; Ttype293840* typ0; NI length0; NimStringDesc* pat0; NI start0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_540283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); memset((void*)LOC1, 0, sizeof(LOC1)); pl0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_506), LOC1, 0); typ0 = skiptypes_297099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_296351_850551059(ri0); pat0 = 
(*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data; { NimStringDesc* LOC6; if (!!(!((pat0 == NIM_NIL)))) goto LA4; LOC6 = (NimStringDesc*)0; LOC6 = HEX24_197185_1689653243(T839829468_507); internalerror_197113_155036129(LOC6); } LA4: ; start0 = ((NI) 3); { NIM_BOOL LOC9; LOC9 = (NIM_BOOL)0; LOC9 = contains_110046_4286263276(pat0, 32); if (!LOC9) goto LA10; start0 = ((NI) 1); add_179482_2381377266(&pl0, op0.r); { TY534289 LOC16; Ropeobj179006* LOC17; Ropeobj179006* LOC18; if (!(((NI) 1) < length0)) goto LA14; memset((void*)LOC16, 0, sizeof(LOC16)); LOC17 = (Ropeobj179006*)0; LOC17 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_244), LOC16, 0); add_179482_2381377266(&pl0, LOC17); LOC18 = (Ropeobj179006*)0; LOC18 = genarg_540787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0); add_179482_2381377266(&pl0, LOC18); start0 = ((NI) 2); } LA14: ; } goto LA7; LA10: ; { { Ropeobj179006* LOC24; TY534289 LOC25; Ropeobj179006* LOC26; if (!(((NI) 1) < length0)) goto LA22; LOC24 = (Ropeobj179006*)0; LOC24 = genarg_540787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0); add_179482_2381377266(&pl0, LOC24); memset((void*)LOC25, 0, sizeof(LOC25)); LOC26 = (Ropeobj179006*)0; LOC26 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_111), LOC25, 0); add_179482_2381377266(&pl0, LOC26); } LA22: ; add_179482_2381377266(&pl0, op0.r); { TY534289 LOC31; Ropeobj179006* LOC32; Ropeobj179006* LOC33; if (!(((NI) 2) < length0)) goto LA29; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ropeobj179006*)0; LOC32 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_244), LOC31, 0); add_179482_2381377266(&pl0, LOC32); LOC33 = (Ropeobj179006*)0; LOC33 = genarg_540787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 2)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 2)]).kindU.S4.sym, ri0); add_179482_2381377266(&pl0, LOC33); } LA29: ; } 
LA7: ; { NI i_544051_839829468; NI HEX3Atmp_544617_839829468; NI res_544620_839829468; i_544051_839829468 = (NI)0; HEX3Atmp_544617_839829468 = (NI)0; HEX3Atmp_544617_839829468 = (NI)(length0 - ((NI) 1)); res_544620_839829468 = start0; { while (1) { Tsym293834* param0; TY534289 LOC42; Ropeobj179006* LOC43; TY534289 LOC44; Ropeobj179006* LOC45; Ropeobj179006* LOC46; if (!(res_544620_839829468 <= HEX3Atmp_544617_839829468)) goto LA36; i_544051_839829468 = res_544620_839829468; { NI LOC39; LOC39 = (NI)0; LOC39 = sonslen_296327_850551059(typ0); if (!(LOC39 <= i_544051_839829468)) goto LA40; internalerror_197100_155036129((*ri0).info, ((NimStringDesc*) &T839829468_508)); } LA40: ; param0 = (*(*(*typ0).n).kindU.S6.sons->data[i_544051_839829468]).kindU.S4.sym; memset((void*)LOC42, 0, sizeof(LOC42)); LOC43 = (Ropeobj179006*)0; LOC43 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_111), LOC42, 0); add_179482_2381377266(&pl0, LOC43); add_179487_2381377266(&pl0, (*(*param0).name).s); memset((void*)LOC44, 0, sizeof(LOC44)); LOC45 = (Ropeobj179006*)0; LOC45 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_244), LOC44, 0); add_179482_2381377266(&pl0, LOC45); LOC46 = (Ropeobj179006*)0; LOC46 = genarg_540787_839829468(p0, (*ri0).kindU.S6.sons->data[i_544051_839829468], param0, ri0); add_179482_2381377266(&pl0, LOC46); res_544620_839829468 += ((NI) 1); } LA36: ; } } { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA49; { NIM_BOOL LOC53; LOC53 = (NIM_BOOL)0; LOC53 = isinvalidreturntype_534548_839829468((*typ0).sons->data[((NI) 0)]); if (!LOC53) goto LA54; { NI LOC58; TY534289 LOC61; Ropeobj179006* LOC62; LOC58 = (NI)0; LOC58 = sonslen_296351_850551059(ri0); if (!(((NI) 1) < LOC58)) goto LA59; memset((void*)LOC61, 0, sizeof(LOC61)); LOC62 = (Ropeobj179006*)0; LOC62 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_111), LOC61, 0); add_179482_2381377266(&pl0, LOC62); } LA59: ; { TY534289 LOC71; Ropeobj179006* LOC72; Ropeobj179006* LOC73; TY534289 LOC74; 
Ropeobj179006* LOC75; if (!((3 &(1U<<((NU)((*d0).k)&15U)))!=0)) goto LA65; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA69; gettemp_538032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE); } LA69: ; memset((void*)LOC71, 0, sizeof(LOC71)); LOC72 = (Ropeobj179006*)0; LOC72 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_509), LOC71, 0); add_179482_2381377266(&pl0, LOC72); LOC73 = (Ropeobj179006*)0; LOC73 = addrloc_539204_839829468((*d0)); add_179482_2381377266(&pl0, LOC73); memset((void*)LOC74, 0, sizeof(LOC74)); LOC75 = (Ropeobj179006*)0; LOC75 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_510), LOC74, 0); add_179482_2381377266(&pl0, LOC75); line_533690_839829468(p0, ((Tcprocsection530011) 2), pl0); } goto LA63; LA65: ; { Tloc293816 tmp0; Ropeobj179006* LOC77; TY534289 LOC78; Ropeobj179006* LOC79; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_538032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE); LOC77 = (Ropeobj179006*)0; LOC77 = addrloc_539204_839829468(tmp0); add_179482_2381377266(&pl0, LOC77); memset((void*)LOC78, 0, sizeof(LOC78)); LOC79 = (Ropeobj179006*)0; LOC79 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_510), LOC78, 0); add_179482_2381377266(&pl0, LOC79); line_533690_839829468(p0, ((Tcprocsection530011) 2), pl0); genassignment_540264_839829468(p0, (*d0), tmp0, 0); } LA63: ; } goto LA51; LA54: ; { TY534289 LOC81; Ropeobj179006* LOC82; Tloc293816 list0; memset((void*)LOC81, 0, sizeof(LOC81)); LOC82 = (Ropeobj179006*)0; LOC82 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_511), LOC81, 0); add_179482_2381377266(&pl0, LOC82); { if (!((*d0).k == ((Tlockind293808) 0))) goto LA85; gettemp_538032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA85: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_533273_839829468((&list0), ((Tlockind293808) 9), NIM_NIL, ((Tstorageloc293812) 0)); list0.r = pl0; genassignment_540264_839829468(p0, (*d0), list0, 0); } LA51: ; } goto LA47; LA49: ; { 
TY534289 LOC88; Ropeobj179006* LOC89; memset((void*)LOC88, 0, sizeof(LOC88)); LOC89 = (Ropeobj179006*)0; LOC89 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_510), LOC88, 0); add_179482_2381377266(&pl0, LOC89); line_533690_839829468(p0, ((Tcprocsection530011) 2), pl0); } LA47: ; } N_NIMCALL(void, genprefixcall_540960_839829468)(Tcproc530021* p0, Tnode293802* le0, Tnode293802* ri0, Tloc293816* d0) { Tloc293816 op0; Ropeobj179006* params0; Ttype293840* typ0; NI length0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_540283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); params0 = (Ropeobj179006*)0; typ0 = skiptypes_297099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_296351_850551059(ri0); { NI i_541213_839829468; NI HEX3Atmp_541445_839829468; NI res_541448_839829468; i_541213_839829468 = (NI)0; HEX3Atmp_541445_839829468 = (NI)0; HEX3Atmp_541445_839829468 = (NI)(length0 - ((NI) 1)); res_541448_839829468 = ((NI) 1); { while (1) { if (!(res_541448_839829468 <= HEX3Atmp_541445_839829468)) goto LA3; i_541213_839829468 = res_541448_839829468; { NI LOC6; Tnode293802* paramtype0; LOC6 = (NI)0; LOC6 = sonslen_296327_850551059(typ0); if (!(i_541213_839829468 < LOC6)) goto LA7; paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_541213_839829468]; { NIM_BOOL LOC11; Ropeobj179006* LOC20; LOC11 = (NIM_BOOL)0; LOC11 = iscompiletimeonly_329706_3876443242((*paramtype0).typ); if (!!(LOC11)) goto LA12; { TY534289 LOC18; Ropeobj179006* LOC19; if (!!((params0 == NIM_NIL))) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC19 = (Ropeobj179006*)0; LOC19 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0); add_179482_2381377266(&params0, LOC19); } LA16: ; LOC20 = (Ropeobj179006*)0; LOC20 = genarg_540787_839829468(p0, (*ri0).kindU.S6.sons->data[i_541213_839829468], (*paramtype0).kindU.S4.sym, ri0); add_179482_2381377266(&params0, LOC20); } LA12: ; } goto LA4; LA7: ; { Ropeobj179006* LOC28; { 
TY534289 LOC26; Ropeobj179006* LOC27; if (!!((params0 == NIM_NIL))) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC27 = (Ropeobj179006*)0; LOC27 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0); add_179482_2381377266(&params0, LOC27); } LA24: ; LOC28 = (Ropeobj179006*)0; LOC28 = genargnoparam_540938_839829468(p0, (*ri0).kindU.S6.sons->data[i_541213_839829468]); add_179482_2381377266(&params0, LOC28); } LA4: ; res_541448_839829468 += ((NI) 1); } LA3: ; } } fixupcall_540410_839829468(p0, le0, ri0, d0, op0.r, params0); } static N_INLINE(void, poststmtactions_533942_839829468)(Tcproc530021* p0) { Ropeobj179006** LOC1; LOC1 = (Ropeobj179006**)0; LOC1 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); add_179482_2381377266(LOC1, (*(*p0).module).injectstmt); } N_NIMCALL(void, gencall_544632_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { { Ttype293840* LOC3; LOC3 = (Ttype293840*)0; LOC3 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, 2048); if (!((*LOC3).callconv == ((Tcallingconvention293002) 8))) goto LA4; genclosurecall_541452_839829468(p0, NIM_NIL, e0, d0); } goto LA1; LA4: ; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 3)); if (!(LOC7)) goto LA8; LOC7 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA8: ; if (!LOC7) goto LA9; geninfixcall_542929_839829468(p0, NIM_NIL, e0, d0); } goto LA1; LA9: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 3)); if (!(LOC12)) goto LA13; LOC12 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag293184) 28))&31U)))!=0); LA13: ; if (!LOC12) goto LA14; gennamedparamcall_543616_839829468(p0, e0, d0); } goto LA1; LA14: ; { genprefixcall_540960_839829468(p0, NIM_NIL, e0, d0); } LA1: ; poststmtactions_533942_839829468(p0); } N_NIMCALL(void, 
genreset_555731_839829468)(Tcproc530021* p0, Tnode293802* n0) { Tloc293816 a0; TY533811 LOC1; Ttype293840* LOC2; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = addrloc_539204_839829468(a0); LOC2 = (Ttype293840*)0; LOC2 = skiptypes_297099_850551059(a0.t, IL64(211106242013440)); LOC1[1] = gentypeinfo_536941_839829468((*p0).module, LOC2); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_496), LOC1, 2); } N_NIMCALL(void, genecho_555369_839829468)(Tcproc530021* p0, Tnode293802* n0) { NIM_BOOL LOC6; Ropeobj179006* args0; Tloc293816 a0; TY533811 LOC18; NimStringDesc* LOC19; NI LOC20; NimStringDesc* LOC21; TY534289 LOC22; { NimStringDesc* LOC5; if (!!(((*n0).kind == ((Tnodekind293020) 41)))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = HEX24_197185_1689653243(T839829468_512); internalerror_197113_155036129(LOC5); } LA3: ; LOC6 = (NIM_BOOL)0; LOC6 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_513)); args0 = NIM_NIL; memset((void*)(&a0), 0, sizeof(a0)); { NI i_555404_839829468; NI HEX3Atmp_555431_839829468; NI LOC8; NI res_555434_839829468; i_555404_839829468 = (NI)0; HEX3Atmp_555431_839829468 = (NI)0; LOC8 = (NI)0; LOC8 = len_294081_850551059(n0); HEX3Atmp_555431_839829468 = (NI)(LOC8 - ((NI) 1)); res_555434_839829468 = ((NI) 0); { while (1) { if (!(res_555434_839829468 <= HEX3Atmp_555431_839829468)) goto LA10; i_555404_839829468 = res_555434_839829468; { Tnode293802* LOC13; LOC13 = (Tnode293802*)0; LOC13 = skipconv_329882_3876443242((*n0).kindU.S6.sons->data[i_555404_839829468]); if (!((*LOC13).kind == ((Tnodekind293020) 23))) goto LA14; add_179487_2381377266(&args0, ((NimStringDesc*) &T839829468_514)); } goto LA11; LA14: ; { TY179507 LOC17; initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[i_555404_839829468], (&a0)); memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = 
rdloc_539188_839829468(a0); addf_180205_2381377266(&args0, ((NimStringDesc*) &T839829468_515), LOC17, 1); } LA11: ; res_555434_839829468 += ((NI) 1); } LA10: ; } } memset((void*)LOC18, 0, sizeof(LOC18)); LOC19 = (NimStringDesc*)0; LOC20 = (NI)0; LOC20 = len_294081_850551059(n0); LOC21 = (NimStringDesc*)0; LOC21 = nsuRepeatStr(((NimStringDesc*) &T839829468_517), ((NI) (LOC20))); LOC19 = rawNewString(LOC21->Sup.len + tnl_177644_4151366050->Sup.len + 0); appendString(LOC19, LOC21); appendString(LOC19, tnl_177644_4151366050); LOC18[0] = makecstring_192638_155036129(LOC19); LOC18[1] = args0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_516), LOC18, 2); memset((void*)LOC22, 0, sizeof(LOC22)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_518), LOC22, 0); } N_NIMCALL(void, genseqconstr_556004_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0) { Tloc293816 arr0; NI LOC5; Ropeobj179006* LOC6; memset((void*)(&arr0), 0, sizeof(arr0)); { if (!((*d0).k == ((Tlockind293808) 0))) goto LA3; gettemp_538032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA3: ; LOC5 = (NI)0; LOC5 = sonslen_296351_850551059(t0); LOC6 = (Ropeobj179006*)0; LOC6 = intliteral_540270_839829468(((NI64) (LOC5))); gennewseqaux_555795_839829468(p0, (*d0), LOC6); { NI i_556031_839829468; NI HEX3Atmp_556039_839829468; NI LOC8; NI res_556042_839829468; i_556031_839829468 = (NI)0; HEX3Atmp_556039_839829468 = (NI)0; LOC8 = (NI)0; LOC8 = sonslen_296351_850551059(t0); HEX3Atmp_556039_839829468 = (NI)(LOC8 - ((NI) 1)); res_556042_839829468 = ((NI) 0); { while (1) { Ttype293840* LOC11; Ttype293840* LOC12; TY533811 LOC13; if (!(res_556042_839829468 <= HEX3Atmp_556039_839829468)) goto LA10; i_556031_839829468 = res_556042_839829468; LOC11 = (Ttype293840*)0; LOC11 = skiptypes_297099_850551059((*t0).typ, IL64(211106232576256)); LOC12 = (Ttype293840*)0; LOC12 = elemtype_321394_3876443242(LOC11); initloc_533273_839829468((&arr0), 
((Tlockind293808) 6), LOC12, ((Tstorageloc293812) 3)); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = rdloc_539188_839829468((*d0)); LOC13[1] = intliteral_540270_839829468(((NI64) (i_556031_839829468))); arr0.r = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC13, 2); arr0.s = ((Tstorageloc293812) 3); expr_540248_839829468(p0, (*t0).kindU.S6.sons->data[i_556031_839829468], (&arr0)); res_556042_839829468 += ((NI) 1); } LA10: ; } } gcusage_555439_839829468(t0); } N_NIMCALL(void, genarrtoseq_556046_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0) { Tloc293816 elem0; Tloc293816 a0; Tloc293816 arr0; NI L0; NI64 LOC9; Ropeobj179006* LOC10; { memset((void*)(&elem0), 0, sizeof(elem0)); memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&arr0), 0, sizeof(arr0)); { if (!((*t0).kind == ((Tnodekind293020) 41))) goto LA3; asgnRefNoCycle((void**) (&(*(*t0).kindU.S6.sons->data[((NI) 1)]).typ), (*t0).typ); genseqconstr_556004_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], d0); goto BeforeRet; } LA3: ; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA7; gettemp_538032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA7: ; LOC9 = (NI64)0; LOC9 = lengthord_321007_3876443242((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ); L0 = ((NI) (LOC9)); LOC10 = (Ropeobj179006*)0; LOC10 = intliteral_540270_839829468(((NI64) (L0))); gennewseqaux_555795_839829468(p0, (*d0), LOC10); initlocexpr_540283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], (&a0)); { NI i_556090_839829468; NI HEX3Atmp_556103_839829468; NI res_556106_839829468; i_556090_839829468 = (NI)0; HEX3Atmp_556103_839829468 = (NI)0; HEX3Atmp_556103_839829468 = (NI)(L0 - ((NI) 1)); res_556106_839829468 = ((NI) 0); { while (1) { Ttype293840* LOC14; Ttype293840* LOC15; TY533811 LOC16; Ttype293840* LOC17; Ttype293840* LOC18; TY533811 LOC19; if (!(res_556106_839829468 <= HEX3Atmp_556103_839829468)) goto LA13; i_556090_839829468 = res_556106_839829468; LOC14 = (Ttype293840*)0; LOC14 = 
skiptypes_297099_850551059((*t0).typ, IL64(211106232576256)); LOC15 = (Ttype293840*)0; LOC15 = elemtype_321394_3876443242(LOC14); initloc_533273_839829468((&elem0), ((Tlockind293808) 6), LOC15, ((Tstorageloc293812) 3)); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_539188_839829468((*d0)); LOC16[1] = intliteral_540270_839829468(((NI64) (i_556090_839829468))); elem0.r = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC16, 2); elem0.s = ((Tstorageloc293812) 3); LOC17 = (Ttype293840*)0; LOC17 = skiptypes_297099_850551059((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106232576256)); LOC18 = (Ttype293840*)0; LOC18 = elemtype_321394_3876443242(LOC17); initloc_533273_839829468((&arr0), ((Tlockind293808) 6), LOC18, a0.s); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_539188_839829468(a0); LOC19[1] = intliteral_540270_839829468(((NI64) (i_556090_839829468))); arr0.r = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC19, 2); genassignment_540264_839829468(p0, elem0, arr0, 3); res_556106_839829468 += ((NI) 1); } LA13: ; } } }BeforeRet: ; } N_NIMCALL(void, gendeepcopy_551374_839829468)(Tcproc530021* p0, Tloc293816 dest0, Tloc293816 src0) { Ttype293840* ty0; ty0 = skiptypes_297099_850551059(dest0.t, IL64(211106242013440)); switch ((*ty0).kind) { case ((Ttypekind293244) 21): case ((Ttypekind293244) 22): case ((Ttypekind293244) 25): case ((Ttypekind293244) 18): case ((Ttypekind293244) 17): case ((Ttypekind293244) 16): case ((Ttypekind293244) 4): { TY536238 LOC2; memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = addrloc_539204_839829468(dest0); LOC2[1] = addrloc_539204_839829468(src0); LOC2[2] = gentypeinfo_536941_839829468((*p0).module, dest0.t); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_519), LOC2, 3); } break; case ((Ttypekind293244) 24): case ((Ttypekind293244) 28): { TY536238 LOC4; memset((void*)LOC4, 0, sizeof(LOC4)); LOC4[0] = addrloc_539204_839829468(dest0); 
LOC4[1] = rdloc_539188_839829468(src0); LOC4[2] = gentypeinfo_536941_839829468((*p0).module, dest0.t); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_520), LOC4, 3); } break; case ((Ttypekind293244) 27): case ((Ttypekind293244) 48): { TY536238 LOC6; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = addrloc_539204_839829468(dest0); LOC6[1] = addrloc_539204_839829468(src0); LOC6[2] = gentypeinfo_536941_839829468((*p0).module, dest0.t); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_521), LOC6, 3); } break; case ((Ttypekind293244) 19): { { Tctypekind530007 LOC10; TY536238 LOC13; NI64 LOC14; LOC10 = (Tctypekind530007)0; LOC10 = maptype_534393_839829468(ty0); if (!(LOC10 == ((Tctypekind530007) 17))) goto LA11; usestringh_533345_839829468((*p0).module); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = rdloc_539188_839829468(dest0); LOC13[1] = rdloc_539188_839829468(src0); LOC14 = (NI64)0; LOC14 = getsize_321135_3876443242(dest0.t); LOC13[2] = rope_179401_2381377266(LOC14); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_268), LOC13, 3); } goto LA8; LA11: ; { TY533811 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_539188_839829468(dest0); LOC16[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC16, 2); } LA8: ; } break; case ((Ttypekind293244) 26): case ((Ttypekind293244) 2): case ((Ttypekind293244) 1): case ((Ttypekind293244) 14): case ((Ttypekind293244) 29): case ((Ttypekind293244) 31) ... 
((Ttypekind293244) 44): case ((Ttypekind293244) 20): case ((Ttypekind293244) 23): { TY533811 LOC18; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_539188_839829468(dest0); LOC18[1] = rdloc_539188_839829468(src0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_123), LOC18, 2); } break; default: { NimStringDesc* LOC20; LOC20 = (NimStringDesc*)0; LOC20 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI293244))->Sup.len + 13); appendString(LOC20, ((NimStringDesc*) &T839829468_522)); appendString(LOC20, reprEnum((NI)(*ty0).kind, (&NTI293244))); internalerror_197113_155036129(LOC20); } break; } } N_NIMCALL(void, genmagicexpr_558033_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tmagic293524 op0) { switch (op0) { case ((Tmagic293524) 127): case ((Tmagic293524) 126): { genandor_555311_839829468(p0, e0, d0, op0); } break; case ((Tmagic293524) 99) ... ((Tmagic293524) 117): { unaryarith_553646_839829468(p0, e0, d0, op0); } break; case ((Tmagic293524) 96) ... ((Tmagic293524) 98): { unaryarithoverflow_552633_839829468(p0, e0, d0, op0); } break; case ((Tmagic293524) 52) ... ((Tmagic293524) 55): { binaryfloatarith_557728_839829468(p0, e0, d0, op0); } break; case ((Tmagic293524) 56) ... ((Tmagic293524) 93): { binaryarith_552819_839829468(p0, e0, d0, op0); } break; case ((Tmagic293524) 95): { geneqproc_553214_839829468(p0, e0, d0); } break; case ((Tmagic293524) 45) ... 
((Tmagic293524) 51): { binaryarithoverflow_552262_839829468(p0, e0, d0, op0); } break; case ((Tmagic293524) 149): { genrepr_556339_839829468(p0, e0, d0); } break; case ((Tmagic293524) 259): { gengettypeinfo_556383_839829468(p0, e0, d0); } break; case ((Tmagic293524) 156): { genswap_556638_839829468(p0, e0, d0); } break; case ((Tmagic293524) 25): { { if (!!((((*p0).options &(1U<<((NU)(((Toption170009) 5))&31U)))!=0))) goto LA14; unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_385)); } goto LA12; LA14: ; { unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_386)); } LA12: ; } break; case ((Tmagic293524) 26): case ((Tmagic293524) 27): { Ttype293840* underlying0; underlying0 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 9439232); { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = !((((*p0).options &(1U<<((NU)(((Toption170009) 5))&31U)))!=0)); if (LOC20) goto LA21; LOC20 = ((*underlying0).kind >= ((Ttypekind293244) 40) && (*underlying0).kind <= ((Ttypekind293244) 44)); LA21: ; if (!LOC20) goto LA22; binarystmt_551501_839829468(p0, e0, d0, opr_558050_839829468[(op0)- 26]); } goto LA18; LA22: ; { Tloc293816 a0; Tloc293816 b0; Ttype293840* ranged0; Ropeobj179006* res0; NimStringDesc* LOC25; TY533811 LOC31; Ropeobj179006* LOC32; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); ranged0 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 8390656); LOC25 = (NimStringDesc*)0; { if (!((*underlying0).kind == ((Ttypekind293244) 35))) goto LA28; LOC25 = copyString(fun64_558055_839829468[(op0)- 26]); } goto LA26; LA28: ; { LOC25 = copyString(fun_558060_839829468[(op0)- 26]); } LA26: ; res0 = binaryarithoverflowraw_552235_839829468(p0, ranged0, a0, b0, LOC25); memset((void*)LOC31, 0, sizeof(LOC31)); LOC31[0] = 
gettypedesc_536671_839829468((*p0).module, ranged0); LOC31[1] = res0; LOC32 = (Ropeobj179006*)0; LOC32 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_370), LOC31, 2); putintodest_551468_839829468(p0, (&a0), ranged0, LOC32, ((Tstorageloc293812) 0)); } LA18: ; } break; case ((Tmagic293524) 138): { genstrconcat_555452_839829468(p0, e0, d0); } break; case ((Tmagic293524) 144): { binarystmt_551501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_394)); } break; case ((Tmagic293524) 145): { genstrappend_555554_839829468(p0, e0, d0); } break; case ((Tmagic293524) 146): { genseqelemappend_555683_839829468(p0, e0, d0); } break; case ((Tmagic293524) 128): { genstrequals_557666_839829468(p0, e0, d0); } break; case ((Tmagic293524) 129): { binaryexpr_551549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_402)); } break; case ((Tmagic293524) 130): { binaryexpr_551549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_403)); } break; case ((Tmagic293524) 157): { genisnil_553620_839829468(p0, e0, d0); } break; case ((Tmagic293524) 120): { gendollar_556391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_406)); } break; case ((Tmagic293524) 121): { gendollar_556391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_407)); } break; case ((Tmagic293524) 119): { gendollar_556391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_408)); } break; case ((Tmagic293524) 118): { gendollar_556391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_409)); } break; case ((Tmagic293524) 122): { gendollar_556391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_410)); } break; case ((Tmagic293524) 123): { gendollar_556391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_411)); } break; case ((Tmagic293524) 124): { expr_540248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Tmagic293524) 125): { genrepr_556339_839829468(p0, e0, d0); } break; case ((Tmagic293524) 12): { genof_556331_839829468(p0, e0, d0); } break; case ((Tmagic293524) 29): { 
gennew_555782_839829468(p0, e0); } break; case ((Tmagic293524) 30): { gennewfinalize_556110_839829468(p0, e0); } break; case ((Tmagic293524) 31): { gennewseq_555824_839829468(p0, e0); } break; case ((Tmagic293524) 32): { gennewseqofcap_555836_839829468(p0, e0, d0); } break; case ((Tmagic293524) 9): { Ttype293840* t0; TY179507 LOC55; Ropeobj179006* LOC56; t0 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 256); memset((void*)LOC55, 0, sizeof(LOC55)); LOC55[0] = gettypedesc_536671_839829468((*p0).module, t0); LOC56 = (Ropeobj179006*)0; LOC56 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_428), LOC55, 1); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC56, ((Tstorageloc293812) 0)); } break; case ((Tmagic293524) 42): { gensomecast_557480_839829468(p0, e0, d0); } break; case ((Tmagic293524) 28): { genord_557474_839829468(p0, e0, d0); } break; case ((Tmagic293524) 35): case ((Tmagic293524) 8): case ((Tmagic293524) 34): case ((Tmagic293524) 36): case ((Tmagic293524) 33): { genarraylen_556415_839829468(p0, e0, d0, op0); } break; case ((Tmagic293524) 37): case ((Tmagic293524) 38): { { NIM_BOOL LOC63; LOC63 = (NIM_BOOL)0; LOC63 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC63) goto LA64; LOC63 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA64: ; if (!!(LOC63)) goto LA65; unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_440)); } goto LA61; LA65: ; { unaryexpr_552209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_441)); } LA61: ; } break; case ((Tmagic293524) 43): { unarystmt_551527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_443)); } break; case ((Tmagic293524) 44): { unarystmt_551527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_444)); } break; case ((Tmagic293524) 151): { gensetlengthstr_556632_839829468(p0, e0, d0); } break; case ((Tmagic293524) 152): { gensetlengthseq_556500_839829468(p0, e0, d0); } break; case ((Tmagic293524) 39): case 
((Tmagic293524) 40): case ((Tmagic293524) 41): case ((Tmagic293524) 133): case ((Tmagic293524) 132): case ((Tmagic293524) 131): case ((Tmagic293524) 134): case ((Tmagic293524) 135): case ((Tmagic293524) 136): case ((Tmagic293524) 148): { gensetop_557419_839829468(p0, e0, d0, op0); } break; case ((Tmagic293524) 161): case ((Tmagic293524) 162): case ((Tmagic293524) 159): case ((Tmagic293524) 160): case ((Tmagic293524) 150): case ((Tmagic293524) 163): { Tsym293834* opr0; opr0 = (*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { NimStringDesc* LOC78; Ropeobj179006* LOC79; if (!!((((*opr0).loc.flags &(1U<<((NU)(((Tlocflag293810) 3))&15U)))!=0))) goto LA76; LOC78 = (NimStringDesc*)0; LOC78 = HEX24_179856_2381377266((*opr0).loc.r); LOC79 = (Ropeobj179006*)0; LOC79 = cgsym_533403_839829468((*p0).module, LOC78); } LA76: ; gencall_544632_839829468(p0, e0, d0); } break; case ((Tmagic293524) 164): { genreset_555731_839829468(p0, e0); } break; case ((Tmagic293524) 17): { Tnode293802* LOC82; Tnode293802* LOC83; LOC82 = (Tnode293802*)0; LOC82 = HEX5BHEX5D_294238_850551059(e0, ((NI) 1)); LOC83 = (Tnode293802*)0; LOC83 = skipconv_329882_3876443242(LOC82); genecho_555369_839829468(p0, LOC83); } break; case ((Tmagic293524) 158): { genarrtoseq_556046_839829468(p0, e0, d0); } break; case ((Tmagic293524) 223) ... ((Tmagic293524) 257): case ((Tmagic293524) 19) ... 
((Tmagic293524) 24): { localerror_197080_155036129((*e0).info, ((Tmsgkind192002) 229), (*(*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s); } break; case ((Tmagic293524) 208): { Tnode293802* n0; n0 = wrapprocforspawn_436501_2218250499((*(*p0).module).module, e0, (*e0).typ, NIM_NIL, NIM_NIL); expr_540248_839829468(p0, n0, d0); } break; case ((Tmagic293524) 155): { Tnode293802* n0; n0 = liftparallel_479822_1773027539((*(*p0).module).module, e0); expr_540248_839829468(p0, n0, d0); } break; case ((Tmagic293524) 209): { Tloc293816 a0; Tloc293816 b0; Tnode293802* x0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); { Tnode293802* LOC91; Tnode293802* LOC94; LOC91 = (Tnode293802*)0; LOC91 = HEX5BHEX5D_294238_850551059(e0, ((NI) 1)); if (!((*LOC91).kind == ((Tnodekind293020) 63) || (*LOC91).kind == ((Tnodekind293020) 64))) goto LA92; LOC94 = (Tnode293802*)0; LOC94 = HEX5BHEX5D_294238_850551059(e0, ((NI) 1)); x0 = HEX5BHEX5D_294238_850551059(LOC94, ((NI) 0)); } goto LA89; LA92: ; { x0 = HEX5BHEX5D_294238_850551059(e0, ((NI) 1)); } LA89: ; initlocexpr_540283_839829468(p0, x0, (&a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); gendeepcopy_551374_839829468(p0, a0, b0); } break; case ((Tmagic293524) 140): case ((Tmagic293524) 94): { gencall_544632_839829468(p0, e0, d0); } break; default: { NimStringDesc* LOC98; LOC98 = (NimStringDesc*)0; LOC98 = rawNewString(reprEnum((NI)op0, (&NTI293524))->Sup.len + 14); appendString(LOC98, ((NimStringDesc*) &T839829468_523)); appendString(LOC98, reprEnum((NI)op0, (&NTI293524))); internalerror_197100_155036129((*e0).info, LOC98); } break; } } N_NIMCALL(Ropeobj179006*, gensetnode_550664_839829468)(Tcproc530021* p0, Tnode293802* n0) { Ropeobj179006* result0; Tbitset340004* cs0; NI size0; NI64 LOC1; result0 = (Ropeobj179006*)0; cs0 = (Tbitset340004*)0; LOC1 = (NI64)0; LOC1 = getsize_321135_3876443242((*n0).typ); size0 = ((NI) (LOC1)); tobitset_341001_452470228(n0, (&cs0)); { 
NI id0; Ropeobj179006* LOC6; if (!(((NI) 8) < size0)) goto LA4; id0 = nodetabletestorset_343682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC6 = (Ropeobj179006*)0; LOC6 = rope_179401_2381377266(((NI64) (id0))); result0 = HEX26_179418_2381377266((*(*p0).module).tmpbase, LOC6); { TY536238 LOC11; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA9; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = gettypedesc_536671_839829468((*p0).module, (*n0).typ); LOC11[1] = result0; LOC11[2] = genrawsetdata_550629_839829468(cs0, size0); addf_180205_2381377266(&(*(*p0).module).s[(((Tcfilesection530005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC11, 3); } LA9: ; } goto LA2; LA4: ; { result0 = genrawsetdata_550629_839829468(cs0, size0); } LA2: ; return result0; } N_NIMCALL(void, gensetconstr_558496_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 a0; Tloc293816 b0; Tloc293816 idx0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&idx0), 0, sizeof(idx0)); { Ropeobj179006* LOC5; if (!(((*e0).flags &(1U<<((NU)(((Tnodeflag293427) 4))&15U)))!=0)) goto LA3; LOC5 = (Ropeobj179006*)0; LOC5 = gensetnode_550664_839829468(p0, e0); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC5, ((Tstorageloc293812) 0)); } goto LA1; LA3: ; { { if (!((*d0).k == ((Tlockind293808) 0))) goto LA9; gettemp_538032_839829468(p0, (*e0).typ, d0, NIM_FALSE); } LA9: ; { NI64 LOC13; TY179507 LOC16; LOC13 = (NI64)0; LOC13 = getsize_321135_3876443242((*e0).typ); if (!(IL64(8) < LOC13)) goto LA14; usestringh_533345_839829468((*p0).module); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_539188_839829468((*d0)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_525), LOC16, 1); { NI i_558537_839829468; NI HEX3Atmp_558603_839829468; NI LOC18; NI res_558606_839829468; i_558537_839829468 = (NI)0; HEX3Atmp_558603_839829468 = (NI)0; 
LOC18 = (NI)0; LOC18 = sonslen_296351_850551059(e0); HEX3Atmp_558603_839829468 = (NI)(LOC18 - ((NI) 1)); res_558606_839829468 = ((NI) 0); { while (1) { if (!(res_558606_839829468 <= HEX3Atmp_558603_839829468)) goto LA20; i_558537_839829468 = res_558606_839829468; { Ttype293840* LOC25; TY536235 LOC26; if (!((*(*e0).kindU.S6.sons->data[i_558537_839829468]).kind == ((Tnodekind293020) 44))) goto LA23; LOC25 = (Ttype293840*)0; LOC25 = getsystype_339150_3937434831(((Ttypekind293244) 31)); gettemp_538032_839829468(p0, LOC25, (&idx0), NIM_FALSE); initlocexpr_540283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_558537_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0)); initlocexpr_540283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_558537_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0)); memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = rdloc_539188_839829468(idx0); LOC26[1] = rdloc_539188_839829468((*d0)); LOC26[2] = rdsetelemloc_556662_839829468(a0, (*e0).typ); LOC26[3] = rdsetelemloc_556662_839829468(b0, (*e0).typ); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_526), LOC26, 4); } goto LA21; LA23: ; { TY533811 LOC28; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[i_558537_839829468], (&a0)); memset((void*)LOC28, 0, sizeof(LOC28)); LOC28[0] = rdloc_539188_839829468((*d0)); LOC28[1] = rdsetelemloc_556662_839829468(a0, (*e0).typ); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_527), LOC28, 2); } LA21: ; res_558606_839829468 += ((NI) 1); } LA20: ; } } } goto LA11; LA14: ; { NimStringDesc* ts0; NimStringDesc* LOC30; NI64 LOC31; NimStringDesc* LOC32; TY179507 LOC33; LOC30 = (NimStringDesc*)0; LOC31 = (NI64)0; LOC31 = getsize_321135_3876443242((*e0).typ); LOC32 = (NimStringDesc*)0; LOC32 = nimInt64ToStr((NI64)(LOC31 * IL64(8))); LOC30 = rawNewString(LOC32->Sup.len + 2); appendString(LOC30, ((NimStringDesc*) &T839829468_45)); appendString(LOC30, LOC32); ts0 = LOC30; memset((void*)LOC33, 0, 
sizeof(LOC33)); LOC33[0] = rdloc_539188_839829468((*d0)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_494), LOC33, 1); { NI i_558575_839829468; NI HEX3Atmp_558611_839829468; NI LOC35; NI res_558614_839829468; i_558575_839829468 = (NI)0; HEX3Atmp_558611_839829468 = (NI)0; LOC35 = (NI)0; LOC35 = sonslen_296351_850551059(e0); HEX3Atmp_558611_839829468 = (NI)(LOC35 - ((NI) 1)); res_558614_839829468 = ((NI) 0); { while (1) { if (!(res_558614_839829468 <= HEX3Atmp_558611_839829468)) goto LA37; i_558575_839829468 = res_558614_839829468; { Ttype293840* LOC42; NimStringDesc* LOC43; TY536235 LOC44; if (!((*(*e0).kindU.S6.sons->data[i_558575_839829468]).kind == ((Tnodekind293020) 44))) goto LA40; LOC42 = (Ttype293840*)0; LOC42 = getsystype_339150_3937434831(((Ttypekind293244) 31)); gettemp_538032_839829468(p0, LOC42, (&idx0), NIM_FALSE); initlocexpr_540283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_558575_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0)); initlocexpr_540283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_558575_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0)); LOC43 = (NimStringDesc*)0; LOC43 = rawNewString(ts0->Sup.len + ts0->Sup.len + 68); appendString(LOC43, ((NimStringDesc*) &T839829468_528)); appendString(LOC43, ts0); appendString(LOC43, ((NimStringDesc*) &T839829468_529)); appendString(LOC43, ts0); appendString(LOC43, ((NimStringDesc*) &T839829468_454)); memset((void*)LOC44, 0, sizeof(LOC44)); LOC44[0] = rdloc_539188_839829468(idx0); LOC44[1] = rdloc_539188_839829468((*d0)); LOC44[2] = rdsetelemloc_556662_839829468(a0, (*e0).typ); LOC44[3] = rdsetelemloc_556662_839829468(b0, (*e0).typ); linef_533700_839829468(p0, ((Tcprocsection530011) 2), LOC43, LOC44, 4); } goto LA38; LA40: ; { NimStringDesc* LOC46; TY533811 LOC47; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[i_558575_839829468], (&a0)); LOC46 = (NimStringDesc*)0; LOC46 = rawNewString(ts0->Sup.len + ts0->Sup.len + 36); appendString(LOC46, 
((NimStringDesc*) &T839829468_530)); appendString(LOC46, ts0); appendString(LOC46, ((NimStringDesc*) &T839829468_531)); appendString(LOC46, ts0); appendString(LOC46, ((NimStringDesc*) &T839829468_454)); memset((void*)LOC47, 0, sizeof(LOC47)); LOC47[0] = rdloc_539188_839829468((*d0)); LOC47[1] = rdsetelemloc_556662_839829468(a0, (*e0).typ); linef_533700_839829468(p0, ((Tcprocsection530011) 2), LOC46, LOC47, 2); } LA38: ; res_558614_839829468 += ((NI) 1); } LA37: ; } } } LA11: ; } LA1: ; } N_NIMCALL(void, exprcomplexconst_559684_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { Ttype293840* t0; Ropeobj179006* LOC1; NI id0; Ropeobj179006* tmp0; Ropeobj179006* LOC2; t0 = getuniquetype_529640_2036603609((*n0).typ); LOC1 = (Ropeobj179006*)0; LOC1 = gettypedesc_536671_839829468((*p0).module, t0); id0 = nodetabletestorset_343682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC2 = (Ropeobj179006*)0; LOC2 = rope_179401_2381377266(((NI64) (id0))); tmp0 = HEX26_179418_2381377266((*(*p0).module).tmpbase, LOC2); { TY536238 LOC7; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA5; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = gettypedesc_536671_839829468((*p0).module, t0); LOC7[1] = tmp0; LOC7[2] = genconstexpr_555849_839829468(p0, n0); addf_180205_2381377266(&(*(*p0).module).s[(((Tcfilesection530005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC7, 3); } LA5: ; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA10; fillloc_533282_839829468(d0, ((Tlockind293808) 8), t0, tmp0, ((Tstorageloc293812) 1)); } goto LA8; LA10: ; { putdataintodest_551436_839829468(p0, d0, t0, tmp0); { if (!!(((*t0).kind == ((Ttypekind293244) 24) || (*t0).kind == ((Ttypekind293244) 28)))) goto LA15; (*d0).s = ((Tstorageloc293812) 1); } LA15: ; } LA8: ; } N_NIMCALL(NIM_BOOL, handleconstexpr_555853_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { NIM_BOOL LOC3; 
NIM_BOOL LOC4; NI LOC6; Ttype293840* t0; Ropeobj179006* LOC10; NI id0; Ropeobj179006* LOC11; Ropeobj179006* LOC12; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = ((*d0).k == ((Tlockind293808) 0)); if (!(LOC4)) goto LA5; LOC6 = (NI)0; LOC6 = len_294081_850551059(n0); LOC4 = (((NI) (((*n0).kind == ((Tnodekind293020) 38)))) < LOC6); LA5: ; LOC3 = LOC4; if (!(LOC3)) goto LA7; LOC3 = isdeepconstexpr_319566_2616423590(n0); LA7: ; if (!LOC3) goto LA8; t0 = getuniquetype_529640_2036603609((*n0).typ); LOC10 = (Ropeobj179006*)0; LOC10 = gettypedesc_536671_839829468((*p0).module, t0); id0 = nodetabletestorset_343682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC11 = (Ropeobj179006*)0; LOC11 = rope_179401_2381377266(((NI64) (id0))); LOC12 = (Ropeobj179006*)0; LOC12 = HEX26_179418_2381377266((*(*p0).module).tmpbase, LOC11); fillloc_533282_839829468(d0, ((Tlockind293808) 8), t0, LOC12, ((Tstorageloc293812) 1)); { TY536238 LOC17; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA15; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_536671_839829468((*p0).module, t0); LOC17[1] = (*d0).r; LOC17[2] = genconstexpr_555849_839829468(p0, n0); addf_180205_2381377266(&(*(*p0).module).s[(((Tcfilesection530005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3); } LA15: ; result0 = NIM_TRUE; } goto LA1; LA8: ; { result0 = NIM_FALSE; } LA1: ; return result0; } N_NIMCALL(void, genarrayconstr_559207_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { Tloc293816 arr0; memset((void*)(&arr0), 0, sizeof(arr0)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = handleconstexpr_555853_839829468(p0, n0, d0); if (!!(LOC3)) goto LA4; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA8; gettemp_538032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA8: ; { NI i_559234_839829468; NI HEX3Atmp_559242_839829468; NI LOC11; NI res_559245_839829468; i_559234_839829468 = (NI)0; HEX3Atmp_559242_839829468 = (NI)0; LOC11 
= (NI)0; LOC11 = sonslen_296351_850551059(n0); HEX3Atmp_559242_839829468 = (NI)(LOC11 - ((NI) 1)); res_559245_839829468 = ((NI) 0); { while (1) { Ttype293840* LOC14; Ttype293840* LOC15; TY533811 LOC16; if (!(res_559245_839829468 <= HEX3Atmp_559242_839829468)) goto LA13; i_559234_839829468 = res_559245_839829468; LOC14 = (Ttype293840*)0; LOC14 = skiptypes_297099_850551059((*n0).typ, IL64(211106232576256)); LOC15 = (Ttype293840*)0; LOC15 = elemtype_321394_3876443242(LOC14); initloc_533273_839829468((&arr0), ((Tlockind293808) 6), LOC15, (*d0).s); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_539188_839829468((*d0)); LOC16[1] = intliteral_540270_839829468(((NI64) (i_559234_839829468))); arr0.r = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_138), LOC16, 2); expr_540248_839829468(p0, (*n0).kindU.S6.sons->data[i_559234_839829468], (&arr0)); res_559245_839829468 += ((NI) 1); } LA13: ; } } } LA4: ; } N_NIMCALL(void, gentupleconstr_558618_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { Tloc293816 rec0; memset((void*)(&rec0), 0, sizeof(rec0)); { NIM_BOOL LOC3; Ttype293840* t0; Ropeobj179006* LOC6; LOC3 = (NIM_BOOL)0; LOC3 = handleconstexpr_555853_839829468(p0, n0, d0); if (!!(LOC3)) goto LA4; t0 = getuniquetype_529640_2036603609((*n0).typ); LOC6 = (Ropeobj179006*)0; LOC6 = gettypedesc_536671_839829468((*p0).module, t0); { if (!((*d0).k == ((Tlockind293808) 0))) goto LA9; gettemp_538032_839829468(p0, t0, d0, NIM_FALSE); } LA9: ; { NI i_558646_839829468; NI HEX3Atmp_558803_839829468; NI LOC12; NI res_558806_839829468; i_558646_839829468 = (NI)0; HEX3Atmp_558803_839829468 = (NI)0; LOC12 = (NI)0; LOC12 = sonslen_296351_850551059(n0); HEX3Atmp_558803_839829468 = (NI)(LOC12 - ((NI) 1)); res_558806_839829468 = ((NI) 0); { while (1) { Tnode293802* it0; TY533811 LOC19; if (!(res_558806_839829468 <= HEX3Atmp_558803_839829468)) goto LA14; i_558646_839829468 = res_558806_839829468; it0 = (*n0).kindU.S6.sons->data[i_558646_839829468]; { if 
(!((*it0).kind == ((Tnodekind293020) 34))) goto LA17; it0 = (*it0).kindU.S6.sons->data[((NI) 1)]; } LA17: ; initloc_533273_839829468((&rec0), ((Tlockind293808) 6), (*it0).typ, (*d0).s); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_539188_839829468((*d0)); LOC19[1] = rope_179401_2381377266(((NI64) (i_558646_839829468))); rec0.r = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_185), LOC19, 2); expr_540248_839829468(p0, it0, (&rec0)); res_558806_839829468 += ((NI) 1); } LA14: ; } } } LA4: ; } N_NIMCALL(Tsym293834*, lookupfieldagain_554153_839829468)(Tcproc530021* p0, Ttype293840* ty_554156_839829468, Tsym293834* field0, Ropeobj179006** r0) { Tsym293834* result0; Ttype293840* ty0; result0 = (Tsym293834*)0; ty0 = ty_554156_839829468; { while (1) { if (!!((ty0 == NIM_NIL))) goto LA2; ty0 = skiptypes_297099_850551059(ty0, IL64(211106247215360)); result0 = lookupinrecord_300119_2984716966((*ty0).n, (*field0).name); { if (!!((result0 == NIM_NIL))) goto LA5; goto LA1; } LA5: ; { NIM_BOOL LOC9; LOC9 = (NIM_BOOL)0; LOC9 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC9) goto LA10; LOC9 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA10: ; if (!!(LOC9)) goto LA11; add_179487_2381377266(r0, ((NimStringDesc*) &T839829468_153)); } LA11: ; ty0 = getuniquetype_529640_2036603609((*ty0).sons->data[((NI) 0)]); } LA2: ; } LA1: ; { if (!(result0 == NIM_NIL)) goto LA15; internalerror_197100_155036129((*field0).info, ((NimStringDesc*) &T839829468_532)); } LA15: ; return result0; } N_NIMCALL(void, genfieldcheck_554504_839829468)(Tcproc530021* p0, Tnode293802* e0, Ropeobj179006* obj0, Tsym293834* field0, Ttype293840* origty0) { Tloc293816 test0; Tloc293816 u0; Tloc293816 v0; memset((void*)(&test0), 0, sizeof(test0)); memset((void*)(&u0), 0, sizeof(u0)); memset((void*)(&v0), 0, sizeof(v0)); { NI i_554525_839829468; NI HEX3Atmp_555039_839829468; NI LOC2; NI res_555042_839829468; i_554525_839829468 = (NI)0; 
HEX3Atmp_555039_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_296351_850551059(e0); HEX3Atmp_555039_839829468 = (NI)(LOC2 - ((NI) 1)); res_555042_839829468 = ((NI) 1); { while (1) { Tnode293802* it0; Tsym293834* op0; Tnode293802* disc0; Ropeobj179006* o0; Tsym293834* d0; NI id0; Tnode293802* LOC9; Ropeobj179006* strlit0; if (!(res_555042_839829468 <= HEX3Atmp_555039_839829468)) goto LA4; i_554525_839829468 = res_555042_839829468; it0 = (*e0).kindU.S6.sons->data[i_554525_839829468]; op0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { if (!((*op0).magic == ((Tmagic293524) 99))) goto LA7; it0 = (*it0).kindU.S6.sons->data[((NI) 1)]; } LA7: ; disc0 = skipconv_329882_3876443242((*it0).kindU.S6.sons->data[((NI) 2)]); initloc_533273_839829468((&test0), ((Tlockind293808) 0), (*it0).typ, ((Tstorageloc293812) 2)); initlocexpr_540283_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&u0)); o0 = obj0; d0 = lookupfieldagain_554153_839829468(p0, origty0, (*disc0).kindU.S4.sym, &o0); initloc_533273_839829468((&v0), ((Tlockind293808) 6), (*d0).typ, ((Tstorageloc293812) 0)); v0.r = o0; add_179487_2381377266(&v0.r, ((NimStringDesc*) &T839829468_257)); add_179482_2381377266(&v0.r, (*d0).loc.r); geninexpraux_554496_839829468(p0, it0, (&u0), (&v0), (&test0)); LOC9 = (Tnode293802*)0; LOC9 = newstrnode_294678_850551059(((Tnodekind293020) 20), (*(*field0).name).s); id0 = nodetabletestorset_343682_1142335848((&(*(*p0).module).datacache), LOC9, ((NI) ((*(*p0).module).labels))); { if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA12; strlit0 = getstrlit_550468_839829468((*p0).module, (*(*field0).name).s); } goto LA10; LA12: ; { Ropeobj179006* LOC15; LOC15 = (Ropeobj179006*)0; LOC15 = rope_179401_2381377266(((NI64) (id0))); strlit0 = HEX26_179418_2381377266((*(*p0).module).tmpbase, LOC15); } LA10: ; { TY533811 LOC20; if (!((*op0).magic == ((Tmagic293524) 99))) goto LA18; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = rdloc_539188_839829468(test0); LOC20[1] = strlit0; 
linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_534), LOC20, 2); } goto LA16; LA18: ; { TY533811 LOC22; memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = rdloc_539188_839829468(test0); LOC22[1] = strlit0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_535), LOC22, 2); } LA16: ; res_555042_839829468 += ((NI) 1); } LA4: ; } } } N_NIMCALL(void, genobjconstr_555903_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 tmp0; Ttype293840* t0; NIM_BOOL isref0; Ropeobj179006* r0; Ropeobj179006* LOC13; Ttype293840* ty0; { { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = handleconstexpr_555853_839829468(p0, e0, d0); if (!LOC3) goto LA4; goto BeforeRet; } LA4: ; memset((void*)(&tmp0), 0, sizeof(tmp0)); t0 = skiptypes_297099_850551059((*e0).typ, IL64(211106232576256)); gettemp_538032_839829468(p0, t0, (&tmp0), NIM_FALSE); isref0 = ((*t0).kind == ((Ttypekind293244) 22)); r0 = rdloc_539188_839829468(tmp0); { Ttype293840* LOC10; TY179507 LOC11; if (!isref0) goto LA8; rawgennew_555741_839829468(p0, tmp0, NIM_NIL); LOC10 = (Ttype293840*)0; LOC10 = lastson_296377_850551059(t0); t0 = skiptypes_297099_850551059(LOC10, IL64(211106232576256)); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = r0; r0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_124), LOC11, 1); gcusage_555439_839829468(e0); } goto LA6; LA8: ; { constructloc_539388_839829468(p0, tmp0, NIM_FALSE); } LA6: ; LOC13 = (Ropeobj179006*)0; LOC13 = gettypedesc_536671_839829468((*p0).module, t0); ty0 = getuniquetype_529640_2036603609(t0); { NI i_555944_839829468; NI HEX3Atmp_555997_839829468; NI LOC15; NI res_556000_839829468; i_555944_839829468 = (NI)0; HEX3Atmp_555997_839829468 = (NI)0; LOC15 = (NI)0; LOC15 = len_294081_850551059(e0); HEX3Atmp_555997_839829468 = (LOC15 - 1); res_556000_839829468 = ((NI) 1); { while (1) { Tnode293802* it0; Tloc293816 tmp20; Tsym293834* field0; if (!(res_556000_839829468 <= 
HEX3Atmp_555997_839829468)) goto LA17; i_555944_839829468 = res_556000_839829468; it0 = (*e0).kindU.S6.sons->data[i_555944_839829468]; memset((void*)(&tmp20), 0, sizeof(tmp20)); tmp20.r = r0; field0 = lookupfieldagain_554153_839829468(p0, ty0, (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym, &tmp20.r); { if (!((*field0).loc.r == NIM_NIL)) goto LA20; internalerror_197100_155036129((*e0).info, ((NimStringDesc*) &T839829468_533)); } LA20: ; { NIM_BOOL LOC24; NI LOC25; LOC24 = (NIM_BOOL)0; LOC25 = (NI)0; LOC25 = len_294081_850551059(it0); LOC24 = (LOC25 == ((NI) 3)); if (!(LOC24)) goto LA26; LOC24 = (((*p0).options &(1U<<((NU)(((Toption170009) 2))&31U)))!=0); LA26: ; if (!LOC24) goto LA27; genfieldcheck_554504_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 2)], r0, field0, ty0); } LA27: ; add_179487_2381377266(&tmp20.r, ((NimStringDesc*) &T839829468_257)); add_179482_2381377266(&tmp20.r, (*field0).loc.r); tmp20.k = ((Tlockind293808) 1); tmp20.t = (*field0).loc.t; { if (!isref0) goto LA31; tmp20.s = ((Tstorageloc293812) 3); } goto LA29; LA31: ; { tmp20.s = ((Tstorageloc293812) 2); } LA29: ; expr_540248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&tmp20)); res_556000_839829468 += ((NI) 1); } LA17: ; } } { if (!((*d0).k == ((Tlockind293808) 0))) goto LA36; genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI293816)); } goto LA34; LA36: ; { genassignment_540264_839829468(p0, (*d0), tmp0, 0); } LA34: ; }BeforeRet: ; } N_NIMCALL(void, gencast_557537_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Ttype293840* destt0; Ttype293840* srct0; destt0 = skiptypes_297099_850551059((*e0).typ, IL64(211106233624832)); srct0 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832)); { NIM_BOOL LOC3; Ropeobj179006* lbl0; Tloc293816 tmp0; TY179507 LOC7; TY536238 LOC8; TY179507 LOC9; Ropeobj179006* LOC10; LOC3 = (NIM_BOOL)0; LOC3 = ((*destt0).kind >= ((Ttypekind293244) 36) && (*destt0).kind <= ((Ttypekind293244) 39) || 
(*destt0).kind == ((Ttypekind293244) 18) || (*destt0).kind == ((Ttypekind293244) 17) || (*destt0).kind == ((Ttypekind293244) 16) || (*destt0).kind == ((Ttypekind293244) 4)); if (LOC3) goto LA4; LOC3 = ((*srct0).kind >= ((Ttypekind293244) 36) && (*srct0).kind <= ((Ttypekind293244) 39) || (*srct0).kind == ((Ttypekind293244) 18) || (*srct0).kind == ((Ttypekind293244) 17) || (*srct0).kind == ((Ttypekind293244) 16) || (*srct0).kind == ((Ttypekind293244) 4)); LA4: ; if (!LOC3) goto LA5; (*p0).labels += ((NI) 1); lbl0 = rope_179401_2381377266(((NI64) ((*p0).labels))); memset((void*)(&tmp0), 0, sizeof(tmp0)); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = lbl0; tmp0.r = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_536), LOC7, 1); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = gettypedesc_536671_839829468((*p0).module, srct0); LOC8[1] = gettypedesc_536671_839829468((*p0).module, destt0); LOC8[2] = lbl0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 0), ((NimStringDesc*) &T839829468_537), LOC8, 3); tmp0.k = ((Tlockind293808) 6); tmp0.t = srct0; tmp0.s = ((Tstorageloc293812) 2); tmp0.flags = 0; expr_540248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0)); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = lbl0; LOC10 = (Ropeobj179006*)0; LOC10 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_538), LOC9, 1); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC10, tmp0.s); } goto LA1; LA5: ; { gensomecast_557480_839829468(p0, e0, d0); } LA1: ; } N_NIMCALL(void, genconv_557632_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Ttype293840* desttype0; desttype0 = skiptypes_297099_850551059((*e0).typ, 8390656); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = comparetypes_327214_3876443242(desttype0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, ((Tdistinctcompare325427) 1), 0); if (!LOC3) goto LA4; expr_540248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0); } goto LA1; LA4: ; { gensomecast_557480_839829468(p0, e0, d0); } LA1: ; } 
static N_INLINE(NIM_BOOL, iscppref_553807_839829468)(Tcproc530021* p0, Ttype293840* typ0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC2; NIM_BOOL LOC3; Ttype293840* LOC6; Ttype293840* LOC8; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA4: ; LOC2 = LOC3; if (!(LOC2)) goto LA5; LOC6 = (Ttype293840*)0; LOC6 = skiptypes_297099_850551059(typ0, IL64(211106232576256)); LOC2 = ((*LOC6).kind == ((Ttypekind293244) 23)); LA5: ; LOC1 = LOC2; if (!(LOC1)) goto LA7; LOC8 = (Ttype293840*)0; LOC8 = skiptypes_297099_850551059(typ0, IL64(211106232576256)); LOC1 = !((((*LOC8).flags &(1U<<((NU)(((Ttypeflag293431) 18))&31U)))!=0)); LA7: ; result0 = LOC1; return result0; } N_NIMCALL(void, genaddr_554051_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { { Ttype293840* LOC3; Tloc293816 a0; Ropeobj179006* LOC6; LOC3 = (Ttype293840*)0; LOC3 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); if (!((*LOC3).kind == ((Ttypekind293244) 22) || (*LOC3).kind == ((Ttypekind293244) 21))) goto LA4; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC6 = (Ropeobj179006*)0; LOC6 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_52), a0.r); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC6, a0.s); } goto LA1; LA4: ; { NIM_BOOL LOC8; Tctypekind530007 LOC9; LOC8 = (NIM_BOOL)0; LOC9 = (Tctypekind530007)0; LOC9 = maptype_534393_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ); LOC8 = (LOC9 == ((Tctypekind530007) 17)); if (LOC8) goto LA10; LOC8 = iscppref_553807_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 0)]).typ); LA10: ; if (!LOC8) goto LA11; expr_540248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0); } goto LA1; LA11: ; { Tloc293816 a0; Ropeobj179006* 
LOC14; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC14 = (Ropeobj179006*)0; LOC14 = addrloc_539204_839829468(a0); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC14, a0.s); } LA1: ; } N_NIMCALL(void, genarrayelem_555093_839829468)(Tcproc530021* p0, Tnode293802* x0, Tnode293802* y0, Tloc293816* d0) { Tloc293816 a0; Tloc293816 b0; Ttype293840* ty0; Ttype293840* LOC1; Ropeobj179006* first0; NI64 LOC2; Ttype293840* LOC47; Ttype293840* LOC48; TY536238 LOC49; Ropeobj179006* LOC50; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, x0, (&a0)); initlocexpr_540283_839829468(p0, y0, (&b0)); LOC1 = (Ttype293840*)0; LOC1 = skiptypes_297099_850551059(a0.t, IL64(211106242013440)); ty0 = skiptypes_297099_850551059(LOC1, IL64(211106247256320)); LOC2 = (NI64)0; LOC2 = firstord_321001_3876443242(ty0); first0 = intliteral_540270_839829468(LOC2); { NIM_BOOL LOC5; LOC5 = (NIM_BOOL)0; LOC5 = (((*p0).options &(1U<<((NU)(((Toption170009) 4))&31U)))!=0); if (!(LOC5)) goto LA6; LOC5 = !((((*ty0).flags &(1U<<((NU)(((Ttypeflag293431) 0))&31U)))!=0)); LA6: ; if (!LOC5) goto LA7; { NIM_BOOL LOC11; LOC11 = (NIM_BOOL)0; LOC11 = isconstexpr_319510_2616423590(y0); if (!!(LOC11)) goto LA12; { NI64 LOC16; LOC16 = (NI64)0; LOC16 = firstord_321001_3876443242(ty0); if (!(LOC16 == IL64(0))) goto LA17; { NIM_BOOL LOC21; NI64 LOC22; NI64 LOC23; NI64 LOC25; NI64 LOC26; TY533811 LOC29; NI64 LOC30; LOC21 = (NIM_BOOL)0; LOC22 = (NI64)0; LOC22 = firstord_321001_3876443242(b0.t); LOC23 = (NI64)0; LOC23 = firstord_321001_3876443242(ty0); LOC21 = (LOC22 < LOC23); if (LOC21) goto LA24; LOC25 = (NI64)0; LOC25 = lastord_321004_3876443242(ty0); LOC26 = (NI64)0; LOC26 = lastord_321004_3876443242(b0.t); LOC21 = (LOC25 < LOC26); LA24: ; if (!LOC21) goto LA27; memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = rdcharloc_539227_839829468(b0); LOC30 = (NI64)0; LOC30 = 
lastord_321004_3876443242(ty0); LOC29[1] = intliteral_540270_839829468(LOC30); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_539), LOC29, 2); } LA27: ; } goto LA14; LA17: ; { TY536238 LOC32; NI64 LOC33; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = rdcharloc_539227_839829468(b0); LOC32[1] = first0; LOC33 = (NI64)0; LOC33 = lastord_321004_3876443242(ty0); LOC32[2] = intliteral_540270_839829468(LOC33); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_540), LOC32, 3); } LA14: ; } goto LA9; LA12: ; { NI64 idx0; idx0 = getordvalue_321129_3876443242(y0); { NIM_BOOL LOC37; NI64 LOC38; NI64 LOC40; LOC37 = (NIM_BOOL)0; LOC38 = (NI64)0; LOC38 = firstord_321001_3876443242(ty0); LOC37 = (idx0 < LOC38); if (LOC37) goto LA39; LOC40 = (NI64)0; LOC40 = lastord_321004_3876443242(ty0); LOC37 = (LOC40 < idx0); LA39: ; if (!LOC37) goto LA41; localerror_197080_155036129((*x0).info, ((Tmsgkind192002) 86), ((NimStringDesc*) &T839829468_490)); } LA41: ; } LA9: ; } LA7: ; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA45; (*d0).s = a0.s; } LA45: ; LOC47 = (Ttype293840*)0; LOC47 = skiptypes_297099_850551059(ty0, IL64(211106240964864)); LOC48 = (Ttype293840*)0; LOC48 = elemtype_321394_3876443242(LOC47); memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = rdloc_539188_839829468(a0); LOC49[1] = rdcharloc_539227_839829468(b0); LOC49[2] = first0; LOC50 = (Ropeobj179006*)0; LOC50 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_541), LOC49, 3); putintodest_551468_839829468(p0, d0, LOC48, LOC50, a0.s); } N_NIMCALL(void, genopenarrayelem_555169_839829468)(Tcproc530021* p0, Tnode293802* x0, Tnode293802* y0, Tloc293816* d0) { Tloc293816 a0; Tloc293816 b0; Ttype293840* LOC10; Ttype293840* LOC11; TY533811 LOC12; Ropeobj179006* LOC13; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, x0, (&a0)); initlocexpr_540283_839829468(p0, y0, (&b0)); { TY533811 
LOC5; if (!(((*p0).options &(1U<<((NU)(((Toption170009) 4))&31U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_539188_839829468(b0); LOC5[1] = rdloc_539188_839829468(a0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_542), LOC5, 2); } LA3: ; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA8; (*d0).s = a0.s; } LA8: ; LOC10 = (Ttype293840*)0; LOC10 = skiptypes_297099_850551059(a0.t, IL64(211106240964864)); LOC11 = (Ttype293840*)0; LOC11 = elemtype_321394_3876443242(LOC10); memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_539188_839829468(a0); LOC12[1] = rdcharloc_539227_839829468(b0); LOC13 = (Ropeobj179006*)0; LOC13 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC12, 2); putintodest_551468_839829468(p0, d0, LOC11, LOC13, a0.s); } N_NIMCALL(void, genseqelem_555205_839829468)(Tcproc530021* p0, Tnode293802* x0, Tnode293802* y0, Tloc293816* d0) { Tloc293816 a0; Tloc293816 b0; Ttype293840* ty0; Ttype293840* LOC27; Ttype293840* LOC28; TY533811 LOC29; Ropeobj179006* LOC30; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, x0, (&a0)); initlocexpr_540283_839829468(p0, y0, (&b0)); ty0 = skiptypes_297099_850551059(a0.t, IL64(211106242013440)); { Ttype293840* LOC5; if (!((*ty0).kind == ((Ttypekind293244) 22) || (*ty0).kind == ((Ttypekind293244) 21))) goto LA3; LOC5 = (Ttype293840*)0; LOC5 = lastson_296377_850551059(ty0); ty0 = skiptypes_297099_850551059(LOC5, IL64(211106242013440)); } LA3: ; { if (!(((*p0).options &(1U<<((NU)(((Toption170009) 4))&31U)))!=0)) goto LA8; { TY536238 LOC14; if (!((*ty0).kind == ((Ttypekind293244) 28))) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_539188_839829468(b0); LOC14[1] = rdloc_539188_839829468(a0); LOC14[2] = lenfield_540305_839829468(p0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_543), LOC14, 3); } goto LA10; LA12: ; { 
TY536238 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_539188_839829468(b0); LOC16[1] = rdloc_539188_839829468(a0); LOC16[2] = lenfield_540305_839829468(p0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_544), LOC16, 3); } LA10: ; } LA8: ; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA19; (*d0).s = ((Tstorageloc293812) 3); } LA19: ; { Ttype293840* LOC23; TY179507 LOC26; LOC23 = (Ttype293840*)0; LOC23 = skiptypes_297099_850551059(a0.t, IL64(211106240964864)); if (!((*LOC23).kind == ((Ttypekind293244) 22) || (*LOC23).kind == ((Ttypekind293244) 21))) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = a0.r; a0.r = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC26, 1); } LA24: ; LOC27 = (Ttype293840*)0; LOC27 = skiptypes_297099_850551059(a0.t, IL64(211106240964864)); LOC28 = (Ttype293840*)0; LOC28 = elemtype_321394_3876443242(LOC27); memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = rdloc_539188_839829468(a0); LOC29[1] = rdcharloc_539227_839829468(b0); LOC30 = (Ropeobj179006*)0; LOC30 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC29, 2); putintodest_551468_839829468(p0, d0, LOC28, LOC30, a0.s); } N_NIMCALL(void, gencstringelem_555144_839829468)(Tcproc530021* p0, Tnode293802* x0, Tnode293802* y0, Tloc293816* d0) { Tloc293816 a0; Tloc293816 b0; Ttype293840* ty0; Ttype293840* LOC5; Ttype293840* LOC6; TY533811 LOC7; Ropeobj179006* LOC8; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, x0, (&a0)); initlocexpr_540283_839829468(p0, y0, (&b0)); ty0 = skiptypes_297099_850551059(a0.t, IL64(211106242013440)); { if (!((*d0).k == ((Tlockind293808) 0))) goto LA3; (*d0).s = a0.s; } LA3: ; LOC5 = (Ttype293840*)0; LOC5 = skiptypes_297099_850551059(ty0, IL64(211106240964864)); LOC6 = (Ttype293840*)0; LOC6 = elemtype_321394_3876443242(LOC5); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = 
rdloc_539188_839829468(a0); LOC7[1] = rdcharloc_539227_839829468(b0); LOC8 = (Ropeobj179006*)0; LOC8 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC7, 2); putintodest_551468_839829468(p0, d0, LOC6, LOC8, a0.s); } N_NIMCALL(void, gentupleelem_554124_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 a0; NI i0; Ropeobj179006* LOC5; Ttype293840* ty0; Ropeobj179006* r0; TY179507 LOC8; memset((void*)(&a0), 0, sizeof(a0)); i0 = (NI)0; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); { if (!((*d0).k == ((Tlockind293808) 0))) goto LA3; (*d0).s = a0.s; } LA3: ; LOC5 = (Ropeobj179006*)0; LOC5 = gettypedesc_536671_839829468((*p0).module, a0.t); ty0 = getuniquetype_529640_2036603609(a0.t); r0 = rdloc_539188_839829468(a0); switch ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind) { case ((Tnodekind293020) 6) ... ((Tnodekind293020) 15): { i0 = ((NI) ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval)); } break; default: { internalerror_197100_155036129((*e0).info, ((NimStringDesc*) &T839829468_545)); } break; } memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rope_179401_2381377266(((NI64) (i0))); addf_180205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC8, 1); putintodest_551468_839829468(p0, d0, (*ty0).sons->data[i0], r0, a0.s); } N_NIMCALL(void, genbracketexpr_555277_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { Ttype293840* ty0; ty0 = skiptypes_297099_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440)); { Ttype293840* LOC5; if (!((*ty0).kind == ((Ttypekind293244) 22) || (*ty0).kind == ((Ttypekind293244) 21))) goto LA3; LOC5 = (Ttype293840*)0; LOC5 = lastson_296377_850551059(ty0); ty0 = skiptypes_297099_850551059(LOC5, IL64(211106242013440)); } LA3: ; switch ((*ty0).kind) { case ((Ttypekind293244) 16): case ((Ttypekind293244) 4): { genarrayelem_555093_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } 
break; case ((Ttypekind293244) 27): case ((Ttypekind293244) 48): { genopenarrayelem_555169_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind293244) 24): case ((Ttypekind293244) 28): { genseqelem_555205_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind293244) 29): { gencstringelem_555144_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind293244) 18): { gentupleelem_554124_839829468(p0, n0, d0); } break; default: { NimStringDesc* LOC12; LOC12 = (NimStringDesc*)0; LOC12 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI293244))->Sup.len + 21); appendString(LOC12, ((NimStringDesc*) &T839829468_547)); appendString(LOC12, reprEnum((NI)(*ty0).kind, (&NTI293244))); appendChar(LOC12, 41); internalerror_197100_155036129((*n0).info, LOC12); } break; } } N_NIMCALL(void, genderef_544921_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, NIM_BOOL enforcederef0) { Tctypekind530007 mt0; { mt0 = maptype_534393_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((393216 &(1U<<((NU)(mt0)&31U)))!=0); if (!(LOC3)) goto LA4; LOC3 = !(enforcederef0); LA4: ; if (!LOC3) goto LA5; expr_540248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0); { Ttype293840* LOC9; LOC9 = (Ttype293840*)0; LOC9 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); if (!((*LOC9).kind == ((Ttypekind293244) 22))) goto LA10; (*d0).s = ((Tstorageloc293812) 3); } LA10: ; } goto LA1; LA5: ; { Tloc293816 a0; Ttype293840* typ0; memset((void*)(&a0), 0, sizeof(a0)); typ0 = skiptypes_297099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { NIM_BOOL LOC15; NIM_BOOL LOC16; NIM_BOOL LOC17; NIM_BOOL LOC20; Tnode293802* LOC25; Tnode293802* LOC26; LOC15 = (NIM_BOOL)0; LOC16 = (NIM_BOOL)0; LOC17 = 
(NIM_BOOL)0; LOC17 = ((*typ0).kind == ((Ttypekind293244) 23)); if (!(LOC17)) goto LA18; LOC17 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag293431) 18))&31U)))!=0)); LA18: ; LOC16 = LOC17; if (!(LOC16)) goto LA19; LOC20 = (NIM_BOOL)0; LOC20 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC20) goto LA21; LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA21: ; LOC16 = LOC20; LA19: ; LOC15 = LOC16; if (!(LOC15)) goto LA22; LOC15 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 64)); LA22: ; if (!LOC15) goto LA23; LOC25 = (Tnode293802*)0; LOC25 = HEX5BHEX5D_294238_850551059(e0, ((NI) 0)); LOC26 = (Tnode293802*)0; LOC26 = HEX5BHEX5D_294238_850551059(LOC25, ((NI) 0)); initlocexprsingleuse_540289_839829468(p0, LOC26, d0); goto BeforeRet; } goto LA13; LA23: ; { initlocexprsingleuse_540289_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); } LA13: ; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA30; switch ((*typ0).kind) { case ((Ttypekind293244) 22): { (*d0).s = ((Tstorageloc293812) 3); } break; case ((Ttypekind293244) 23): { (*d0).s = ((Tstorageloc293812) 0); { NIM_BOOL LOC36; NIM_BOOL LOC37; NIM_BOOL LOC39; Ropeobj179006* LOC44; LOC36 = (NIM_BOOL)0; LOC37 = (NIM_BOOL)0; LOC37 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag293431) 18))&31U)))!=0)); if (!(LOC37)) goto LA38; LOC39 = (NIM_BOOL)0; LOC39 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC39) goto LA40; LOC39 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA40: ; LOC37 = LOC39; LA38: ; LOC36 = LOC37; if (!(LOC36)) goto LA41; LOC36 = ((*e0).kind == ((Tnodekind293020) 65)); LA41: ; if (!LOC36) goto LA42; LOC44 = (Ropeobj179006*)0; LOC44 = rdloc_539188_839829468(a0); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC44, a0.s); goto BeforeRet; } LA42: ; } break; case ((Ttypekind293244) 21): { (*d0).s = ((Tstorageloc293812) 0); } break; default: { NimStringDesc* LOC47; LOC47 = (NimStringDesc*)0; 
LOC47 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI293244))->Sup.len + 9); appendString(LOC47, ((NimStringDesc*) &T839829468_548)); appendString(LOC47, reprEnum((NI)(*typ0).kind, (&NTI293244))); internalerror_197100_155036129((*e0).info, LOC47); } break; } } goto LA28; LA30: ; { NIM_BOOL LOC49; LOC49 = (NIM_BOOL)0; LOC49 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC49) goto LA50; LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA50: ; if (!LOC49) goto LA51; { NIM_BOOL LOC55; NIM_BOOL LOC56; Ropeobj179006* LOC61; LOC55 = (NIM_BOOL)0; LOC56 = (NIM_BOOL)0; LOC56 = ((*typ0).kind == ((Ttypekind293244) 23)); if (!(LOC56)) goto LA57; LOC56 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag293431) 18))&31U)))!=0)); LA57: ; LOC55 = LOC56; if (!(LOC55)) goto LA58; LOC55 = ((*e0).kind == ((Tnodekind293020) 65)); LA58: ; if (!LOC55) goto LA59; LOC61 = (Ropeobj179006*)0; LOC61 = rdloc_539188_839829468(a0); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC61, a0.s); goto BeforeRet; } LA59: ; } goto LA28; LA51: ; LA28: ; { NIM_BOOL LOC64; Ropeobj179006* LOC68; LOC64 = (NIM_BOOL)0; LOC64 = enforcederef0; if (!(LOC64)) goto LA65; LOC64 = (mt0 == ((Tctypekind530007) 18)); LA65: ; if (!LOC64) goto LA66; LOC68 = (Ropeobj179006*)0; LOC68 = rdloc_539188_839829468(a0); putintodest_551468_839829468(p0, d0, (*a0.t).sons->data[((NI) 0)], LOC68, a0.s); } goto LA62; LA66: ; { TY179507 LOC70; Ropeobj179006* LOC71; memset((void*)LOC70, 0, sizeof(LOC70)); LOC70[0] = rdloc_539188_839829468(a0); LOC71 = (Ropeobj179006*)0; LOC71 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_124), LOC70, 1); putintodest_551468_839829468(p0, d0, (*e0).typ, LOC71, a0.s); } LA62: ; } LA1: ; }BeforeRet: ; } N_NIMCALL(Ttype293840*, genrecordfieldaux_554096_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0, Tloc293816* a0) { Ttype293840* result0; Ropeobj179006* LOC9; result0 = (Ttype293840*)0; initlocexpr_540283_839829468(p0, 
(*e0).kindU.S6.sons->data[((NI) 0)], a0); { if (!!(((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind293020) 3)))) goto LA3; internalerror_197100_155036129((*e0).info, ((NimStringDesc*) &T839829468_549)); } LA3: ; { if (!((*d0).k == ((Tlockind293808) 0))) goto LA7; (*d0).s = (*a0).s; } LA7: ; LOC9 = (Ropeobj179006*)0; LOC9 = gettypedesc_536671_839829468((*p0).module, (*a0).t); result0 = getuniquetype_529640_2036603609((*a0).t); return result0; } N_NIMCALL(void, genrecordfield_554448_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { Tloc293816 a0; Ttype293840* ty0; Ropeobj179006* r0; Tsym293834* f0; memset((void*)(&a0), 0, sizeof(a0)); ty0 = genrecordfieldaux_554096_839829468(p0, e0, d0, (&a0)); r0 = rdloc_539188_839829468(a0); f0 = (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; { TY179507 LOC5; if (!((*ty0).kind == ((Ttypekind293244) 18))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_179401_2381377266(((NI64) ((*f0).position))); addf_180205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC5, 1); putintodest_551468_839829468(p0, d0, (*f0).typ, r0, a0.s); } goto LA1; LA3: ; { Tsym293834* field0; TY179507 LOC11; field0 = lookupfieldagain_554153_839829468(p0, ty0, f0, &r0); { if (!((*field0).loc.r == NIM_NIL)) goto LA9; internalerror_197100_155036129((*e0).info, ((NimStringDesc*) &T839829468_550)); } LA9: ; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = (*field0).loc.r; addf_180205_2381377266(&r0, ((NimStringDesc*) &T839829468_551), LOC11, 1); putintodest_551468_839829468(p0, d0, (*field0).typ, r0, a0.s); } LA1: ; } N_NIMCALL(void, gencheckedrecordfield_555046_839829468)(Tcproc530021* p0, Tnode293802* e0, Tloc293816* d0) { { Tloc293816 a0; Ttype293840* ty0; Ropeobj179006* r0; Tsym293834* f0; Tsym293834* field0; TY179507 LOC9; Ropeobj179006* LOC10; if (!(((*p0).options &(1U<<((NU)(((Toption170009) 2))&31U)))!=0)) goto LA3; memset((void*)(&a0), 0, sizeof(a0)); ty0 = genrecordfieldaux_554096_839829468(p0, 
(*e0).kindU.S6.sons->data[((NI) 0)], d0, (&a0)); r0 = rdloc_539188_839829468(a0); f0 = (*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; field0 = lookupfieldagain_554153_839829468(p0, ty0, f0, &r0); { if (!((*field0).loc.r == NIM_NIL)) goto LA7; internalerror_197100_155036129((*e0).info, ((NimStringDesc*) &T839829468_532)); } LA7: ; genfieldcheck_554504_839829468(p0, e0, r0, field0, ty0); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = (*field0).loc.r; LOC10 = (Ropeobj179006*)0; LOC10 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_551), LOC9, 1); add_179482_2381377266(&r0, LOC10); putintodest_551468_839829468(p0, d0, (*field0).typ, r0, a0.s); } goto LA1; LA3: ; { genrecordfield_554448_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0); } LA1: ; } N_NIMCALL(NI, startblock_544978_839829468)(Tcproc530021* p0, NimStringDesc* start0, Ropeobj179006** args0, NI args0Len0) { NI result0; result0 = (NI)0; linecg_533707_839829468(p0, ((Tcprocsection530011) 2), start0, args0, args0Len0); (*p0).labels += ((NI) 1); result0 = ((*p0).blocks ? (*p0).blocks->Sup.len : 0); (*p0).blocks = (TY530095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock530019), ((NI) ((NI)(result0 + ((NI) 1))))); (*p0).blocks->data[result0].id = ((NI) ((*p0).labels)); (*p0).blocks->data[result0].nestedtrystmts = ((NI16) (((*p0).nestedtrystmts ? 
(*p0).nestedtrystmts->Sup.len : 0))); (*p0).blocks->data[result0].nestedexceptstmts = ((NI16) ((*p0).inexceptblock)); return result0; } N_NIMCALL(Ropeobj179006*, blockbody_545025_839829468)(Tblock530019* b0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = (*b0).sections[(((Tcprocsection530011) 0))- 0]; { TY179507 LOC5; if (!(((NI16) 0) < (*b0).framelen)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_179401_2381377266(((NI64) ((*b0).framelen))); addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_554), LOC5, 1); } LA3: ; add_179482_2381377266(&result0, (*b0).sections[(((Tcprocsection530011) 1))- 0]); add_179482_2381377266(&result0, (*b0).sections[(((Tcprocsection530011) 2))- 0]); return result0; } N_NIMCALL(void, endblock_545035_839829468)(Tcproc530021* p0, Ropeobj179006* blockend0) { NI topblock0; Ropeobj179006* LOC1; topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1)); LOC1 = (Ropeobj179006*)0; LOC1 = blockbody_545025_839829468((&(*p0).blocks->data[topblock0])); add_179482_2381377266(&(*p0).blocks->data[(NI)(topblock0 - ((NI) 1))].sections[(((Tcprocsection530011) 2))- 0], LOC1); (*p0).blocks = (TY530095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock530019), ((NI) (topblock0))); line_533690_839829468(p0, ((Tcprocsection530011) 2), blockend0); } N_NIMCALL(void, endblock_545060_839829468)(Tcproc530021* p0) { NI topblock0; Ropeobj179006* blockend0; NI16 framelen0; topblock0 = (NI)(((*p0).blocks ? 
(*p0).blocks->Sup.len : 0) - ((NI) 1)); { TY179507 LOC5; if (!!(((*p0).blocks->data[topblock0].label == NIM_NIL))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = (*p0).blocks->data[topblock0].label; blockend0 = ropecg_533407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_552), LOC5, 1); } goto LA1; LA3: ; { TY534289 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); blockend0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_160), LOC7, 0); } LA1: ; framelen0 = (*p0).blocks->data[topblock0].framelen; { TY179507 LOC12; if (!(((NI16) 0) < framelen0)) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rope_179401_2381377266(((NI64) (framelen0))); addf_180205_2381377266(&blockend0, ((NimStringDesc*) &T839829468_553), LOC12, 1); } LA10: ; endblock_545035_839829468(p0, blockend0); } N_NIMCALL(void, genblock_547083_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { NI oldbreakidx_547099_839829468; TY534289 LOC8; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_298440_850551059((*n0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind293808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_538032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA6: ; oldbreakidx_547099_839829468 = (*p0).breakidx; memset((void*)LOC8, 0, sizeof(LOC8)); (*p0).breakidx = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC8, 0); { Tsym293834* sym0; if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 1)))) goto LA11; sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; (*sym0).loc.k = ((Tlockind293808) 10); (*sym0).position = (NI)((*p0).breakidx + ((NI) 1)); } LA11: ; expr_540248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], d0); endblock_545060_839829468(p0); (*p0).breakidx = oldbreakidx_547099_839829468; } N_NIMCALL(void, genstmtlistexpr_559402_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { NI length0; length0 = 
sonslen_296351_850551059(n0); { NI i_559420_839829468; NI HEX3Atmp_559424_839829468; NI res_559427_839829468; i_559420_839829468 = (NI)0; HEX3Atmp_559424_839829468 = (NI)0; HEX3Atmp_559424_839829468 = (NI)(length0 - ((NI) 2)); res_559427_839829468 = ((NI) 0); { while (1) { if (!(res_559427_839829468 <= HEX3Atmp_559424_839829468)) goto LA3; i_559420_839829468 = res_559427_839829468; genstmts_540244_839829468(p0, (*n0).kindU.S6.sons->data[i_559420_839829468]); res_559427_839829468 += ((NI) 1); } LA3: ; } } { if (!(((NI) 0) < length0)) goto LA6; expr_540248_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0); } LA6: ; } N_NIMCALL(void, genif_545982_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { Tloc293816 a0; Ropeobj179006* lelse0; Ropeobj179006* lend0; memset((void*)(&a0), 0, sizeof(a0)); lelse0 = (Ropeobj179006*)0; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_298440_850551059((*n0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind293808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_538032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA6: ; genlinedir_533823_839829468(p0, n0); lend0 = getlabel_540217_839829468(p0); { NI i_546011_839829468; NI HEX3Atmp_546435_839829468; NI LOC9; NI res_546438_839829468; i_546011_839829468 = (NI)0; HEX3Atmp_546435_839829468 = (NI)0; LOC9 = (NI)0; LOC9 = sonslen_296351_850551059(n0); HEX3Atmp_546435_839829468 = (NI)(LOC9 - ((NI) 1)); res_546438_839829468 = ((NI) 0); { while (1) { Tnode293802* it0; if (!(res_546438_839829468 <= HEX3Atmp_546435_839829468)) goto LA11; i_546011_839829468 = res_546438_839829468; { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = ((*d0).k == ((Tlockind293808) 1)); if (!(LOC14)) goto LA15; LOC14 = isemptytype_298440_850551059((*n0).typ); LA15: ; if (!LOC14) goto LA16; (*d0).k = ((Tlockind293808) 0); } LA16: ; it0 = (*n0).kindU.S6.sons->data[i_546011_839829468]; { NI LOC20; TY534289 LOC23; NI LOC24; TY533811 LOC25; 
LOC20 = (NI)0; LOC20 = len_294081_850551059(it0); if (!(LOC20 == ((NI) 2))) goto LA21; memset((void*)LOC23, 0, sizeof(LOC23)); LOC24 = (NI)0; LOC24 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC23, 0); initlocexprsingleuse_540289_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], (&a0)); lelse0 = getlabel_540217_839829468(p0); (*p0).labels += ((NI) 1); memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = rdloc_539188_839829468(a0); LOC25[1] = lelse0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_555), LOC25, 2); { NIM_BOOL LOC28; Ropeobj179006** LOC32; Ropeobj179006** LOC33; LOC28 = (NIM_BOOL)0; LOC28 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC28) goto LA29; LOC28 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA29: ; if (!LOC28) goto LA30; LOC32 = (Ropeobj179006**)0; LOC32 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); add_179487_2381377266(LOC32, ((NimStringDesc*) &T839829468_223)); expr_540248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0); LOC33 = (Ropeobj179006**)0; LOC33 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); add_179487_2381377266(LOC33, ((NimStringDesc*) &T839829468_280)); } goto LA26; LA30: ; { expr_540248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0); } LA26: ; endblock_545060_839829468(p0); { NI LOC37; TY179507 LOC40; LOC37 = (NI)0; LOC37 = sonslen_296351_850551059(n0); if (!(((NI) 1) < LOC37)) goto LA38; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = lend0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_556), LOC40, 1); } LA38: ; fixlabel_540230_839829468(p0, lelse0); } goto LA18; LA21: ; { NI LOC42; TY534289 LOC45; NI LOC46; LOC42 = (NI)0; LOC42 = len_294081_850551059(it0); if (!(LOC42 == ((NI) 1))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (NI)0; LOC46 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0); 
expr_540248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], d0); endblock_545060_839829468(p0); } goto LA18; LA43: ; { internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_557)); } LA18: ; res_546438_839829468 += ((NI) 1); } LA11: ; } } { NI LOC50; LOC50 = (NI)0; LOC50 = sonslen_296351_850551059(n0); if (!(((NI) 1) < LOC50)) goto LA51; fixlabel_540230_839829468(p0, lend0); } LA51: ; } N_NIMCALL(void, downconv_559581_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; expr_540248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], d0); } goto LA1; LA5: ; { Ttype293840* dest0; Tnode293802* arg0; Ttype293840* src0; Tloc293816 a0; Ropeobj179006* r0; NIM_BOOL isref0; Ttype293840* LOC10; dest0 = skiptypes_297099_850551059((*n0).typ, IL64(211106247256320)); arg0 = (*n0).kindU.S6.sons->data[((NI) 0)]; { while (1) { if (!((*arg0).kind == ((Tnodekind293020) 66))) goto LA9; arg0 = (*arg0).kindU.S6.sons->data[((NI) 0)]; } LA9: ; } src0 = skiptypes_297099_850551059((*arg0).typ, IL64(211106247256320)); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, arg0, (&a0)); r0 = rdloc_539188_839829468(a0); LOC10 = (Ttype293840*)0; LOC10 = skiptypes_297099_850551059((*arg0).typ, IL64(211106232576256)); isref0 = ((*LOC10).kind == ((Ttypekind293244) 22) || (*LOC10).kind == ((Ttypekind293244) 21) || (*LOC10).kind == ((Ttypekind293244) 23)); { if (!isref0) goto LA13; add_179487_2381377266(&r0, ((NimStringDesc*) &T839829468_558)); } goto LA11; LA13: ; { add_179487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); } LA11: ; { NI i_559650_839829468; NI HEX3Atmp_559677_839829468; NI LOC17; NI res_559680_839829468; i_559650_839829468 = (NI)0; HEX3Atmp_559677_839829468 = (NI)0; LOC17 = (NI)0; LOC17 = 
inheritancediff_327252_3876443242(dest0, src0); HEX3Atmp_559677_839829468 = (LOC17 > 0? (LOC17) : -(LOC17)); res_559680_839829468 = ((NI) 2); { while (1) { if (!(res_559680_839829468 <= HEX3Atmp_559677_839829468)) goto LA19; i_559650_839829468 = res_559680_839829468; add_179487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); res_559680_839829468 += ((NI) 1); } LA19: ; } } { if (!isref0) goto LA22; { NIM_BOOL LOC26; Ttype293840* LOC28; TY533811 LOC31; LOC26 = (NIM_BOOL)0; LOC26 = ((*d0).k == ((Tlockind293808) 0)); if (!(LOC26)) goto LA27; LOC28 = (Ttype293840*)0; LOC28 = skiptypes_297099_850551059((*n0).typ, IL64(211106232576256)); LOC26 = ((*LOC28).kind == ((Ttypekind293244) 22) || (*LOC28).kind == ((Ttypekind293244) 21) || (*LOC28).kind == ((Ttypekind293244) 23)); LA27: ; if (!LOC26) goto LA29; gettemp_538032_839829468(p0, (*n0).typ, d0, NIM_FALSE); memset((void*)LOC31, 0, sizeof(LOC31)); LOC31[0] = rdloc_539188_839829468((*d0)); LOC31[1] = r0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_559), LOC31, 2); } goto LA24; LA29: ; { r0 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_52), r0); putintodest_551468_839829468(p0, d0, (*n0).typ, r0, a0.s); } LA24: ; } goto LA20; LA22: ; { putintodest_551468_839829468(p0, d0, (*n0).typ, r0, a0.s); } LA20: ; } LA1: ; } N_NIMCALL(void, upconv_559431_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { Tloc293816 a0; Ttype293840* dest0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); dest0 = skiptypes_297099_850551059((*n0).typ, IL64(211106247256320)); { NIM_BOOL LOC3; NIM_BOOL LOC5; Ropeobj179006* r0; Ropeobj179006* nilcheck0; Ttype293840* t0; LOC3 = (NIM_BOOL)0; LOC3 = (((*p0).options &(1U<<((NU)(((Toption170009) 1))&31U)))!=0); if (!(LOC3)) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = isobjlackingtypefield_534513_839829468(dest0); LOC3 = !(LOC5); LA4: ; if (!LOC3) goto LA6; r0 = 
rdloc_539188_839829468(a0); nilcheck0 = NIM_NIL; t0 = skiptypes_297099_850551059(a0.t, IL64(211106232576256)); { while (1) { Ttype293840* LOC23; if (!((*t0).kind == ((Ttypekind293244) 23) || (*t0).kind == ((Ttypekind293244) 21) || (*t0).kind == ((Ttypekind293244) 22))) goto LA9; { if (!!(((*t0).kind == ((Ttypekind293244) 23)))) goto LA12; nilcheck0 = r0; } LA12: ; { NIM_BOOL LOC16; NIM_BOOL LOC18; TY179507 LOC22; LOC16 = (NIM_BOOL)0; LOC16 = !(((*t0).kind == ((Ttypekind293244) 23))); if (LOC16) goto LA17; LOC18 = (NIM_BOOL)0; LOC18 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC18) goto LA19; LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA19: ; LOC16 = !(LOC18); LA17: ; if (!LOC16) goto LA20; memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = r0; r0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_124), LOC22, 1); } LA20: ; LOC23 = (Ttype293840*)0; LOC23 = lastson_296377_850551059(t0); t0 = skiptypes_297099_850551059(LOC23, IL64(211106232576256)); } LA9: ; } { NIM_BOOL LOC26; LOC26 = (NIM_BOOL)0; LOC26 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC26) goto LA27; LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA27: ; if (!!(LOC26)) goto LA28; { while (1) { NIM_BOOL LOC32; LOC32 = (NIM_BOOL)0; LOC32 = ((*t0).kind == ((Ttypekind293244) 17)); if (!(LOC32)) goto LA33; LOC32 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL)); LA33: ; if (!LOC32) goto LA31; add_179487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); t0 = skiptypes_297099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360)); } LA31: ; } } LA28: ; { TY536238 LOC38; if (!!((nilcheck0 == NIM_NIL))) goto LA36; memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = nilcheck0; LOC38[1] = r0; LOC38[2] = gentypeinfo_536941_839829468((*p0).module, dest0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_560), LOC38, 3); } goto LA34; LA36: ; { TY533811 LOC40; 
memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = r0; LOC40[1] = gentypeinfo_536941_839829468((*p0).module, dest0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_561), LOC40, 2); } LA34: ; } LA6: ; { TY533811 LOC45; Ropeobj179006* LOC46; if (!!(((*(*(*n0).kindU.S6.sons->data[((NI) 0)]).typ).kind == ((Ttypekind293244) 17)))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC45[0] = gettypedesc_536671_839829468((*p0).module, (*n0).typ); LOC45[1] = rdloc_539188_839829468(a0); LOC46 = (Ropeobj179006*)0; LOC46 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_430), LOC45, 2); putintodest_551468_839829468(p0, d0, (*n0).typ, LOC46, a0.s); } goto LA41; LA43: ; { TY533811 LOC48; Ropeobj179006* LOC49; memset((void*)LOC48, 0, sizeof(LOC48)); LOC48[0] = gettypedesc_536671_839829468((*p0).module, dest0); LOC48[1] = addrloc_539204_839829468(a0); LOC49 = (Ropeobj179006*)0; LOC49 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_429), LOC48, 2); putintodest_551468_839829468(p0, d0, (*n0).typ, LOC49, a0.s); } LA41: ; } N_NIMCALL(void, genrangechck_557590_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0, NimStringDesc* magic0) { Tloc293816 a0; Ttype293840* dest0; memset((void*)(&a0), 0, sizeof(a0)); dest0 = skiptypes_297099_850551059((*n0).typ, IL64(211106240964864)); { NIM_BOOL LOC3; Ttype293840* LOC5; TY533811 LOC8; Ropeobj179006* LOC9; LOC3 = (NIM_BOOL)0; LOC3 = !((((*p0).options &(1U<<((NU)(((Toption170009) 3))&31U)))!=0)); if (LOC3) goto LA4; LOC5 = (Ttype293840*)0; LOC5 = skiptypes_297099_850551059(dest0, 1048576); LOC3 = ((*LOC5).kind >= ((Ttypekind293244) 40) && (*LOC5).kind <= ((Ttypekind293244) 44)); LA4: ; if (!LOC3) goto LA6; initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = gettypedesc_536671_839829468((*p0).module, dest0); LOC8[1] = rdcharloc_539227_839829468(a0); LOC9 = (Ropeobj179006*)0; LOC9 = 
HEX25_179905_2381377266(((NimStringDesc*) &T839829468_430), LOC8, 2); putintodest_551468_839829468(p0, d0, (*n0).typ, LOC9, a0.s); } goto LA1; LA6: ; { TY537475 LOC11; Ropeobj179006* LOC12; initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = gettypedesc_536671_839829468((*p0).module, dest0); LOC11[1] = rdcharloc_539227_839829468(a0); LOC11[2] = genliteral_550476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], dest0); LOC11[3] = genliteral_550476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 2)], dest0); LOC11[4] = rope_179277_2381377266(magic0); LOC12 = (Ropeobj179006*)0; LOC12 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_562), LOC11, 5); putintodest_551468_839829468(p0, d0, dest0, LOC12, a0.s); } LA1: ; } N_NIMCALL(void, convstrtocstr_557642_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { Tloc293816 a0; Ttype293840* LOC1; TY179507 LOC2; Ropeobj179006* LOC3; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC1 = (Ttype293840*)0; LOC1 = skiptypes_297099_850551059((*n0).typ, IL64(211106240964864)); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rdloc_539188_839829468(a0); LOC3 = (Ropeobj179006*)0; LOC3 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_485), LOC2, 1); putintodest_551468_839829468(p0, d0, LOC1, LOC3, a0.s); } N_NIMCALL(void, convcstrtostr_557654_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { Tloc293816 a0; Ttype293840* LOC1; TY179507 LOC2; Ropeobj179006* LOC3; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC1 = (Ttype293840*)0; LOC1 = skiptypes_297099_850551059((*n0).typ, IL64(211106240964864)); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rdloc_539188_839829468(a0); LOC3 = (Ropeobj179006*)0; LOC3 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_411), 
LOC2, 1); putintodest_551468_839829468(p0, d0, LOC1, LOC3, a0.s); gcusage_555439_839829468(n0); } static N_INLINE(NIM_BOOL, isroutine_298323_850551059)(Tsym293834* s0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = ((258048 &(1U<<((NU)((*s0).kind)&31U)))!=0); return result0; } static N_INLINE(NIM_BOOL, isconstclosure_558810_839829468)(Tnode293802* n0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC2; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC2 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 3)); if (!(LOC2)) goto LA3; LOC2 = isroutine_298323_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym); LA3: ; LOC1 = LOC2; if (!(LOC1)) goto LA4; LOC1 = ((*(*n0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind293020) 23)); LA4: ; result0 = LOC1; return result0; } N_NIMCALL(void, genclosure_558836_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { { NIM_BOOL LOC3; Ropeobj179006* tmp0; Ropeobj179006* LOC6; TY536238 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = isconstclosure_558810_839829468(n0); if (!LOC3) goto LA4; (*(*p0).module).labels += ((NI) 1); LOC6 = (Ropeobj179006*)0; LOC6 = rope_179401_2381377266(((NI64) ((*(*p0).module).labels))); tmp0 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_566), LOC6); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = gettypedesc_536671_839829468((*p0).module, (*n0).typ); LOC7[1] = tmp0; LOC7[2] = genconstexpr_555849_839829468(p0, n0); addf_180205_2381377266(&(*(*p0).module).s[(((Tcfilesection530005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC7, 3); putintodest_551468_839829468(p0, d0, (*n0).typ, tmp0, ((Tstorageloc293812) 1)); } goto LA1; LA4: ; { Tloc293816 tmp0; Tloc293816 a0; Tloc293816 b0; TY536238 LOC14; memset((void*)(&tmp0), 0, sizeof(tmp0)); memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 
1)], (&b0)); { Tnode293802* LOC11; LOC11 = (Tnode293802*)0; LOC11 = skipconv_329882_3876443242((*n0).kindU.S6.sons->data[((NI) 0)]); if (!((*LOC11).kind == ((Tnodekind293020) 155))) goto LA12; internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_567)); } LA12: ; gettemp_538032_839829468(p0, (*n0).typ, (&tmp0), NIM_FALSE); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_539188_839829468(tmp0); LOC14[1] = rdloc_539188_839829468(a0); LOC14[2] = rdloc_539188_839829468(b0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_568), LOC14, 3); putlocintodest_540258_839829468(p0, d0, tmp0); } LA1: ; } static N_INLINE(Ropeobj179006*, assignlabel_545020_839829468)(Tblock530019* b0) { Ropeobj179006* result0; Ropeobj179006* LOC1; result0 = (Ropeobj179006*)0; LOC1 = (Ropeobj179006*)0; LOC1 = rope_179401_2381377266(((NI64) ((*b0).id))); unsureAsgnRef((void**) (&(*b0).label), HEX26_179452_2381377266(((NimStringDesc*) &T839829468_296), LOC1)); result0 = (*b0).label; return result0; } N_NIMCALL(void, gencomputedgoto_546744_839829468)(Tcproc530021* p0, Tnode293802* n0) { NI casepos0; NI arraysize0; NI id0; Ropeobj179006* tmp0; TY179507 LOC27; Ropeobj179006* gotoarray0; TY533811 LOC28; TY179507 LOC33; NI topblock0; Ropeobj179006* oldbody0; Ropeobj179006* tailb0; Ropeobj179006* taila0; Tnode293802* casestmt0; Tloc293816 a_546871_839829468; TY533811 LOC41; { casepos0 = ((NI) -1); arraysize0 = (NI)0; { NI i_546768_839829468; NI HEX3Atmp_546933_839829468; NI LOC2; NI res_546936_839829468; i_546768_839829468 = (NI)0; HEX3Atmp_546933_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = len_294081_850551059(n0); HEX3Atmp_546933_839829468 = (LOC2 - 1); res_546936_839829468 = ((NI) 0); { while (1) { Tnode293802* it0; if (!(res_546936_839829468 <= HEX3Atmp_546933_839829468)) goto LA4; i_546768_839829468 = res_546936_839829468; it0 = (*n0).kindU.S6.sons->data[i_546768_839829468]; { NI64 asize0; if (!((*it0).kind == ((Tnodekind293020) 97))) goto 
LA7; { Tnode293802* LOC11; LOC11 = (Tnode293802*)0; LOC11 = lastson_296364_850551059(it0); if (!!(((*LOC11).kind == ((Tnodekind293020) 85)))) goto LA12; localerror_197085_155036129((*it0).info, ((NimStringDesc*) &T839829468_570)); goto BeforeRet; } LA12: ; casepos0 = i_546768_839829468; asize0 = lengthord_321007_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ); { if (!(IL64(10000) < asize0)) goto LA16; localerror_197085_155036129((*it0).info, ((NimStringDesc*) &T839829468_571)); goto BeforeRet; } LA16: ; arraysize0 = ((NI) (asize0)); { NI64 LOC20; LOC20 = (NI64)0; LOC20 = firstord_321001_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ); if (!!((LOC20 == IL64(0)))) goto LA21; localerror_197085_155036129((*it0).info, ((NimStringDesc*) &T839829468_572)); goto BeforeRet; } LA21: ; } LA7: ; res_546936_839829468 += ((NI) 1); } LA4: ; } } { if (!(casepos0 < ((NI) 0))) goto LA25; localerror_197085_155036129((*n0).info, ((NimStringDesc*) &T839829468_573)); goto BeforeRet; } LA25: ; id0 = (NI)(((NI) ((*p0).labels)) + ((NI) 1)); (*p0).labels += (NI)(arraysize0 + ((NI) 1)); memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = rope_179401_2381377266(((NI64) (id0))); tmp0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_574), LOC27, 1); memset((void*)LOC28, 0, sizeof(LOC28)); LOC28[0] = tmp0; LOC28[1] = rope_179401_2381377266(((NI64) (arraysize0))); gotoarray0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_575), LOC28, 2); { NI i_546819_839829468; NI HEX3Atmp_546941_839829468; NI res_546944_839829468; i_546819_839829468 = (NI)0; HEX3Atmp_546941_839829468 = (NI)0; HEX3Atmp_546941_839829468 = (NI)(arraysize0 - ((NI) 1)); res_546944_839829468 = ((NI) 1); { while (1) { TY179507 LOC32; if (!(res_546944_839829468 <= HEX3Atmp_546941_839829468)) goto LA31; i_546819_839829468 = res_546944_839829468; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = rope_179401_2381377266(((NI64) ((NI)(((NI) (id0)) + i_546819_839829468)))); addf_180205_2381377266(&gotoarray0, 
((NimStringDesc*) &T839829468_576), LOC32, 1); res_546944_839829468 += ((NI) 1); } LA31: ; } } memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = rope_179401_2381377266(((NI64) ((NI)(((NI) (id0)) + arraysize0)))); addf_180205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_577), LOC33, 1); line_533690_839829468(p0, ((Tcprocsection530011) 0), gotoarray0); topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1)); oldbody0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection530011) 2))- 0]; asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection530011) 2))- 0]), NIM_NIL); { NI j_546854_839829468; NI HEX3Atmp_546949_839829468; NI HEX3Atmp_546950_839829468; NI LOC35; NI res_546953_839829468; j_546854_839829468 = (NI)0; HEX3Atmp_546949_839829468 = (NI)0; HEX3Atmp_546950_839829468 = (NI)0; HEX3Atmp_546949_839829468 = (NI)(casepos0 + ((NI) 1)); LOC35 = (NI)0; LOC35 = len_294081_850551059(n0); HEX3Atmp_546950_839829468 = (LOC35 - 1); res_546953_839829468 = HEX3Atmp_546949_839829468; { while (1) { if (!(res_546953_839829468 <= HEX3Atmp_546950_839829468)) goto LA37; j_546854_839829468 = res_546953_839829468; genstmts_540244_839829468(p0, (*n0).kindU.S6.sons->data[j_546854_839829468]); res_546953_839829468 += ((NI) 1); } LA37: ; } } tailb0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection530011) 2))- 0]; asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection530011) 2))- 0]), NIM_NIL); { NI j_546866_839829468; NI HEX3Atmp_546958_839829468; NI res_546961_839829468; j_546866_839829468 = (NI)0; HEX3Atmp_546958_839829468 = (NI)0; HEX3Atmp_546958_839829468 = (NI)(casepos0 - ((NI) 1)); res_546961_839829468 = ((NI) 0); { while (1) { if (!(res_546961_839829468 <= HEX3Atmp_546958_839829468)) goto LA40; j_546866_839829468 = res_546961_839829468; genstmts_540244_839829468(p0, (*n0).kindU.S6.sons->data[j_546866_839829468]); res_546961_839829468 += ((NI) 1); } LA40: ; } } taila0 = 
(*p0).blocks->data[topblock0].sections[(((Tcprocsection530011) 2))- 0]; asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection530011) 2))- 0]), HEX26_179418_2381377266(oldbody0, taila0)); casestmt0 = (*n0).kindU.S6.sons->data[casepos0]; memset((void*)(&a_546871_839829468), 0, sizeof(a_546871_839829468)); initlocexpr_540283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a_546871_839829468)); memset((void*)LOC41, 0, sizeof(LOC41)); LOC41[0] = tmp0; LOC41[1] = rdloc_539188_839829468(a_546871_839829468); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_578), LOC41, 2); { NI i_546894_839829468; NI HEX3Atmp_546977_839829468; NI LOC43; NI res_546980_839829468; i_546894_839829468 = (NI)0; HEX3Atmp_546977_839829468 = (NI)0; LOC43 = (NI)0; LOC43 = len_294081_850551059(casestmt0); HEX3Atmp_546977_839829468 = (LOC43 - 1); res_546980_839829468 = ((NI) 1); { while (1) { TY534289 LOC46; NI LOC47; Tnode293802* it0; Tnode293802* LOC57; Ropeobj179006** LOC58; Ropeobj179006** LOC59; Tloc293816 a0; TY533811 LOC60; if (!(res_546980_839829468 <= HEX3Atmp_546977_839829468)) goto LA45; i_546894_839829468 = res_546980_839829468; memset((void*)LOC46, 0, sizeof(LOC46)); LOC47 = (NI)0; LOC47 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC46, 0); it0 = (*casestmt0).kindU.S6.sons->data[i_546894_839829468]; { NI j_546910_839829468; NI HEX3Atmp_546969_839829468; NI LOC49; NI res_546972_839829468; j_546910_839829468 = (NI)0; HEX3Atmp_546969_839829468 = (NI)0; LOC49 = (NI)0; LOC49 = len_294081_850551059(it0); HEX3Atmp_546969_839829468 = (NI)(LOC49 - ((NI) 2)); res_546972_839829468 = ((NI) 0); { while (1) { NI64 val0; TY179507 LOC56; if (!(res_546972_839829468 <= HEX3Atmp_546969_839829468)) goto LA51; j_546910_839829468 = res_546972_839829468; { if (!((*(*it0).kindU.S6.sons->data[j_546910_839829468]).kind == ((Tnodekind293020) 44))) goto LA54; localerror_197085_155036129((*it0).info, 
((NimStringDesc*) &T839829468_579)); goto BeforeRet; } LA54: ; val0 = getordvalue_321129_3876443242((*it0).kindU.S6.sons->data[j_546910_839829468]); memset((void*)LOC56, 0, sizeof(LOC56)); LOC56[0] = intliteral_540270_839829468((NI64)((NI64)(val0 + ((NI64) (id0))) + IL64(1))); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_580), LOC56, 1); res_546972_839829468 += ((NI) 1); } LA51: ; } } LOC57 = (Tnode293802*)0; LOC57 = lastson_296364_850551059(it0); genstmts_540244_839829468(p0, LOC57); LOC58 = (Ropeobj179006**)0; LOC58 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); add_179482_2381377266(LOC58, tailb0); LOC59 = (Ropeobj179006**)0; LOC59 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); add_179482_2381377266(LOC59, taila0); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC60, 0, sizeof(LOC60)); LOC60[0] = tmp0; LOC60[1] = rdloc_539188_839829468(a0); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_578), LOC60, 2); endblock_545060_839829468(p0); res_546980_839829468 += ((NI) 1); } LA45: ; } } }BeforeRet: ; } N_NIMCALL(void, genwhilestmt_546984_839829468)(Tcproc530021* p0, Tnode293802* t0) { Tloc293816 a0; NI oldbreakidx_547011_839829468; TY534289 LOC1; Tnode293802* loopbody0; memset((void*)(&a0), 0, sizeof(a0)); (*p0).withinloop += ((NI) 1); genlinedir_533823_839829468(p0, t0); oldbreakidx_547011_839829468 = (*p0).breakidx; memset((void*)LOC1, 0, sizeof(LOC1)); (*p0).breakidx = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_569), LOC1, 0); (*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE; initlocexpr_540283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); { NIM_BOOL LOC4; Ropeobj179006* label0; TY533811 LOC8; LOC4 = (NIM_BOOL)0; LOC4 = !(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 6))); if (LOC4) goto LA5; LOC4 = 
((*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval == IL64(0)); LA5: ; if (!LOC4) goto LA6; label0 = assignlabel_545020_839829468((&(*p0).blocks->data[(*p0).breakidx])); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_539188_839829468(a0); LOC8[1] = label0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_555), LOC8, 2); } LA6: ; loopbody0 = (*t0).kindU.S6.sons->data[((NI) 1)]; { NIM_BOOL LOC11; LOC11 = (NIM_BOOL)0; LOC11 = stmtscontainpragma_529083_2036603609(loopbody0, ((Tspecialword276003) 182)); if (!(LOC11)) goto LA12; LOC11 = ((Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop274004) 1))&7U)))!=0); LA12: ; if (!LOC11) goto LA13; { NIM_BOOL LOC17; NI LOC18; LOC17 = (NIM_BOOL)0; LOC18 = (NI)0; LOC18 = len_294081_850551059(loopbody0); LOC17 = (LOC18 == ((NI) 2)); if (!(LOC17)) goto LA19; LOC17 = ((*(*loopbody0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 1)); LA19: ; if (!LOC17) goto LA20; loopbody0 = (*loopbody0).kindU.S6.sons->data[((NI) 1)]; } LA20: ; gencomputedgoto_546744_839829468(p0, loopbody0); } goto LA9; LA13: ; { genstmts_540244_839829468(p0, loopbody0); } LA9: ; { TY534289 LOC27; if (!(((*p0).options &(1U<<((NU)(((Toption170009) 19))&31U)))!=0)) goto LA25; memset((void*)LOC27, 0, sizeof(LOC27)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_581), LOC27, 0); } LA25: ; endblock_545060_839829468(p0); (*p0).breakidx = oldbreakidx_547011_839829468; (*p0).withinloop -= ((NI) 1); } N_NIMCALL(void, gengotovar_545258_839829468)(Tcproc530021* p0, Tnode293802* value0) { { if (!!(((*value0).kind >= ((Tnodekind293020) 5) && (*value0).kind <= ((Tnodekind293020) 15)))) goto LA3; localerror_197085_155036129((*value0).info, ((NimStringDesc*) &T839829468_582)); } goto LA1; LA3: ; { TY179507 LOC6; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rope_179401_2381377266((*value0).kindU.S1.intval); linef_533700_839829468(p0, 
((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_583), LOC6, 1); } LA1: ; } N_NIMCALL(void, varindynamiclib_539812_839829468)(Tcgen530027* m0, Tsym293834* sym0) { Tlib293820* lib0; Ropeobj179006* extname0; Ropeobj179006* tmp0; TY536235 LOC1; NimStringDesc* LOC2; TY533811 LOC3; lib0 = (*sym0).annex; extname0 = (*sym0).loc.r; loaddynamiclib_560480_839829468(m0, lib0); (*sym0).loc.flags |= ((NU16)1)<<((((Tlocflag293810) 0))%(sizeof(NU16)*8)); tmp0 = mangledynlibproc_539816_839829468(sym0); asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0); (*m0).labels += ((NI) 2); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = tmp0; LOC1[1] = gettypedesc_536671_839829468(m0, (*sym0).typ); LOC1[2] = (*lib0).name; LOC2 = (NimStringDesc*)0; LOC2 = HEX24_179856_2381377266(extname0); LOC1[3] = makecstring_192638_155036129(LOC2); appcg_533632_839829468(m0, &(*m0).s[(((Tcfilesection530005) 16))- 0], ((NimStringDesc*) &T839829468_584), LOC1, 4); memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = (*sym0).loc.r; LOC3[1] = gettypedesc_536671_839829468(m0, (*sym0).loc.t); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 9))- 0], ((NimStringDesc*) &T839829468_585), LOC3, 2); } N_NIMCALL(void, assignglobalvar_539819_839829468)(Tcproc530021* p0, Tsym293834* s0) { { { Ropeobj179006* LOC5; if (!((*s0).loc.k == ((Tlockind293808) 0))) goto LA3; LOC5 = (Ropeobj179006*)0; LOC5 = manglename_534205_839829468(s0); fillloc_533282_839829468((&(*s0).loc), ((Tlockind293808) 3), (*s0).typ, LOC5, ((Tstorageloc293812) 3)); } LA3: ; { Tcgen530027* q0; if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag293810) 4))&15U)))!=0)) goto LA8; q0 = findpendingmodule_533241_839829468((*p0).module, s0); { NIM_BOOL LOC12; NIM_BOOL LOC14; LOC12 = (NIM_BOOL)0; LOC12 = !((q0 == NIM_NIL)); if (!(LOC12)) goto LA13; LOC14 = (NIM_BOOL)0; LOC14 = containsorincl_269862_2627731572((&(*q0).declaredthings), (*s0).Sup.id); LOC12 = !(LOC14); LA13: ; if (!LOC12) goto LA15; varindynamiclib_539812_839829468(q0, s0); } goto LA10; LA15: ; { 
asgnRefNoCycle((void**) (&(*s0).loc.r), mangledynlibproc_539816_839829468(s0)); } LA10: ; goto BeforeRet; } LA8: ; useheader_533369_839829468((*p0).module, s0); { if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag293810) 3))&15U)))!=0)) goto LA20; goto BeforeRet; } LA20: ; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag293184) 22))&31U)))!=0)) goto LA24; declarethreadvar_539676_839829468((*p0).module, s0, (((*s0).flags &(1U<<((NU)(((Tsymflag293184) 5))&31U)))!=0)); } goto LA22; LA24: ; { Ropeobj179006* decl0; Ropeobj179006* td0; decl0 = NIM_NIL; td0 = gettypedesc_536671_839829468((*p0).module, (*s0).loc.t); { TY179507 LOC43; if (!(*s0).constraint == 0) goto LA29; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag293184) 5))&31U)))!=0)) goto LA33; add_179487_2381377266(&decl0, ((NimStringDesc*) &T839829468_240)); } LA33: ; add_179482_2381377266(&decl0, td0); { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag293184) 8))&31U)))!=0)) goto LA37; add_179487_2381377266(&decl0, ((NimStringDesc*) &T839829468_121)); } LA37: ; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag293184) 7))&31U)))!=0)) goto LA41; add_179487_2381377266(&decl0, ((NimStringDesc*) &T839829468_122)); } LA41: ; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = (*s0).loc.r; addf_180205_2381377266(&decl0, ((NimStringDesc*) &T839829468_242), LOC43, 1); } goto LA27; LA29: ; { NimStringDesc* LOC45; TY533811 LOC46; LOC45 = (NimStringDesc*)0; LOC45 = rawNewString((*(*s0).constraint).kindU.S3.strval->Sup.len + 3); appendString(LOC45, (*(*s0).constraint).kindU.S3.strval); appendString(LOC45, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC46, 0, sizeof(LOC46)); LOC46[0] = td0; LOC46[1] = (*s0).loc.r; decl0 = HEX25_179905_2381377266(LOC45, LOC46, 2); } LA27: ; add_179482_2381377266(&(*(*p0).module).s[(((Tcfilesection530005) 9))- 0], decl0); } LA22: ; { if (!(((NI) 0) < (*p0).withinloop)) goto LA49; resetloc_539350_839829468(p0, (&(*s0).loc)); } LA49: ; { TY536238 LOC55; NimStringDesc* LOC56; NimStringDesc* LOC57; if 
(!(((*(*(*p0).module).module).options & 163840) == 163840)) goto LA53; memset((void*)LOC55, 0, sizeof(LOC55)); LOC56 = (NimStringDesc*)0; LOC56 = rawNewString((*(*(*s0).owner).name).s->Sup.len + (*(*s0).name).s->Sup.len + 1); appendString(LOC56, (*(*(*s0).owner).name).s); appendChar(LOC56, 46); appendString(LOC56, (*(*s0).name).s); LOC57 = (NimStringDesc*)0; LOC57 = nsuNormalize(LOC56); LOC55[0] = makecstring_192638_155036129(LOC57); LOC55[1] = (*s0).loc.r; LOC55[2] = gentypeinfo_536941_839829468((*p0).module, (*s0).typ); appcg_533632_839829468((*p0).module, &(*(*p0).module).s[(((Tcfilesection530005) 15))- 0], ((NimStringDesc*) &T839829468_586), LOC55, 3); } LA53: ; }BeforeRet: ; } N_NIMCALL(Ropeobj179006*, gentraverseprocforglobal_539032_839829468)(Tcgen530027* m0, Tsym293834* s0) { Ropeobj179006* result0; Ropeobj179006* LOC1; Ttraversalclosure538019 c0; Tcproc530021* p0; Ropeobj179006* sloc0; Ropeobj179006* header0; TY179507 LOC8; Ropeobj179006* generatedproc0; TY536235 LOC9; Ropeobj179006** LOC10; Ropeobj179006** LOC11; Ropeobj179006** LOC12; TY179507 LOC13; result0 = (Ropeobj179006*)0; LOC1 = (Ropeobj179006*)0; LOC1 = gentypeinfo_536941_839829468(m0, (*s0).loc.t); memset((void*)(&c0), 0, sizeof(c0)); p0 = newproc_530206_3723162438(NIM_NIL, m0); sloc0 = (*s0).loc.r; result0 = gettempname_534596_839829468(m0); { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = (((*s0).flags &(1U<<((NU)(((Tsymflag293184) 22))&31U)))!=0); if (!(LOC4)) goto LA5; LOC4 = emulatedthreadvars_533949_839829468(); LA5: ; if (!LOC4) goto LA6; accessthreadlocalvar_533945_839829468(p0, s0); sloc0 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_288), sloc0); } LA6: ; c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_587)); c0.p = p0; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = result0; header0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_588), LOC8, 1); gentraverseproc_538022_839829468((&c0), sloc0, (*s0).loc.t); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = header0; 
LOC10 = (Ropeobj179006**)0; LOC10 = s_530179_3723162438(p0, ((Tcprocsection530011) 0)); LOC9[1] = (*LOC10); LOC11 = (Ropeobj179006**)0; LOC11 = s_530179_3723162438(p0, ((Tcprocsection530011) 1)); LOC9[2] = (*LOC11); LOC12 = (Ropeobj179006**)0; LOC12 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); LOC9[3] = (*LOC12); generatedproc0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_190), LOC9, 4); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = header0; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC13, 1); add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 10))- 0], generatedproc0); return result0; } N_NIMCALL(void, registergcroot_544762_839829468)(Tcproc530021* p0, Tsym293834* v0) { { NIM_BOOL LOC3; Ropeobj179006* prc0; Ropeobj179006** LOC7; TY179507 LOC8; LOC3 = (NIM_BOOL)0; LOC3 = ((240 &(1U<<((NU)(gselectedgc_170133_2607990831)&7U)))!=0); if (!(LOC3)) goto LA4; LOC3 = containsgarbagecollectedref_321117_3876443242((*v0).loc.t); LA4: ; if (!LOC3) goto LA5; prc0 = gentraverseprocforglobal_539032_839829468((*p0).module, v0); LOC7 = (Ropeobj179006**)0; LOC7 = procsec_530194_3723162438((*(*p0).module).initproc, ((Tcprocsection530011) 1)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = prc0; appcg_533632_839829468((*p0).module, LOC7, ((NimStringDesc*) &T839829468_589), LOC8, 1); } LA5: ; } static N_INLINE(NIM_BOOL, isassignedimmediately_544781_839829468)(Tnode293802* n0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { if (!((*n0).kind == ((Tnodekind293020) 1))) goto LA3; result0 = NIM_FALSE; goto BeforeRet; } LA3: ; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = isinvalidreturntype_534548_839829468((*n0).typ); if (!LOC7) goto LA8; result0 = NIM_FALSE; goto BeforeRet; } LA8: ; result0 = NIM_TRUE; }BeforeRet: ; return result0; } N_NIMCALL(void, genasgncall_544695_839829468)(Tcproc530021* p0, Tnode293802* le0, Tnode293802* ri0, Tloc293816* d0) { { Ttype293840* LOC3; LOC3 = (Ttype293840*)0; LOC3 = 
skiptypes_297099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, 2048); if (!((*LOC3).callconv == ((Tcallingconvention293002) 8))) goto LA4; genclosurecall_541452_839829468(p0, le0, ri0, d0); } goto LA1; LA4: ; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 3)); if (!(LOC7)) goto LA8; LOC7 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA8: ; if (!LOC7) goto LA9; geninfixcall_542929_839829468(p0, le0, ri0, d0); } goto LA1; LA9: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 3)); if (!(LOC12)) goto LA13; LOC12 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag293184) 28))&31U)))!=0); LA13: ; if (!LOC12) goto LA14; gennamedparamcall_543616_839829468(p0, ri0, d0); } goto LA1; LA14: ; { genprefixcall_540960_839829468(p0, le0, ri0, d0); } LA1: ; poststmtactions_533942_839829468(p0); } static N_INLINE(void, loadinto_544928_839829468)(Tcproc530021* p0, Tnode293802* le0, Tnode293802* ri0, Tloc293816* a0) { { NIM_BOOL LOC3; NIM_BOOL LOC5; LOC3 = (NIM_BOOL)0; LOC3 = ((*ri0).kind == ((Tnodekind293020) 27) || (*ri0).kind == ((Tnodekind293020) 29) || (*ri0).kind == ((Tnodekind293020) 30) || (*ri0).kind == ((Tnodekind293020) 31) || (*ri0).kind == ((Tnodekind293020) 26) || (*ri0).kind == ((Tnodekind293020) 28) || (*ri0).kind == ((Tnodekind293020) 32)); if (!(LOC3)) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = !(((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 3))); if (LOC5) goto LA6; LOC5 = ((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).magic == ((Tmagic293524) 0)); LA6: ; LOC3 = LOC5; LA4: ; if (!LOC3) goto LA7; genasgncall_544695_839829468(p0, le0, ri0, a0); } goto LA1; LA7: ; { if (!((*ri0).kind == ((Tnodekind293020) 47) || (*ri0).kind == ((Tnodekind293020) 65))) goto LA10; genderef_544921_839829468(p0, ri0, a0, 
NIM_TRUE); } goto LA1; LA10: ; { expr_540248_839829468(p0, ri0, a0); } LA1: ; } N_NIMCALL(void, gensinglevar_545276_839829468)(Tcproc530021* p0, Tnode293802* a0) { Tsym293834* v0; Tcproc530021* targetproc0; { v0 = (*(*a0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { if (!!(((1082130432 & (*v0).flags) == 0))) goto LA3; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag293184) 30))&31U)))!=0)) goto LA7; gengotovar_545258_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 2)]); } LA7: ; goto BeforeRet; } LA3: ; targetproc0 = p0; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag293184) 3))&31U)))!=0)) goto LA11; { NIM_BOOL LOC15; NIM_BOOL LOC16; LOC15 = (NIM_BOOL)0; LOC16 = (NIM_BOOL)0; LOC16 = (((*v0).flags & 96) == 32); if (!(LOC16)) goto LA17; LOC16 = ((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind293020) 1)); LA17: ; LOC15 = LOC16; if (!(LOC15)) goto LA18; LOC15 = !((((*v0).loc.flags & 72) == 0)); LA18: ; if (!LOC15) goto LA19; goto BeforeRet; } LA19: ; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag293184) 9))&31U)))!=0)) goto LA23; targetproc0 = (*(*p0).module).preinitproc; } LA23: ; assignglobalvar_539819_839829468(targetproc0, v0); genobjectinit_539242_839829468((*(*p0).module).preinitproc, ((Tcprocsection530011) 1), (*v0).typ, (*v0).loc, NIM_TRUE); { NIM_BOOL LOC27; LOC27 = (NIM_BOOL)0; LOC27 = (((*v0).flags &(1U<<((NU)(((Tsymflag293184) 6))&31U)))!=0); if (!(LOC27)) goto LA28; LOC27 = !((generatedheader_533201_839829468 == NIM_NIL)); LA28: ; if (!LOC27) goto LA29; genvarprototypeaux_545254_839829468(generatedheader_533201_839829468, v0); } LA29: ; registergcroot_544762_839829468(p0, v0); } goto LA9; LA11: ; { Tnode293802* value0; NIM_BOOL imm0; value0 = (*a0).kindU.S6.sons->data[((NI) 2)]; imm0 = isassignedimmediately_544781_839829468(value0); { NIM_BOOL LOC34; NIM_BOOL LOC35; NIM_BOOL LOC36; NIM_BOOL LOC38; NIM_BOOL LOC42; Ropeobj179006* decl0; Tloc293816 tmp0; LOC34 = (NIM_BOOL)0; LOC35 = (NIM_BOOL)0; LOC36 = (NIM_BOOL)0; LOC36 = imm0; if (!(LOC36)) goto LA37; 
LOC38 = (NIM_BOOL)0; LOC38 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC38) goto LA39; LOC38 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA39: ; LOC36 = LOC38; LA37: ; LOC35 = LOC36; if (!(LOC35)) goto LA40; LOC35 = ((*p0).splitdecls == ((NI) 0)); LA40: ; LOC34 = LOC35; if (!(LOC34)) goto LA41; LOC42 = (NIM_BOOL)0; LOC42 = containshiddenpointer_321120_3876443242((*v0).typ); LOC34 = !(LOC42); LA41: ; if (!LOC34) goto LA43; genlinedir_533823_839829468(p0, a0); decl0 = localvardecl_539532_839829468(p0, v0); memset((void*)(&tmp0), 0, sizeof(tmp0)); { NIM_BOOL LOC47; NIM_BOOL LOC48; Tnode293802* LOC50; Tnode293802* LOC52; Ropeobj179006* params0; Ttype293840* typ0; TY533811 LOC66; LOC47 = (NIM_BOOL)0; LOC48 = (NIM_BOOL)0; LOC48 = ((*value0).kind == ((Tnodekind293020) 27) || (*value0).kind == ((Tnodekind293020) 29) || (*value0).kind == ((Tnodekind293020) 30) || (*value0).kind == ((Tnodekind293020) 31) || (*value0).kind == ((Tnodekind293020) 26) || (*value0).kind == ((Tnodekind293020) 28) || (*value0).kind == ((Tnodekind293020) 32)); if (!(LOC48)) goto LA49; LOC50 = (Tnode293802*)0; LOC50 = HEX5BHEX5D_294238_850551059(value0, ((NI) 0)); LOC48 = ((*LOC50).kind == ((Tnodekind293020) 3)); LA49: ; LOC47 = LOC48; if (!(LOC47)) goto LA51; LOC52 = (Tnode293802*)0; LOC52 = HEX5BHEX5D_294238_850551059(value0, ((NI) 0)); LOC47 = (((*(*LOC52).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag293184) 24))&31U)))!=0); LA51: ; if (!LOC47) goto LA53; params0 = (Ropeobj179006*)0; typ0 = skiptypes_297099_850551059((*(*value0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { NI i_545619_839829468; NI HEX3Atmp_545825_839829468; NI LOC56; NI res_545828_839829468; i_545619_839829468 = (NI)0; HEX3Atmp_545825_839829468 = (NI)0; LOC56 = (NI)0; LOC56 = len_294081_850551059(value0); HEX3Atmp_545825_839829468 = (LOC56 - 1); res_545828_839829468 = ((NI) 1); { while (1) { Ropeobj179006* LOC65; if (!(res_545828_839829468 <= 
HEX3Atmp_545825_839829468)) goto LA58; i_545619_839829468 = res_545828_839829468; { TY534289 LOC63; Ropeobj179006* LOC64; if (!!((params0 == NIM_NIL))) goto LA61; memset((void*)LOC63, 0, sizeof(LOC63)); LOC64 = (Ropeobj179006*)0; LOC64 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_110), LOC63, 0); add_179482_2381377266(&params0, LOC64); } LA61: ; LOC65 = (Ropeobj179006*)0; LOC65 = genotherarg_540277_839829468(p0, value0, i_545619_839829468, typ0); add_179482_2381377266(&params0, LOC65); res_545828_839829468 += ((NI) 1); } LA58: ; } } memset((void*)LOC66, 0, sizeof(LOC66)); LOC66[0] = decl0; LOC66[1] = params0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_590), LOC66, 2); } goto LA45; LA53: ; { TY533811 LOC68; initlocexprsingleuse_540289_839829468(p0, value0, (&tmp0)); memset((void*)LOC68, 0, sizeof(LOC68)); LOC68[0] = decl0; LOC68[1] = rdloc_539188_839829468(tmp0); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_591), LOC68, 2); } LA45: ; goto BeforeRet; } LA43: ; assignlocalvar_539614_839829468(p0, v0); initlocalvar_539398_839829468(p0, v0, imm0); } LA9: ; { if (!!(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind293020) 1)))) goto LA71; genlinedir_533823_839829468(targetproc0, a0); loadinto_544928_839829468(targetproc0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&(*v0).loc)); } LA71: ; }BeforeRet: ; } N_NIMCALL(void, genclosurevar_545832_839829468)(Tcproc530021* p0, Tnode293802* a0) { NIM_BOOL immediateasgn0; immediateasgn0 = !(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind293020) 1))); { Tloc293816 v0; if (!immediateasgn0) goto LA3; memset((void*)(&v0), 0, sizeof(v0)); initlocexpr_540283_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (&v0)); genlinedir_533823_839829468(p0, a0); loadinto_544928_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&v0)); } LA3: ; } N_NIMCALL(void, 
genvartuple_544794_839829468)(Tcproc530021* p0, Tnode293802* n0) { Tloc293816 tup0; Tloc293816 field0; NI L0; NIM_BOOL uselowering0; Ttype293840* t0; { memset((void*)(&tup0), 0, sizeof(tup0)); memset((void*)(&field0), 0, sizeof(field0)); { if (!!(((*n0).kind == ((Tnodekind293020) 36)))) goto LA3; internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592)); } LA3: ; L0 = sonslen_296351_850551059(n0); uselowering0 = NIM_FALSE; { NI i_544822_839829468; NI HEX3Atmp_544905_839829468; NI res_544908_839829468; i_544822_839829468 = (NI)0; HEX3Atmp_544905_839829468 = (NI)0; HEX3Atmp_544905_839829468 = (NI)(L0 - ((NI) 3)); res_544908_839829468 = ((NI) 0); { while (1) { if (!(res_544908_839829468 <= HEX3Atmp_544905_839829468)) goto LA7; i_544822_839829468 = res_544908_839829468; { Tnode293802* LOC10; LOC10 = (Tnode293802*)0; LOC10 = HEX5BHEX5D_294238_850551059(n0, i_544822_839829468); if (!!(((*LOC10).kind == ((Tnodekind293020) 3)))) goto LA11; uselowering0 = NIM_TRUE; goto LA5; } LA11: ; res_544908_839829468 += ((NI) 1); } LA7: ; } } LA5: ; { Tnode293802* LOC17; if (!uselowering0) goto LA15; LOC17 = (Tnode293802*)0; LOC17 = lowertupleunpacking_434037_2218250499(n0, (*p0).prc); genstmts_540244_839829468(p0, LOC17); goto BeforeRet; } LA15: ; genlinedir_533823_839829468(p0, n0); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(L0 - ((NI) 1))], (&tup0)); t0 = getuniquetype_529640_2036603609(tup0.t); { NI i_544846_839829468; NI HEX3Atmp_544914_839829468; NI res_544917_839829468; i_544846_839829468 = (NI)0; HEX3Atmp_544914_839829468 = (NI)0; HEX3Atmp_544914_839829468 = (NI)(L0 - ((NI) 3)); res_544917_839829468 = ((NI) 0); { while (1) { if (!(res_544917_839829468 <= HEX3Atmp_544914_839829468)) goto LA20; i_544846_839829468 = res_544917_839829468; { Tsym293834* v0; v0 = (*(*n0).kindU.S6.sons->data[i_544846_839829468]).kindU.S4.sym; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag293184) 23))&31U)))!=0)) goto LA24; goto LA21; } LA24: ; { if 
(!(((*v0).flags &(1U<<((NU)(((Tsymflag293184) 3))&31U)))!=0)) goto LA28; assignglobalvar_539819_839829468(p0, v0); genobjectinit_539242_839829468(p0, ((Tcprocsection530011) 1), (*v0).typ, (*v0).loc, NIM_TRUE); registergcroot_544762_839829468(p0, v0); } goto LA26; LA28: ; { Tnode293802* LOC31; NIM_BOOL LOC32; assignlocalvar_539614_839829468(p0, v0); LOC31 = (Tnode293802*)0; LOC31 = HEX5BHEX5D_294238_850551059(n0, (NI)(L0 - ((NI) 1))); LOC32 = (NIM_BOOL)0; LOC32 = isassignedimmediately_544781_839829468(LOC31); initlocalvar_539398_839829468(p0, v0, LOC32); } LA26: ; initloc_533273_839829468((&field0), ((Tlockind293808) 6), (*t0).sons->data[i_544846_839829468], tup0.s); { TY533811 LOC37; if (!((*t0).kind == ((Ttypekind293244) 18))) goto LA35; memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = rdloc_539188_839829468(tup0); LOC37[1] = rope_179401_2381377266(((NI64) (i_544846_839829468))); field0.r = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_185), LOC37, 2); } goto LA33; LA35: ; { TY533811 LOC43; { if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_544846_839829468]).kind == ((Tnodekind293020) 3)))) goto LA41; internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592)); } LA41: ; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = rdloc_539188_839829468(tup0); LOC43[1] = manglerecfieldname_535361_839829468((*(*(*t0).n).kindU.S6.sons->data[i_544846_839829468]).kindU.S4.sym, t0); field0.r = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_90), LOC43, 2); } LA33: ; putlocintodest_540258_839829468(p0, (&(*v0).loc), field0); } LA21: ; res_544917_839829468 += ((NI) 1); } LA20: ; } } }BeforeRet: ; } N_NIMCALL(void, genvarstmt_545854_839829468)(Tcproc530021* p0, Tnode293802* n0) { { NI i_545869_839829468; NI HEX3Atmp_545902_839829468; NI LOC2; NI res_545905_839829468; i_545869_839829468 = (NI)0; HEX3Atmp_545902_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_296351_850551059(n0); HEX3Atmp_545902_839829468 = (NI)(LOC2 - ((NI) 1)); res_545905_839829468 = 
((NI) 0); { while (1) { if (!(res_545905_839829468 <= HEX3Atmp_545902_839829468)) goto LA4; i_545869_839829468 = res_545905_839829468; { Tnode293802* a0; a0 = (*n0).kindU.S6.sons->data[i_545869_839829468]; { if (!((*a0).kind == ((Tnodekind293020) 125))) goto LA8; goto LA5; } LA8: ; { if (!((*a0).kind == ((Tnodekind293020) 35))) goto LA12; { if (!((*(*a0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 3))) goto LA16; gensinglevar_545276_839829468(p0, a0); } goto LA14; LA16: ; { genclosurevar_545832_839829468(p0, a0); } LA14: ; } goto LA10; LA12: ; { genvartuple_544794_839829468(p0, a0); } LA10: ; } LA5: ; res_545905_839829468 += ((NI) 1); } LA4: ; } } } static N_INLINE(NIM_BOOL, emitlazily_533248_839829468)(Tsym293834* s0) { NIM_BOOL result0; NIM_BOOL LOC1; Tsym293834* LOC3; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = ((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 2))&63U)))!=0); if (LOC1) goto LA2; LOC3 = (Tsym293834*)0; LOC3 = getmodule_300123_2984716966(s0); LOC1 = (((*LOC3).flags &(1U<<((NU)(((Tsymflag293184) 25))&31U)))!=0); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, genconststmt_545909_839829468)(Tcproc530021* p0, Tnode293802* t0) { { NI i_545924_839829468; NI HEX3Atmp_545975_839829468; NI LOC2; NI res_545978_839829468; i_545924_839829468 = (NI)0; HEX3Atmp_545975_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_296351_850551059(t0); HEX3Atmp_545975_839829468 = (NI)(LOC2 - ((NI) 1)); res_545978_839829468 = ((NI) 0); { while (1) { if (!(res_545978_839829468 <= HEX3Atmp_545975_839829468)) goto LA4; i_545924_839829468 = res_545978_839829468; { Tnode293802* it0; Tsym293834* c0; it0 = (*t0).kindU.S6.sons->data[i_545924_839829468]; { if (!((*it0).kind == ((Tnodekind293020) 125))) goto LA8; goto LA5; } LA8: ; { if (!!(((*it0).kind == ((Tnodekind293020) 102)))) goto LA12; internalerror_197100_155036129((*t0).info, ((NimStringDesc*) &T839829468_593)); } LA12: ; c0 = (*(*it0).kindU.S6.sons->data[((NI) 
0)]).kindU.S4.sym; { NIM_BOOL LOC16; LOC16 = (NIM_BOOL)0; LOC16 = containscompiletimeonly_329721_3876443242((*c0).typ); if (!LOC16) goto LA17; goto LA5; } goto LA14; LA17: ; { NIM_BOOL LOC20; NIM_BOOL LOC21; NI LOC24; LOC20 = (NIM_BOOL)0; LOC21 = (NIM_BOOL)0; LOC21 = ((*(*c0).typ).kind == ((Ttypekind293244) 4) || (*(*c0).typ).kind == ((Ttypekind293244) 16) || (*(*c0).typ).kind == ((Ttypekind293244) 19) || (*(*c0).typ).kind == ((Ttypekind293244) 18) || (*(*c0).typ).kind == ((Ttypekind293244) 24)); if (!(LOC21)) goto LA22; LOC21 = !((((*c0).loc.flags &(1U<<((NU)(((Tlocflag293810) 3))&15U)))!=0)); LA22: ; LOC20 = LOC21; if (!(LOC20)) goto LA23; LOC24 = (NI)0; LOC24 = len_294081_850551059((*c0).ast); LOC20 = !((LOC24 == ((NI) 0))); LA23: ; if (!LOC20) goto LA25; { NIM_BOOL LOC29; LOC29 = (NIM_BOOL)0; LOC29 = emitlazily_533248_839829468(c0); if (!!(LOC29)) goto LA30; requestconstimpl_540240_839829468(p0, c0); } LA30: ; } goto LA14; LA25: ; LA14: ; } LA5: ; res_545978_839829468 += ((NI) 1); } LA4: ; } } } N_NIMCALL(void, gencasestringbranch_548100_839829468)(Tcproc530021* p0, Tnode293802* b0, Tloc293816 e0, Ropeobj179006* labl0, Ropeobj179006** branches0, NI branches0Len0) { Tloc293816 x0; NI length0; memset((void*)(&x0), 0, sizeof(x0)); length0 = sonslen_296351_850551059(b0); { NI i_548122_839829468; NI HEX3Atmp_548409_839829468; NI res_548412_839829468; i_548122_839829468 = (NI)0; HEX3Atmp_548409_839829468 = (NI)0; HEX3Atmp_548409_839829468 = (NI)(length0 - ((NI) 2)); res_548412_839829468 = ((NI) 0); { while (1) { NI j0; NI64 LOC4; TY536238 LOC5; if (!(res_548412_839829468 <= HEX3Atmp_548409_839829468)) goto LA3; i_548122_839829468 = res_548412_839829468; initlocexpr_540283_839829468(p0, (*b0).kindU.S6.sons->data[i_548122_839829468], (&x0)); LOC4 = (NI64)0; LOC4 = hashstring_529100_2036603609((*(*b0).kindU.S6.sons->data[i_548122_839829468]).kindU.S3.strval); j0 = ((NI) ((NI64)(LOC4 & ((NI64) ((branches0Len0-1)))))); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = 
rdloc_539188_839829468(e0); LOC5[1] = rdloc_539188_839829468(x0); LOC5[2] = labl0; appcg_533632_839829468((*p0).module, &branches0[j0], ((NimStringDesc*) &T839829468_595), LOC5, 3); res_548412_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(void, exprblock_545103_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { TY534289 LOC1; NI LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (NI)0; LOC2 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC1, 0); expr_540248_839829468(p0, n0, d0); endblock_545060_839829468(p0); } N_NIMCALL(Ropeobj179006*, gencasesecondpass_547965_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0, NI labid0, NI until0) { Ropeobj179006* result0; Ropeobj179006* lend0; result0 = (Ropeobj179006*)0; lend0 = getlabel_540217_839829468(p0); { NI i_547984_839829468; NI res_548017_839829468; i_547984_839829468 = (NI)0; res_548017_839829468 = ((NI) 1); { while (1) { TY179507 LOC10; if (!(res_548017_839829468 <= until0)) goto LA3; i_547984_839829468 = res_548017_839829468; { NIM_BOOL LOC6; LOC6 = (NIM_BOOL)0; LOC6 = ((*d0).k == ((Tlockind293808) 1)); if (!(LOC6)) goto LA7; LOC6 = isemptytype_298440_850551059((*t0).typ); LA7: ; if (!LOC6) goto LA8; (*d0).k = ((Tlockind293808) 0); } LA8: ; memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rope_179401_2381377266(((NI64) ((NI)(labid0 + i_547984_839829468)))); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_599), LOC10, 1); { NI length0; TY179507 LOC15; if (!((*(*t0).kindU.S6.sons->data[i_547984_839829468]).kind == ((Tnodekind293020) 85))) goto LA13; length0 = sonslen_296351_850551059((*t0).kindU.S6.sons->data[i_547984_839829468]); exprblock_545103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_547984_839829468]).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = lend0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_556), LOC15, 1); } goto LA11; 
LA13: ; { exprblock_545103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_547984_839829468]).kindU.S6.sons->data[((NI) 0)], d0); } LA11: ; res_548017_839829468 += ((NI) 1); } LA3: ; } } result0 = lend0; return result0; } N_NIMCALL(void, gencasegenericbranch_547910_839829468)(Tcproc530021* p0, Tnode293802* b0, Tloc293816 e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj179006* labl0) { Tloc293816 x0; Tloc293816 y0; NI length0; memset((void*)(&x0), 0, sizeof(x0)); memset((void*)(&y0), 0, sizeof(y0)); length0 = sonslen_296351_850551059(b0); { NI i_547932_839829468; NI HEX3Atmp_547958_839829468; NI res_547961_839829468; i_547932_839829468 = (NI)0; HEX3Atmp_547958_839829468 = (NI)0; HEX3Atmp_547958_839829468 = (NI)(length0 - ((NI) 2)); res_547961_839829468 = ((NI) 0); { while (1) { if (!(res_547961_839829468 <= HEX3Atmp_547958_839829468)) goto LA3; i_547932_839829468 = res_547961_839829468; { TY536235 LOC8; if (!((*(*b0).kindU.S6.sons->data[i_547932_839829468]).kind == ((Tnodekind293020) 44))) goto LA6; initlocexpr_540283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_547932_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0)); initlocexpr_540283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_547932_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdcharloc_539227_839829468(e0); LOC8[1] = rdcharloc_539227_839829468(x0); LOC8[2] = rdcharloc_539227_839829468(y0); LOC8[3] = labl0; linecg_533707_839829468(p0, ((Tcprocsection530011) 2), rangeformat0, LOC8, 4); } goto LA4; LA6: ; { TY536238 LOC10; initlocexpr_540283_839829468(p0, (*b0).kindU.S6.sons->data[i_547932_839829468], (&x0)); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdcharloc_539227_839829468(e0); LOC10[1] = rdcharloc_539227_839829468(x0); LOC10[2] = labl0; linecg_533707_839829468(p0, ((Tcprocsection530011) 2), eqformat0, LOC10, 3); } LA4: ; res_547961_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(Ropeobj179006*, 
genifforcaseuntil_548021_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc293816 a0) { Ropeobj179006* result0; NI labid0; result0 = (Ropeobj179006*)0; labid0 = (*p0).labels; { NI i_548042_839829468; NI res_548083_839829468; i_548042_839829468 = (NI)0; res_548083_839829468 = ((NI) 1); { while (1) { if (!(res_548083_839829468 <= until0)) goto LA3; i_548042_839829468 = res_548083_839829468; (*p0).labels += ((NI) 1); { Ropeobj179006* LOC8; Ropeobj179006* LOC9; if (!((*(*t0).kindU.S6.sons->data[i_548042_839829468]).kind == ((Tnodekind293020) 85))) goto LA6; LOC8 = (Ropeobj179006*)0; LOC8 = rope_179401_2381377266(((NI64) ((*p0).labels))); LOC9 = (Ropeobj179006*)0; LOC9 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_296), LOC8); gencasegenericbranch_547910_839829468(p0, (*t0).kindU.S6.sons->data[i_548042_839829468], a0, rangeformat0, eqformat0, LOC9); } goto LA4; LA6: ; { TY179507 LOC11; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rope_179401_2381377266(((NI64) ((*p0).labels))); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_598), LOC11, 1); } LA4: ; res_548083_839829468 += ((NI) 1); } LA3: ; } } { NI LOC14; NI gototarget0; TY179507 LOC17; TY179507 LOC18; LOC14 = (NI)0; LOC14 = len_294081_850551059(t0); if (!(until0 < (NI)(LOC14 - ((NI) 1)))) goto LA15; (*p0).labels += ((NI) 1); gototarget0 = (*p0).labels; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rope_179401_2381377266(((NI64) (gototarget0))); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_598), LOC17, 1); result0 = gencasesecondpass_547965_839829468(p0, t0, d0, ((NI) (labid0)), until0); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rope_179401_2381377266(((NI64) (gototarget0))); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_599), LOC18, 1); } goto LA12; LA15: ; { result0 = 
gencasesecondpass_547965_839829468(p0, t0, d0, ((NI) (labid0)), until0); } LA12: ; return result0; } N_NIMCALL(void, gencasegeneric_548087_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0) { Tloc293816 a0; Ropeobj179006* lend0; NI LOC1; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC1 = (NI)0; LOC1 = sonslen_296351_850551059(t0); lend0 = genifforcaseuntil_548021_839829468(p0, t0, d0, rangeformat0, eqformat0, (NI)(LOC1 - ((NI) 1)), a0); fixlabel_540230_839829468(p0, lend0); } N_NIMCALL(void, genstringcase_548416_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0) { NI strings0; strings0 = ((NI) 0); { NI i_548434_839829468; NI HEX3Atmp_548549_839829468; NI LOC2; NI res_548552_839829468; i_548434_839829468 = (NI)0; HEX3Atmp_548549_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_296351_850551059(t0); HEX3Atmp_548549_839829468 = (NI)(LOC2 - ((NI) 1)); res_548552_839829468 = ((NI) 1); { while (1) { if (!(res_548552_839829468 <= HEX3Atmp_548549_839829468)) goto LA4; i_548434_839829468 = res_548552_839829468; { NI LOC9; if (!((*(*t0).kindU.S6.sons->data[i_548434_839829468]).kind == ((Tnodekind293020) 85))) goto LA7; LOC9 = (NI)0; LOC9 = sonslen_296351_850551059((*t0).kindU.S6.sons->data[i_548434_839829468]); strings0 += (NI)(LOC9 - ((NI) 1)); } LA7: ; res_548552_839829468 += ((NI) 1); } LA4: ; } } { NI bitmask0; NI LOC14; TY192350* branches0; Tloc293816 a0; NI labid0; TY533811 LOC26; TY534289 LOC35; Ropeobj179006* lend0; NI LOC42; if (!(((NI) 8) < strings0)) goto LA12; LOC14 = (NI)0; LOC14 = nextpoweroftwo_101629_1009420244(strings0); bitmask0 = (NI)(LOC14 - ((NI) 1)); branches0 = (TY192350*)0; branches0 = (TY192350*) newSeq((&NTI192350), ((NI) ((NI)(bitmask0 + ((NI) 1))))); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); labid0 = (*p0).labels; { NI 
i_548483_839829468; NI HEX3Atmp_548559_839829468; NI LOC16; NI res_548562_839829468; i_548483_839829468 = (NI)0; HEX3Atmp_548559_839829468 = (NI)0; LOC16 = (NI)0; LOC16 = sonslen_296351_850551059(t0); HEX3Atmp_548559_839829468 = (NI)(LOC16 - ((NI) 1)); res_548562_839829468 = ((NI) 1); { while (1) { if (!(res_548562_839829468 <= HEX3Atmp_548559_839829468)) goto LA18; i_548483_839829468 = res_548562_839829468; (*p0).labels += ((NI) 1); { Ropeobj179006* LOC23; Ropeobj179006* LOC24; if (!((*(*t0).kindU.S6.sons->data[i_548483_839829468]).kind == ((Tnodekind293020) 85))) goto LA21; LOC23 = (Ropeobj179006*)0; LOC23 = rope_179401_2381377266(((NI64) ((*p0).labels))); LOC24 = (Ropeobj179006*)0; LOC24 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_296), LOC23); gencasestringbranch_548100_839829468(p0, (*t0).kindU.S6.sons->data[i_548483_839829468], a0, LOC24, branches0->data, branches0->Sup.len); } goto LA19; LA21: ; { } LA19: ; res_548562_839829468 += ((NI) 1); } LA18: ; } } memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = rdloc_539188_839829468(a0); LOC26[1] = rope_179401_2381377266(((NI64) (bitmask0))); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_596), LOC26, 2); { NI j_548517_839829468; NI HEX3Atmp_548567_839829468; NI res_548570_839829468; j_548517_839829468 = (NI)0; HEX3Atmp_548567_839829468 = (NI)0; HEX3Atmp_548567_839829468 = (branches0 ? 
(branches0->Sup.len-1) : -1); res_548570_839829468 = ((NI) 0); { while (1) { if (!(res_548570_839829468 <= HEX3Atmp_548567_839829468)) goto LA29; j_548517_839829468 = res_548570_839829468; { TY533811 LOC34; if (!!((branches0->data[j_548517_839829468] == NIM_NIL))) goto LA32; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = intliteral_540270_839829468(((NI64) (j_548517_839829468))); LOC34[1] = branches0->data[j_548517_839829468]; linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_597), LOC34, 2); } LA32: ; res_548570_839829468 += ((NI) 1); } LA29: ; } } memset((void*)LOC35, 0, sizeof(LOC35)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_160), LOC35, 0); { NI LOC38; TY179507 LOC41; LOC38 = (NI)0; LOC38 = sonslen_296351_850551059(t0); if (!!(((*(*t0).kindU.S6.sons->data[(NI)(LOC38 - ((NI) 1))]).kind == ((Tnodekind293020) 85)))) goto LA39; memset((void*)LOC41, 0, sizeof(LOC41)); LOC41[0] = rope_179401_2381377266(((NI64) ((*p0).labels))); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_598), LOC41, 1); } LA39: ; LOC42 = (NI)0; LOC42 = sonslen_296351_850551059(t0); lend0 = gencasesecondpass_547965_839829468(p0, t0, d0, ((NI) (labid0)), (NI)(LOC42 - ((NI) 1))); fixlabel_540230_839829468(p0, lend0); } goto LA10; LA12: ; { gencasegeneric_548087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_490), ((NimStringDesc*) &T839829468_595)); } LA10: ; } N_NIMCALL(void, gengotoforcase_546673_839829468)(Tcproc530021* p0, Tnode293802* casestmt0) { { { NI i_546695_839829468; NI HEX3Atmp_546737_839829468; NI LOC2; NI res_546740_839829468; i_546695_839829468 = (NI)0; HEX3Atmp_546737_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = len_294081_850551059(casestmt0); HEX3Atmp_546737_839829468 = (LOC2 - 1); res_546740_839829468 = ((NI) 1); { while (1) { TY534289 LOC5; NI LOC6; Tnode293802* it0; Tnode293802* LOC16; if (!(res_546740_839829468 <= HEX3Atmp_546737_839829468)) goto LA4; 
i_546695_839829468 = res_546740_839829468; memset((void*)LOC5, 0, sizeof(LOC5)); LOC6 = (NI)0; LOC6 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC5, 0); it0 = (*casestmt0).kindU.S6.sons->data[i_546695_839829468]; { NI j_546711_839829468; NI HEX3Atmp_546730_839829468; NI LOC8; NI res_546733_839829468; j_546711_839829468 = (NI)0; HEX3Atmp_546730_839829468 = (NI)0; LOC8 = (NI)0; LOC8 = len_294081_850551059(it0); HEX3Atmp_546730_839829468 = (NI)(LOC8 - ((NI) 2)); res_546733_839829468 = ((NI) 0); { while (1) { NI64 val0; TY179507 LOC15; if (!(res_546733_839829468 <= HEX3Atmp_546730_839829468)) goto LA10; j_546711_839829468 = res_546733_839829468; { if (!((*(*it0).kindU.S6.sons->data[j_546711_839829468]).kind == ((Tnodekind293020) 44))) goto LA13; localerror_197085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579)); goto BeforeRet; } LA13: ; val0 = getordvalue_321129_3876443242((*it0).kindU.S6.sons->data[j_546711_839829468]); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = rope_179401_2381377266(val0); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_602), LOC15, 1); res_546733_839829468 += ((NI) 1); } LA10: ; } } LOC16 = (Tnode293802*)0; LOC16 = lastson_296364_850551059(it0); genstmts_540244_839829468(p0, LOC16); endblock_545060_839829468(p0); res_546740_839829468 += ((NI) 1); } LA4: ; } } }BeforeRet: ; } N_NIMCALL(NIM_BOOL, branchhastoobigrange_548575_839829468)(Tnode293802* b0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { NI i_548590_839829468; NI HEX3Atmp_548608_839829468; NI LOC2; NI res_548611_839829468; i_548590_839829468 = (NI)0; HEX3Atmp_548608_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_296351_850551059(b0); HEX3Atmp_548608_839829468 = (NI)(LOC2 - ((NI) 2)); res_548611_839829468 = ((NI) 0); { while (1) { if (!(res_548611_839829468 <= HEX3Atmp_548608_839829468)) goto LA4; i_548590_839829468 = res_548611_839829468; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = 
((*(*b0).kindU.S6.sons->data[i_548590_839829468]).kind == ((Tnodekind293020) 44)); if (!(LOC7)) goto LA8; LOC7 = (IL64(256) < (NI64)((*(*(*b0).kindU.S6.sons->data[i_548590_839829468]).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval - (*(*(*b0).kindU.S6.sons->data[i_548590_839829468]).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval)); LA8: ; if (!LOC7) goto LA9; result0 = NIM_TRUE; goto BeforeRet; } LA9: ; res_548611_839829468 += ((NI) 1); } LA4: ; } } }BeforeRet: ; return result0; } N_NIMCALL(NI, ifswitchsplitpoint_548615_839829468)(Tcproc530021* p0, Tnode293802* n0) { NI result0; result0 = (NI)0; { NI i_548630_839829468; NI HEX3Atmp_548654_839829468; NI LOC2; NI res_548657_839829468; i_548630_839829468 = (NI)0; HEX3Atmp_548654_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = len_294081_850551059(n0); HEX3Atmp_548654_839829468 = (NI)(LOC2 - ((NI) 1)); res_548657_839829468 = ((NI) 1); { while (1) { Tnode293802* branch0; Tnode293802* stmtblock0; if (!(res_548657_839829468 <= HEX3Atmp_548654_839829468)) goto LA4; i_548630_839829468 = res_548657_839829468; branch0 = HEX5BHEX5D_294238_850551059(n0, i_548630_839829468); stmtblock0 = lastson_296364_850551059(branch0); { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = stmtscontainpragma_529083_2036603609(stmtblock0, ((Tspecialword276003) 181)); if (!LOC7) goto LA8; result0 = i_548630_839829468; } goto LA5; LA8: ; { if (!!(((Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop274004) 0))&7U)))!=0))) goto LA11; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = ((*branch0).kind == ((Tnodekind293020) 85)); if (!(LOC15)) goto LA16; LOC15 = branchhastoobigrange_548575_839829468(branch0); LA16: ; if (!LOC15) goto LA17; result0 = i_548630_839829468; } LA17: ; } goto LA5; LA11: ; LA5: ; res_548657_839829468 += ((NI) 1); } LA4: ; } } return result0; } N_NIMCALL(void, genordinalcase_548724_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { NI splitpoint0; Tloc293816 a0; Ropeobj179006* lend0; splitpoint0 = 
ifswitchsplitpoint_548615_839829468(p0, n0); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); { if (!(((NI) 0) < splitpoint0)) goto LA3; lend0 = genifforcaseuntil_548021_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601), splitpoint0, a0); } goto LA1; LA3: ; { lend0 = NIM_NIL; } LA1: ; { NI LOC8; TY179507 LOC11; NIM_BOOL hasdefault0; TY534289 LOC37; LOC8 = (NI)0; LOC8 = len_294081_850551059(n0); if (!((NI)(splitpoint0 + ((NI) 1)) < LOC8)) goto LA9; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rdcharloc_539227_839829468(a0); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_603), LOC11, 1); hasdefault0 = NIM_FALSE; { NI i_548757_839829468; NI HEX3Atmp_548816_839829468; NI HEX3Atmp_548817_839829468; NI LOC13; NI res_548820_839829468; i_548757_839829468 = (NI)0; HEX3Atmp_548816_839829468 = (NI)0; HEX3Atmp_548817_839829468 = (NI)0; HEX3Atmp_548816_839829468 = (NI)(splitpoint0 + ((NI) 1)); LOC13 = (NI)0; LOC13 = len_294081_850551059(n0); HEX3Atmp_548817_839829468 = (LOC13 - 1); res_548820_839829468 = HEX3Atmp_548816_839829468; { while (1) { Tnode293802* branch0; Tnode293802* LOC28; TY534289 LOC29; if (!(res_548820_839829468 <= HEX3Atmp_548817_839829468)) goto LA15; i_548757_839829468 = res_548820_839829468; { NIM_BOOL LOC18; LOC18 = (NIM_BOOL)0; LOC18 = ((*d0).k == ((Tlockind293808) 1)); if (!(LOC18)) goto LA19; LOC18 = isemptytype_298440_850551059((*n0).typ); LA19: ; if (!LOC18) goto LA20; (*d0).k = ((Tlockind293808) 0); } LA20: ; branch0 = HEX5BHEX5D_294238_850551059(n0, i_548757_839829468); { if (!((*branch0).kind == ((Tnodekind293020) 85))) goto LA24; gencaserange_538028_839829468(p0, branch0); } goto LA22; LA24: ; { TY534289 LOC27; memset((void*)LOC27, 0, sizeof(LOC27)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_181), LOC27, 0); hasdefault0 = NIM_TRUE; } LA22: ; LOC28 = 
(Tnode293802*)0; LOC28 = lastson_296364_850551059(branch0); exprblock_545103_839829468(p0, LOC28, d0); memset((void*)LOC29, 0, sizeof(LOC29)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_182), LOC29, 0); res_548820_839829468 += ((NI) 1); } LA15: ; } } { NIM_BOOL LOC32; TY534289 LOC36; LOC32 = (NIM_BOOL)0; LOC32 = ((Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop274004) 3))&7U)))!=0); if (!(LOC32)) goto LA33; LOC32 = !(hasdefault0); LA33: ; if (!LOC32) goto LA34; memset((void*)LOC36, 0, sizeof(LOC36)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_604), LOC36, 0); } LA34: ; memset((void*)LOC37, 0, sizeof(LOC37)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_160), LOC37, 0); } LA9: ; { if (!!((lend0 == NIM_NIL))) goto LA40; fixlabel_540230_839829468(p0, lend0); } LA40: ; } N_NIMCALL(void, gencase_548826_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0) { Ttype293840* LOC8; genlinedir_533823_839829468(p0, t0); { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_298440_850551059((*t0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind293808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_538032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA6: ; LOC8 = (Ttype293840*)0; LOC8 = skiptypes_297099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440)); switch ((*LOC8).kind) { case ((Ttypekind293244) 28): { genstringcase_548416_839829468(p0, t0, d0); } break; case ((Ttypekind293244) 36) ... 
((Ttypekind293244) 39): { gencasegeneric_548087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601)); } break; default: { { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 3)); if (!(LOC14)) goto LA15; LOC14 = (((*(*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag293184) 30))&31U)))!=0); LA15: ; if (!LOC14) goto LA16; gengotoforcase_546673_839829468(p0, t0); } goto LA12; LA16: ; { genordinalcase_548724_839829468(p0, t0, d0); } LA12: ; } break; } } static N_INLINE(Tnode293802*, pop_319246_1689653243)(Tnodeseq293796** s0) { Tnode293802* result0; NI L0; result0 = (Tnode293802*)0; L0 = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1)); result0 = (*s0)->data[L0]; (*s0) = (Tnodeseq293796*) setLengthSeq(&((*s0))->Sup, sizeof(Tnode293802*), ((NI) (L0))); return result0; } N_NIMCALL(void, blockleaveactions_546442_839829468)(Tcproc530021* p0, NI howmanytrys0, NI howmanyexcepts0) { Tnodeseq293796* stack0; NI alreadypoppedcnt0; stack0 = (Tnodeseq293796*)0; stack0 = (Tnodeseq293796*) newSeq((&NTI293796), ((NI) 0)); alreadypoppedcnt0 = (*p0).inexceptblock; { NI i_546471_839829468; NI res_546596_839829468; i_546471_839829468 = (NI)0; res_546596_839829468 = ((NI) 1); { while (1) { Tnode293802* trystmt0; Tnode293802* finallystmt0; if (!(res_546596_839829468 <= howmanytrys0)) goto LA3; i_546471_839829468 = res_546596_839829468; { NIM_BOOL LOC6; LOC6 = (NIM_BOOL)0; LOC6 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC6) goto LA7; LOC6 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA7: ; if (!!(LOC6)) goto LA8; { if (!(((NI) 0) < alreadypoppedcnt0)) goto LA12; alreadypoppedcnt0 -= ((NI) 1); } goto LA10; LA12: ; { TY534289 LOC15; memset((void*)LOC15, 0, sizeof(LOC15)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_605), LOC15, 0); } LA10: ; } LA8: ; trystmt0 = 
pop_319246_1689653243((&(*p0).nestedtrystmts)); stack0 = (Tnodeseq293796*) incrSeqV2(&(stack0)->Sup, sizeof(Tnode293802*)); asgnRefNoCycle((void**) (&stack0->data[stack0->Sup.len]), trystmt0); ++stack0->Sup.len; finallystmt0 = lastson_296364_850551059(trystmt0); { if (!((*finallystmt0).kind == ((Tnodekind293020) 107))) goto LA18; genstmts_540244_839829468(p0, (*finallystmt0).kindU.S6.sons->data[((NI) 0)]); } LA18: ; res_546596_839829468 += ((NI) 1); } LA3: ; } } { NI i_546546_839829468; NI HEX3Atmp_546601_839829468; NI res_546604_839829468; i_546546_839829468 = (NI)0; HEX3Atmp_546601_839829468 = (NI)0; HEX3Atmp_546601_839829468 = (NI)(howmanytrys0 - ((NI) 1)); res_546604_839829468 = HEX3Atmp_546601_839829468; { while (1) { if (!(((NI) 0) <= res_546604_839829468)) goto LA22; i_546546_839829468 = res_546604_839829468; (*p0).nestedtrystmts = (Tnodeseq293796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode293802*)); asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), stack0->data[i_546546_839829468]); ++(*p0).nestedtrystmts->Sup.len; res_546604_839829468 -= ((NI) 1); } LA22: ; } } { NIM_BOOL LOC25; LOC25 = (NIM_BOOL)0; LOC25 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC25) goto LA26; LOC25 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA26: ; if (!!(LOC25)) goto LA27; { NI i_546587_839829468; NI HEX3Atmp_546610_839829468; NI res_546613_839829468; i_546587_839829468 = (NI)0; HEX3Atmp_546610_839829468 = (NI)0; HEX3Atmp_546610_839829468 = (NI)(howmanyexcepts0 - ((NI) 1)); res_546613_839829468 = HEX3Atmp_546610_839829468; { while (1) { TY534289 LOC32; if (!(((NI) 0) <= res_546613_839829468)) goto LA31; i_546587_839829468 = res_546613_839829468; memset((void*)LOC32, 0, sizeof(LOC32)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_606), LOC32, 0); res_546613_839829468 -= ((NI) 1); } LA31: ; } } } LA27: ; } N_NIMCALL(void, 
genreturnstmt_546617_839829468)(Tcproc530021* p0, Tnode293802* t0) { TY534289 LOC14; { { if (!(((*t0).flags &(1U<<((NU)(((Tnodeflag293427) 14))&15U)))!=0)) goto LA3; goto BeforeRet; } LA3: ; (*p0).beforeretneeded = NIM_TRUE; genlinedir_533823_839829468(p0, t0); { if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 1)))) goto LA7; genstmts_540244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)]); } LA7: ; blockleaveactions_546442_839829468(p0, ((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0), (*p0).inexceptblock); { Ropeobj179006* safepoint0; TY179507 LOC13; if (!(((NI) 0) < ((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0))) goto LA11; safepoint0 = (*p0).finallysafepoints->data[(NI)(((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0) - ((NI) 1))]; memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = safepoint0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_607), LOC13, 1); } LA11: ; memset((void*)LOC14, 0, sizeof(LOC14)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_608), LOC14, 0); }BeforeRet: ; } N_NIMCALL(void, genbreakstmt_547444_839829468)(Tcproc530021* p0, Tnode293802* t0) { NI idx0; Ropeobj179006* label0; TY179507 LOC16; idx0 = (*p0).breakidx; { Tsym293834* sym0; if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 1)))) goto LA3; sym0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; idx0 = (NI)((*sym0).position - ((NI) 1)); } goto LA1; LA3: ; { { while (1) { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = (((NI) 0) <= idx0); if (!(LOC8)) goto LA9; LOC8 = !((*p0).blocks->data[idx0].isloop); LA9: ; if (!LOC8) goto LA7; idx0 -= ((NI) 1); } LA7: ; } { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = (idx0 < ((NI) 0)); if (LOC12) goto LA13; LOC12 = !((*p0).blocks->data[idx0].isloop); LA13: ; if (!LOC12) goto LA14; internalerror_197100_155036129((*t0).info, ((NimStringDesc*) &T839829468_609)); } LA14: ; } LA1: 
; label0 = assignlabel_545020_839829468((&(*p0).blocks->data[idx0])); blockleaveactions_546442_839829468(p0, (NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) ((*p0).blocks->data[idx0].nestedtrystmts))), (NI)((*p0).inexceptblock - ((NI) ((*p0).blocks->data[idx0].nestedexceptstmts)))); genlinedir_533823_839829468(p0, t0); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = label0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_556), LOC16, 1); } N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_550080_839829468)(Tcproc530021* p0, Tnode293802* asgn0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { Tnode293802* le0; if (!(((*p0).options &(1U<<((NU)(((Toption170009) 2))&31U)))!=0)) goto LA3; le0 = (*asgn0).kindU.S6.sons->data[((NI) 0)]; { Tsym293834* field0; if (!((*le0).kind == ((Tnodekind293020) 46))) goto LA7; field0 = (*(*(*le0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag293184) 18))&31U)))!=0); } goto LA5; LA7: ; { Tsym293834* field0; if (!((*le0).kind == ((Tnodekind293020) 45))) goto LA10; field0 = (*(*le0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag293184) 18))&31U)))!=0); } goto LA5; LA10: ; LA5: ; } LA3: ; return result0; } N_NIMCALL(Ropeobj179006*, discriminatortabledecl_537094_839829468)(Tcgen530027* m0, Ttype293840* objtype0, Tsym293834* d0) { Ropeobj179006* result0; Ropeobj179006* LOC1; Ropeobj179006* tmp0; TY533811 LOC2; NI64 LOC3; result0 = (Ropeobj179006*)0; LOC1 = (Ropeobj179006*)0; LOC1 = cgsym_533403_839829468(m0, ((NimStringDesc*) &T839829468_130)); tmp0 = discriminatortablename_537057_839829468(m0, objtype0, d0); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = tmp0; LOC3 = (NI64)0; LOC3 = lengthord_321007_3876443242((*d0).typ); LOC2[1] = rope_179401_2381377266((NI64)(LOC3 + IL64(1))); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_203), LOC2, 2); 
return result0; } N_NIMCALL(void, gendiscriminantcheck_550144_839829468)(Tcproc530021* p0, Tloc293816 a0, Tloc293816 tmp0, Ttype293840* objtype0, Tsym293834* field0) { Ttype293840* t0; Ropeobj179006* LOC1; NI64 L0; TY536235 LOC8; t0 = skiptypes_297099_850551059(objtype0, IL64(211106240964864)); LOC1 = (Ropeobj179006*)0; LOC1 = gentypeinfo_536941_839829468((*p0).module, t0); L0 = lengthord_321007_3876443242((*field0).typ); { NIM_BOOL LOC4; TY179507 LOC7; LOC4 = (NIM_BOOL)0; LOC4 = containsorincl_269862_2627731572((&(*(*p0).module).declaredthings), (*field0).Sup.id); if (!!(LOC4)) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = discriminatortabledecl_537094_839829468((*p0).module, t0, field0); appcg_533640_839829468((*p0).module, ((Tcfilesection530005) 9), ((NimStringDesc*) &T839829468_610), LOC7, 1); } LA5: ; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_539188_839829468(a0); LOC8[1] = rdloc_539188_839829468(tmp0); LOC8[2] = discriminatortablename_537057_839829468((*p0).module, t0, field0); LOC8[3] = intliteral_540270_839829468((NI64)(L0 + IL64(1))); linecg_533707_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_611), LOC8, 4); } N_NIMCALL(void, asgnfielddiscriminant_550209_839829468)(Tcproc530021* p0, Tnode293802* e0) { Tloc293816 a0; Tloc293816 tmp0; Tnode293802* dotexpr0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&tmp0), 0, sizeof(tmp0)); dotexpr0 = (*e0).kindU.S6.sons->data[((NI) 0)]; { if (!((*dotexpr0).kind == ((Tnodekind293020) 46))) goto LA3; dotexpr0 = (*dotexpr0).kindU.S6.sons->data[((NI) 0)]; } LA3: ; initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); gettemp_538032_839829468(p0, a0.t, (&tmp0), NIM_FALSE); expr_540248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0)); gendiscriminantcheck_550144_839829468(p0, a0, tmp0, (*(*dotexpr0).kindU.S6.sons->data[((NI) 0)]).typ, (*(*dotexpr0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym); genassignment_540264_839829468(p0, a0, 
tmp0, 0); } N_NIMCALL(void, genasgn_550239_839829468)(Tcproc530021* p0, Tnode293802* e0, NIM_BOOL fastasgn0) { genlinedir_533823_839829468(p0, e0); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 3)); if (!(LOC3)) goto LA4; LOC3 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag293184) 30))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; gengotovar_545258_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)]); } goto LA1; LA5: ; { NIM_BOOL LOC8; Tloc293816 a0; LOC8 = (NIM_BOOL)0; LOC8 = fielddiscriminantcheckneeded_550080_839829468(p0, e0); if (!!(LOC8)) goto LA9; memset((void*)(&a0), 0, sizeof(a0)); { Tnode293802* LOC13; Tnode293802* LOC16; LOC13 = (Tnode293802*)0; LOC13 = HEX5BHEX5D_294238_850551059(e0, ((NI) 0)); if (!((*LOC13).kind == ((Tnodekind293020) 47) || (*LOC13).kind == ((Tnodekind293020) 65))) goto LA14; LOC16 = (Tnode293802*)0; LOC16 = HEX5BHEX5D_294238_850551059(e0, ((NI) 0)); genderef_544921_839829468(p0, LOC16, (&a0), NIM_TRUE); } goto LA11; LA14: ; { initlocexpr_540283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); } LA11: ; { if (!fastasgn0) goto LA20; a0.flags |= ((NU16)1)<<((((Tlocflag293810) 2))%(sizeof(NU16)*8)); } LA20: ; loadinto_544928_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); } goto LA1; LA9: ; { asgnfielddiscriminant_550209_839829468(p0, e0); } LA1: ; } N_NIMCALL(Ropeobj179006*, genasmoremitstmt_549529_839829468)(Tcproc530021* p0, Tnode293802* t0, NIM_BOOL isasmstmt0) { Ropeobj179006* result0; NimStringDesc* res0; result0 = (Ropeobj179006*)0; res0 = copyString(((NimStringDesc*) &T839829468_490)); { NI i_549547_839829468; NI HEX3Atmp_549644_839829468; NI LOC2; NI res_549647_839829468; i_549547_839829468 = (NI)0; HEX3Atmp_549644_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_296351_850551059(t0); HEX3Atmp_549644_839829468 = (NI)(LOC2 - ((NI) 1)); res_549647_839829468 = ((NI) 0); { while 
(1) { if (!(res_549647_839829468 <= HEX3Atmp_549644_839829468)) goto LA4; i_549547_839829468 = res_549647_839829468; switch ((*(*t0).kindU.S6.sons->data[i_549547_839829468]).kind) { case ((Tnodekind293020) 20) ... ((Tnodekind293020) 22): { res0 = resizeString(res0, (*(*t0).kindU.S6.sons->data[i_549547_839829468]).kindU.S3.strval->Sup.len + 0); appendString(res0, (*(*t0).kindU.S6.sons->data[i_549547_839829468]).kindU.S3.strval); } break; case ((Tnodekind293020) 3): { Tsym293834* sym0; sym0 = (*(*t0).kindU.S6.sons->data[i_549547_839829468]).kindU.S4.sym; { Tloc293816 a0; Ropeobj179006* LOC11; NimStringDesc* LOC12; if (!((28672 &(1U<<((NU)((*sym0).kind)&31U)))!=0)) goto LA9; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*t0).kindU.S6.sons->data[i_549547_839829468], (&a0)); LOC11 = (Ropeobj179006*)0; LOC11 = rdloc_539188_839829468(a0); LOC12 = (NimStringDesc*)0; LOC12 = HEX24_179856_2381377266(LOC11); res0 = resizeString(res0, LOC12->Sup.len + 0); appendString(res0, LOC12); } goto LA7; LA9: ; { Ropeobj179006* LOC16; NimStringDesc* LOC17; if (!((*sym0).kind == ((Tsymkind293435) 7))) goto LA14; LOC16 = (Ropeobj179006*)0; LOC16 = gettypedesc_536671_839829468((*p0).module, (*sym0).typ); LOC17 = (NimStringDesc*)0; LOC17 = HEX24_179856_2381377266(LOC16); res0 = resizeString(res0, LOC17->Sup.len + 0); appendString(res0, LOC17); } goto LA7; LA14: ; { Ropeobj179006* r0; NimStringDesc* LOC23; r0 = (*sym0).loc.r; { if (!(r0 == NIM_NIL)) goto LA21; r0 = manglename_534205_839829468(sym0); asgnRefNoCycle((void**) (&(*sym0).loc.r), r0); } LA21: ; LOC23 = (NimStringDesc*)0; LOC23 = HEX24_179856_2381377266(r0); res0 = resizeString(res0, LOC23->Sup.len + 0); appendString(res0, LOC23); } LA7: ; } break; default: { internalerror_197100_155036129((*(*t0).kindU.S6.sons->data[i_549547_839829468]).info, ((NimStringDesc*) &T839829468_612)); } break; } res_549647_839829468 += ((NI) 1); } LA4: ; } } { NIM_BOOL LOC27; LOC27 = (NIM_BOOL)0; LOC27 = isasmstmt0; if (!(LOC27)) 
goto LA28; LOC27 = ((Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop274004) 5))&7U)))!=0); LA28: ; if (!LOC27) goto LA29; { NimStringDesc* x_549604_839829468; NI first_549656_839829468; NI last_549658_839829468; x_549604_839829468 = (NimStringDesc*)0; first_549656_839829468 = ((NI) 0); last_549658_839829468 = ((NI) 0); { while (1) { NI j0; { while (1) { if (!!((((NU8)(res0->data[last_549658_839829468])) == ((NU8)(0)) || ((NU8)(res0->data[last_549658_839829468])) == ((NU8)(13)) || ((NU8)(res0->data[last_549658_839829468])) == ((NU8)(10))))) goto LA35; last_549658_839829468 += ((NI) 1); } LA35: ; } x_549604_839829468 = copyStrLast(res0, first_549656_839829468, (NI)(last_549658_839829468 - ((NI) 1))); j0 = ((NI) 0); { while (1) { if (!(((NU8)(x_549604_839829468->data[j0])) == ((NU8)(32)) || ((NU8)(x_549604_839829468->data[j0])) == ((NU8)(9)))) goto LA37; j0 += ((NI) 1); } LA37: ; } { if (!(((NU8)(x_549604_839829468->data[j0])) == ((NU8)(34)) || ((NU8)(x_549604_839829468->data[j0])) == ((NU8)(58)))) goto LA40; add_179487_2381377266(&result0, x_549604_839829468); add_179487_2381377266(&result0, tnl_177644_4151366050); } goto LA38; LA40: ; { if (!!(((NU8)(x_549604_839829468->data[j0]) == (NU8)(0)))) goto LA43; add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_613)); add_179487_2381377266(&result0, x_549604_839829468); add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_614)); } goto LA38; LA43: ; LA38: ; { if (!((NU8)(res0->data[last_549658_839829468]) == (NU8)(10))) goto LA47; last_549658_839829468 += ((NI) 1); } goto LA45; LA47: ; { if (!((NU8)(res0->data[last_549658_839829468]) == (NU8)(13))) goto LA50; last_549658_839829468 += ((NI) 1); { if (!((NU8)(res0->data[last_549658_839829468]) == (NU8)(10))) goto LA54; last_549658_839829468 += ((NI) 1); } LA54: ; } goto LA45; LA50: ; { goto LA32; } LA45: ; first_549656_839829468 = last_549658_839829468; } } LA32: ; } } goto LA25; LA29: ; { res0 = 
resizeString(res0, tnl_177644_4151366050->Sup.len + 0); appendString(res0, tnl_177644_4151366050); result0 = rope_179277_2381377266(res0); } LA25: ; return result0; } N_NIMCALL(void, genasmstmt_549659_839829468)(Tcproc530021* p0, Tnode293802* t0) { Ropeobj179006* s0; genlinedir_533823_839829468(p0, t0); s0 = genasmoremitstmt_549529_839829468(p0, t0, NIM_TRUE); { TY179507 LOC5; if (!((*p0).prc == NIM_NIL)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = s0; addf_180205_2381377266(&(*(*p0).module).s[(((Tcfilesection530005) 7))- 0], Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field17, LOC5, 1); } goto LA1; LA3: ; { TY179507 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = s0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field17, LOC7, 1); } LA1: ; } static N_INLINE(void, gensimpleblock_545095_839829468)(Tcproc530021* p0, Tnode293802* stmts0) { TY534289 LOC1; NI LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (NI)0; LOC2 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC1, 0); genstmts_540244_839829468(p0, stmts0); endblock_545060_839829468(p0); } N_NIMCALL(void, gentrycpp_548865_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0) { Ropeobj179006* exc0; TY534289 LOC16; NI LOC17; NI length0; TY179507 LOC18; Ropeobj179006* LOC19; NI i0; NIM_BOOL catchallpresent0; TY534289 LOC78; Tnode293802* LOC79; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_298440_850551059((*t0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind293808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_538032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA6: ; genlinedir_533823_839829468(p0, t0); exc0 = gettempname_534596_839829468((*p0).module); { Tsym293834* LOC10; Ropeobj179006* LOC13; LOC10 = (Tsym293834*)0; LOC10 = getcompilerproc_339746_3937434831(((NimStringDesc*) &T839829468_615)); if (!!((LOC10 == NIM_NIL))) 
goto LA11; LOC13 = (Ropeobj179006*)0; LOC13 = cgsym_533403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615)); } goto LA8; LA11: ; { Ropeobj179006* LOC15; LOC15 = (Ropeobj179006*)0; LOC15 = cgsym_533403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616)); } LA8: ; (*p0).nestedtrystmts = (Tnodeseq293796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode293802*)); asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0); ++(*p0).nestedtrystmts->Sup.len; memset((void*)LOC16, 0, sizeof(LOC16)); LOC17 = (NI)0; LOC17 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_617), LOC16, 0); expr_540248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0); length0 = sonslen_296351_850551059(t0); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = exc0; LOC19 = (Ropeobj179006*)0; LOC19 = ropecg_533407_839829468((*p0).module, ((NimStringDesc*) &T839829468_618), LOC18, 1); endblock_545035_839829468(p0, LOC19); { TY534289 LOC24; if (!(((*p0).options &(1U<<((NU)(((Toption170009) 15))&31U)))!=0)) goto LA22; memset((void*)LOC24, 0, sizeof(LOC24)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_619), LOC24, 0); } LA22: ; (*p0).inexceptblock += ((NI) 1); i0 = ((NI) 1); catchallpresent0 = NIM_FALSE; { while (1) { NIM_BOOL LOC27; NI blen0; LOC27 = (NIM_BOOL)0; LOC27 = (i0 < length0); if (!(LOC27)) goto LA28; LOC27 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind293020) 87)); LA28: ; if (!LOC27) goto LA26; { NIM_BOOL LOC31; LOC31 = (NIM_BOOL)0; LOC31 = ((*d0).k == ((Tlockind293808) 1)); if (!(LOC31)) goto LA32; LOC31 = isemptytype_298440_850551059((*t0).typ); LA32: ; if (!LOC31) goto LA33; (*d0).k = ((Tlockind293808) 0); } LA33: ; blen0 = sonslen_296351_850551059((*t0).kindU.S6.sons->data[i0]); { Ropeobj179006** LOC39; TY534289 LOC40; if (!(((NI) 1) < i0)) goto LA37; LOC39 = (Ropeobj179006**)0; LOC39 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); memset((void*)LOC40, 0, 
sizeof(LOC40)); addf_180205_2381377266(LOC39, ((NimStringDesc*) &T839829468_620), LOC40, 0); } LA37: ; { TY534289 LOC45; NI LOC46; TY534289 LOC47; if (!(blen0 == ((NI) 1))) goto LA43; catchallpresent0 = NIM_TRUE; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (NI)0; LOC46 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0); expr_540248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0); memset((void*)LOC47, 0, sizeof(LOC47)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_606), LOC47, 0); endblock_545060_839829468(p0); } goto LA41; LA43: ; { Ropeobj179006* orexpr0; TY179507 LOC57; TY534289 LOC58; NI LOC59; TY534289 LOC60; orexpr0 = NIM_NIL; { NI j_548978_839829468; NI HEX3Atmp_549101_839829468; NI res_549104_839829468; j_548978_839829468 = (NI)0; HEX3Atmp_549101_839829468 = (NI)0; HEX3Atmp_549101_839829468 = (NI)(blen0 - ((NI) 2)); res_549104_839829468 = ((NI) 0); { while (1) { TY533811 LOC56; if (!(res_549104_839829468 <= HEX3Atmp_549101_839829468)) goto LA51; j_548978_839829468 = res_549104_839829468; { if (!!((orexpr0 == NIM_NIL))) goto LA54; add_179487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229)); } LA54: ; memset((void*)LOC56, 0, sizeof(LOC56)); LOC56[0] = exc0; LOC56[1] = gentypeinfo_536941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_548978_839829468]).typ); appcg_533632_839829468((*p0).module, &orexpr0, ((NimStringDesc*) &T839829468_621), LOC56, 2); res_549104_839829468 += ((NI) 1); } LA51: ; } } memset((void*)LOC57, 0, sizeof(LOC57)); LOC57[0] = orexpr0; linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_622), LOC57, 1); memset((void*)LOC58, 0, sizeof(LOC58)); LOC59 = (NI)0; LOC59 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC58, 0); expr_540248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0); 
memset((void*)LOC60, 0, sizeof(LOC60)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_606), LOC60, 0); endblock_545060_839829468(p0); } LA41: ; i0 += ((NI) 1); } LA26: ; } { TY534289 LOC70; NI LOC71; Tnode293802* finallyblock0; TY534289 LOC76; Ropeobj179006* LOC77; if (!!(catchallpresent0)) goto LA63; { TY534289 LOC69; if (!(((NI) 1) < i0)) goto LA67; memset((void*)LOC69, 0, sizeof(LOC69)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_620), LOC69, 0); } LA67: ; memset((void*)LOC70, 0, sizeof(LOC70)); LOC71 = (NI)0; LOC71 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC70, 0); finallyblock0 = lastson_296364_850551059(t0); { if (!((*finallyblock0).kind == ((Tnodekind293020) 107))) goto LA74; genstmts_540244_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]); } LA74: ; memset((void*)LOC76, 0, sizeof(LOC76)); LOC77 = (Ropeobj179006*)0; LOC77 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_623), LOC76, 0); line_533690_839829468(p0, ((Tcprocsection530011) 2), LOC77); endblock_545060_839829468(p0); } LA63: ; memset((void*)LOC78, 0, sizeof(LOC78)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_160), LOC78, 0); (*p0).inexceptblock -= ((NI) 1); LOC79 = (Tnode293802*)0; LOC79 = pop_319246_1689653243((&(*p0).nestedtrystmts)); { NIM_BOOL LOC82; LOC82 = (NIM_BOOL)0; LOC82 = (i0 < length0); if (!(LOC82)) goto LA83; LOC82 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind293020) 107)); LA83: ; if (!LOC82) goto LA84; gensimpleblock_545095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]); } LA84: ; } N_NIMCALL(void, line_533695_839829468)(Tcproc530021* p0, Tcprocsection530011 s0, NimStringDesc* r0) { Ropeobj179006** LOC1; Ropeobj179006* LOC2; Ropeobj179006* LOC3; LOC1 = (Ropeobj179006**)0; LOC1 = s_530179_3723162438(p0, s0); LOC2 = (Ropeobj179006*)0; LOC2 = rope_179277_2381377266(r0); LOC3 = 
(Ropeobj179006*)0; LOC3 = indentline_533656_839829468(p0, LOC2); add_179482_2381377266(LOC1, LOC3); } static N_INLINE(Ropeobj179006*, pop_179530_1689653243)(TY192350** s0) { Ropeobj179006* result0; NI L0; result0 = (Ropeobj179006*)0; L0 = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1)); result0 = (*s0)->data[L0]; (*s0) = (TY192350*) setLengthSeq(&((*s0))->Sup, sizeof(Ropeobj179006*), ((NI) (L0))); return result0; } N_NIMCALL(void, gentry_549114_839829468)(Tcproc530021* p0, Tnode293802* t0, Tloc293816* d0) { NIM_BOOL LOC8; Ropeobj179006* safepoint0; TY179507 LOC17; TY179507 LOC18; TY179507 LOC37; NI LOC38; NI length0; TY534289 LOC39; TY534289 LOC40; NI LOC41; TY534289 LOC42; NI i0; Tnode293802* LOC95; TY179507 LOC103; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_298440_850551059((*t0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind293808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_538032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA6: ; LOC8 = (NIM_BOOL)0; LOC8 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_624)); genlinedir_533823_839829468(p0, t0); safepoint0 = gettempname_534596_839829468((*p0).module); { Tsym293834* LOC11; Ropeobj179006* LOC14; LOC11 = (Tsym293834*)0; LOC11 = getcompilerproc_339746_3937434831(((NimStringDesc*) &T839829468_615)); if (!!((LOC11 == NIM_NIL))) goto LA12; LOC14 = (Ropeobj179006*)0; LOC14 = cgsym_533403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615)); } goto LA9; LA12: ; { Ropeobj179006* LOC16; LOC16 = (Ropeobj179006*)0; LOC16 = cgsym_533403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616)); } LA9: ; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = safepoint0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 0), ((NimStringDesc*) &T839829468_625), LOC17, 1); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = safepoint0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) 
&T839829468_626), LOC18, 1); { NIM_BOOL LOC21; TY179507 LOC24; LOC21 = (NIM_BOOL)0; LOC21 = isdefined_201011_1967573533(((NimStringDesc*) &T839829468_627)); if (!LOC21) goto LA22; memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = safepoint0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_628), LOC24, 1); } goto LA19; LA22: ; { NIM_BOOL LOC26; TY179507 LOC29; LOC26 = (NIM_BOOL)0; LOC26 = isdefined_201011_1967573533(((NimStringDesc*) &T839829468_629)); if (!LOC26) goto LA27; memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = safepoint0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_630), LOC29, 1); } goto LA19; LA27: ; { NIM_BOOL LOC31; TY179507 LOC34; LOC31 = (NIM_BOOL)0; LOC31 = isdefined_201011_1967573533(((NimStringDesc*) &T839829468_631)); if (!LOC31) goto LA32; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = safepoint0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_632), LOC34, 1); } goto LA19; LA32: ; { TY179507 LOC36; memset((void*)LOC36, 0, sizeof(LOC36)); LOC36[0] = safepoint0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_628), LOC36, 1); } LA19: ; memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = safepoint0; LOC38 = (NI)0; LOC38 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_633), LOC37, 1); length0 = sonslen_296351_850551059(t0); (*p0).nestedtrystmts = (Tnodeseq293796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode293802*)); asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0); ++(*p0).nestedtrystmts->Sup.len; expr_540248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0); memset((void*)LOC39, 0, sizeof(LOC39)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_605), LOC39, 0); endblock_545060_839829468(p0); memset((void*)LOC40, 0, sizeof(LOC40)); LOC41 = (NI)0; LOC41 = 
startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_634), LOC40, 0); memset((void*)LOC42, 0, sizeof(LOC42)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_605), LOC42, 0); { TY534289 LOC47; if (!(((*p0).options &(1U<<((NU)(((Toption170009) 15))&31U)))!=0)) goto LA45; memset((void*)LOC47, 0, sizeof(LOC47)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_619), LOC47, 0); } LA45: ; (*p0).inexceptblock += ((NI) 1); i0 = ((NI) 1); { while (1) { NIM_BOOL LOC50; NI blen0; LOC50 = (NIM_BOOL)0; LOC50 = (i0 < length0); if (!(LOC50)) goto LA51; LOC50 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind293020) 87)); LA51: ; if (!LOC50) goto LA49; { NIM_BOOL LOC54; LOC54 = (NIM_BOOL)0; LOC54 = ((*d0).k == ((Tlockind293808) 1)); if (!(LOC54)) goto LA55; LOC54 = isemptytype_298440_850551059((*t0).typ); LA55: ; if (!LOC54) goto LA56; (*d0).k = ((Tlockind293808) 0); } LA56: ; blen0 = sonslen_296351_850551059((*t0).kindU.S6.sons->data[i0]); { TY534289 LOC67; NI LOC68; TY179507 LOC69; TY534289 LOC70; if (!(blen0 == ((NI) 1))) goto LA60; { TY534289 LOC66; if (!(((NI) 1) < i0)) goto LA64; memset((void*)LOC66, 0, sizeof(LOC66)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_635), LOC66, 0); } LA64: ; memset((void*)LOC67, 0, sizeof(LOC67)); LOC68 = (NI)0; LOC68 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC67, 0); memset((void*)LOC69, 0, sizeof(LOC69)); LOC69[0] = safepoint0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_636), LOC69, 1); expr_540248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0); memset((void*)LOC70, 0, sizeof(LOC70)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_606), LOC70, 0); endblock_545060_839829468(p0); } goto LA58; LA60: ; { Ropeobj179006* orexpr0; TY179507 LOC91; NI LOC92; TY179507 LOC93; 
TY534289 LOC94; orexpr0 = NIM_NIL; { NI j_549247_839829468; NI HEX3Atmp_549521_839829468; NI res_549524_839829468; j_549247_839829468 = (NI)0; HEX3Atmp_549521_839829468 = (NI)0; HEX3Atmp_549521_839829468 = (NI)(blen0 - ((NI) 2)); res_549524_839829468 = ((NI) 0); { while (1) { NimStringDesc* isobjformat0; TY179507 LOC86; if (!(res_549524_839829468 <= HEX3Atmp_549521_839829468)) goto LA74; j_549247_839829468 = res_549524_839829468; { if (!!((orexpr0 == NIM_NIL))) goto LA77; add_179487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229)); } LA77: ; { NIM_BOOL LOC81; LOC81 = (NIM_BOOL)0; LOC81 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC81) goto LA82; LOC81 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA82: ; if (!!(LOC81)) goto LA83; isobjformat0 = copyString(((NimStringDesc*) &T839829468_637)); } goto LA79; LA83: ; { isobjformat0 = copyString(((NimStringDesc*) &T839829468_638)); } LA79: ; memset((void*)LOC86, 0, sizeof(LOC86)); LOC86[0] = gentypeinfo_536941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_549247_839829468]).typ); appcg_533632_839829468((*p0).module, &orexpr0, isobjformat0, LOC86, 1); res_549524_839829468 += ((NI) 1); } LA74: ; } } { if (!(((NI) 1) < i0)) goto LA89; line_533695_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_620)); } LA89: ; memset((void*)LOC91, 0, sizeof(LOC91)); LOC91[0] = orexpr0; LOC92 = (NI)0; LOC92 = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_639), LOC91, 1); memset((void*)LOC93, 0, sizeof(LOC93)); LOC93[0] = safepoint0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_636), LOC93, 1); expr_540248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0); memset((void*)LOC94, 0, sizeof(LOC94)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_606), LOC94, 0); endblock_545060_839829468(p0); 
} LA58: ; i0 += ((NI) 1); } LA49: ; } (*p0).inexceptblock -= ((NI) 1); LOC95 = (Tnode293802*)0; LOC95 = pop_319246_1689653243((&(*p0).nestedtrystmts)); endblock_545060_839829468(p0); { NIM_BOOL LOC98; Ropeobj179006* LOC102; LOC98 = (NIM_BOOL)0; LOC98 = (i0 < length0); if (!(LOC98)) goto LA99; LOC98 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind293020) 107)); LA99: ; if (!LOC98) goto LA100; (*p0).finallysafepoints = (TY192350*) incrSeqV2(&((*p0).finallysafepoints)->Sup, sizeof(Ropeobj179006*)); asgnRefNoCycle((void**) (&(*p0).finallysafepoints->data[(*p0).finallysafepoints->Sup.len]), safepoint0); ++(*p0).finallysafepoints->Sup.len; gensimpleblock_545095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]); LOC102 = (Ropeobj179006*)0; LOC102 = pop_179530_1689653243((&(*p0).finallysafepoints)); } LA100: ; memset((void*)LOC103, 0, sizeof(LOC103)); LOC103[0] = safepoint0; linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_640), LOC103, 1); } N_NIMCALL(NimStringDesc*, getraisefrmt_547824_839829468)(Tcproc530021* p0) { NimStringDesc* result0; result0 = (NimStringDesc*)0; result0 = copyString(((NimStringDesc*) &T839829468_641)); return result0; } N_NIMCALL(void, genraisestmt_547828_839829468)(Tcproc530021* p0, Tnode293802* t0) { { Tnode293802* finallyblock0; if (!(((NI) 0) < (*p0).inexceptblock)) goto LA3; finallyblock0 = lastson_296364_850551059((*p0).nestedtrystmts->data[(NI)(((*p0).nestedtrystmts ? 
(*p0).nestedtrystmts->Sup.len : 0) - ((NI) 1))]); { if (!((*finallyblock0).kind == ((Tnodekind293020) 107))) goto LA7; gensimpleblock_545095_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]); } LA7: ; } LA3: ; { Tloc293816 a0; Ropeobj179006* e0; Ttype293840* typ0; NimStringDesc* LOC13; TY533811 LOC14; if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 1)))) goto LA11; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); e0 = rdloc_539188_839829468(a0); typ0 = skiptypes_297099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106247256320)); genlinedir_533823_839829468(p0, t0); LOC13 = (NimStringDesc*)0; LOC13 = getraisefrmt_547824_839829468(p0); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = e0; LOC14[1] = makecstring_192638_155036129((*(*(*typ0).sym).name).s); linecg_533707_839829468(p0, ((Tcprocsection530011) 2), LOC13, LOC14, 2); } goto LA9; LA11: ; { genlinedir_533823_839829468(p0, t0); { NIM_BOOL LOC18; NIM_BOOL LOC19; TY534289 LOC24; Ropeobj179006* LOC25; LOC18 = (NIM_BOOL)0; LOC19 = (NIM_BOOL)0; LOC19 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC19) goto LA20; LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA20: ; LOC18 = LOC19; if (!(LOC18)) goto LA21; LOC18 = !(((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 31))&63U)))!=0)); LA21: ; if (!LOC18) goto LA22; memset((void*)LOC24, 0, sizeof(LOC24)); LOC25 = (Ropeobj179006*)0; LOC25 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_623), LOC24, 0); line_533690_839829468(p0, ((Tcprocsection530011) 2), LOC25); } goto LA16; LA22: ; { TY534289 LOC27; memset((void*)LOC27, 0, sizeof(LOC27)); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_642), LOC27, 0); } LA16: ; } LA9: ; } N_NIMCALL(void, gentypesection_539184_839829468)(Tcgen530027* m0, Tnode293802* n0) { } 
N_NIMCALL(Tcfilesection530005, determinesection_549819_839829468)(Tnode293802* n0) { Tcfilesection530005 result0; result0 = (Tcfilesection530005)0; result0 = ((Tcfilesection530005) 7); { NIM_BOOL LOC3; NI LOC4; NimStringDesc* sec0; LOC3 = (NIM_BOOL)0; LOC4 = (NI)0; LOC4 = len_294081_850551059(n0); LOC3 = (((NI) 1) <= LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind >= ((Tnodekind293020) 20) && (*(*n0).kindU.S6.sons->data[((NI) 0)]).kind <= ((Tnodekind293020) 22)); LA5: ; if (!LOC3) goto LA6; sec0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S3.strval; { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; LOC10 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_643)); if (!LOC10) goto LA11; result0 = ((Tcfilesection530005) 3); } goto LA8; LA11: ; { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_644)); if (!LOC14) goto LA15; result0 = ((Tcfilesection530005) 9); } goto LA8; LA15: ; { NIM_BOOL LOC18; LOC18 = (NIM_BOOL)0; LOC18 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_645)); if (!LOC18) goto LA19; result0 = ((Tcfilesection530005) 1); } goto LA8; LA19: ; LA8: ; } LA6: ; return result0; } N_NIMCALL(void, genemit_549839_839829468)(Tcproc530021* p0, Tnode293802* t0) { Ropeobj179006* s0; s0 = genasmoremitstmt_549529_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], NIM_FALSE); { Tcfilesection530005 section0; Tnode293802* LOC5; if (!((*p0).prc == NIM_NIL)) goto LA3; LOC5 = (Tnode293802*)0; LOC5 = HEX5BHEX5D_294238_850551059(t0, ((NI) 1)); section0 = determinesection_549819_839829468(LOC5); genclinedir_533813_839829468(&(*(*p0).module).s[(section0)- 0], (*t0).info); add_179482_2381377266(&(*(*p0).module).s[(section0)- 0], s0); } goto LA1; LA3: ; { genlinedir_533823_839829468(p0, t0); line_533690_839829468(p0, ((Tcprocsection530011) 2), s0); } LA1: ; } N_NIMCALL(void, genbreakpoint_549862_839829468)(Tcproc530021* p0, Tnode293802* t0) { NimStringDesc* name0; name0 = (NimStringDesc*)0; { TY536238 
LOC12; NI LOC13; NimStringDesc* LOC14; if (!(((*p0).options &(1U<<((NU)(((Toption170009) 17))&31U)))!=0)) goto LA3; { if (!((*t0).kind == ((Tnodekind293020) 34))) goto LA7; name0 = nsuNormalize((*(*t0).kindU.S6.sons->data[((NI) 1)]).kindU.S3.strval); } goto LA5; LA7: ; { NimStringDesc* LOC10; NimStringDesc* LOC11; breakpointid_549860_839829468 += ((NI) 1); LOC10 = (NimStringDesc*)0; LOC11 = (NimStringDesc*)0; LOC11 = nimIntToStr(breakpointid_549860_839829468); LOC10 = rawNewString(LOC11->Sup.len + 2); appendString(LOC10, ((NimStringDesc*) &T839829468_646)); appendString(LOC10, LOC11); name0 = LOC10; } LA5: ; genlinedir_533823_839829468(p0, t0); memset((void*)LOC12, 0, sizeof(LOC12)); LOC13 = (NI)0; LOC13 = tolinenumber_193415_155036129((*t0).info); LOC12[0] = rope_179401_2381377266(((NI64) (LOC13))); LOC14 = (NimStringDesc*)0; LOC14 = tofilename_193260_155036129((*t0).info.fileindex); LOC12[1] = makecstring_192638_155036129(LOC14); LOC12[2] = makecstring_192638_155036129(name0); appcg_533632_839829468((*p0).module, &gbreakpoints_549861_839829468, ((NimStringDesc*) &T839829468_647), LOC12, 3); } LA3: ; } N_NIMCALL(void, genwatchpoint_550016_839829468)(Tcproc530021* p0, Tnode293802* n0) { Tloc293816 a0; Ttype293840* typ0; TY536238 LOC5; NimStringDesc* LOC6; { { if (!!((((*p0).options &(1U<<((NU)(((Toption170009) 17))&31U)))!=0))) goto LA3; goto BeforeRet; } LA3: ; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0)); typ0 = skiptypes_297099_850551059((*(*n0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = addrloc_539204_839829468(a0); LOC6 = (NimStringDesc*)0; LOC6 = rendertree_312044_382274130((*n0).kindU.S6.sons->data[((NI) 1)], 0); LOC5[1] = makecstring_192638_155036129(LOC6); LOC5[2] = gentypeinfo_536941_839829468((*p0).module, typ0); linecg_533707_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_648), LOC5, 3); 
}BeforeRet: ; } N_NIMCALL(void, genpragma_550039_839829468)(Tcproc530021* p_550041_839829468, Tnode293802* n0) { { NI i_550054_839829468; NI HEX3Atmp_550073_839829468; NI LOC2; NI res_550076_839829468; i_550054_839829468 = (NI)0; HEX3Atmp_550073_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_296351_850551059(n0); HEX3Atmp_550073_839829468 = (NI)(LOC2 - ((NI) 1)); res_550076_839829468 = ((NI) 0); { while (1) { Tnode293802* it0; Tspecialword276003 LOC5; if (!(res_550076_839829468 <= HEX3Atmp_550073_839829468)) goto LA4; i_550054_839829468 = res_550076_839829468; it0 = (*n0).kindU.S6.sons->data[i_550054_839829468]; LOC5 = (Tspecialword276003)0; LOC5 = whichpragma_319911_2616423590(it0); switch (LOC5) { case ((Tspecialword276003) 191): { genemit_549839_839829468(p_550041_839829468, it0); } break; case ((Tspecialword276003) 131): { genbreakpoint_549862_839829468(p_550041_839829468, it0); } break; case ((Tspecialword276003) 176): { genwatchpoint_550016_839829468(p_550041_839829468, it0); } break; case ((Tspecialword276003) 183): { Tcproc530021* p0; Ropeobj179006** LOC10; p0 = newproc_530206_3723162438(NIM_NIL, (*p_550041_839829468).module); (*p0).options = ((*p0).options & ~ 98304); genstmts_540244_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)]); LOC10 = (Ropeobj179006**)0; LOC10 = s_530179_3723162438(p0, ((Tcprocsection530011) 2)); asgnRefNoCycle((void**) (&(*(*p0).module).injectstmt), (*LOC10)); } break; default: { } break; } res_550076_839829468 += ((NI) 1); } LA4: ; } } } N_NIMCALL(void, genparforstmt_547208_839829468)(Tcproc530021* p0, Tnode293802* t0) { NI oldbreakidx_547411_839829468; Tsym293834* forloopvar0; Tloc293816 rangea0; Tloc293816 rangeb0; Tnode293802* call0; TY536235 LOC1; NimStringDesc* LOC2; TY534289 LOC3; (*p0).withinloop += ((NI) 1); genlinedir_533823_839829468(p0, t0); oldbreakidx_547411_839829468 = (*p0).breakidx; forloopvar0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; memset((void*)(&rangea0), 0, sizeof(rangea0)); 
memset((void*)(&rangeb0), 0, sizeof(rangeb0)); assignlocalvar_539614_839829468(p0, forloopvar0); call0 = (*t0).kindU.S6.sons->data[((NI) 1)]; initlocexpr_540283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 1)], (&rangea0)); initlocexpr_540283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 2)], (&rangeb0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_539188_839829468((*forloopvar0).loc); LOC1[1] = rdloc_539188_839829468(rangea0); LOC1[2] = rdloc_539188_839829468(rangeb0); LOC2 = (NimStringDesc*)0; LOC2 = getstr_298230_850551059((*call0).kindU.S6.sons->data[((NI) 3)]); LOC1[3] = rope_179277_2381377266(LOC2); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_649), LOC1, 4); memset((void*)LOC3, 0, sizeof(LOC3)); (*p0).breakidx = startblock_544978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC3, 0); (*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE; genstmts_540244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 2)]); endblock_545060_839829468(p0); (*p0).breakidx = oldbreakidx_547411_839829468; (*p0).withinloop -= ((NI) 1); } N_NIMCALL(void, genstate_545117_839829468)(Tcproc530021* p0, Tnode293802* n0) { NI64 idx0; TY179507 LOC9; { NIM_BOOL LOC3; NI LOC4; NimStringDesc* LOC8; LOC3 = (NIM_BOOL)0; LOC4 = (NI)0; LOC4 = len_294081_850551059(n0); LOC3 = (LOC4 == ((NI) 1)); if (!(LOC3)) goto LA5; LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 6)); LA5: ; if (!!(LOC3)) goto LA6; LOC8 = (NimStringDesc*)0; LOC8 = HEX24_197185_1689653243(T839829468_650); internalerror_197113_155036129(LOC8); } LA6: ; idx0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = rope_179401_2381377266(idx0); linefmt_533714_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_652), LOC9, 1); } N_NIMCALL(void, gengotostate_545144_839829468)(Tcproc530021* p0, Tnode293802* n0) { Tloc293816 a0; TY179507 LOC1; TY534289 LOC2; TY534289 LOC7; 
memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_539188_839829468(a0); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_603), LOC1, 1); (*p0).beforeretneeded = NIM_TRUE; memset((void*)LOC2, 0, sizeof(LOC2)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_653), LOC2, 0); { NI64 i_545214_839829468; NI64 HEX3Atmp_545223_839829468; NI64 res_545226_839829468; i_545214_839829468 = (NI64)0; HEX3Atmp_545223_839829468 = (NI64)0; HEX3Atmp_545223_839829468 = lastord_321004_3876443242((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ); res_545226_839829468 = IL64(0); { while (1) { TY179507 LOC6; if (!(res_545226_839829468 <= HEX3Atmp_545223_839829468)) goto LA5; i_545214_839829468 = res_545226_839829468; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rope_179401_2381377266(i_545214_839829468); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_654), LOC6, 1); res_545226_839829468 += ((NI) 1); } LA5: ; } } memset((void*)LOC7, 0, sizeof(LOC7)); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_160), LOC7, 0); } N_NIMCALL(void, genbreakstate_545229_839829468)(Tcproc530021* p0, Tnode293802* n0) { Tloc293816 a0; memset((void*)(&a0), 0, sizeof(a0)); { TY179507 LOC5; if (!((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 155))) goto LA3; initlocexpr_540283_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_539188_839829468(a0); linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_655), LOC5, 1); } goto LA1; LA3: ; { TY179507 LOC7; initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_539188_839829468(a0); 
linef_533700_839829468(p0, ((Tcprocsection530011) 2), ((NimStringDesc*) &T839829468_656), LOC7, 1); } LA1: ; } N_NIMCALL(void, expr_540248_839829468)(Tcproc530021* p0, Tnode293802* n0, Tloc293816* d0) { switch ((*n0).kind) { case ((Tnodekind293020) 3): { Tsym293834* sym0; sym0 = (*n0).kindU.S4.sym; switch ((*sym0).kind) { case ((Tsymkind293435) 13): { { if (!!(((33554448 & (*sym0).flags) == 0))) goto LA5; fillprocloc_540201_839829468(sym0); genprocprototype_540254_839829468((*p0).module, sym0); } goto LA3; LA5: ; { genproc_533951_839829468((*p0).module, sym0); } LA3: ; putlocintodest_540258_839829468(p0, d0, (*sym0).loc); } break; case ((Tsymkind293435) 12): case ((Tsymkind293435) 15): case ((Tsymkind293435) 14): { { NimStringDesc* LOC13; if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag293184) 23))&31U)))!=0)) goto LA11; LOC13 = (NimStringDesc*)0; LOC13 = rawNewString((*(*sym0).name).s->Sup.len + 48); appendString(LOC13, ((NimStringDesc*) &T839829468_270)); appendString(LOC13, (*(*sym0).name).s); localerror_197085_155036129((*n0).info, LOC13); } LA11: ; genproc_533951_839829468((*p0).module, sym0); { NIM_BOOL LOC16; NimStringDesc* LOC20; LOC16 = (NIM_BOOL)0; LOC16 = ((*sym0).loc.r == NIM_NIL); if (LOC16) goto LA17; LOC16 = ((*sym0).loc.t == NIM_NIL); LA17: ; if (!LOC16) goto LA18; LOC20 = (NimStringDesc*)0; LOC20 = rawNewString((*(*sym0).name).s->Sup.len + 20); appendString(LOC20, ((NimStringDesc*) &T839829468_271)); appendString(LOC20, (*(*sym0).name).s); internalerror_197100_155036129((*n0).info, LOC20); } LA18: ; putlocintodest_540258_839829468(p0, d0, (*sym0).loc); } break; case ((Tsymkind293435) 10): { { NIM_BOOL LOC24; Ropeobj179006* LOC27; LOC24 = (NIM_BOOL)0; LOC24 = issimpleconst_533311_839829468((*sym0).typ); if (!LOC24) goto LA25; LOC27 = (Ropeobj179006*)0; LOC27 = genliteral_550476_839829468(p0, (*sym0).ast, (*sym0).typ); putintodest_551468_839829468(p0, d0, (*n0).typ, LOC27, ((Tstorageloc293812) 1)); } goto LA22; LA25: ; { 
gencomplexconst_559249_839829468(p0, sym0, d0); } LA22: ; } break; case ((Tsymkind293435) 19): { Ropeobj179006* LOC30; LOC30 = (Ropeobj179006*)0; LOC30 = rope_179401_2381377266(((NI64) ((*sym0).position))); putintodest_551468_839829468(p0, d0, (*n0).typ, LOC30, ((Tstorageloc293812) 0)); } break; case ((Tsymkind293435) 8): case ((Tsymkind293435) 20): case ((Tsymkind293435) 11): case ((Tsymkind293435) 9): { { if (!!(((4194312 & (*sym0).flags) == 0))) goto LA34; genvarprototype_540236_839829468((*p0).module, sym0); } LA34: ; { NIM_BOOL LOC38; NimStringDesc* LOC42; NimStringDesc* LOC43; LOC38 = (NIM_BOOL)0; LOC38 = ((*sym0).loc.r == NIM_NIL); if (LOC38) goto LA39; LOC38 = ((*sym0).loc.t == NIM_NIL); LA39: ; if (!LOC38) goto LA40; LOC42 = (NimStringDesc*)0; LOC43 = (NimStringDesc*)0; LOC43 = nimIntToStr((*sym0).Sup.id); LOC42 = rawNewString((*(*sym0).name).s->Sup.len + LOC43->Sup.len + 20); appendString(LOC42, ((NimStringDesc*) &T839829468_285)); appendString(LOC42, (*(*sym0).name).s); appendString(LOC42, ((NimStringDesc*) &T839829468_12)); appendString(LOC42, LOC43); internalerror_197100_155036129((*n0).info, LOC42); } LA40: ; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag293184) 22))&31U)))!=0)) goto LA46; accessthreadlocalvar_533945_839829468(p0, sym0); { NIM_BOOL LOC50; Ropeobj179006* LOC53; LOC50 = (NIM_BOOL)0; LOC50 = emulatedthreadvars_533949_839829468(); if (!LOC50) goto LA51; LOC53 = (Ropeobj179006*)0; LOC53 = HEX26_179452_2381377266(((NimStringDesc*) &T839829468_288), (*sym0).loc.r); putintodest_551468_839829468(p0, d0, (*sym0).loc.t, LOC53, ((Tstorageloc293812) 0)); } goto LA48; LA51: ; { putlocintodest_540258_839829468(p0, d0, (*sym0).loc); } LA48: ; } goto LA44; LA46: ; { putlocintodest_540258_839829468(p0, d0, (*sym0).loc); } LA44: ; } break; case ((Tsymkind293435) 5): { { NIM_BOOL LOC59; NimStringDesc* LOC63; NimStringDesc* LOC64; LOC59 = (NIM_BOOL)0; LOC59 = ((*sym0).loc.r == NIM_NIL); if (LOC59) goto LA60; LOC59 = ((*sym0).loc.t == NIM_NIL); LA60: ; if 
(!LOC59) goto LA61; LOC63 = (NimStringDesc*)0; LOC64 = (NimStringDesc*)0; LOC64 = nimIntToStr((*sym0).Sup.id); LOC63 = rawNewString((*(*sym0).name).s->Sup.len + LOC64->Sup.len + 21); appendString(LOC63, ((NimStringDesc*) &T839829468_289)); appendString(LOC63, (*(*sym0).name).s); appendString(LOC63, ((NimStringDesc*) &T839829468_12)); appendString(LOC63, LOC64); internalerror_197100_155036129((*n0).info, LOC63); } LA61: ; putlocintodest_540258_839829468(p0, d0, (*sym0).loc); } break; case ((Tsymkind293435) 3): { { NIM_BOOL LOC68; NimStringDesc* LOC72; NimStringDesc* LOC73; LOC68 = (NIM_BOOL)0; LOC68 = ((*sym0).loc.r == NIM_NIL); if (LOC68) goto LA69; LOC68 = ((*sym0).loc.t == NIM_NIL); LA69: ; if (!LOC68) goto LA70; LOC72 = (NimStringDesc*)0; LOC73 = (NimStringDesc*)0; LOC73 = nimIntToStr((*sym0).Sup.id); LOC72 = rawNewString((*(*sym0).name).s->Sup.len + LOC73->Sup.len + 22); appendString(LOC72, ((NimStringDesc*) &T839829468_290)); appendString(LOC72, (*(*sym0).name).s); appendString(LOC72, ((NimStringDesc*) &T839829468_12)); appendString(LOC72, LOC73); internalerror_197100_155036129((*n0).info, LOC72); } LA70: ; putlocintodest_540258_839829468(p0, d0, (*sym0).loc); } break; default: { NimStringDesc* LOC75; LOC75 = (NimStringDesc*)0; LOC75 = rawNewString(reprEnum((NI)(*sym0).kind, (&NTI293435))->Sup.len + 22); appendString(LOC75, ((NimStringDesc*) &T839829468_291)); appendString(LOC75, reprEnum((NI)(*sym0).kind, (&NTI293435))); appendString(LOC75, ((NimStringDesc*) &T839829468_292)); internalerror_197100_155036129((*n0).info, LOC75); } break; } } break; case ((Tnodekind293020) 23): { { NIM_BOOL LOC79; Ropeobj179006* LOC82; LOC79 = (NIM_BOOL)0; LOC79 = isemptytype_298440_850551059((*n0).typ); if (!!(LOC79)) goto LA80; LOC82 = (Ropeobj179006*)0; LOC82 = genliteral_540273_839829468(p0, n0); putintodest_551468_839829468(p0, d0, (*n0).typ, LOC82, ((Tstorageloc293812) 0)); } LA80: ; } break; case ((Tnodekind293020) 20) ... 
((Tnodekind293020) 22): { Ropeobj179006* LOC84; LOC84 = (Ropeobj179006*)0; LOC84 = genliteral_540273_839829468(p0, n0); putdataintodest_551436_839829468(p0, d0, (*n0).typ, LOC84); } break; case ((Tnodekind293020) 6) ... ((Tnodekind293020) 15): case ((Tnodekind293020) 16) ... ((Tnodekind293020) 19): case ((Tnodekind293020) 5): { Ropeobj179006* LOC86; LOC86 = (Ropeobj179006*)0; LOC86 = genliteral_540273_839829468(p0, n0); putintodest_551468_839829468(p0, d0, (*n0).typ, LOC86, ((Tstorageloc293812) 0)); } break; case ((Tnodekind293020) 27): case ((Tnodekind293020) 32): case ((Tnodekind293020) 29): case ((Tnodekind293020) 30): case ((Tnodekind293020) 31): case ((Tnodekind293020) 26): case ((Tnodekind293020) 28): { Tnode293802* op0; genlinedir_533823_839829468(p0, n0); op0 = (*n0).kindU.S6.sons->data[((NI) 0)]; { Tloc293816 a0; if (!(*n0).typ == 0) goto LA90; memset((void*)(&a0), 0, sizeof(a0)); { NIM_BOOL LOC94; LOC94 = (NIM_BOOL)0; LOC94 = ((*op0).kind == ((Tnodekind293020) 3)); if (!(LOC94)) goto LA95; LOC94 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic293524) 0))); LA95: ; if (!LOC94) goto LA96; genmagicexpr_558033_839829468(p0, n0, (&a0), (*(*op0).kindU.S4.sym).magic); } goto LA92; LA96: ; { gencall_544632_839829468(p0, n0, (&a0)); } LA92: ; } goto LA88; LA90: ; { { NIM_BOOL LOC102; LOC102 = (NIM_BOOL)0; LOC102 = ((*op0).kind == ((Tnodekind293020) 3)); if (!(LOC102)) goto LA103; LOC102 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic293524) 0))); LA103: ; if (!LOC102) goto LA104; genmagicexpr_558033_839829468(p0, n0, d0, (*(*op0).kindU.S4.sym).magic); } goto LA100; LA104: ; { gencall_544632_839829468(p0, n0, d0); } LA100: ; } LA88: ; } break; case ((Tnodekind293020) 39): { { NIM_BOOL LOC110; NI LOC112; Ropeobj179006* LOC115; LOC110 = (NIM_BOOL)0; LOC110 = isdeepconstexpr_319566_2616423590(n0); if (!(LOC110)) goto LA111; LOC112 = (NI)0; LOC112 = len_294081_850551059(n0); LOC110 = !((LOC112 == ((NI) 0))); LA111: ; if (!LOC110) goto LA113; LOC115 = (Ropeobj179006*)0; 
LOC115 = gensetnode_550664_839829468(p0, n0); putintodest_551468_839829468(p0, d0, (*n0).typ, LOC115, ((Tstorageloc293812) 0)); } goto LA108; LA113: ; { gensetconstr_558496_839829468(p0, n0, d0); } LA108: ; } break; case ((Tnodekind293020) 41): { { NIM_BOOL LOC120; NI LOC122; LOC120 = (NIM_BOOL)0; LOC120 = isdeepconstexpr_319566_2616423590(n0); if (!(LOC120)) goto LA121; LOC122 = (NI)0; LOC122 = len_294081_850551059(n0); LOC120 = !((LOC122 == ((NI) 0))); LA121: ; if (!LOC120) goto LA123; exprcomplexconst_559684_839829468(p0, n0, d0); } goto LA118; LA123: ; { Ttype293840* LOC126; LOC126 = (Ttype293840*)0; LOC126 = skiptypes_297099_850551059((*n0).typ, IL64(211106242013440)); if (!((*LOC126).kind == ((Ttypekind293244) 24))) goto LA127; genseqconstr_556004_839829468(p0, n0, d0); } goto LA118; LA127: ; { genarrayconstr_559207_839829468(p0, n0, d0); } LA118: ; } break; case ((Tnodekind293020) 37): { { NIM_BOOL LOC133; NI LOC135; LOC133 = (NIM_BOOL)0; LOC133 = isdeepconstexpr_319566_2616423590(n0); if (!(LOC133)) goto LA134; LOC135 = (NI)0; LOC135 = len_294081_850551059(n0); LOC133 = !((LOC135 == ((NI) 0))); LA134: ; if (!LOC133) goto LA136; exprcomplexconst_559684_839829468(p0, n0, d0); } goto LA131; LA136: ; { gentupleconstr_558618_839829468(p0, n0, d0); } LA131: ; } break; case ((Tnodekind293020) 38): { genobjconstr_555903_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 61): { gencast_557537_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 58): case ((Tnodekind293020) 59): case ((Tnodekind293020) 60): { genconv_557632_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 64): case ((Tnodekind293020) 63): { genaddr_554051_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 42): { genbracketexpr_555277_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 47): case ((Tnodekind293020) 65): { genderef_544921_839829468(p0, n0, d0, NIM_FALSE); } break; case ((Tnodekind293020) 45): { genrecordfield_554448_839829468(p0, n0, d0); } break; case 
((Tnodekind293020) 46): { gencheckedrecordfield_555046_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 127): case ((Tnodekind293020) 112): { genblock_547083_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 126): { genstmtlistexpr_559402_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 115): { { NI i_560023_839829468; NI HEX3Atmp_560276_839829468; NI LOC151; NI res_560279_839829468; i_560023_839829468 = (NI)0; HEX3Atmp_560276_839829468 = (NI)0; LOC151 = (NI)0; LOC151 = sonslen_296351_850551059(n0); HEX3Atmp_560276_839829468 = (NI)(LOC151 - ((NI) 1)); res_560279_839829468 = ((NI) 0); { while (1) { if (!(res_560279_839829468 <= HEX3Atmp_560276_839829468)) goto LA153; i_560023_839829468 = res_560279_839829468; genstmts_540244_839829468(p0, (*n0).kindU.S6.sons->data[i_560023_839829468]); res_560279_839829468 += ((NI) 1); } LA153: ; } } } break; case ((Tnodekind293020) 48): case ((Tnodekind293020) 92): { genif_545982_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 93): { expr_540248_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[((NI) 0)], d0); } break; case ((Tnodekind293020) 66): { downconv_559581_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 67): { upconv_559431_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 68): { genrangechck_557590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_563)); } break; case ((Tnodekind293020) 69): { genrangechck_557590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_564)); } break; case ((Tnodekind293020) 70): { genrangechck_557590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_565)); } break; case ((Tnodekind293020) 71): { convstrtocstr_557642_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 72): { convcstrtostr_557654_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 51): case ((Tnodekind293020) 52): { Tsym293834* sym0; sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; genproc_533951_839829468((*p0).module, sym0); { NIM_BOOL 
LOC166; NimStringDesc* LOC170; LOC166 = (NIM_BOOL)0; LOC166 = ((*sym0).loc.r == NIM_NIL); if (LOC166) goto LA167; LOC166 = ((*sym0).loc.t == NIM_NIL); LA167: ; if (!LOC166) goto LA168; LOC170 = (NimStringDesc*)0; LOC170 = rawNewString((*(*sym0).name).s->Sup.len + 20); appendString(LOC170, ((NimStringDesc*) &T839829468_271)); appendString(LOC170, (*(*sym0).name).s); internalerror_197100_155036129((*n0).info, LOC170); } LA168: ; putlocintodest_540258_839829468(p0, d0, (*sym0).loc); } break; case ((Tnodekind293020) 155): { genclosure_558836_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 1): { } break; case ((Tnodekind293020) 96): { genwhilestmt_546984_839829468(p0, n0); } break; case ((Tnodekind293020) 99): case ((Tnodekind293020) 100): { genvarstmt_545854_839829468(p0, n0); } break; case ((Tnodekind293020) 101): { genconststmt_545909_839829468(p0, n0); } break; case ((Tnodekind293020) 94): { internalerror_197100_155036129((*n0).info, ((NimStringDesc*) &T839829468_594)); } break; case ((Tnodekind293020) 97): { gencase_548826_839829468(p0, n0, d0); } break; case ((Tnodekind293020) 109): { genreturnstmt_546617_839829468(p0, n0); } break; case ((Tnodekind293020) 110): { genbreakstmt_547444_839829468(p0, n0); } break; case ((Tnodekind293020) 73): { { if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag293427) 14))&15U)))!=0))) goto LA183; genasgn_550239_839829468(p0, n0, NIM_FALSE); } LA183: ; } break; case ((Tnodekind293020) 74): { { if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag293427) 14))&15U)))!=0))) goto LA188; genasgn_550239_839829468(p0, n0, !(((*p0).prc == NIM_NIL))); } LA188: ; } break; case ((Tnodekind293020) 114): { { Tloc293816 a0; if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind293020) 1)))) goto LA193; genlinedir_533823_839829468(p0, n0); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_540283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); } LA193: ; } break; case ((Tnodekind293020) 89): { genasmstmt_549659_839829468(p0, n0); } 
break; case ((Tnodekind293020) 106): { { NIM_BOOL LOC199; NIM_BOOL LOC200; LOC199 = (NIM_BOOL)0; LOC200 = (NIM_BOOL)0; LOC200 = (gcmd_170132_2607990831 == ((Tcommands170076) 2)); if (LOC200) goto LA201; LOC200 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA201: ; LOC199 = LOC200; if (!(LOC199)) goto LA202; LOC199 = !(((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 31))&63U)))!=0)); LA202: ; if (!LOC199) goto LA203; gentrycpp_548865_839829468(p0, n0, d0); } goto LA197; LA203: ; { gentry_549114_839829468(p0, n0, d0); } LA197: ; } break; case ((Tnodekind293020) 108): { genraisestmt_547828_839829468(p0, n0); } break; case ((Tnodekind293020) 98): { gentypesection_539184_839829468((*p0).module, n0); } break; case ((Tnodekind293020) 125): case ((Tnodekind293020) 84): case ((Tnodekind293020) 121): case ((Tnodekind293020) 116): case ((Tnodekind293020) 117): case ((Tnodekind293020) 118): case ((Tnodekind293020) 119): case ((Tnodekind293020) 120): case ((Tnodekind293020) 83): case ((Tnodekind293020) 82): { } break; case ((Tnodekind293020) 90): { genpragma_550039_839829468(p0, n0); } break; case ((Tnodekind293020) 91): { Tnode293802* LOC211; LOC211 = (Tnode293802*)0; LOC211 = lastson_296364_850551059(n0); expr_540248_839829468(p0, LOC211, d0); } break; case ((Tnodekind293020) 79): case ((Tnodekind293020) 80): case ((Tnodekind293020) 81): { { Tsym293834* prc0; if (!((*(*n0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind293020) 1))) goto LA215; prc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { NIM_BOOL LOC219; Tsym293834* LOC220; LOC219 = (NIM_BOOL)0; LOC220 = (Tsym293834*)0; LOC220 = skipgenericowner_298279_850551059(prc0); LOC219 = ((*LOC220).kind == ((Tsymkind293435) 6)); if (!(LOC219)) goto LA221; LOC219 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag293184) 23))&31U)))!=0)); LA221: ; if (!LOC219) goto LA222; { NIM_BOOL LOC226; NIM_BOOL LOC227; NIM_BOOL LOC228; NIM_BOOL LOC229; Tsym293834* LOC231; 
NIM_BOOL LOC234; LOC226 = (NIM_BOOL)0; LOC227 = (NIM_BOOL)0; LOC228 = (NIM_BOOL)0; LOC229 = (NIM_BOOL)0; LOC229 = !(((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 2))&63U)))!=0)); if (!(LOC229)) goto LA230; LOC231 = (Tsym293834*)0; LOC231 = getmodule_300123_2984716966(prc0); LOC229 = !((((*LOC231).flags &(1U<<((NU)(((Tsymflag293184) 25))&31U)))!=0)); LA230: ; LOC228 = LOC229; if (LOC228) goto LA232; LOC228 = ((65600 & (*prc0).flags) == 64); LA232: ; LOC227 = LOC228; if (LOC227) goto LA233; LOC234 = (NIM_BOOL)0; LOC234 = (((*prc0).flags &(1U<<((NU)(((Tsymflag293184) 6))&31U)))!=0); if (!(LOC234)) goto LA235; LOC234 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag293810) 5))&15U)))!=0); LA235: ; LOC227 = LOC234; LA233: ; LOC226 = LOC227; if (LOC226) goto LA236; LOC226 = ((*prc0).kind == ((Tsymkind293435) 13)); LA236: ; if (!LOC226) goto LA237; { NIM_BOOL LOC241; Tnode293802* LOC242; LOC241 = (NIM_BOOL)0; LOC242 = (Tnode293802*)0; LOC242 = getbody_336227_1724185294(prc0); LOC241 = !(((*LOC242).kind == ((Tnodekind293020) 1))); if (LOC241) goto LA243; LOC241 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag293810) 4))&15U)))!=0); LA243: ; if (!LOC241) goto LA244; genproc_533951_839829468((*p0).module, prc0); } LA244: ; } LA237: ; } LA222: ; } LA215: ; } break; case ((Tnodekind293020) 95): { genparforstmt_547208_839829468(p0, n0); } break; case ((Tnodekind293020) 157): { genstate_545117_839829468(p0, n0); } break; case ((Tnodekind293020) 156): { gengotostate_545144_839829468(p0, n0); } break; case ((Tnodekind293020) 158): { genbreakstate_545229_839829468(p0, n0); } break; default: { NimStringDesc* LOC251; LOC251 = (NimStringDesc*)0; LOC251 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI293020))->Sup.len + 25); appendString(LOC251, ((NimStringDesc*) &T839829468_291)); appendString(LOC251, reprEnum((NI)(*n0).kind, (&NTI293020))); appendString(LOC251, ((NimStringDesc*) &T839829468_657)); internalerror_197100_155036129((*n0).info, LOC251); } break; } } 
N_NIMCALL(void, genstmts_540244_839829468)(Tcproc530021* p0, Tnode293802* t0) { Tloc293816 a0; memset((void*)(&a0), 0, sizeof(a0)); expr_540248_839829468(p0, t0, (&a0)); { NimStringDesc* LOC5; if (!!(((7 &(1U<<((NU)(a0.k)&15U)))!=0))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = HEX24_197185_1689653243(T839829468_658); internalerror_197113_155036129(LOC5); } LA3: ; } N_NIMCALL(Tnode293802*, myprocess_564402_839829468)(Tpasscontext342002* b0, Tnode293802* n0) { Tnode293802* result0; Tcgen530027* m0; { result0 = (Tnode293802*)0; result0 = n0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (b0 == NIM_NIL); if (LOC3) goto LA4; LOC3 = skipcodegen_342085_2355241294(n0); LA4: ; if (!LOC3) goto LA5; goto BeforeRet; } LA5: ; m0 = ((Tcgen530027*) (b0)); (*(*m0).initproc).options = initprocoptions_563635_839829468(m0); genstmts_540244_839829468((*m0).initproc, n0); }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj179006*, getsomeinitname_562904_839829468)(Tsym293834* m0, NimStringDesc* suffix0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { NimStringDesc* LOC5; if (!((12288 & (*m0).flags) == 0)) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = mangle_529847_2036603609((*(*(*m0).owner).name).s); result0 = rope_179277_2381377266(LOC5); add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_12)); } LA3: ; add_179487_2381377266(&result0, (*(*m0).name).s); add_179487_2381377266(&result0, suffix0); return result0; } N_NIMCALL(Ropeobj179006*, getinitname_563235_839829468)(Tsym293834* m0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = getsomeinitname_562904_839829468(m0, ((NimStringDesc*) &T839829468_659)); return result0; } N_NIMCALL(Ropeobj179006*, getdatinitname_563239_839829468)(Tsym293834* m0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = getsomeinitname_562904_839829468(m0, ((NimStringDesc*) &T839829468_660)); return result0; } N_NIMCALL(void, registermoduletomain_563243_839829468)(Tsym293834* m0) { Ropeobj179006* init0; Ropeobj179006* 
datinit0; TY179507 LOC1; TY179507 LOC2; init0 = getinitname_563235_839829468(m0); datinit0 = getdatinitname_563239_839829468(m0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = init0; addf_180205_2381377266(&mainmodprocs_530148_3723162438, ((NimStringDesc*) &T839829468_661), LOC1, 1); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = datinit0; addf_180205_2381377266(&mainmodprocs_530148_3723162438, ((NimStringDesc*) &T839829468_661), LOC2, 1); { TY179507 LOC7; Ropeobj179006* initcall0; TY179507 LOC8; if (!!((((*m0).flags &(1U<<((NU)(((Tsymflag293184) 13))&31U)))!=0))) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = datinit0; addf_180205_2381377266(&maindatinit_530151_3723162438, ((NimStringDesc*) &T839829468_662), LOC7, 1); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = init0; initcall0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_662), LOC8, 1); { if (!(((*m0).flags &(1U<<((NU)(((Tsymflag293184) 12))&31U)))!=0)) goto LA11; add_179482_2381377266(&mainmodinit_530149_3723162438, initcall0); } goto LA9; LA11: ; { add_179482_2381377266(&othermodsinit_530150_3723162438, initcall0); } LA9: ; } LA5: ; } N_NIMCALL(Ropeobj179006*, genfilenames_562688_839829468)(Tcgen530027* m0) { Ropeobj179006* result0; Ropeobj179006* LOC1; result0 = (Ropeobj179006*)0; LOC1 = (Ropeobj179006*)0; LOC1 = cgsym_533403_839829468(m0, ((NimStringDesc*) &T839829468_673)); result0 = NIM_NIL; { NI i_562717_839829468; NI HEX3Atmp_562722_839829468; NI res_562725_839829468; i_562717_839829468 = (NI)0; HEX3Atmp_562722_839829468 = (NI)0; HEX3Atmp_562722_839829468 = ((fileinfos_192629_155036129 ? 
fileinfos_192629_155036129->Sup.len : 0) - 1); res_562725_839829468 = ((NI) 0); { while (1) { TY179507 LOC5; if (!(res_562725_839829468 <= HEX3Atmp_562722_839829468)) goto LA4; i_562717_839829468 = res_562725_839829468; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = makecstring_192638_155036129(fileinfos_192629_155036129->data[i_562717_839829468].projpath); addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_674), LOC5, 1); res_562725_839829468 += ((NI) 1); } LA4: ; } } return result0; } N_NIMCALL(void, genmainproc_562729_839829468)(Tcgen530027* m0) { NimStringDesc* nimmain0; NimStringDesc* othermain0; Ropeobj179006* initstackbottomcall0; TY537475 LOC38; TY536238 LOC47; nimmain0 = (NimStringDesc*)0; othermain0 = (NimStringDesc*)0; { NIM_BOOL LOC3; NIM_BOOL LOC12; LOC3 = (NIM_BOOL)0; LOC3 = (targetos_177629_4151366050 == ((Tsystemos177004) 2)); if (!(LOC3)) goto LA4; LOC3 = !(((gglobaloptions_170130_2607990831 & 1280) == 0)); LA4: ; if (!LOC3) goto LA5; { if (!((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 10))&63U)))!=0)) goto LA9; nimmain0 = copyString(((NimStringDesc*) &T839829468_663)); othermain0 = copyString(((NimStringDesc*) &T839829468_664)); } goto LA7; LA9: ; { nimmain0 = copyString(((NimStringDesc*) &T839829468_665)); othermain0 = copyString(((NimStringDesc*) &T839829468_666)); } LA7: ; LOC12 = (NIM_BOOL)0; LOC12 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_667)); } goto LA1; LA5: ; { if (!((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 8))&63U)))!=0)) goto LA14; nimmain0 = copyString(((NimStringDesc*) &T839829468_665)); othermain0 = copyString(((NimStringDesc*) &T839829468_668)); } goto LA1; LA14: ; { if (!(targetos_177629_4151366050 == ((Tsystemos177004) 24))) goto LA17; nimmain0 = copyString(((NimStringDesc*) &T839829468_669)); othermain0 = copyString(((NimStringDesc*) &T839829468_670)); } goto LA1; LA17: ; { nimmain0 = copyString(((NimStringDesc*) 
&T839829468_669)); othermain0 = copyString(((NimStringDesc*) &T839829468_671)); } LA1: ; { Ropeobj179006* LOC24; if (!!((gbreakpoints_549861_839829468 == NIM_NIL))) goto LA22; LOC24 = (Ropeobj179006*)0; LOC24 = cgsym_533403_839829468(m0, ((NimStringDesc*) &T839829468_672)); } LA22: ; { Ropeobj179006* LOC29; if (!((goptions_170128_2607990831 &(1U<<((NU)(((Toption170009) 17))&31U)))!=0)) goto LA27; LOC29 = (Ropeobj179006*)0; LOC29 = genfilenames_562688_839829468(m0); add_179482_2381377266(&gbreakpoints_549861_839829468, LOC29); } LA27: ; { NIM_BOOL LOC32; LOC32 = (NIM_BOOL)0; LOC32 = (targetos_177629_4151366050 == ((Tsystemos177004) 24)); if (LOC32) goto LA33; LOC32 = (gselectedgc_170133_2607990831 == ((Tgcmode170080) 0)); LA33: ; if (!LOC32) goto LA34; initstackbottomcall0 = rope_179277_2381377266(((NimStringDesc*) &T839829468_490)); } goto LA30; LA34: ; { TY534289 LOC37; memset((void*)LOC37, 0, sizeof(LOC37)); initstackbottomcall0 = ropecg_533407_839829468(m0, ((NimStringDesc*) &T839829468_675), LOC37, 0); } LA30: ; (*m0).labels += ((NI) 1); memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = maindatinit_530151_3723162438; LOC38[1] = gbreakpoints_549861_839829468; LOC38[2] = othermodsinit_530150_3723162438; { NIM_BOOL LOC41; TY534289 LOC45; LOC41 = (NIM_BOOL)0; LOC41 = emulatedthreadvars_533949_839829468(); if (!(LOC41)) goto LA42; LOC41 = !((targetos_177629_4151366050 == ((Tsystemos177004) 24))); LA42: ; if (!LOC41) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC38[3] = ropecg_533407_839829468(m0, ((NimStringDesc*) &T839829468_677), LOC45, 0); } goto LA39; LA43: ; { LOC38[3] = rope_179277_2381377266(((NimStringDesc*) &T839829468_490)); } LA39: ; LOC38[4] = initstackbottomcall0; appcg_533632_839829468(m0, &(*m0).s[(((Tcfilesection530005) 10))- 0], ((NimStringDesc*) &T839829468_676), LOC38, 5); memset((void*)LOC47, 0, sizeof(LOC47)); LOC47[0] = mainmodinit_530149_3723162438; LOC47[1] = initstackbottomcall0; LOC47[2] = rope_179401_2381377266(((NI64) 
((*m0).labels))); appcg_533632_839829468(m0, &(*m0).s[(((Tcfilesection530005) 10))- 0], nimmain0, LOC47, 3); { TY534289 LOC52; if (!!(((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 20))&63U)))!=0))) goto LA50; memset((void*)LOC52, 0, sizeof(LOC52)); appcg_533632_839829468(m0, &(*m0).s[(((Tcfilesection530005) 10))- 0], othermain0, LOC52, 0); } LA50: ; } N_NIMCALL(Tnode293802*, myclose_564830_839829468)(Tpasscontext342002* b0, Tnode293802* n0) { Tnode293802* result0; Tcgen530027* m0; { result0 = (Tnode293802*)0; result0 = n0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (b0 == NIM_NIL); if (LOC3) goto LA4; LOC3 = skipcodegen_342085_2355241294(n0); LA4: ; if (!LOC3) goto LA5; goto BeforeRet; } LA5: ; m0 = ((Tcgen530027*) (b0)); { if (!!((n0 == NIM_NIL))) goto LA9; (*(*m0).initproc).options = initprocoptions_563635_839829468(m0); genstmts_540244_839829468((*m0).initproc, n0); } LA9: ; registermoduletomain_563243_839829468((*m0).module); { Tnode293802* disp0; if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 12))&31U)))!=0)) goto LA13; (*m0).flags |= ((NU8)1)<<((((Codegenflag530025) 5))%(sizeof(NU8)*8)); disp0 = generatemethoddispatchers_433151_3853300031(); { NI i_564891_839829468; NI HEX3Atmp_564895_839829468; NI LOC16; NI res_564898_839829468; i_564891_839829468 = (NI)0; HEX3Atmp_564895_839829468 = (NI)0; LOC16 = (NI)0; LOC16 = sonslen_296351_850551059(disp0); HEX3Atmp_564895_839829468 = (NI)(LOC16 - ((NI) 1)); res_564898_839829468 = ((NI) 0); { while (1) { if (!(res_564898_839829468 <= HEX3Atmp_564895_839829468)) goto LA18; i_564891_839829468 = res_564898_839829468; genprocaux_561284_839829468(m0, (*(*disp0).kindU.S6.sons->data[i_564891_839829468]).kindU.S4.sym); res_564898_839829468 += ((NI) 1); } LA18: ; } } genmainproc_562729_839829468(m0); } LA13: ; }BeforeRet: ; return result0; } N_NIMCALL(void, finishmodule_564420_839829468)(Tcgen530027* m0) { NI i0; i0 = ((NI) 0); { while (1) { Tsym293834* prc0; if (!(i0 <= 
((*m0).forwardedprocs ? ((*m0).forwardedprocs->Sup.len-1) : -1))) goto LA2; prc0 = (*m0).forwardedprocs->data[i0]; { NimStringDesc* LOC7; if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag293184) 4))&31U)))!=0)) goto LA5; LOC7 = (NimStringDesc*)0; LOC7 = rawNewString((*(*prc0).name).s->Sup.len + 17); appendString(LOC7, ((NimStringDesc*) &T839829468_678)); appendString(LOC7, (*(*prc0).name).s); internalerror_197100_155036129((*prc0).info, LOC7); } LA5: ; genprocnoforward_561906_839829468(m0, prc0); i0 += ((NI) 1); } LA2: ; } gforwardedprocscounter_530171_3723162438 -= i0; (*m0).forwardedprocs = (Tsymseq293804*) setLengthSeq(&((*m0).forwardedprocs)->Sup, sizeof(Tsym293834*), ((NI) 0)); } N_NIMCALL(void, geninitcode_563286_839829468)(Tcgen530027* m0) { Ropeobj179006* initname0; Ropeobj179006* prc0; TY179507 LOC1; Ropeobj179006* LOC12; Ropeobj179006* LOC13; Ropeobj179006** LOC14; Ropeobj179006** LOC15; Ropeobj179006** LOC16; Ropeobj179006* LOC17; Ropeobj179006* LOC33; Ropeobj179006** LOC34; Ropeobj179006** LOC35; Ropeobj179006** LOC36; Ropeobj179006* LOC37; Ropeobj179006* LOC38; Ropeobj179006** LOC39; Ropeobj179006** LOC40; Ropeobj179006** LOC41; Ropeobj179006* LOC42; Ropeobj179006* LOC50; TY534289 LOC51; TY179507 LOC52; TY534289 LOC58; initname0 = getinitname_563235_839829468((*m0).module); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = initname0; prc0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_679), LOC1, 1); { TY533811 LOC6; if (!(((NI) 0) < (*m0).typenodes)) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = (*m0).typenodesname; LOC6[1] = rope_179401_2381377266(((NI64) ((*m0).typenodes))); appcg_533632_839829468(m0, &(*m0).s[(((Tcfilesection530005) 12))- 0], ((NimStringDesc*) &T839829468_680), LOC6, 2); } LA4: ; { TY533811 LOC11; if (!(((NI) 0) < (*m0).nimtypes)) goto LA9; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = (*m0).nimtypesname; LOC11[1] = rope_179401_2381377266(((NI64) ((*m0).nimtypes))); appcg_533632_839829468(m0, 
&(*m0).s[(((Tcfilesection530005) 12))- 0], ((NimStringDesc*) &T839829468_681), LOC11, 2); } LA9: ; LOC12 = (Ropeobj179006*)0; LOC12 = initgcframe_539435_839829468((*m0).initproc); add_179482_2381377266(&prc0, LOC12); LOC13 = (Ropeobj179006*)0; LOC13 = gensectionstart_531081_2760143328(((Tcprocsection530011) 0)); add_179482_2381377266(&prc0, LOC13); LOC14 = (Ropeobj179006**)0; LOC14 = s_530179_3723162438((*m0).preinitproc, ((Tcprocsection530011) 0)); add_179482_2381377266(&prc0, (*LOC14)); LOC15 = (Ropeobj179006**)0; LOC15 = s_530179_3723162438((*m0).initproc, ((Tcprocsection530011) 0)); add_179482_2381377266(&prc0, (*LOC15)); LOC16 = (Ropeobj179006**)0; LOC16 = s_530179_3723162438((*m0).postinitproc, ((Tcprocsection530011) 0)); add_179482_2381377266(&prc0, (*LOC16)); LOC17 = (Ropeobj179006*)0; LOC17 = gensectionend_531116_2760143328(((Tcprocsection530011) 0)); add_179482_2381377266(&prc0, LOC17); { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption170009) 15))&31U)))!=0); if (!(LOC20)) goto LA21; LOC20 = !((((*m0).flags &(1U<<((NU)(((Codegenflag530025) 2))&7U)))!=0)); LA21: ; if (!LOC20) goto LA22; (*m0).flags |= ((NU8)1)<<((((Codegenflag530025) 2))%(sizeof(NU8)*8)); { Ropeobj179006* procname0; Ropeobj179006* LOC28; Ropeobj179006* LOC29; if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag530025) 0))&7U)))!=0))) goto LA26; procname0 = makecstring_192638_155036129((*(*(*m0).module).name).s); LOC28 = (Ropeobj179006*)0; LOC28 = quotedfilename_197818_155036129((*(*m0).module).info); LOC29 = (Ropeobj179006*)0; LOC29 = initframe_561140_839829468((*m0).initproc, procname0, LOC28); add_179482_2381377266(&prc0, LOC29); } goto LA24; LA26: ; { TY534289 LOC31; Ropeobj179006* LOC32; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ropeobj179006*)0; LOC32 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_682), LOC31, 0); add_179482_2381377266(&prc0, LOC32); } LA24: ; } LA22: ; LOC33 = (Ropeobj179006*)0; LOC33 = 
gensectionstart_531081_2760143328(((Tcprocsection530011) 1)); add_179482_2381377266(&prc0, LOC33); LOC34 = (Ropeobj179006**)0; LOC34 = s_530179_3723162438((*m0).preinitproc, ((Tcprocsection530011) 1)); add_179482_2381377266(&prc0, (*LOC34)); LOC35 = (Ropeobj179006**)0; LOC35 = s_530179_3723162438((*m0).initproc, ((Tcprocsection530011) 1)); add_179482_2381377266(&prc0, (*LOC35)); LOC36 = (Ropeobj179006**)0; LOC36 = s_530179_3723162438((*m0).postinitproc, ((Tcprocsection530011) 1)); add_179482_2381377266(&prc0, (*LOC36)); LOC37 = (Ropeobj179006*)0; LOC37 = gensectionend_531116_2760143328(((Tcprocsection530011) 1)); add_179482_2381377266(&prc0, LOC37); LOC38 = (Ropeobj179006*)0; LOC38 = gensectionstart_531081_2760143328(((Tcprocsection530011) 2)); add_179482_2381377266(&prc0, LOC38); LOC39 = (Ropeobj179006**)0; LOC39 = s_530179_3723162438((*m0).preinitproc, ((Tcprocsection530011) 2)); add_179482_2381377266(&prc0, (*LOC39)); LOC40 = (Ropeobj179006**)0; LOC40 = s_530179_3723162438((*m0).initproc, ((Tcprocsection530011) 2)); add_179482_2381377266(&prc0, (*LOC40)); LOC41 = (Ropeobj179006**)0; LOC41 = s_530179_3723162438((*m0).postinitproc, ((Tcprocsection530011) 2)); add_179482_2381377266(&prc0, (*LOC41)); LOC42 = (Ropeobj179006*)0; LOC42 = gensectionend_531116_2760143328(((Tcprocsection530011) 2)); add_179482_2381377266(&prc0, LOC42); { NIM_BOOL LOC45; Ropeobj179006* LOC49; LOC45 = (NIM_BOOL)0; LOC45 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption170009) 15))&31U)))!=0); if (!(LOC45)) goto LA46; LOC45 = !((((*m0).flags &(1U<<((NU)(((Codegenflag530025) 0))&7U)))!=0)); LA46: ; if (!LOC45) goto LA47; LOC49 = (Ropeobj179006*)0; LOC49 = deinitframe_561150_839829468((*m0).initproc); add_179482_2381377266(&prc0, LOC49); } LA47: ; LOC50 = (Ropeobj179006*)0; LOC50 = deinitgcframe_539441_839829468((*m0).initproc); add_179482_2381377266(&prc0, LOC50); memset((void*)LOC51, 0, sizeof(LOC51)); addf_180205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC51, 0); 
memset((void*)LOC52, 0, sizeof(LOC52)); LOC52[0] = getdatinitname_563239_839829468((*m0).module); addf_180205_2381377266(&prc0, ((NimStringDesc*) &T839829468_679), LOC52, 1); { Tcfilesection530005 i_563401_839829468; NI res_563482_839829468; i_563401_839829468 = (Tcfilesection530005)0; res_563482_839829468 = ((NI) 12); { while (1) { Ropeobj179006* LOC56; Ropeobj179006* LOC57; if (!(res_563482_839829468 <= ((NI) 16))) goto LA55; i_563401_839829468 = ((Tcfilesection530005) (res_563482_839829468)); LOC56 = (Ropeobj179006*)0; LOC56 = gensectionstart_531015_2760143328(i_563401_839829468); add_179482_2381377266(&prc0, LOC56); add_179482_2381377266(&prc0, (*m0).s[(i_563401_839829468)- 0]); LOC57 = (Ropeobj179006*)0; LOC57 = gensectionend_531050_2760143328(i_563401_839829468); add_179482_2381377266(&prc0, LOC57); res_563482_839829468 += ((NI) 1); } LA55: ; } } memset((void*)LOC58, 0, sizeof(LOC58)); addf_180205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC58, 0); add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 11))- 0], prc0); { NIM_CHAR i_563442_839829468; Ropeobj179006* el_563443_839829468; TY530136 HEX3Atmp_563487_839829468; NIM_CHAR i_563490_839829468; i_563442_839829468 = (NIM_CHAR)0; el_563443_839829468 = (Ropeobj179006*)0; memset((void*)HEX3Atmp_563487_839829468, 0, sizeof(HEX3Atmp_563487_839829468)); memcpy((void*)HEX3Atmp_563487_839829468, (NIM_CONST void*)(*m0).extensionloaders, sizeof(HEX3Atmp_563487_839829468)); i_563490_839829468 = 48; { if (!((NU8)(((NIM_CHAR) (((NU8)(i_563490_839829468))))) <= (NU8)(57))) goto LA62; { while (1) { i_563442_839829468 = i_563490_839829468; el_563443_839829468 = HEX3Atmp_563487_839829468[(((NU8)(i_563490_839829468)))- 48]; { Ropeobj179006* ex0; TY533811 LOC70; if (!!((el_563443_839829468 == NIM_NIL))) goto LA68; memset((void*)LOC70, 0, sizeof(LOC70)); LOC70[0] = rope_179401_2381377266(((NI64) ((NI)(((NI) (((NU8)(i_563442_839829468)))) - ((NI) 48))))); LOC70[1] = el_563443_839829468; ex0 = 
HEX25_179905_2381377266(((NimStringDesc*) &T839829468_684), LOC70, 2); add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 11))- 0], ex0); } LA68: ; { if (!((NU8)(57) <= (NU8)(((NIM_CHAR) (((NU8)(i_563490_839829468))))))) goto LA73; goto LA64; } LA73: ; i_563490_839829468 += ((NI) 1); } } LA64: ; } LA62: ; } } N_NIMCALL(void, finishtypedescriptions_536842_839829468)(Tcgen530027* m0) { NI i0; i0 = ((NI) 0); { while (1) { Ropeobj179006* LOC3; if (!(i0 < ((*m0).typestack ? (*m0).typestack->Sup.len : 0))) goto LA2; LOC3 = (Ropeobj179006*)0; LOC3 = gettypedesc_536671_839829468(m0, (*m0).typestack->data[i0]); i0 += ((NI) 1); } LA2: ; } } N_NIMCALL(Ropeobj179006*, getcopyright_562665_839829468)(NimStringDesc* cfile0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; { TY179507 LOC5; if (!((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 4))&63U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_179277_2381377266(((NimStringDesc*) &T839829468_686)); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_685), LOC5, 1); } goto LA1; LA3: ; { TY537475 LOC7; NimStringDesc* LOC8; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rope_179277_2381377266(((NimStringDesc*) &T839829468_686)); LOC7[1] = rope_179277_2381377266(Os_177068_4151366050[(targetos_177629_4151366050)- 1].Field0); LOC7[2] = rope_179277_2381377266(Cpu_177496_4151366050[(targetcpu_177627_4151366050)- 1].Field0); LOC7[3] = rope_179277_2381377266(Cc_274413_2528170400[(ccompiler_274431_2528170400)- 1].Field0); LOC8 = (NimStringDesc*)0; LOC8 = getcompilecfilecmd_275284_2528170400(cfile0, NIM_FALSE); LOC7[4] = rope_179277_2381377266(LOC8); result0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_687), LOC7, 5); } LA1: ; return result0; } static N_INLINE(void, addinttypes_562659_839829468)(Ropeobj179006** result0) { NimStringDesc* LOC1; TY179507 LOC2; LOC1 = (NimStringDesc*)0; LOC1 = rawNewString(tnl_177644_4151366050->Sup.len + 22); appendString(LOC1, 
((NimStringDesc*) &T839829468_688)); appendString(LOC1, tnl_177644_4151366050); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rope_179401_2381377266(((NI64) (Cpu_177496_4151366050[(targetcpu_177627_4151366050)- 1].Field1))); addf_180205_2381377266(result0, LOC1, LOC2, 1); } N_NIMCALL(Ropeobj179006*, getfileheader_562683_839829468)(NimStringDesc* cfile0) { Ropeobj179006* result0; result0 = (Ropeobj179006*)0; result0 = getcopyright_562665_839829468(cfile0); addinttypes_562659_839829468(&result0); return result0; } N_NIMCALL(void, generatethreadlocalstorage_539717_839829468)(Tcgen530027* m0) { { NIM_BOOL LOC3; NIM_BOOL LOC5; TY179507 LOC13; LOC3 = (NIM_BOOL)0; LOC3 = !((nimtv_539656_839829468 == NIM_NIL)); if (!(LOC3)) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = (((*m0).flags &(1U<<((NU)(((Codegenflag530025) 1))&7U)))!=0); if (LOC5) goto LA6; LOC5 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 12))&31U)))!=0); LA6: ; LOC3 = LOC5; LA4: ; if (!LOC3) goto LA7; { Ttype293840* t_539761_839829468; NI i_539768_839829468; NI L_539770_839829468; t_539761_839829468 = (Ttype293840*)0; i_539768_839829468 = ((NI) 0); L_539770_839829468 = (nimtvdeps_539674_839829468 ? 
nimtvdeps_539674_839829468->Sup.len : 0); { while (1) { Ropeobj179006* LOC12; if (!(i_539768_839829468 < L_539770_839829468)) goto LA11; t_539761_839829468 = nimtvdeps_539674_839829468->data[i_539768_839829468]; LOC12 = (Ropeobj179006*)0; LOC12 = gettypedesc_536671_839829468(m0, t_539761_839829468); i_539768_839829468 += ((NI) 1); } LA11: ; } } memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = nimtv_539656_839829468; addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 4))- 0], ((NimStringDesc*) &T839829468_689), LOC13, 1); } LA7: ; } N_NIMCALL(void, generateheaders_561104_839829468)(Tcgen530027* m0) { NimStringDesc* LOC1; Tstrentry147009* it0; LOC1 = (NimStringDesc*)0; LOC1 = rawNewString(tnl_177644_4151366050->Sup.len + tnl_177644_4151366050->Sup.len + 20); appendString(LOC1, tnl_177644_4151366050); appendString(LOC1, ((NimStringDesc*) &T839829468_690)); appendString(LOC1, tnl_177644_4151366050); add_179487_2381377266(&(*m0).s[(((Tcfilesection530005) 1))- 0], LOC1); it0 = ((Tstrentry147009*) ((*m0).headerfiles.head)); { while (1) { if (!!((it0 == NIM_NIL))) goto LA3; { NimStringDesc* LOC8; NimStringDesc* LOC9; Ropeobj179006* LOC10; if (!((NU8)((*it0).data->data[((NI) 0)]) == (NU8)(35))) goto LA6; LOC8 = (NimStringDesc*)0; LOC9 = (NimStringDesc*)0; LOC9 = nsuReplaceChar((*it0).data, 96, 34); LOC8 = rawNewString(LOC9->Sup.len + tnl_177644_4151366050->Sup.len + 0); appendString(LOC8, LOC9); appendString(LOC8, tnl_177644_4151366050); LOC10 = (Ropeobj179006*)0; LOC10 = rope_179277_2381377266(LOC8); add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 1))- 0], LOC10); } goto LA4; LA6: ; { TY179507 LOC14; if (!!((((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(34)) || ((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(60))))) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rope_179277_2381377266((*it0).data); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 1))- 0], ((NimStringDesc*) &T839829468_691), LOC14, 1); } goto LA4; LA12: ; { TY179507 
/* Generated code (Nim C backend), continuation — do not edit by hand.
 * Tail of generateheaders: the fallback include case (plain header names)
 * formatted via T839829468_692.
 * genmodule: assembles the whole .c file rope — file header, merge info,
 * thread-local storage, headers, then sections 1..10 each wrapped in
 * gensectionstart/gensectionend markers, and finally section 11; returns the
 * combined rope.
 * updatecachedmodule (start): handles a module restored from the compilation
 * cache; the merge/rewrite path continues on the next physical line. */
LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rope_179277_2381377266((*it0).data); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 1))- 0], ((NimStringDesc*) &T839829468_692), LOC16, 1); } LA4: ; it0 = ((Tstrentry147009*) ((*it0).Sup.next)); } LA3: ; } } N_NIMCALL(Ropeobj179006*, genmodule_563491_839829468)(Tcgen530027* m0, NimStringDesc* cfile0) { Ropeobj179006* result0; Ropeobj179006* LOC1; result0 = (Ropeobj179006*)0; result0 = getfileheader_562683_839829468(cfile0); LOC1 = (Ropeobj179006*)0; LOC1 = genmergeinfo_531203_2760143328(m0); add_179482_2381377266(&result0, LOC1); generatethreadlocalstorage_539717_839829468(m0); generateheaders_561104_839829468(m0); { Tcfilesection530005 i_563614_839829468; NI res_563622_839829468; i_563614_839829468 = (Tcfilesection530005)0; res_563622_839829468 = ((NI) 1); { while (1) { Ropeobj179006* LOC5; Ropeobj179006* LOC6; if (!(res_563622_839829468 <= ((NI) 10))) goto LA4; i_563614_839829468 = ((Tcfilesection530005) (res_563622_839829468)); LOC5 = (Ropeobj179006*)0; LOC5 = gensectionstart_531015_2760143328(i_563614_839829468); add_179482_2381377266(&result0, LOC5); add_179482_2381377266(&result0, (*m0).s[(i_563614_839829468)- 0]); LOC6 = (Ropeobj179006*)0; LOC6 = gensectionend_531050_2760143328(i_563614_839829468); add_179482_2381377266(&result0, LOC6); res_563622_839829468 += ((NI) 1); } LA4: ; } } add_179482_2381377266(&result0, (*m0).s[(((Tcfilesection530005) 11))- 0]); return result0; } N_NIMCALL(void, updatecachedmodule_564813_839829468)(Tcgen530027* m0) { NimStringDesc* cfile0; NimStringDesc* cfilenoext0; cfile0 = getcfile_564204_839829468(m0); cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490)); { NIM_BOOL LOC3; Ropeobj179006* code0; LOC3 = (NIM_BOOL)0; LOC3 = mergerequired_531832_2760143328(m0); if (!(LOC3)) goto LA4; LOC3 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 12))&31U)))!=0)); LA4: ; if (!LOC3) goto LA5; mergefiles_532241_2760143328(cfile0, m0);
/* Generated code (Nim C backend), continuation — do not edit by hand.
 * Tail of updatecachedmodule: when a merge is required and the module is not
 * flagged via Tsymflag 12 (presumably "is main module" — confirm in cgen.nim),
 * regenerate init code and type descriptions, rewrite the .c file and queue it
 * for compilation; the object file is always queued for linking.
 * generatethreadvarssize: emits the thread-vars size helper into section 10,
 * choosing an extern prefix string based on gcmd and module symbol flag 27
 * (looks like a C vs C++ linkage choice — verify against cgen.nim).
 * shouldrecompile: returns NIM_TRUE unless (force-rebuild option off) the
 * written rope is byte-unchanged AND an object file newer than the .c exists.
 * writemodule (start): signature only; body on the next physical line. */
geninitcode_563286_839829468(m0); finishtypedescriptions_536842_839829468(m0); code0 = genmodule_563491_839829468(m0, cfile0); writerope_179836_2381377266(code0, cfile0, NIM_FALSE); addfiletocompile_274863_2528170400(cfile0); } LA5: ; addfiletolink_274872_2528170400(cfilenoext0); } N_NIMCALL(void, generatethreadvarssize_539771_839829468)(Tcgen530027* m0) { { NimStringDesc* externc0; TY179507 LOC12; if (!!((nimtv_539656_839829468 == NIM_NIL))) goto LA3; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = !((gcmd_170132_2607990831 == ((Tcommands170076) 2))); if (!(LOC7)) goto LA8; LOC7 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 27))&31U)))!=0); LA8: ; if (!LOC7) goto LA9; externc0 = copyString(((NimStringDesc*) &T839829468_693)); } goto LA5; LA9: ; { externc0 = copyString(((NimStringDesc*) &T839829468_490)); } LA5: ; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rope_179277_2381377266(externc0); addf_180205_2381377266(&(*m0).s[(((Tcfilesection530005) 10))- 0], ((NimStringDesc*) &T839829468_694), LOC12, 1); } LA3: ; } N_NIMCALL(NIM_BOOL, shouldrecompile_564621_839829468)(Ropeobj179006* code0, NimStringDesc* cfile0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; result0 = NIM_TRUE; { NimStringDesc* objfile0; if (!!(((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 1))&63U)))!=0))) goto LA3; objfile0 = toobjfile_274859_2528170400(cfile0); { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = writeropeifnotequal_180511_2381377266(code0, cfile0); if (!LOC7) goto LA8; goto BeforeRet; } LA8: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = nosexistsFile(objfile0); if (!(LOC12)) goto LA13; LOC12 = nosfileNewer(objfile0, cfile0); LA13: ; if (!LOC12) goto LA14; result0 = NIM_FALSE; } LA14: ; } goto LA1; LA3: ; { writerope_179836_2381377266(code0, cfile0, NIM_FALSE); } LA1: ; }BeforeRet: ; return result0; } N_NIMCALL(void, writemodule_564637_839829468)(Tcgen530027* m0, NIM_BOOL pending0) { NimStringDesc* cfile0; NimStringDesc* cfilenoext0; cfile0 =
/* Generated code (Nim C backend), continuation — do not edit by hand.
 * writemodule body, three paths:
 *  (a) module not restored from cache (or force-rebuild option set):
 *      regenerate init code/type descs; for the module with symbol flag 12
 *      also append mainmodprocs and the thread-vars size helper; then queue
 *      for compilation only if shouldrecompile says so;
 *  (b) pending merge of a cached non-main module: merge, regenerate, always
 *      rewrite and recompile;
 *  (c) otherwise: queue the .c only when no object file exists yet.
 * In every case the object (cfilenoext) is queued for linking.
 * writeheader (start): locals only; body continues on the next line. */
getcfile_564204_839829468(m0); cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490)); { NIM_BOOL LOC3; Ropeobj179006* code0; LOC3 = (NIM_BOOL)0; LOC3 = !((*m0).Sup.fromcache); if (LOC3) goto LA4; LOC3 = ((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 1))&63U)))!=0); LA4: ; if (!LOC3) goto LA5; geninitcode_563286_839829468(m0); finishtypedescriptions_536842_839829468(m0); { if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 12))&31U)))!=0)) goto LA9; add_179482_2381377266(&(*m0).s[(((Tcfilesection530005) 7))- 0], mainmodprocs_530148_3723162438); generatethreadvarssize_539771_839829468(m0); } LA9: ; code0 = genmodule_563491_839829468(m0, cfile0); { NIM_BOOL LOC13; LOC13 = (NIM_BOOL)0; LOC13 = shouldrecompile_564621_839829468(code0, cfile0); if (!LOC13) goto LA14; addfiletocompile_274863_2528170400(cfile0); } LA14: ; } goto LA1; LA5: ; { NIM_BOOL LOC17; NIM_BOOL LOC18; Ropeobj179006* code0; LOC17 = (NIM_BOOL)0; LOC18 = (NIM_BOOL)0; LOC18 = pending0; if (!(LOC18)) goto LA19; LOC18 = mergerequired_531832_2760143328(m0); LA19: ; LOC17 = LOC18; if (!(LOC17)) goto LA20; LOC17 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 12))&31U)))!=0)); LA20: ; if (!LOC17) goto LA21; mergefiles_532241_2760143328(cfile0, m0); geninitcode_563286_839829468(m0); finishtypedescriptions_536842_839829468(m0); code0 = genmodule_563491_839829468(m0, cfile0); writerope_179836_2381377266(code0, cfile0, NIM_FALSE); addfiletocompile_274863_2528170400(cfile0); } goto LA1; LA21: ; { NimStringDesc* LOC24; NIM_BOOL LOC25; LOC24 = (NimStringDesc*)0; LOC24 = toobjfile_274859_2528170400(cfilenoext0); LOC25 = (NIM_BOOL)0; LOC25 = nosexistsFile(LOC24); if (!!(LOC25)) goto LA26; addfiletocompile_274863_2528170400(cfile0); } goto LA1; LA26: ; LA1: ; addfiletolink_274872_2528170400(cfilenoext0); } N_NIMCALL(void, writeheader_564152_839829468)(Tcgen530027* m0) { Ropeobj179006* result0; Ropeobj179006* guard0; TY179507 LOC1; TY128506 LOC2; TY179507
/* Generated code (Nim C backend), continuation — do not edit by hand.
 * writeheader: emits the generated C header file — copyright banner, an
 * include-guard macro derived from the file's basename (nossplitFile +
 * HEX25/`%` formatting), integer typedefs, includes, TLS block, sections
 * 1..10 wrapped in start/end markers, section 11, an optional extra trailer
 * string when global option 8 is set (contents of T839829468_22 not visible
 * here), and the closing guard; finally writes the rope to m.filename.
 * cgenwritemodules (start): first finishes the generatedheader module if one
 * exists (continues on the next physical line). */
LOC3; TY534289 LOC13; TY179507 LOC14; result0 = getcopyright_562665_839829468((*m0).filename); memset((void*)LOC1, 0, sizeof(LOC1)); memset((void*)(&LOC2), 0, sizeof(LOC2)); nossplitFile((*m0).filename, (&LOC2)); LOC1[0] = rope_179277_2381377266(LOC2.Field1); guard0 = HEX25_179905_2381377266(((NimStringDesc*) &T839829468_695), LOC1, 1); memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = guard0; addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_696), LOC3, 1); addinttypes_562659_839829468(&result0); generateheaders_561104_839829468(m0); generatethreadlocalstorage_539717_839829468(m0); { Tcfilesection530005 i_564174_839829468; NI res_564200_839829468; i_564174_839829468 = (Tcfilesection530005)0; res_564200_839829468 = ((NI) 1); { while (1) { Ropeobj179006* LOC7; Ropeobj179006* LOC8; if (!(res_564200_839829468 <= ((NI) 10))) goto LA6; i_564174_839829468 = ((Tcfilesection530005) (res_564200_839829468)); LOC7 = (Ropeobj179006*)0; LOC7 = gensectionstart_531015_2760143328(i_564174_839829468); add_179482_2381377266(&result0, LOC7); add_179482_2381377266(&result0, (*m0).s[(i_564174_839829468)- 0]); LOC8 = (Ropeobj179006*)0; LOC8 = gensectionend_531050_2760143328(i_564174_839829468); add_179482_2381377266(&result0, LOC8); res_564200_839829468 += ((NI) 1); } LA6: ; } } add_179482_2381377266(&result0, (*m0).s[(((Tcfilesection530005) 11))- 0]); { if (!((gglobaloptions_170130_2607990831 &((NU64)1<<((NU)(((Tglobaloption170013) 8))&63U)))!=0)) goto LA11; add_179487_2381377266(&result0, ((NimStringDesc*) &T839829468_22)); } LA11: ; memset((void*)LOC13, 0, sizeof(LOC13)); addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_697), LOC13, 0); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = guard0; addf_180205_2381377266(&result0, ((NimStringDesc*) &T839829468_698), LOC14, 1); writerope_179836_2381377266(result0, (*m0).filename, NIM_FALSE); } N_NIMCALL(void, cgenwritemodules_564902_839829468)(void) { { if (!!((generatedheader_533201_839829468 == NIM_NIL))) goto
/* Generated code (Nim C backend), continuation — do not edit by hand.
 * cgenwritemodules, first phase: while gforwardedprocscounter > 0, re-run
 * finishmodule over every non-nil, non-cached module in gmodules so that late
 * forward-declared procs get code generated. The second phase (write/update
 * each module) starts here and continues on the next physical line. */
LA3; finishmodule_564420_839829468(generatedheader_533201_839829468); } LA3: ; { while (1) { if (!(((NI) 0) < gforwardedprocscounter_530171_3723162438)) goto LA6; { Tcgen530027* m_564916_839829468; m_564916_839829468 = (Tcgen530027*)0; { NI i_564935_839829468; NI HEX3Atmp_564937_839829468; NI res_564939_839829468; i_564935_839829468 = (NI)0; HEX3Atmp_564937_839829468 = (NI)0; HEX3Atmp_564937_839829468 = (gmodules_530170_3723162438 ? (gmodules_530170_3723162438->Sup.len-1) : -1); res_564939_839829468 = ((NI) 0); { while (1) { if (!(res_564939_839829468 <= HEX3Atmp_564937_839829468)) goto LA10; i_564935_839829468 = res_564939_839829468; { if (!!((gmodules_530170_3723162438->data[i_564935_839829468] == NIM_NIL))) goto LA13; m_564916_839829468 = gmodules_530170_3723162438->data[i_564935_839829468]; { if (!!((*m_564916_839829468).Sup.fromcache)) goto LA17; finishmodule_564420_839829468(m_564916_839829468); } LA17: ; } LA13: ; res_564939_839829468 += ((NI) 1); } LA10: ; } } } } LA6: ; } { Tcgen530027* m_564917_839829468; m_564917_839829468 = (Tcgen530027*)0; { NI i_564946_839829468; NI HEX3Atmp_564948_839829468; NI res_564950_839829468; i_564946_839829468 = (NI)0; HEX3Atmp_564948_839829468 = (NI)0; HEX3Atmp_564948_839829468 = (gmodules_530170_3723162438 ?
/* Generated code (Nim C backend), continuation — do not edit by hand.
 * Finishes cgenwritemodules: cached modules go through updatecachedmodule,
 * all others through writemodule(pending=NIM_TRUE); then the mapping file is
 * written and, if a generated header was requested, writeheader runs.
 * nullify (section overload): clears rope slots 0..17 of a module's section
 * array via unsureAsgnRef.
 * nullify (extension-loader overload): clears the per-digit '0'..'9' (ASCII
 * 48..57) loader slots.
 * resetmodule (start): begins re-initializing a cached module's
 * per-compilation state (continues on the next physical line). */
(gmodules_530170_3723162438->Sup.len-1) : -1); res_564950_839829468 = ((NI) 0); { while (1) { if (!(res_564950_839829468 <= HEX3Atmp_564948_839829468)) goto LA22; i_564946_839829468 = res_564950_839829468; { if (!!((gmodules_530170_3723162438->data[i_564946_839829468] == NIM_NIL))) goto LA25; m_564917_839829468 = gmodules_530170_3723162438->data[i_564946_839829468]; { if (!(*m_564917_839829468).Sup.fromcache) goto LA29; updatecachedmodule_564813_839829468(m_564917_839829468); } goto LA27; LA29: ; { writemodule_564637_839829468(m_564917_839829468, NIM_TRUE); } LA27: ; } LA25: ; res_564950_839829468 += ((NI) 1); } LA22: ; } } } writemapping_275789_2528170400(gmapping_530152_3723162438); { if (!!((generatedheader_533201_839829468 == NIM_NIL))) goto LA34; writeheader_564152_839829468(generatedheader_533201_839829468); } LA34: ; } N_NIMCALL(void, nullify_563833_839829468)(Ropeobj179006** arr0) { { Tcfilesection530005 i_563848_839829468; NI res_563853_839829468; i_563848_839829468 = (Tcfilesection530005)0; res_563853_839829468 = ((NI) 0); { while (1) { if (!(res_563853_839829468 <= ((NI) 17))) goto LA3; i_563848_839829468 = ((Tcfilesection530005) (res_563853_839829468)); unsureAsgnRef((void**) (&arr0[(i_563848_839829468)- 0]), NIM_NIL); res_563853_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(void, nullify_563858_839829468)(Ropeobj179006** arr0) { { NIM_CHAR i_564014_839829468; NI res_564019_839829468; i_564014_839829468 = (NIM_CHAR)0; res_564019_839829468 = ((NI) 48); { while (1) { if (!(res_564019_839829468 <= ((NI) 57))) goto LA3; i_564014_839829468 = ((NIM_CHAR) (res_564019_839829468)); unsureAsgnRef((void**) (&arr0[(((NU8)(i_564014_839829468)))- 48]), NIM_NIL); res_564019_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(void, resetmodule_563763_839829468)(Tcgen530027* m0) { initlinkedlist_147031_3771138726((&(*m0).headerfiles)); initintset_269885_2627731572((&(*m0).declaredprotos)); initidtable_297019_850551059((&(*m0).forwtypecache)); asgnRef((void**)
/* Generated code (Nim C backend), continuation — do not edit by hand.
 * resetmodule (continued): installs fresh init/preinit/postinit procs, empty
 * data cache, new type/forwarded-proc seqs (unref old ones first), new temp
 * names for typenodes/nimtypes; sets or clears codegen flag 0 depending on
 * the module's symbol flag 13; nulls all section ropes and extension-loader
 * slots; resets counters and marks the module as fromcache.
 * resetcgenmodules (start): begins iterating gmodules (continues on the next
 * physical line). */
(&(*m0).initproc), newproc_530206_3723162438(NIM_NIL, m0)); (*(*m0).initproc).options = initprocoptions_563635_839829468(m0); asgnRef((void**) (&(*m0).preinitproc), newpreinitproc_563625_839829468(m0)); asgnRef((void**) (&(*m0).postinitproc), newpostinitproc_563630_839829468(m0)); initnodetable_297085_850551059((&(*m0).datacache)); if ((*m0).typestack) nimGCunrefNoCycle((*m0).typestack); (*m0).typestack = (Ttypeseq293836*) newSeqRC1((&NTI293836), 0); if ((*m0).forwardedprocs) nimGCunrefNoCycle((*m0).forwardedprocs); (*m0).forwardedprocs = (Tsymseq293804*) newSeqRC1((&NTI293804), 0); asgnRefNoCycle((void**) (&(*m0).typenodesname), gettempname_534596_839829468(m0)); asgnRefNoCycle((void**) (&(*m0).nimtypesname), gettempname_534596_839829468(m0)); { if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag293184) 13))&31U)))!=0)) goto LA3; (*m0).flags |= ((NU8)1)<<((((Codegenflag530025) 0))%(sizeof(NU8)*8)); } goto LA1; LA3: ; { (*m0).flags &= ~(((NU8)1) << ((((Codegenflag530025) 0)) % (sizeof(NU8)*8))); } LA1: ; nullify_563833_839829468((*m0).s); (*m0).typenodes = ((NI) 0); (*m0).nimtypes = ((NI) 0); nullify_563858_839829468((*m0).extensionloaders); (*m0).Sup.fromcache = NIM_TRUE; } N_NIMCALL(void, resetcgenmodules_564024_839829468)(void) { { Tcgen530027* m_564026_839829468; m_564026_839829468 = (Tcgen530027*)0; { NI i_564031_839829468; NI HEX3Atmp_564033_839829468; NI res_564035_839829468; i_564031_839829468 = (NI)0; HEX3Atmp_564033_839829468 = (NI)0; HEX3Atmp_564033_839829468 = (gmodules_530170_3723162438 ?
/* Generated code (Nim C backend), continuation — do not edit by hand.
 * Finishes resetcgenmodules: calls resetmodule on every non-nil entry of
 * gmodules.
 * compiler_cgenInit000: module-init hook — registers GC markers for the
 * file-scope globals, seeds the indent rope, replaces nimtvdeps with an empty
 * seq, resets the nimtvdeclared int-set and zeroes breakpointid.
 * compiler_cgenDatInit000: intentionally empty (no data-section init). */
(gmodules_530170_3723162438->Sup.len-1) : -1); res_564035_839829468 = ((NI) 0); { while (1) { if (!(res_564035_839829468 <= HEX3Atmp_564033_839829468)) goto LA4; i_564031_839829468 = res_564035_839829468; { if (!!((gmodules_530170_3723162438->data[i_564031_839829468] == NIM_NIL))) goto LA7; m_564026_839829468 = gmodules_530170_3723162438->data[i_564031_839829468]; resetmodule_563763_839829468(m_564026_839829468); } LA7: ; res_564035_839829468 += ((NI) 1); } LA4: ; } } } NIM_EXTERNC N_NOINLINE(void, compiler_cgenInit000)(void) { nimRegisterGlobalMarker(T839829468_2); nimRegisterGlobalMarker(T839829468_3); nimRegisterGlobalMarker(T839829468_5); nimRegisterGlobalMarker(T839829468_6); nimRegisterGlobalMarker(T839829468_7); nimRegisterGlobalMarker(T839829468_8); asgnRefNoCycle((void**) (&indent_533655_839829468), rope_179277_2381377266(((NimStringDesc*) &T839829468_4))); if (nimtvdeps_539674_839829468) nimGCunrefNoCycle(nimtvdeps_539674_839829468); nimtvdeps_539674_839829468 = (Ttypeseq293836*) newSeqRC1((&NTI293836), 0); chckNil((void*)(&nimtvdeclared_539675_839829468)); genericReset((void*)(&nimtvdeclared_539675_839829468), (&NTI269030)); initintset_269885_2627731572((&nimtvdeclared_539675_839829468)); breakpointid_549860_839829468 = ((NI) 0); } NIM_EXTERNC N_NOINLINE(void, compiler_cgenDatInit000)(void) { }
// ---- Sema.h (start of next concatenated file: clang Sema header) ----
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include 
"llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class 
LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class 
DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. 
The type is tied to /// a particular token, all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. 
llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. 
DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
  /// Possible values are 0, 1, and 2, which mean:
  ///
  /// 0: Suppress all vtordisps
  /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
  ///    structors
  /// 2: Always insert vtordisps to support RTTI on partially constructed
  ///    objects
  PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;

  // #pragma pack.
  // Sentinel to represent when the stack is set to mac68k alignment.
  static const unsigned kMac68kAlignmentSentinel = ~0U;
  PragmaStack<unsigned> PackStack;

  // The current #pragma pack values and locations at each #include.
  struct PackIncludeState {
    unsigned CurrentValue;
    SourceLocation CurrentPragmaLocation;
    bool HasNonDefaultValue, ShouldWarnOnInclude;
  };
  SmallVector<PackIncludeState, 8> PackIncludeStack;

  // Segment #pragmas.
  PragmaStack<StringLiteral *> DataSegStack;
  PragmaStack<StringLiteral *> BSSSegStack;
  PragmaStack<StringLiteral *> ConstSegStack;
  PragmaStack<StringLiteral *> CodeSegStack;

  // RAII object to push / pop sentinel slots for all MS #pragma stacks.
  // Actions should be performed only if we enter / exit a C++ method body.
  class PragmaStackSentinelRAII {
  public:
    PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
    ~PragmaStackSentinelRAII();

  private:
    Sema &S;
    StringRef SlotLabel;
    bool ShouldAct;
  };

  /// A mapping that describes the nullability we've seen in each header file.
  FileNullabilityMap NullabilityMap;

  /// Last section used with #pragma init_seg.
  StringLiteral *CurInitSeg;
  SourceLocation CurInitSegLoc;

  /// VisContext - Manages the stack for \#pragma GCC visibility.
  void *VisContext; // Really a "PragmaVisStack*"

  /// This an attribute introduced by \#pragma clang attribute.
  struct PragmaAttributeEntry {
    SourceLocation Loc;
    ParsedAttr *Attribute;
    SmallVector<attr::SubjectMatchRule, 4> MatchRules;
    bool IsUsed;
  };

  /// A push'd group of PragmaAttributeEntries.
  struct PragmaAttributeGroup {
    /// The location of the push attribute.
    SourceLocation Loc;
    /// The namespace of this push group.
    const IdentifierInfo *Namespace;
    SmallVector<PragmaAttributeEntry, 2> Entries;
  };

  SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;

  /// The declaration that is currently receiving an attribute from the
  /// #pragma attribute stack.
  const Decl *PragmaAttributeCurrentTargetDecl;

  /// This represents the last location of a "#pragma clang optimize off"
  /// directive if such a directive has not been closed by an "on" yet. If
  /// optimizations are currently "on", this is set to an invalid location.
  SourceLocation OptimizeOffPragmaLocation;

  /// Flag indicating if Sema is building a recovery call expression.
  ///
  /// This flag is used to avoid building recovery call expressions
  /// if Sema is already doing so, which would cause infinite recursions.
  bool IsBuildingRecoveryCallExpr;

  /// Used to control the generation of ExprWithCleanups.
  CleanupInfo Cleanup;

  /// ExprCleanupObjects - This is the stack of objects requiring
  /// cleanup that are created by the current full expression. The
  /// element type here is ExprWithCleanups::Object.
  SmallVector<BlockDecl*, 8> ExprCleanupObjects;

  /// Store a set of either DeclRefExprs or MemberExprs that contain a reference
  /// to a variable (constant) that may or may not be odr-used in this Expr, and
  /// we won't know until all lvalue-to-rvalue and discarded value conversions
  /// have been applied to all subexpressions of the enclosing full expression.
  /// This is cleared at the end of each full expression.
  using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
  MaybeODRUseExprSet MaybeODRUseExprs;

  // NOTE(review): appears to cache a FunctionScopeInfo for reuse between
  // function bodies; confirm against its use in Sema.cpp.
  std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;

  /// Stack containing information about each of the nested
  /// function, block, and method scopes that are currently active.
  SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

  typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
    ExtVectorDeclsType;

  /// ExtVectorDecls - This is a list of all the extended vector types.
  /// This allows us to associate a raw vector type with one of the
  /// ext_vector type names. This is only necessary for issuing pretty
  /// diagnostics.
  ExtVectorDeclsType ExtVectorDecls;

  /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
  std::unique_ptr<CXXFieldCollector> FieldCollector;

  typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;

  /// Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// Delete-expressions to be analyzed at the end of translation unit
  ///
  /// This list contains class members, and locations of delete-expressions
  /// that could not be proven as to whether they mismatch with new-expression
  /// used in initializer of the field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations which we have
  /// emitted a list of pure virtual functions. Used to prevent emitting the
  /// same list more than once.
  std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

  /// ParsingInitForAutoVars - a set of declarations with auto types for which
  /// we are currently parsing the initializer.
  llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

  /// Look for a locally scoped extern "C" declaration by the given name.
  NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

  typedef LazyVector<VarDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
    TentativeDefinitionsType;

  /// All the tentative definitions encountered in the TU.
  TentativeDefinitionsType TentativeDefinitions;

  typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
    UnusedFileScopedDeclsType;

  /// The set of file scoped decls seen so far that have not been used
  /// and must warn if not used. Only contains the first declaration.
  UnusedFileScopedDeclsType UnusedFileScopedDecls;

  typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
    DelegatingCtorDeclsType;

  /// All the delegating constructors seen so far in the file, used for
  /// cycle detection at the end of the TU.
  DelegatingCtorDeclsType DelegatingCtorDecls;

  /// All the overriding functions seen during a class definition
  /// that had their exception spec checks delayed, plus the overridden
  /// function.
  SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
    DelayedOverridingExceptionSpecChecks;

  /// All the function redeclarations seen during a class definition that had
  /// their exception spec checks delayed, plus the prior declaration they
  /// should be checked against. Except during error recovery, the new decl
  /// should always be a friend declaration, as that's the only valid way to
  /// redeclare a special member before its class is complete.
  SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
    DelayedEquivalentExceptionSpecChecks;

  typedef llvm::MapVector<const FunctionDecl *,
                          std::unique_ptr<LateParsedTemplate>>
      LateParsedTemplateMapT;
  LateParsedTemplateMapT LateParsedTemplateMap;

  /// Callback to the parser to parse templated functions when needed.
  typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
  typedef void LateTemplateParserCleanupCB(void *P);
  LateTemplateParserCB *LateTemplateParser;
  LateTemplateParserCleanupCB *LateTemplateParserCleanup;
  void *OpaqueParser;

  /// Register the late-template-parsing callbacks, together with the opaque
  /// parser pointer that is handed back to them.
  void SetLateTemplateParser(LateTemplateParserCB *LTP,
                             LateTemplateParserCleanupCB *LTPCleanup,
                             void *P) {
    LateTemplateParser = LTP;
    LateTemplateParserCleanup = LTPCleanup;
    OpaqueParser = P;
  }

  /// \brief Callback to the parser to parse a type expressed as a string.
  std::function<TypeResult(StringRef, StringRef, SourceLocation)>
      ParseTypeFromStringCallback;

  class DelayedDiagnostics;

  /// Opaque token returned by DelayedDiagnostics::push*() and consumed by
  /// the matching pop*() call; holds the previously active pool.
  class DelayedDiagnosticsState {
    sema::DelayedDiagnosticPool *SavedPool;
    friend class Sema::DelayedDiagnostics;
  };
  typedef DelayedDiagnosticsState ParsingDeclState;
  typedef DelayedDiagnosticsState ProcessingContextState;

  /// A class which encapsulates the logic for delaying diagnostics
  /// during parsing and other processing.
  class DelayedDiagnostics {
    /// The current pool of diagnostics into which delayed
    /// diagnostics should go.
    sema::DelayedDiagnosticPool *CurPool;

  public:
    DelayedDiagnostics() : CurPool(nullptr) {}

    /// Adds a delayed diagnostic.
    void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

    /// Determines whether diagnostics should be delayed.
    bool shouldDelayDiagnostics() { return CurPool != nullptr; }

    /// Returns the current delayed-diagnostics pool.
    sema::DelayedDiagnosticPool *getCurrentPool() const {
      return CurPool;
    }

    /// Enter a new scope. Access and deprecation diagnostics will be
    /// collected in this pool.
    DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
      DelayedDiagnosticsState state;
      state.SavedPool = CurPool;
      CurPool = &pool;
      return state;
    }

    /// Leave a delayed-diagnostic state that was previously pushed.
    /// Do not emit any of the diagnostics. This is performed as part
    /// of the bookkeeping of popping a pool "properly".
    void popWithoutEmitting(DelayedDiagnosticsState state) {
      CurPool = state.SavedPool;
    }

    /// Enter a new scope where access and deprecation diagnostics are
    /// not delayed.
    DelayedDiagnosticsState pushUndelayed() {
      DelayedDiagnosticsState state;
      state.SavedPool = CurPool;
      CurPool = nullptr;
      return state;
    }

    /// Undo a previous pushUndelayed().
    void popUndelayed(DelayedDiagnosticsState state) {
      assert(CurPool == nullptr);
      CurPool = state.SavedPool;
    }
  } DelayedDiagnostics;

  /// A RAII object to temporarily push a declaration context.
  class ContextRAII {
  private:
    Sema &S;
    DeclContext *SavedContext;
    ProcessingContextState SavedContextState;
    QualType SavedCXXThisTypeOverride;

  public:
    ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
        : S(S), SavedContext(S.CurContext),
          SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
          SavedCXXThisTypeOverride(S.CXXThisTypeOverride) {
      assert(ContextToPush && "pushing null context");
      S.CurContext = ContextToPush;
      if (NewThisContext)
        S.CXXThisTypeOverride = QualType();
    }

    /// Restore the saved context early; subsequent calls (and the
    /// destructor) become no-ops.
    void pop() {
      if (!SavedContext) return;
      S.CurContext = SavedContext;
      S.DelayedDiagnostics.popUndelayed(SavedContextState);
      S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
      SavedContext = nullptr;
    }

    ~ContextRAII() {
      pop();
    }
  };

  /// Used to change context to isConstantEvaluated without pushing a heavy
  /// ExpressionEvaluationContextRecord object.
  bool isConstantEvaluatedOverride;

  /// Whether the innermost expression evaluation context (or the override
  /// flag above) says we are in a constant-evaluated context.
  bool isConstantEvaluated() {
    return ExprEvalContexts.back().isConstantEvaluated() ||
           isConstantEvaluatedOverride;
  }

  /// RAII object to handle the state changes required to synthesize
  /// a function body.
  class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;
    bool PushedCodeSynthesisContext = false;

  public:
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
        : S(S), SavedContext(S, DC) {
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
      if (auto *FD = dyn_cast<FunctionDecl>(DC))
        FD->setWillHaveBody(true);
      else
        assert(isa<ObjCMethodDecl>(DC));
    }

    /// Push a "while defining the synthesized function" code-synthesis
    /// context note for diagnostics; popped again by the destructor.
    void addContextNote(SourceLocation UseLoc) {
      assert(!PushedCodeSynthesisContext);

      Sema::CodeSynthesisContext Ctx;
      Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
      Ctx.PointOfInstantiation = UseLoc;
      Ctx.Entity = cast<Decl>(S.CurContext);
      S.pushCodeSynthesisContext(Ctx);

      PushedCodeSynthesisContext = true;
    }

    ~SynthesizedFunctionScope() {
      if (PushedCodeSynthesisContext)
        S.popCodeSynthesisContext();
      if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
        FD->setWillHaveBody(false);
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };

  /// WeakUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma weak before declared. rare. may alias another
  /// identifier, declared or undeclared
  llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

  /// ExtnameUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma redefine_extname before declared. Used in Solaris system headers
  /// to define functions that occur in multiple standards to call the version
  /// in the currently selected standard.
  llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

  /// Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
  SmallVector<Decl*,2> WeakTopLevelDecl;

  IdentifierResolver IdResolver;

  /// Translation Unit Scope - useful to Objective-C actions that need
  /// to lookup file scope declarations in the "ordinary" C decl namespace.
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// The C++ "std::align_val_t" enum class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdAlignValT;

  /// The C++ "std::experimental" namespace, where the experimental parts
  /// of the standard library resides.
  NamespaceDecl *StdExperimentalNamespaceCache;

  /// The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// The C++ "std::coroutine_traits" template, which is defined in
  /// \<coroutine_traits>
  ClassTemplateDecl *StdCoroutineTraitsCache;

  /// The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// Caches identifiers/selectors for NSFoundation APIs.
  std::unique_ptr<NSAPI> NSAPIObj;

  /// The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// The declaration of the dictionaryWithObjects:forKeys:count: method.
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// id<NSCopying> type.
  QualType QIDNSCopying;

  /// will hold 'respondsToSelector:'
  Selector RespondsToSelectorSel;

  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;

  /// A flag to indicate that we're in a context that permits abstract
  /// references to fields (e.g. the SIZE operator in MS-style inline
  /// assembly; see UnevaluatedAbstract below).
  bool AllowAbstractFieldReference;

  /// Describes how the expressions currently being parsed are
  /// evaluated at run-time, if at all.
  enum class ExpressionEvaluationContext {
    /// The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// The current expression occurs within a braced-init-list within
    /// an unevaluated operand. This is mostly like a regular unevaluated
    /// context, except that we still instantiate constexpr functions that are
    /// referenced here so that we can perform narrowing checks correctly.
    UnevaluatedList,

    /// The current expression occurs within a discarded statement.
    /// This behaves largely similarly to an unevaluated operand in preventing
    /// definitions from being required, but not in other ways.
    DiscardedStatement,

    /// The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };

  /// Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// Whether the enclosing context needed a cleanup.
    CleanupInfo ParentCleanup;

    /// Whether we are in a decltype expression.
    bool IsDecltype;

    /// The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// The number of typos encountered during this expression evaluation
    /// context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    /// Deferred odr-use candidates saved from the enclosing context.
    MaybeODRUseExprSet SavedMaybeODRUseExprs;

    /// The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    /// Expressions that may dereference null recorded in this context.
    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

    /// Expressions appearing as the LHS of a volatile assignment in this
    /// context. We produce a warning for these when popping the context if
    /// they are not discarded-value expressions nor unevaluated operands.
    SmallVector<Expr*, 2> VolatileAssignmentLHSs;

    /// \brief Describes whether we are in an expression context which we have
    /// to handle differently.
    enum ExpressionKind {
      EK_Decltype, EK_TemplateArgument, EK_Other
    } ExprContext;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      CleanupInfo ParentCleanup,
                                      Decl *ManglingContextDecl,
                                      ExpressionKind ExprContext)
        : Context(Context), ParentCleanup(ParentCleanup),
          NumCleanupObjects(NumCleanupObjects), NumTypos(0),
          ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}

    /// True for any of the three unevaluated-operand context kinds.
    bool isUnevaluated() const {
      return Context == ExpressionEvaluationContext::Unevaluated ||
             Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
             Context == ExpressionEvaluationContext::UnevaluatedList;
    }

    bool isConstantEvaluated() const {
      return Context == ExpressionEvaluationContext::ConstantEvaluated;
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// Emit a warning for all pending noderef expressions that we recorded.
  void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

  /// Compute the mangling number context for a lambda expression or
  /// block literal. Also return the extra mangling decl if any.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  std::tuple<MangleNumberingContext *, Decl *>
  getCurrentMangleNumberContext(const DeclContext *DC);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult() : Pair() {}
    // Deleted members are classified as NoMemberOrDeleted up front.
    SpecialMemberOverloadResult(CXXMethodDecl *MD)
        : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  /// A SpecialMemberOverloadResult that can live in a FoldingSet cache.
  class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode,
                                           public SpecialMemberOverloadResult {
  public:
    SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
        : FastFoldingSetNode(ID) {}
  };

  /// A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

  /// A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

  /// The kind of translation unit we are processing.
  ///
  /// When we're processing a complete translation unit, Sema will perform
  /// end-of-translation-unit semantic tasks (such as creating
  /// initializers for tentative definitions in C) once parsing has
  /// completed. Modules and precompiled headers perform different kinds of
  /// checks.
  TranslationUnitKind TUKind;

  /// Arena used for Sema-owned allocations.
  llvm::BumpPtrAllocator BumpAlloc;

  /// The number of SFINAE diagnostics that have been trapped.
  unsigned NumSFINAEErrors;

  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
      UnparsedDefaultArgInstantiationsMap;

  /// A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // argument locations.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedInternals - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Determine if VD, which must be a variable or function, is an external
  /// symbol that nonetheless can't be referenced from outside this translation
  /// unit because its type has no linkage and it's not extern "C".
  bool isExternalWithNoLinkageType(ValueDecl *VD);

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

  /// Retrieves list of suspicious delete-expressions that will be checked at
  /// the end of translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
  typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// List of SourceLocations where 'self' is implicitly retained inside a
  /// block.
  llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
      ImplicitlyRetainedSelfLocs;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
      SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  /// Kinds of defaulted comparison operator functions.
  enum class DefaultedComparisonKind {
    /// This is not a defaultable comparison operator.
    None,

    /// This is an operator== that should be implemented as a series of
    /// subobject comparisons.
    Equal,

    /// This is an operator<=> that should be implemented as a series of
    /// subobject comparisons.
    ThreeWay,

    /// This is an operator!= that should be implemented as a rewrite in terms
    /// of a == comparison.
    NotEqual,

    /// This is an <, <=, >, or >= that should be implemented as a rewrite in
    /// terms of a <=> comparison.
    Relational,
  };

  /// The function definitions which were renamed as part of typo-correction
  /// to match their respective declarations.
  /// We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
    ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

  private:
    Sema& S;
    FPOptions OldFPFeaturesState;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

  /// Whether the "stack nearly exhausted" warning has already been emitted.
  bool WarnedStackExhausted = false;

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  // Accessors for the major pieces of compiler state Sema depends on.
  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  /// Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  /// \param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Warn that the stack is nearly exhausted.
  void warnStackExhausted(SourceLocation Loc);

  /// Run some code with "sufficient" stack space. (Currently, at least 256K is
  /// guaranteed). Produces a warning if we're low on stack space and allocates
  /// more in that case. Use this in code that may recurse deeply (for example,
  /// in template instantiation) to avoid stack overflow.
  void runWithSufficientStackSpace(SourceLocation Loc,
                                   llvm::function_ref<void()> Fn);

  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
    // in that case anyway.
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor will
      // do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template<typename T>
    friend const SemaDiagnosticBuilder &operator<<(
        const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };

  /// Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string
  getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  void emitAndClearUnusedLocalTypedefWarnings();

  /// The parts a C++20 module interface unit can be divided into.
  enum TUFragmentKind {
    /// The global module fragment, between 'module;' and a module-declaration.
    Global,

    /// A normal translation unit fragment. For a non-module unit, this is the
    /// entire translation unit. Otherwise, it runs from the module-declaration
    /// to the private-module-fragment (if any) or the end of the TU (if not).
    Normal,

    /// The private module fragment, between 'module :private;' and the end of
    /// the translation unit.
    Private
  };

  void ActOnStartOfTranslationUnit();
  void ActOnEndOfTranslationUnit();
  void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  // Function/block/lambda/captured-region scope management.
  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// This is used to inform Sema what the current TemplateParameterDepth
  /// is during Parsing. Currently it is used to pass on the depth
  /// when parsing generic lambda 'auto' parameters.
  void RecordParsingTemplateParameterDepth(unsigned Depth);

  void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                               RecordDecl *RD, CapturedRegionKind K,
                               unsigned OpenMPCaptureLevel = 0);

  /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
  /// time after they've been popped.
  class PoppedFunctionScopeDeleter {
    Sema *Self;

  public:
    explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
    void operator()(sema::FunctionScopeInfo *Scope) const;
  };

  using PoppedFunctionScopePtr =
      std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

  PoppedFunctionScopePtr
  PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                       const Decl *D = nullptr,
                       QualType BlockType = QualType());

  /// The innermost active function scope, or null if there is none.
  sema::FunctionScopeInfo *getCurFunction() const {
    return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
  }

  sema::FunctionScopeInfo *getEnclosingFunction() const;

  void setFunctionHasBranchIntoScope();
  void setFunctionHasBranchProtectedScope();
  void setFunctionHasIndirectGoto();

  void PushCompoundScope(bool IsStmtExpr);
  void PopCompoundScope();

  sema::CompoundScopeInfo &getCurCompoundScope() const;

  bool hasAnyUnrecoverableErrorsInThisFunction() const;

  /// Retrieve the current block, if any.
  sema::BlockScopeInfo *getCurBlock();

  /// Get the innermost lambda enclosing the current location, if any. This
  /// looks through intervening non-lambda scopes such as local functions and
  /// blocks.
  sema::LambdaScopeInfo *getEnclosingLambda() const;

  /// Retrieve the current lambda scope info, if any.
  /// \param IgnoreNonLambdaCapturingScope true if should find the top-most
  /// lambda scope info ignoring all inner capturing scopes that are not
  /// lambda scopes.
  sema::LambdaScopeInfo *
  getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

  /// Retrieve the current generic lambda info, if any.
  sema::LambdaScopeInfo *getCurGenericLambda();

  /// Retrieve the current captured region, if any.
  sema::CapturedRegionScopeInfo *getCurCapturedRegion();

  /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
  SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

  void ActOnComment(SourceRange Comment);

  //===--------------------------------------------------------------------===//
  // Type Analysis / Processing: SemaType.cpp.
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. 
/// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser {
  TypeDiagnoser() {}

  /// Emit the diagnostic for type \p T at \p Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// getPrintable(): normalize a value so it can be streamed into a diagnostic
// builder. Most overloads are the identity; the Expr and TypeLoc overloads
// extract a SourceRange instead.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char *getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }

/// TypeDiagnoser that emits diagnostic \p DiagID with a fixed tuple of extra
/// arguments (bound at construction) followed by the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;                // diagnostic to emit; asserted non-zero
  std::tuple<const Ts &...> Args; // held by reference: must outlive *this

  /// Stream every bound argument into \p DB.
  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order. (The braced array
    // initializer guarantees left-to-right evaluation of the expansion.)
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T; // the type itself is always the final diagnostic argument
  }
};

/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g.
<code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>, /// and the function will output the number of parameter names, and whether /// this is a single-arg initializer. /// /// For a type, enum constant, property, or variable declaration, this will /// validate either a simple identifier, or a qualified /// <code>context.identifier</code> name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation ArgLoc, const IdentifierInfo *AttrName); private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. 
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

/// Determine whether a declaration is visible to name lookup.
/// Inline fast path: a non-hidden declaration is always visible; otherwise
/// defer to the out-of-line slow path.
bool isVisible(const NamedDecl *D) {
  return !D->isHidden() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
/// Convenience overload that discards the suggested declaration.
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool hasVisibleDefaultArgument(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

/// True if \p T is complete at \p Loc; the null diagnoser means no
/// diagnostic is ever emitted by this query.
bool isCompleteType(SourceLocation Loc, QualType T) {
  return !RequireCompleteTypeImpl(Loc, T, nullptr);
}

// Each Require*Type family below offers three forms: a TypeDiagnoser
// overload, a raw diagnostic-ID overload, and a variadic template that binds
// extra diagnostic arguments via BoundTypeDiagnoser.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, Diagnoser);
}

bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                           bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

/// Result bundle for the function-body-skipping checks.
/// NOTE(review): field semantics inferred from names and use as an optional
/// out-parameter (e.g. CheckForFunctionRedefinition) — confirm in SemaDecl.cpp.
struct SkipBodyInfo {
  SkipBodyInfo()
      : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
        New(nullptr) {}
  bool ShouldSkip;
  bool CheckSameAsPrevious;
  NamedDecl *Previous;
  NamedDecl *New;
};

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = nullptr,
                       bool isClassName = false, bool HasTrailingDot = false,
                       ParsedType ObjectType = nullptr,
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       bool IsClassTemplateDeductionContext = true,
                       IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc,
                             Scope *S, CXXScopeSpec *SS,
                             ParsedType &SuggestedType,
                             bool IsTemplateName = false);

/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent
context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                    SourceLocation NameLoc,
                                    bool IsTemplateTypeArg);

/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  /// This name is not a type or template in this context, but might be
  /// something else.
  NC_Unknown,
  /// Classification failed; an error has been produced.
  NC_Error,
  /// The name has been typo-corrected to a keyword.
  NC_Keyword,
  /// The name was classified as a type.
  NC_Type,
  /// The name was classified as a specific non-type, non-template
  /// declaration. ActOnNameClassifiedAsNonType should be called to
  /// convert the declaration to an expression.
  NC_NonType,
  /// The name was classified as an ADL-only function name.
  /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
  /// result to an expression.
  NC_UndeclaredNonType,
  /// The name denotes a member of a dependent type that could not be
  /// resolved. ActOnNameClassifiedAsDependentNonType should be called to
  /// convert the result to an expression.
  NC_DependentNonType,
  /// The name was classified as a non-type, and an expression representing
  /// that name has been formed.
  NC_ContextIndependentExpr,
  /// The name was classified as a template whose specializations are types.
  NC_TypeTemplate,
  /// The name was classified as a variable template name.
  NC_VarTemplate,
  /// The name was classified as a function template name.
  NC_FunctionTemplate,
  /// The name was classified as an ADL-only function template name.
  NC_UndeclaredTemplate,
};

/// Tagged-union result of ClassifyName(): a classification kind plus
/// whichever payload that kind carries. Built through the static factory
/// functions; each accessor asserts the matching kind.
class NameClassification {
  NameClassificationKind Kind;
  union {
    ExprResult Expr;        // NC_ContextIndependentExpr
    NamedDecl *NonTypeDecl; // NC_NonType
    TemplateName Template;  // NC_TypeTemplate / NC_VarTemplate /
                            // NC_FunctionTemplate / NC_UndeclaredTemplate
    ParsedType Type;        // NC_Type
  };

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() { return NameClassification(NC_Error); }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification ContextIndependentExpr(ExprResult E) {
    NameClassification Result(NC_ContextIndependentExpr);
    Result.Expr = E;
    return Result;
  }

  static NameClassification NonType(NamedDecl *D) {
    NameClassification Result(NC_NonType);
    Result.NonTypeDecl = D;
    return Result;
  }

  static NameClassification UndeclaredNonType() {
    return NameClassification(NC_UndeclaredNonType);
  }

  static NameClassification DependentNonType() {
    return NameClassification(NC_DependentNonType);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ExprResult getExpression() const {
    assert(Kind == NC_ContextIndependentExpr);
    return Expr;
  }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  NamedDecl *getNonTypeDecl() const {
    assert(Kind == NC_NonType);
    return NonTypeDecl;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
    return Template;
  }

  /// Map the template classification onto the parser-facing
  /// TemplateNameKind enumeration.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                IdentifierInfo *&Name, SourceLocation NameLoc,
                                const Token &NextToken,
                                CorrectionCandidateCallback *CCC = nullptr);

/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
                                                  SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
                                                 IdentifierInfo *Name,
                                                 SourceLocation NameLoc,
                                                 bool IsAddressOfOperand);

/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
                                        NamedDecl *Found,
                                        SourceLocation NameLoc,
                                        const Token &NextToken);

/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
  ClassTemplate,
  FunctionTemplate,
  VarTemplate,
  AliasTemplate,
  TemplateTemplateParam,
  Concept,
  DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);

/// Determine whether it's plausible that E was intended to be a
/// template-name.
///
/// Returns true only for a (possibly dependent) declaration or member
/// reference that carries no explicit template arguments; \p Dependent is
/// set when the answer came from a dependent-scope expression.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  // Template-ids only exist in C++, and an invalid expression tells us
  // nothing.
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  // Non-dependent references: plausible iff no template arguments were
  // already written.
  if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
    return !DRE->hasExplicitTemplateArgs();
  if (auto *ME = dyn_cast<MemberExpr>(E.get()))
    return !ME->hasExplicitTemplateArgs();
  // Dependent-scope references: same test, but report the dependence.
  Dependent = true;
  if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
    return !DSDRE->hasExplicitTemplateArgs();
  if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
    return !DSME->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                        SourceLocation Less,
                                        SourceLocation Greater);

Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name, SourceLocation Loc,
                                  bool IsTemplateId);
void diagnoseIgnoredQualifiers(
    unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc,
    SourceLocation ConstQualLoc = SourceLocation(),
    SourceLocation VolatileQualLoc = SourceLocation(),
    SourceLocation RestrictQualLoc = SourceLocation(),
    SourceLocation AtomicQualLoc = SourceLocation(),
    SourceLocation UnalignedQualLoc = SourceLocation());

static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                  const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                 const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);

/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                  TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
                                LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope,
                                   ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                             MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                   Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

/// Selects how constexpr-function checking reports its findings.
enum class CheckConstexprKind {
  /// Diagnose issues that are non-constant or that are extensions.
  Diagnose,
  /// Identify whether this function satisfies the formal rules for constexpr
  /// functions in the current language mode (with no extensions).
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. 
NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. 
void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). 
bool canSkipFunctionBody(Decl *D);

void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);

/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                            QualType ReturnTy, NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                            SourceLocation SemiLoc);

enum class ModuleDeclKind {
  Interface,      ///< 'export module X;'
  Implementation, ///< 'module X;'
};

/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                               SourceLocation ModuleLoc, ModuleDeclKind MDK,
                               ModuleIdPath Path, bool IsFirstDecl);

/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                              SourceLocation PrivateLoc);

/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
///        could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, Module *M,
                             ModuleIdPath Path = {});

/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import.
/// Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                           MissingImportKind MIK, bool Recover);

// C++20 'export' blocks: export-declaration open/close parser callbacks.
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                           SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                            SourceLocation RBraceLoc);

/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
                                          NamedDecl *Spec);

/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  return getPrintingPolicy(Context, PP);
}

/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation,
                                 RecordDecl *&AnonRecord);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                  RecordDecl *Record,
                                  const PrintingPolicy &Policy);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
  NTK_NonStruct,
  NTK_NonClass,
  NTK_NonUnion,
  NTK_NonEnum,
  NTK_Typedef,
  NTK_TypeAlias,
  NTK_Template,
  NTK_TypeAliasTemplate,
  NTK_TemplateTemplateArgument,
};

/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);

bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag,
                                  bool isDefinition, SourceLocation NewTagLoc,
                                  const IdentifierInfo *Name);

enum TagUseKind {
  TUK_Reference,   // Reference to a tag:  'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
  TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend       // Friend declaration:  'friend struct foo;'
};

Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
               SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
               SourceLocation NameLoc, const ParsedAttributesView &Attr,
               AccessSpecifier AS, SourceLocation ModulePrivateLoc,
               MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
               bool &IsDependent, SourceLocation ScopedEnumKWLoc,
               bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
               bool IsTypeSpecifier, bool IsTemplateParamOrArg,
               SkipBodyInfo *SkipBody = nullptr);

Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                              unsigned TagSpec, SourceLocation TagLoc,
                              CXXScopeSpec &SS, IdentifierInfo *Name,
                              SourceLocation NameLoc,
                              const ParsedAttributesView &Attr,
                              MultiTemplateParamsArg TempParamLists);

TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                             const CXXScopeSpec &SS, IdentifierInfo *Name,
                             SourceLocation TagLoc, SourceLocation NameLoc);

void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
               IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 Declarator &D, Expr *BitfieldWidth);

FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                       Declarator &D, Expr *BitfieldWidth,
                       InClassInitStyle InitStyle, AccessSpecifier AS);

MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                 SourceLocation DeclStart, Declarator &D,
                                 Expr *BitfieldWidth,
                                 InClassInitStyle InitStyle,
                                 AccessSpecifier AS,
                                 const ParsedAttr &MSPropertyAttr);

FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                          TypeSourceInfo *TInfo, RecordDecl *Record,
                          SourceLocation Loc, bool Mutable,
                          Expr *BitfieldWidth, InClassInitStyle InitStyle,
                          SourceLocation TSSL, AccessSpecifier AS,
                          NamedDecl *PrevDecl, Declarator *D = nullptr);

bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);

enum TrivialABIHandling {
  /// The triviality of a method unaffected by "trivial_abi".
  TAH_IgnoreTrivialABI,

  /// The triviality of a method affected by "trivial_abi".
  TAH_ConsiderTrivialABI
};

bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                            TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
                            bool Diagnose = false);

/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
  /// The kind of special member, if this is a special member function.
  CXXSpecialMember SpecialMember : 8;
  /// The kind of comparison, if this is a defaulted comparison operator.
  DefaultedComparisonKind Comparison : 8;

public:
  DefaultedFunctionKind()
      : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
  }
  DefaultedFunctionKind(CXXSpecialMember CSM)
      : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
  DefaultedFunctionKind(DefaultedComparisonKind Comp)
      : SpecialMember(CXXInvalid), Comparison(Comp) {}

  bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
  bool isComparison() const {
    return Comparison != DefaultedComparisonKind::None;
  }

  /// True if this represents either a special member or a comparison.
  explicit operator bool() const {
    return isSpecialMember() || isComparison();
  }

  CXXSpecialMember asSpecialMember() const { return SpecialMember; }
  DefaultedComparisonKind asComparison() const { return Comparison; }

  /// Get the index of this function kind for use in diagnostics.
  unsigned getDiagnosticIndex() const {
    static_assert(CXXInvalid > CXXDestructor,
                  "invalid should have highest index");
    static_assert((unsigned)DefaultedComparisonKind::None == 0,
                  "none should be equal to zero");
    return SpecialMember + (unsigned)Comparison;
  }
};

DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);

CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
  return getDefaultedFunctionKind(MD).asSpecialMember();
}

DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
  return getDefaultedFunctionKind(FD).asComparison();
}

void ActOnLastBitfield(SourceLocation DeclStart,
                       SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
                Expr *BitfieldWidth, tok::ObjCKeywordKind visibility);

// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                 ArrayRef<Decl *> Fields, SourceLocation LBrac,
                 SourceLocation RBrac, const ParsedAttributesView &AttrList);

/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                              SkipBodyInfo &SkipBody);

// Opaque handle returned when a skipped tag definition is entered; passed
// back to ActOnTagFinishSkippedDefinition.
typedef void *SkippedDefinitionContext;

/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     bool IsFinalSpelledSealed,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                              SourceRange BraceRange);

void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

void ActOnObjCContainerFinishDefinition();

/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

// Enumerations: checking of enum constants, underlying types, and
// redeclarations.
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                    EnumConstantDecl *LastEnumConst,
                                    SourceLocation IdLoc, IdentifierInfo *Id,
                                    Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                            QualType EnumUnderlyingTy, bool IsFixed,
                            const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                    SourceLocation IILoc);

Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                        SourceLocation IdLoc, IdentifierInfo *Id,
                        const ParsedAttributesView &Attrs,
                        SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                   Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                   const ParsedAttributesView &Attr);

DeclContext *getContainingDC(DeclContext *DC);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);

/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope *S, Decl *D);
void ActOnExitFunctionContext();

DeclContext *getFunctionLevelDeclContext();

/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();

/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();

/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null.  If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();

/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
///        enclosing namespace set of the context, rather than contained
///        directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,

  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,

  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,

  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
/// /// The sum of priorities deteremines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final prirority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final vaue. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. 
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
                      IdentifierInfo *Platform, bool Implicit,
                      VersionTuple Introduced, VersionTuple Deprecated,
                      VersionTuple Obsoleted, bool IsUnavailable,
                      StringRef Message, bool IsStrict, StringRef Replacement,
                      AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                        TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                                    VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
                        StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase,
                       MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
                            IdentifierInfo *Format, int FormatIdx,
                            int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
                              StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
                              StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
                                        const AttributeCommonInfo &CI,
                                        const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
                                    const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
                                  const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
                                        const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
                                  StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
                                              const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);

// Redeclaration merging: reconcile attributes, types, and exception
// specifications between a new declaration and its previous declaration(s).
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                         AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                          LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                       bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                  Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
  AA_Assigning,
  AA_Passing,
  AA_Returning,
  AA_Converting,
  AA_Initializing,
  AA_Sending,
  AA_Casting,
  AA_Passing_CFAudited
};

/// C++ Overloading.
enum OverloadKind {
  /// This is a legitimate overload: the existing declarations are
  /// functions or function templates with different signatures.
  Ovl_Overload,

  /// This is not an overload because the signature exactly matches
  /// an existing declaration.
  Ovl_Match,

  /// This is not an overload because the lookup results contain a
  /// non-function.
  Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S, FunctionDecl *New,
                           const LookupResult &OldDecls, NamedDecl *&OldDecl,
                           bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
                bool ConsiderCudaAttrs = true);

ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
                      bool SuppressUserConversions, bool AllowExplicit,
                      bool InOverloadResolution, bool CStyle,
                      bool AllowObjCWritebackConversion);

// Standard conversion checks: each predicate tests whether one specific
// kind of implicit conversion applies between the given types.
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                         bool InOverloadResolution, QualType &ConvertedType,
                         bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
                             QualType &ConvertedType,
                             bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                              QualType &ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                const FunctionProtoType *NewType,
                                unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType,
                                QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind,
                            CXXCastPath &BasePath, bool IgnoreBaseAccess,
                            bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType,
                               QualType ToType, bool InOverloadResolution,
                               QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                  CastKind &Kind, CXXCastPath &BasePath,
                                  bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
                               bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
                          QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                           const VarDecl *NRVOCandidate,
                                           QualType ResultType, Expr *Value,
                                           bool AllowNRVO = true);

bool CanPerformAggregateInitializationForOverloadResolution(
    const InitializedEntity &Entity, InitListExpr *From);

bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                  ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                     SourceLocation EqualLoc,
                                     ExprResult Init,
                                     bool TopLevelOfInitList = false,
                                     bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
                                               NestedNameSpecifier *Qualifier,
                                               NamedDecl *FoundDecl,
                                               CXXMethodDecl *Method);

/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);

ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

/// Contexts in which a converted constant expression is required.
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
  CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
  CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            APValue &Value, CCEKind CCE);

/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
  bool Suppress;
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

class ICEConvertDiagnoser : public ContextualImplicitConverter {
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                      bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                        QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                               QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error };
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

using ADLCallKind = CallExpr::ADLCallKind;

// Overload candidate construction: each AddXXXCandidate method appends
// candidates of one flavor to the given OverloadCandidateSet.
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          bool AllowExplicit = true,
                          bool AllowExplicitConversion = false,
                          ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                          ConversionSequenceList EarlyConversions = None,
                          OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(
    const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet,
    TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
    bool SuppressUserConversions = false, bool PartialOverloading = false,
    bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet &CandidateSet,
                        bool SuppressUserConversion = false,
                        OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
                        CXXRecordDecl *ActingContext, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet &CandidateSet,
                        bool SuppressUserConversions = false,
                        bool PartialOverloading = false,
                        ConversionSequenceList EarlyConversions = None,
                        OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(
    FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext,
    TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType,
    Expr::Classification ObjectClassification, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet,
    bool SuppressUserConversions = false, bool PartialOverloading = false,
    OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
    bool PartialOverloading = false, bool AllowExplicit = true,
    ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
    OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
    FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
    ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
    ConversionSequenceList &Conversions, bool SuppressUserConversions,
    CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
    Expr::Classification ObjectClassification = {},
    OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
    CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
    OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
    bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
    OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
    bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                           DeclAccessPair FoundDecl,
                           CXXRecordDecl *ActingContext,
                           const FunctionProtoType *Proto, Expr *Object,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet &CandidateSet);
void AddNonMemberOperatorCandidates(
    const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet,
    TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                 SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
                         OverloadCandidateSet &CandidateSet,
                         bool IsAssignmentOperator = false,
                         unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                  SourceLocation OpLoc,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet &CandidateSet);
void AddArgumentDependentLookupCandidates(
    DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args,
    TemplateArgumentListInfo *ExplicitTemplateArgs,
    OverloadCandidateSet &CandidateSet, bool PartialOverloading = false);

// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
    NamedDecl *Found, FunctionDecl *Fn,
    OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
    QualType DestType = QualType(), bool TakingAddress = false);

// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                               bool TakingAddress = false);

/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                            bool MissingImplicitThis = false);

/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);

/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
                                         const Expr *ThisArg,
                                         ArrayRef<const Expr *> Args,
                                         SourceLocation Loc);

/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
/// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. 
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. 
LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. 
a reference).
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,
  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};

/// Select the redeclaration-lookup mode appropriate for declarations in the
/// current context (module-owned contexts only link against visible names).
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  if (cast<Decl>(CurContext)
          ->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
    return ForVisibleRedeclaration;
  return ForExternalRedeclaration;
}

/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,
  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};

/// Look up the special member function of kind SM for class D; the boolean
/// flags describe the qualifiers of the argument and of the implicit object
/// parameter.
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                CXXSpecialMember SM,
                                                bool ConstArg,
                                                bool VolatileArg,
                                                bool RValueThis,
                                                bool ConstThis,
                                                bool VolatileThis);

/// Callback used to emit the diagnostic for a delayed typo correction.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
/// Callback used to rebuild an expression once a typo has been corrected.
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

/// State kept for each outstanding TypoExpr until it is resolved.
struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};

/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC);

// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult 
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? 
ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type through some means not written in source (e.g. API notes). /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param diagLoc The location to use for diagnostics. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \param overrideExisting Whether to override an existing, locally-specified /// nullability specifier rather than complaining about the conflict. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkImplicitNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation diagLoc, bool allowArrayTypes, bool overrideExisting); /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. 
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. 
void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. 
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See descriptoin of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. 
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                   bool InstanceFirst, bool CheckTheOther,
                                   const ObjCObjectType *TypeBound = nullptr);

bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                               SourceRange R, bool receiverIdOrClass,
                               SmallVectorImpl<ObjCMethodDecl*>& Methods);

void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                   Selector Sel, SourceRange R,
                                   bool receiverIdOrClass);

private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  // Remember the failed location so the same typo isn't re-attempted there.
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                                 QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// Holder for a (possibly null) expression that has been turned into a
/// full expression; constructed via Sema::MakeFullExpr and friends.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() {
    return E;
  }

  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  // Use the expression's own location when one is available.
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active; // While true, the destructor pops the function scope.
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  // Defuse the RAII pop, e.g. when the scope has been handed off elsewhere.
  void disable() { Active = false; }
};

StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                         SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);

// switch/case handling.
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                         SourceLocation DotDotDotLoc, ExprResult RHS,
                         SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                            SourceLocation ColonLoc,
                            Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                          SourceLocation ColonLoc, Stmt *SubStmt);

StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                               ArrayRef<const Attr*> Attrs,
                               Stmt *SubStmt);

class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       Stmt *InitStmt,
                       ConditionResult Cond, Stmt *ThenVal,
                       SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       Stmt *InitStmt,
                       ConditionResult Cond, Stmt *ThenVal,
                       SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                  Stmt *InitStmt,
                                  ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                 Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
                          Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                       SourceLocation WhileLoc, SourceLocation CondLParen,
                       Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
                        SourceLocation LParenLoc,
                        Stmt *First,
                        ConditionResult Second,
                        FullExprArg Third,
                        SourceLocation RParenLoc,
                        Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                         Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
                                      Stmt *First, Expr *collection,
                                      SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,
  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,
  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
  BFRK_Check
};

StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
                                SourceLocation CoawaitLoc,
                                Stmt *InitStmt,
                                Stmt *LoopVar,
                                SourceLocation ColonLoc, Expr *Collection,
                                SourceLocation RParenLoc,
                                BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
                                SourceLocation CoawaitLoc,
                                Stmt *InitStmt,
                                SourceLocation ColonLoc,
                                Stmt *RangeDecl, Stmt *Begin, Stmt *End,
                                Expr *Cond, Expr *Inc,
                                Stmt *LoopVarDecl,
                                SourceLocation RParenLoc,
                                BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);

StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
                         SourceLocation LabelLoc,
                         LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
                                 SourceLocation StarLoc,
                                 Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);

// Captured regions (e.g. bodies outlined for OpenMP).
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                              CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                              CapturedRegionKind Kind,
                              ArrayRef<CapturedParamNameType> Params,
                              unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
                                         SourceLocation Loc,
                                         unsigned NumParams);

/// Flags controlling the checks for copy-elision / implicit-move
/// candidates; combined as a bitmask (note the power-of-two values).
enum CopyElisionSemanticsKind {
  CES_Strict = 0,
  CES_AllowParameters = 1,
  CES_AllowDifferentTypes = 2,
  CES_AllowExceptionVariables = 4,
  CES_FormerDefault = (CES_AllowParameters),
  CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
  CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
                       CES_AllowExceptionVariables),
};

VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
                                 CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
                            CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                           Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc,
                                   Expr *RetValExp);

// Inline assembly (GCC-style and MS-style).
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
                           bool IsVolatile, unsigned NumOutputs,
                           unsigned NumInputs, IdentifierInfo **Names,
                           MultiExprArg Constraints, MultiExprArg Exprs,
                           Expr *AsmString, MultiExprArg Clobbers,
                           unsigned NumLabels,
                           SourceLocation RParenLoc);

void FillInlineAsmIdentifierInfo(Expr *Res,
                                 llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
                                     SourceLocation TemplateKWLoc,
                                     UnqualifiedId &Id,
                                     bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
                          unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
                                       SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
                          ArrayRef<Token> AsmToks,
                          StringRef AsmString,
                          unsigned NumOutputs, unsigned NumInputs,
                          ArrayRef<StringRef> Constraints,
                          ArrayRef<StringRef> Clobbers,
                          ArrayRef<Expr*> Exprs,
                          SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
                                 SourceLocation Location,
                                 bool AlwaysCreate);

// Objective-C @try/@catch/@finally/@throw/@synchronized.
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
                                SourceLocation StartLoc,
                                SourceLocation IdLoc,
                                IdentifierInfo *Id,
                                bool Invalid = false);

Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);

StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc,
                                SourceLocation RParen,
                                Decl *Parm, Stmt *Body);

StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);

StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
                              MultiStmtArg Catch, Stmt *Finally);

StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
                                Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
                                          Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
                                       Expr *SynchExpr,
                                       Stmt *SynchBody);

StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);

// C++ try/catch and MS structured exception handling (__try/__except).
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
                                   SourceLocation StartLoc,
                                   SourceLocation IdLoc,
                                   IdentifierInfo *Id);

Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);

StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
                              Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
                            ArrayRef<Stmt *> Handlers);

StmtResult ActOnSEHTryBlock(bool IsCXXTry, // true for C++ try, false for __try
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
                               Expr *FilterExpr,
                               Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);

void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);

bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;

/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);

/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);

/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
///   if (condition);
///     do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body,
                           unsigned DiagID);

/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody);

/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                      SourceLocation OpLoc);

/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
                                         SourceLocation Loc);

/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

/// Begin a parsed declaration, routing its delayed diagnostics into
/// \p pool; must be matched by a later PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
/// Enter a parsed class; pairs with PopParsingClass below.
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. 
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the 
current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult 
BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. 
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * 
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation 
RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression. 
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. 
  /// Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). 
The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. 
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. 
  /// Whether 'this' shows up in the exception specification of a static
  /// member function.
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. 
class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. 
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. 
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, 
SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any 
cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
/// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. 
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. 
void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator,
                      SourceRange IntroducerRange,
                      LambdaCaptureDefault CaptureDefault,
                      SourceLocation CaptureDefaultLoc, bool ExplicitParams,
                      bool ExplicitResultType, bool Mutable);

/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
///
/// Thin wrapper over buildLambdaInitCaptureInitialization: passes no pack
/// expansion count (None), maps \p InitKind to the DirectInit flag
/// (anything other than copy-init => true), and wraps the resulting
/// QualType as a ParsedType for the parser.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  return ParsedType::make(buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id,
      InitKind != LambdaCaptureInitKind::CopyInit, Init));
}

/// Analyzes an init-capture's initializer and returns the type of the
/// capture; \p Init may be rewritten in place.
/// NOTE(review): behavior description inferred from the wrapper above and
/// the parameter names -- confirm against the implementation.
QualType buildLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
    Expr *&Init);

/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                        QualType InitCaptureType,
                                        SourceLocation EllipsisLoc,
                                        IdentifierInfo *Id,
                                        unsigned InitStyle, Expr *Init);

/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);

/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
                                              ArrayRef<NamedDecl *> TParams,
                                              SourceLocation RAngleLoc);

/// Introduce the lambda parameters into scope.
void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. 
QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, and false is returned. bool CheckConstraintExpression(Expr *CE); /// \brief Check whether the given list of constraint expressions are /// satisfied (as if in a 'conjunction') given template arguments. /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if true is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. 
/// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise. bool CheckConstraintSatisfaction(TemplateDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); bool CheckConstraintSatisfaction(ClassTemplatePartialSpecializationDecl *TD, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); bool CheckConstraintSatisfaction(VarTemplatePartialSpecializationDecl *TD, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occured and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check that the associated constraints of a template declaration match the /// associated constraints of an older declaration of which it is a /// redeclaration. bool CheckRedeclarationConstraintMatch(TemplateParameterList *Old, TemplateParameterList *New); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. 
/// /// \returns true if the constrains are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction& Satisfaction); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction& Satisfaction); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied because it was ill-formed. void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation, StringRef Diagnostic); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);

/// Check class-level code_seg attribute.
/// NOTE(review): inferred from the name, by analogy with
/// checkClassLevelDLLAttribute above -- confirm against the implementation.
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);

void referenceDLLExportedClassMethods();

/// Propagate the DLL attribute \p ClassAttr of \p Class to the base-class
/// template specialization \p BaseTemplateSpec named at \p BaseLoc.
/// NOTE(review): description inferred from the signature -- confirm.
void propagateDLLAttrToBaseClassTemplate(
    CXXRecordDecl *Class, Attr *ClassAttr,
    ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc);

/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);

/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);

void CheckCompletedCXXClass(CXXRecordDecl *Record);

/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl 
*Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(FunctionDecl *MD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, 
CXXRecordDecl *NamingClass, QualType BaseType);

/// Determines whether the special member \p decl, with the given access,
/// is accessible on an object of type \p objectType for the purpose of
/// deciding deletion of an implicit special member.
/// NOTE(review): purpose inferred from the function name -- confirm.
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
                                          AccessSpecifier access,
                                          QualType objectType);

void HandleDependentAccessCheck(
    const DependentDiagnostic &DD,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(
    const DeclContext *Pattern,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;

/// Selects the kind of entity being complained about when a type is
/// required to be non-abstract (return type, parameter type, variable
/// type, field type, ...).
enum AbstractDiagSelID {
  AbstractNone = -1,
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};

bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);

/// Convenience overload: packages \p DiagID and \p Args into a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser overload above.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

void DiagnoseAbstractType(const CXXRecordDecl *RD);

//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true,
                                   bool AllowNonTemplateFunctions = false);

/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, SourceLocation ConceptNameLoc, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = 
nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, 
SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. 
/// \param ConstraintsNotSatisfied If provided, and an error occurred,
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. 
/// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. 
enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression, UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. 
/// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. 
/// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. 
/// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. 
TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-depnedent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// brief A function argument from which we performed template argument // deduction for a call. 
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo 
*TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); 
ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). 
TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. 
This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. 
SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. 
/// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callbacks is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. 
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. 
void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. 
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. 
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  // Saved so the destructor can restore the previous setting.
  bool PrevDisableTypoCorrection;

public:
  // Entering the scope suppresses typo correction (and, via the SFINAE
  // trap constructed with AccessCheckingSFINAE=true, immediate diagnostics).
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  // Leaving the scope restores the previous typo-correction setting.
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

// A small set of source locations; used below to remember where typo
// correction has already been attempted for a given identifier.
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
// Cache used by the thread-safety analysis (threadSafety::BeforeSet);
// presumably shared across function analyses — see the analysis code.
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

// RAII scope that, when Enabled, moves the global pending-instantiation and
// vtable-use queues aside on entry so work queued inside the scope can be
// performed (via perform()) and the original queues restored on exit.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    // Stash the outer queues; the scope starts with empty ones.
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  // Flush the work queued within this scope (no-op when disabled).
  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. 
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. 
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, 
Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl 
*BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
/// The kind of Objective-C \@-container currently being processed, or
/// OCK_None when outside any container.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};

/// Determine the kind of the innermost Objective-C container we are in.
ObjCContainerKind getObjCContainerKind() const;

/// Called on each parsed Objective-C type parameter (with its variance
/// and optional ':' type bound).
DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance,
                              SourceLocation varianceLoc, unsigned index,
                              IdentifierInfo *paramName,
                              SourceLocation paramLoc,
                              SourceLocation colonLoc, ParsedType typeBound);

/// Called on a complete '<...>' type-parameter list of an Objective-C
/// class.
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                          ArrayRef<Decl *> typeParams,
                                          SourceLocation rAngleLoc);

/// Leave the scope introduced by an Objective-C type-parameter list.
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

/// Called on a parsed \@interface, with optional superclass, superclass
/// type arguments, and protocol-reference list.
Decl *ActOnStartClassInterface(
    Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *SuperName, SourceLocation SuperLoc,
    ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

/// Attach and check the superclass of an already-started \@interface.
void ActOnSuperClassOfClassInterface(
    Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl,
    IdentifierInfo *ClassName, SourceLocation ClassLoc,
    IdentifierInfo *SuperName, SourceLocation SuperLoc,
    ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange);

/// Collect the protocols referenced through a typedef'd superclass name.
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                             SmallVectorImpl<SourceLocation> &ProtocolLocs,
                             IdentifierInfo *SuperName,
                             SourceLocation SuperLoc);

/// Called on \@compatibility_alias AliasName ClassName.
Decl *ActOnCompatibilityAlias(SourceLocation AtCompatibilityAliasLoc,
                              IdentifierInfo *AliasName,
                              SourceLocation AliasLocation,
                              IdentifierInfo *ClassName,
                              SourceLocation ClassLocation);

/// Diagnose a protocol whose protocol-reference list (transitively)
/// includes itself.
bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);

/// Called on a parsed \@protocol definition.
Decl *ActOnStartProtocolInterface(
    SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
    SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
    unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
    SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

/// Called on a parsed \@interface Class (Category) declaration.
Decl *ActOnStartCategoryInterface(
    SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

/// Called on a parsed \@implementation of a class.
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    IdentifierInfo *SuperClassname,
                                    SourceLocation SuperClassLoc,
                                    const ParsedAttributesView &AttrList);

/// Called on a parsed \@implementation of a category.
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc,
                                       const ParsedAttributesView &AttrList);

/// Called at the end of an Objective-C implementation body.
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

/// Called on a forward \@class declaration (one or more class names).
DeclGroupPtrTy ActOnForwardClassDeclaration(
    SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs,
    ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts);

/// Called on a forward \@protocol declaration.
/// NOTE(review): parameter name 'AtProtoclLoc' appears misspelled
/// ('AtProtocolLoc'?); kept as-is since renaming is an interface change.
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                ArrayRef<IdentifierLocPair> IdentList,
                                const ParsedAttributesView &attrList);

/// Look up the protocols named in \p ProtocolId, appending the resulting
/// declarations to \p Protocols.
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                             ArrayRef<IdentifierLocPair> ProtocolId,
                             SmallVectorImpl<Decl *> &Protocols);

/// Diagnose a '<...>' list that mixes protocol qualifiers with type
/// arguments.
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                  SourceLocation ProtocolLoc,
                                  IdentifierInfo *TypeArgId,
                                  SourceLocation TypeArgLoc,
                                  bool SelectProtocolFirst = false);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. 
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);

/// Diagnose a mismatch between \p Property and the \p SuperProperty it
/// redeclares.
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                              ObjCPropertyDecl *SuperProperty,
                              const IdentifierInfo *Name,
                              bool OverridingProtocolProperty);

/// Diagnose methods of class extension \p CAT that duplicate methods of
/// interface \p ID.
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                      ObjCInterfaceDecl *ID);

/// Called on \@end, with the methods and top-level variables collected in
/// the container body.
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                 ArrayRef<Decl *> allMethods = None,
                 ArrayRef<DeclGroupPtrTy> allTUVars = None);

/// Called on a parsed \@property declaration.
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc,
                    FieldDeclarator &FD, ObjCDeclSpec &ODS,
                    Selector GetterSel, Selector SetterSel,
                    tok::ObjCKeywordKind MethodImplKind,
                    DeclContext *lexicalDC = nullptr);

/// Called on \@synthesize/\@dynamic for a property implementation.
Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc,
                            SourceLocation PropertyLoc, bool ImplKind,
                            IdentifierInfo *PropertyId,
                            IdentifierInfo *PropertyIvar,
                            SourceLocation PropertyIvarLoc,
                            ObjCPropertyQueryKind QueryKind);

/// Classification of Objective-C methods with special retain/release
/// semantics (alloc/new/copy/init families).
enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};

/// Parser-produced description of one selector argument of an Objective-C
/// method declaration.
struct ObjCArgInfo {
  IdentifierInfo *Name;
  SourceLocation NameLoc;
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;

  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};

/// Called on a parsed Objective-C method declaration (inside an
/// \@interface/\@implementation or one of their category forms).
Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc, // location of the + or -.
    SourceLocation EndLoc,   // location of the ; or {.
    tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
    ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
    unsigned CNumArgs, // c-style args
    const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
    bool isVariadic, bool MethodDefinition);

/// Look up the method \p Sel in a protocol-qualified object pointer type.
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                            const ObjCObjectPointerType *OPT,
                                            bool IsInstance);

/// Look up the method \p Sel in the given Objective-C object type.
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                         bool IsInstance);

/// Check an Objective-C method declaration against the ARC rules.
bool CheckARCMethodDecl(ObjCMethodDecl *method);

/// Infer an ARC ownership qualifier for \p decl where none was written.
bool inferObjCARCLifetime(ValueDecl *decl);

/// Build a property reference expression ('base.prop') on an Objective-C
/// object pointer.
ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                                     Expr *BaseExpr, SourceLocation OpLoc,
                                     DeclarationName MemberName,
                                     SourceLocation MemberLoc,
                                     SourceLocation SuperLoc,
                                     QualType SuperType, bool Super);

/// Build a class property reference ('ClassName.prop').
ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                                     IdentifierInfo &propertyName,
                                     SourceLocation receiverNameLoc,
                                     SourceLocation propertyNameLoc);

/// Try to capture an implicit 'self' for use at \p Loc; returns the method
/// whose 'self' was captured (per signature; semantics to confirm in impl).
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
  /// The message is sent to 'super'.
  ObjCSuperMessage,
  /// The message is an instance message.
  ObjCInstanceMessage,
  /// The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};

/// Classify a bracketed message send that begins with identifier \p Name.
ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name,
                                   SourceLocation NameLoc, bool IsSuper,
                                   bool HasTrailingDot,
                                   ParsedType &ReceiverType);

/// Called on a parsed message send to 'super'.
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args);

/// Build a class message send ([ClassName sel...]).
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                             QualType ReceiverType, SourceLocation SuperLoc,
                             Selector Sel, ObjCMethodDecl *Method,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args,
                             bool isImplicit = false);

/// Build an implicit (compiler-generated) class message send.
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                     bool isSuperReceiver, SourceLocation Loc,
                                     Selector Sel, ObjCMethodDecl *Method,
                                     MultiExprArg Args);

/// Called on a parsed class message send.
ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args);

/// Build an instance message send ([expr sel...]).
ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType,
                                SourceLocation SuperLoc, Selector Sel,
                                ObjCMethodDecl *Method,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc, MultiExprArg Args,
                                bool isImplicit = false);

/// Build an implicit (compiler-generated) instance message send.
ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType,
                                        SourceLocation Loc, Selector Sel,
                                        ObjCMethodDecl *Method,
                                        MultiExprArg Args);

/// Called on a parsed instance message send.
ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc, MultiExprArg Args);

/// Build an ARC bridged cast (__bridge, __bridge_transfer, ...).
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                TypeSourceInfo *TSInfo, Expr *SubExpr);

/// Called on a parsed ARC bridged cast.
ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                ParsedType Type, SourceLocation RParenLoc,
                                Expr *SubExpr);

/// Check a cast between toll-free-bridged CF/ObjC types.
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). 
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. 
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. 
void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. 
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. 
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). 
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. 
/// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckForDelayedContext = true); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckCaller = true); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. 
void finalizeOpenMPDelayedAnalysis(); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Marks all the functions that might be required for the currently active /// OpenMP context. void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse); public: /// Struct to store the context selectors info for declare variant directive. struct OpenMPDeclareVariantCtsSelectorData { OMPDeclareVariantAttr::CtxSelectorSetType CtxSet = OMPDeclareVariantAttr::CtxSetUnknown; OMPDeclareVariantAttr::CtxSelectorType Ctx = OMPDeclareVariantAttr::CtxUnknown; MutableArrayRef<StringRef> ImplVendors; ExprResult CtxScore; explicit OpenMPDeclareVariantCtsSelectorData() = default; explicit OpenMPDeclareVariantCtsSelectorData( OMPDeclareVariantAttr::CtxSelectorSetType CtxSet, OMPDeclareVariantAttr::CtxSelectorType Ctx, MutableArrayRef<StringRef> ImplVendors, ExprResult CtxScore) : CtxSet(CtxSet), Ctx(Ctx), ImplVendors(ImplVendors), CtxScore(CtxScore) {} }; /// Checks if the variant/multiversion functions are compatible. 
bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. 
  void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

  /// Check if the specified variable is captured by 'target' directive.
  /// \param Level Relative level of nested OpenMP construct for that the check
  /// is performed.
  bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;

  /// Perform the implicit conversion of \p Op to an integer type as mandated
  /// by OpenMP. NOTE(review): presumably used for clause/loop expressions
  /// that are required to have integer type -- confirm at call sites.
  ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                    Expr *Op);

  /// Called on start of new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                           const DeclarationNameInfo &DirName, Scope *CurScope,
                           SourceLocation Loc);
  /// Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);
  /// End analysis of clauses.
  void EndOpenMPClause();
  /// Called on end of data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// Check if the current region is an OpenMP loop region and if it is,
  /// mark loop control variable, used in \p Init for loop initialization, as
  /// private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  // OpenMP directives and clauses.
  /// Called on correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id,
                                     OpenMPDirectiveKind Kind);
  /// Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                                   ArrayRef<Expr *> VarList);
  /// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                  ArrayRef<Expr *> VarList);
  /// Called on well-formed '#pragma omp allocate'.
  DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
                                              ArrayRef<Expr *> VarList,
                                              ArrayRef<OMPClause *> Clauses,
                                              DeclContext *Owner = nullptr);
  /// Called on well-formed '#pragma omp requires'.
  DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
                                              ArrayRef<OMPClause *> ClauseList);
  /// Check restrictions on Requires directive
  OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
                                        ArrayRef<OMPClause *> Clauses);
  /// Check if the specified type is allowed to be used in 'omp declare
  /// reduction' construct.
  QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                           TypeResult ParsedType);
  /// Called on start of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name,
      ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
      AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
  /// Initialize declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
  /// Finish current declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
  /// Initialize declare reduction construct initializer.
  /// \return omp_priv variable.
  VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
  /// Finish current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                                 VarDecl *OmpPrivParm);
  /// Called at the end of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
      Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

  /// Check variable declaration in 'omp declare mapper' construct.
  TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
  /// Check if the specified type is allowed to be used in 'omp declare
  /// mapper' construct.
  QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
                                        TypeResult ParsedType);
  /// Called on start of '#pragma omp declare mapper'.
  OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
      SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
      Decl *PrevDeclInScope = nullptr);
  /// Build the mapper variable of '#pragma omp declare mapper'.
  void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
                                                Scope *S, QualType MapperType,
                                                SourceLocation StartLoc,
                                                DeclarationName VN);
  /// Called at the end of '#pragma omp declare mapper'.
  DeclGroupPtrTy
  ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
                                       ArrayRef<OMPClause *> ClauseList);

  /// Called on the start of target region i.e. '#pragma omp declare target'.
  bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
  /// Called at the end of target region i.e. '#pragma omp end declare target'.
  void ActOnFinishOpenMPDeclareTargetDirective();
  /// Searches for the provided declaration name for OpenMP declare target
  /// directive.
  NamedDecl *
  lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                const DeclarationNameInfo &Id,
                                NamedDeclSetType &SameDirectiveDecls);
  /// Called on correct id-expression from the '#pragma omp declare target'.
  void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
                                    OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                    OMPDeclareTargetDeclAttr::DevTypeTy DT);
  /// Check declaration inside target region.
  void
  checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                   SourceLocation IdLoc = SourceLocation());
  /// Return true inside OpenMP declare target region.
  bool isInOpenMPDeclareTargetContext() const {
    return DeclareTargetNestingLevel > 0;
  }
  /// Return true inside OpenMP target region.
  bool isInOpenMPTargetExecutionDirective() const;

  /// Return the number of captured regions created for an OpenMP directive.
  static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

  /// Initialization of captured region for OpenMP region.
  void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
  /// End of OpenMP region.
  ///
  /// \param S Statement associated with the current OpenMP region.
  /// \param Clauses List of clauses for the current OpenMP region.
  ///
  /// \returns Statement for finished OpenMP region.
  StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
  /// Generic entry point for an executable OpenMP directive \p Kind, called
  /// after parsing of its clauses and associated statement.
  StmtResult ActOnOpenMPExecutableDirective(
      OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
      OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
      Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp parallel' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  using VarsWithInheritedDSAType =
      llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
  /// Called on well-formed '\#pragma omp simd' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                           SourceLocation StartLoc, SourceLocation EndLoc,
                           VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp for' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                          SourceLocation StartLoc, SourceLocation EndLoc,
                          VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp for simd' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                              SourceLocation StartLoc, SourceLocation EndLoc,
                              VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp sections' after parsing
  /// of the associated statement.
  // One ActOn*Directive handler per executable OpenMP directive; all take the
  // directive's clauses, the associated statement (where the directive has
  // one), and the directive's source range.
  StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp section' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp single' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp master' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp critical' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
                                          ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp parallel for' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp parallel for simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp parallel sections' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                                  Stmt *AStmt,
                                                  SourceLocation StartLoc,
                                                  SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp task' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp taskyield'.
  StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp barrier'.
  StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp taskwait'.
  StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp taskgroup'.
  StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
                                           Stmt *AStmt, SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp flush'.
  StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp ordered' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
                                         Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp atomic' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp target' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp target data' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
                                            Stmt *AStmt,
                                            SourceLocation StartLoc,
                                            SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp target enter data' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
                                                 SourceLocation StartLoc,
                                                 SourceLocation EndLoc,
                                                 Stmt *AStmt);
  /// Called on well-formed '\#pragma omp target exit data' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc,
                                                Stmt *AStmt);
  /// Called on well-formed '\#pragma omp target parallel' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
                                                Stmt *AStmt,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp target parallel for' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp teams' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                       Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp cancellation point'.
  StmtResult
  ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);
  /// Called on well-formed '\#pragma omp cancel'.
  StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
                                        SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);
  /// Called on well-formed '\#pragma omp taskloop' after parsing of the
  /// associated statement.
  StmtResult
  ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                               SourceLocation StartLoc, SourceLocation EndLoc,
                               VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp taskloop simd' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTaskLoopSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp master taskloop' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPMasterTaskLoopDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp parallel master taskloop' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp distribute' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                                 SourceLocation StartLoc, SourceLocation EndLoc,
                                 VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target update'.
  StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
                                              SourceLocation StartLoc,
                                              SourceLocation EndLoc,
                                              Stmt *AStmt);
  /// Called on well-formed '\#pragma omp distribute parallel for' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp distribute parallel for simd'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp distribute simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target parallel for simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target simd' after parsing of
  /// the associated statement.
  StmtResult
  ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                                 SourceLocation StartLoc, SourceLocation EndLoc,
                                 VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp teams distribute' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp teams distribute simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp teams distribute parallel for simd'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp teams distribute parallel for'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target teams' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                             Stmt *AStmt,
                                             SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp target teams distribute' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target teams distribute parallel for'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target teams distribute parallel for
  /// simd' after parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target teams distribute simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Checks correctness of linear modifiers.
  bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                                 SourceLocation LinLoc);
  /// Checks that the specified declaration matches requirements for the linear
  /// decls.
  bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                             OpenMPLinearClauseKind LinKind, QualType Type);

  /// Called on well-formed '\#pragma omp declare simd' after parsing of
  /// the associated method/function.
  DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
      DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
      Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
      ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
      ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

  /// Checks '\#pragma omp declare variant' variant function and original
  /// functions after parsing of the associated method/function.
  /// \param DG Function declaration to which declare variant directive is
  /// applied to.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p DG.
  /// \returns None, if the function/variant function are not compatible with
  /// the pragma, pair of original function/variant ref expression otherwise.
  Optional<std::pair<FunctionDecl *, Expr *>>
  checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
                                    SourceRange SR);

  /// Called on well-formed '\#pragma omp declare variant' after parsing of
  /// the associated method/function.
  /// \param FD Function declaration to which declare variant directive is
  /// applied to.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p DG.
/// \param Data Set of context-specific data for the specified context /// selector. void ActOnOpenMPDeclareVariantDirective( FunctionDecl *FD, Expr *VariantRef, SourceRange SR, const Sema::OpenMPDeclareVariantCtsSelectorData &Data); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. 
  OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'num_tasks' clause.
  OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'hint' clause.
  OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

  /// Generic handler for OpenMP clauses that carry a single enumeration-style
  /// \p Argument; dispatches on \p Kind to the clause-specific handlers below.
  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'default' clause.
  OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                      SourceLocation KindLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'proc_bind' clause.
  OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                       SourceLocation KindLoc,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// Generic handler for OpenMP clauses that combine a single expression with
  /// additional enumeration arguments (e.g. the 'schedule' clause below).
  OMPClause *ActOnOpenMPSingleExprWithArgClause(
      OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
      SourceLocation EndLoc);
  /// Called on well-formed 'schedule' clause.
  OMPClause *ActOnOpenMPScheduleClause(
      OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
      OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

  /// Generic handler for OpenMP clauses that take no argument.
  OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                               SourceLocation EndLoc);
  /// Called on well-formed 'nowait' clause.
  OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'untied' clause.
  OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'mergeable' clause.
  OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'read' clause.
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'threads' clause.
  OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'nogroup' clause.
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'unified_address' clause.
  OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'unified_shared_memory' clause.
  OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                  SourceLocation EndLoc);
  /// Called on well-formed 'reverse_offload' clause.
  OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'dynamic_allocators' clause.
  OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
  /// Called on well-formed 'atomic_default_mem_order' clause.
  OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
      OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
      SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);

  /// Generic handler for OpenMP clauses that take a list of variables;
  /// dispatches on \p Kind to the clause-specific handlers below. The extra
  /// parameters carry the clause-kind-specific data (reduction/mapper id,
  /// depend kind, linear kind, map modifiers, ...), only some of which are
  /// meaningful for any given \p Kind.
  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
      CXXScopeSpec &ReductionOrMapperIdScopeSpec,
      DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
      OpenMPLinearClauseKind LinKind,
      ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
      ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
      bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
  /// Called on well-formed 'allocate' clause.
  OMPClause *
  ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                            SourceLocation StartLoc, SourceLocation ColonLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc);
  /// Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'reduction' clause.
  OMPClause *ActOnOpenMPReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'task_reduction' clause.
  OMPClause *ActOnOpenMPTaskReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'in_reduction' clause.
  OMPClause *ActOnOpenMPInReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'linear' clause.
  OMPClause *
  ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
                          SourceLocation StartLoc, SourceLocation LParenLoc,
                          OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
                          SourceLocation ColonLoc, SourceLocation EndLoc);
  /// Called on well-formed 'aligned' clause.
  OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation ColonLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'copyin' clause.
  OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'copyprivate' clause.
  OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'flush' pseudo clause.
  OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'depend' clause.
  OMPClause *
  ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
                          SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
                          SourceLocation StartLoc, SourceLocation LParenLoc,
                          SourceLocation EndLoc);
  /// Called on well-formed 'device' clause.
  OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'map' clause.
  OMPClause *
  ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
                       ArrayRef<SourceLocation> MapTypeModifiersLoc,
                       CXXScopeSpec &MapperIdScopeSpec,
                       DeclarationNameInfo &MapperId,
                       OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
                       SourceLocation MapLoc, SourceLocation ColonLoc,
                       ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                       ArrayRef<Expr *> UnresolvedMappers = llvm::None);
  /// Called on well-formed 'num_teams' clause.
  OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'thread_limit' clause.
  OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'priority' clause.
  OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'dist_schedule' clause.
  OMPClause *ActOnOpenMPDistScheduleClause(
      OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
      SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
      SourceLocation CommaLoc, SourceLocation EndLoc);
  /// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. 
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. 
In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. 
The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. 
/// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. 
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. 
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion, bool &FunctionConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. 
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. 
void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// The result of semantic analysis of a condition expression (as used by
/// 'if', 'while', 'for', 'switch', ...): the optional condition variable,
/// the converted condition expression, and -- for constexpr conditions --
/// the compile-time boolean value when it is known.
class ConditionResult {
  // Condition variable declared inside the condition, e.g.
  // 'if (int x = f())'; null when the condition is a plain expression.
  Decl *ConditionVar;
  // The (converted) condition expression itself.
  FullExprArg Condition;
  // True if the condition was ill-formed.
  bool Invalid;
  // Whether KnownValue is meaningful: only for constexpr conditions that
  // are non-null and not value-dependent (see the private constructor).
  bool HasKnownValue;
  // Compile-time value of the condition; meaningful only when
  // HasKnownValue is true.
  bool KnownValue;

  // Only Sema may build non-trivial results.
  friend class Sema;
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  /// Default-constructed results are valid but empty.
  ConditionResult() : ConditionResult(false) {}

  bool isInvalid() const { return Invalid; }

  /// Returns the condition variable (may be null) together with the
  /// condition expression.
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }

  /// Returns the compile-time value of the condition when known (that is,
  /// for a non-value-dependent constexpr condition); None otherwise.
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};

/// Convenience factory for an invalid ConditionResult.
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
                               ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc, ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. 
class VerifyICEDiagnoser {
public:
  // When true, all diagnostics from this diagnoser are suppressed.
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Emit the diagnostic for an expression that is not an integer
  /// constant expression at all.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  /// Emit the diagnostic for an expression that is not a formal ICE but
  /// could nevertheless be folded to a constant.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// (NOTE(review): the "returns false" wording predates the ExprResult
/// return type; presumably the checked expression is returned on success
/// and ExprError() on failure -- confirm against the implementation.)
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// (NOTE(review): same stale "returns false" wording as above; this
/// returns ExprResult.)
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                          bool *ZeroWidth = nullptr);

private:
  // Nesting depth of pragmas forcing __host__ __device__; see
  // Push/PopForceCUDAHostDevice below.
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. 
/// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. 
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Indicate that this function (and thus everything it transtively calls) /// will be codegen'ed, and emit any deferred diagnostics on this function and /// its (transitive) callees. void markKnownEmitted( Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee, SourceLocation OrigLoc, const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. 
/// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. 
CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. 
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). 
The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. 
PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void 
CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: 
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool 
CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, 
SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. 
IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. 
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. 
struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. 
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // True iff a context was pushed and must be popped by the destructor.
  bool Entered = true;

public:
  /// Pushes NewContext (unless ShouldEnter is false, in which case the
  /// destructor does nothing either).
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  /// Pushes NewContext, reusing the current lambda context declaration.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  /// Entering an init-list only pushes a context (UnevaluatedList) when we
  /// are currently in an unevaluated C++11 context; otherwise it is a no-op.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
Decl *D;
};

} // end namespace clang

namespace llvm {

// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  // Empty/tombstone keys delegate to the FunctionDecl's DenseMapInfo, paired
  // with a default (invalid) SourceLocation.
  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};

} // namespace llvm

#endif
parallel.c
#include "parallel.h"

#include <stdlib.h>
#include <math.h>
#include <assert.h>

#if TCI_USE_OPENMP_THREADS

/*
 * Run func on nthread threads using an OpenMP parallel region.
 * Each thread gets its own tci_comm bound to the shared context; the explicit
 * barrier ensures all threads finish func before any comm is destroyed.
 * Returns 0 on success or the tci_context_init error code.
 */
int tci_parallelize(tci_thread_func func, void* payload,
                    unsigned nthread, unsigned arity)
{
    if (nthread <= 1)
    {
        /* Degenerate case: run inline on the caller with the
         * single-thread communicator. */
        func(tci_single, payload);
        return 0;
    }

    tci_context* context;
    int ret = tci_context_init(&context, nthread, arity);
    if (ret != 0) return ret;

    #pragma omp parallel num_threads(nthread)
    {
        tci_comm comm;
        tci_comm_init(&comm, context, nthread,
                      (unsigned)omp_get_thread_num(), 1, 0);
        func(&comm, payload);
        /* Barrier before destroy: no thread may tear down its comm while
         * others could still be communicating through the shared context. */
        #pragma omp barrier
        tci_comm_destroy(&comm);
    }

    return 0;
}

#elif TCI_USE_OMPTASK_THREADS

/*
 * Task-based variant: a single task runs func with the single-thread
 * communicator inside a parallel region (parallelism is expected to come
 * from tasks spawned by func itself).
 */
int tci_parallelize(tci_thread_func func, void* payload,
                    unsigned nthread, unsigned arity)
{
    #pragma omp parallel num_threads(nthread)
    {
        #pragma omp single
        func(tci_single, payload);
    }

    return 0;
}

#elif TCI_USE_PTHREADS_THREADS

/* Per-thread bundle passed through pthread_create's void* argument. */
typedef struct
{
    tci_thread_func func;
    void* payload;
    tci_context* context;
    unsigned nthread, tid;
} tci_thread_data;

/* Thread entry point: build this thread's communicator, run func, clean up. */
void* tci_run_thread(void* raw_data)
{
    tci_thread_data* data = (tci_thread_data*)raw_data;
    tci_comm comm;
    tci_comm_init(&comm, data->context, data->nthread, data->tid, 1, 0);
    data->func(&comm, data->payload);
    tci_comm_destroy(&comm);
    return NULL;
}

/*
 * pthreads variant: the caller becomes thread 0 and nthread-1 workers are
 * spawned. On a pthread_create failure, already-started workers are joined
 * and the error code is returned.
 * NOTE(review): on that error path comm0/context are not torn down here —
 * presumably cleanup is handled elsewhere; verify against the tci_comm API.
 */
int tci_parallelize(tci_thread_func func, void* payload,
                    unsigned nthread, unsigned arity)
{
    if (nthread <= 1)
    {
        func(tci_single, payload);
        return 0;
    }

    tci_context* context;
    int ret = tci_context_init(&context, nthread, arity);
    if (ret != 0) return ret;

    pthread_t threads[nthread];
    tci_thread_data data[nthread];

    tci_comm comm0;
    tci_comm_init(&comm0, context, nthread, 0, 1, 0);

    for (unsigned i = 1;i < nthread;i++)
    {
        data[i].func = func;
        data[i].payload = payload;
        data[i].context = context;
        data[i].nthread = nthread;
        data[i].tid = i;
        int ret = pthread_create(&threads[i], NULL, tci_run_thread, &data[i]);
        if (ret != 0)
        {
            for (unsigned j = 1;j < i;j++) pthread_join(threads[j], NULL);
            return ret;
        }
    }

    /* The calling thread does its share of the work as tid 0. */
    func(&comm0, payload);

    for (unsigned i = 1;i < nthread;i++)
    {
        pthread_join(threads[i], NULL);
    }

    return tci_comm_destroy(&comm0);
}

#elif TCI_USE_WINDOWS_THREADS

//TODO

/* Per-thread bundle for the Win32 thread entry point. */
typedef struct
{
    tci_thread_func func;
    void* payload;
    tci_context* context;
    unsigned nthread, tid;
} tci_thread_data;

DWORD WINAPI tci_run_thread(void* raw_data)
{
    tci_thread_data* data = (tci_thread_data*)raw_data;
    tci_comm comm;
    tci_comm_init(&comm, data->context, data->nthread, data->tid, 1, 0);
    data->func(&comm, data->payload);
    tci_comm_destroy(&comm);
    return NULL;
}

/*
 * Win32 variant: spawn nthread-1 workers (tids 1..nthread-1), run tid 0 on
 * the caller, then wait for all workers. On CreateThread failure the threads
 * started so far are waited on and -1 is returned.
 */
int tci_parallelize(tci_thread_func func, void* payload,
                    unsigned nthread, unsigned arity)
{
    if (nthread <= 1)
    {
        func(tci_single, payload);
        return 0;
    }

    tci_context* context;
    int ret = tci_context_init(&context, nthread, arity);
    if (ret != 0) return ret;

    HANDLE threads[nthread-1];
    tci_thread_data data[nthread-1];

    for (unsigned i = 0;i < nthread-1;i++)
    {
        data[i].func = func;
        data[i].payload = payload;
        data[i].context = context;
        data[i].nthread = nthread;
        data[i].tid = i+1;
        threads[i] = CreateThread(NULL, 0, tci_run_thread, &data[i], 0, NULL);
        if (!threads[i])
        {
            WaitForMultipleObjects(i, threads, TRUE, INFINITE);
            return -1;
        }
    }

    tci_comm comm0;
    tci_comm_init(&comm0, context, nthread, 0, 1, 0);

    func(&comm0, payload);

    WaitForMultipleObjects(nthread-1, threads, TRUE, INFINITE);

    return tci_comm_destroy(&comm0);
}

#else // TCI_USE_TBB_THREADS, TCI_USE_DISPATCH_THREADS,
      // TCI_USE_PPL_THREADS, single threaded

/* Fallback: run func once on the caller with a stack-built communicator. */
int tci_parallelize(tci_thread_func func, void* payload,
                    unsigned nthread, unsigned arity)
{
    tci_comm comm = {NULL, 1, 0, nthread, 0};
    func(&comm, payload);
    return 0;
}

#endif

/*
 * Initialize an incremental prime-factorization state for n.
 * Factors are then extracted one at a time by tci_next_prime_factor.
 */
void tci_prime_factorization(unsigned n, tci_prime_factors* factors)
{
    factors->n = n;
    // all this is necessary to appease the warning gods
    factors->sqrt_n = (unsigned)lrint(floor(sqrt(n)));
    factors->f = 2;
}

/*
 * Return the next prime factor of the remaining n, or 1 when exhausted.
 * Small primes 2/3/5/7 are special-cased; above that, every integer
 * candidate f is tried (composites can never divide since their prime
 * factors were removed first).
 */
unsigned tci_next_prime_factor(tci_prime_factors* factors)
{
    for (;factors->f <= factors->sqrt_n;)
    {
        if (factors->f == 2)
        {
            if (factors->n%2 == 0)
            {
                factors->n /= 2;
                return 2;
            }
            factors->f = 3;
        }
        else if (factors->f == 3)
        {
            if (factors->n%3 == 0)
            {
                factors->n /= 3;
                return 3;
            }
            factors->f = 5;
        }
        else if (factors->f == 5)
        {
            if (factors->n%5 == 0)
            {
                factors->n /= 5;
                return 5;
            }
            factors->f = 7;
        }
        else if (factors->f == 7)
        {
            if (factors->n%7 == 0)
            {
                factors->n /= 7;
                return 7;
            }
            factors->f = 11;
        }
        else
        {
            if (factors->n%factors->f == 0)
            {
                factors->n /= factors->f;
                return factors->f;
            }
            factors->f++;
        }
    }

    /* Whatever is left above sqrt(original n) is itself prime; emit it once. */
    if (factors->n != 1)
    {
        unsigned tmp = factors->n;
        factors->n = 1;
        return tmp;
    }

    return 1;
}

#define TCI_USE_EXPENSIVE_PARTITION 0

#if TCI_USE_EXPENSIVE_PARTITION

/*
 * Assumes base > 0 and power >= 0.
 */
static int ipow(int base, int power)
{
    /* Binary exponentiation: walk the set bits of power. */
    int p = 1;

    for (int mask = 0x1;mask <= power;mask <<= 1)
    {
        if (power&mask) p *= base;
        base *= base;
    }

    return p;
}

#endif

/*
 * Split nthread into nt1*nt2 == nthread, balancing the split against the
 * relative work amounts work1/work2 and the caps max1/max2.
 * The cheap (default) path greedily assigns each prime factor of nthread to
 * whichever side currently has more remaining work; the expensive path
 * (disabled above) exhaustively searches factor assignments.
 */
void tci_partition_2x2(unsigned nthread,
                       uint64_t work1, unsigned max1,
                       uint64_t work2, unsigned max2,
                       unsigned* nt1, unsigned* nt2)
{
    /* Clamp the caps into [1, nthread]. */
    max1 = TCI_MIN(TCI_MAX(max1, 1), nthread);
    max2 = TCI_MIN(TCI_MAX(max2, 1), nthread);

    if (nthread < 4)
    {
        /* Too few threads to split both ways: give everything to the side
         * with the larger cap (ties broken by work). */
        if (max2 < max1 || (max1 == max2 && work1 >= work2))
        {
            *nt1 = nthread;
            *nt2 = 1;
        }
        else
        {
            *nt1 = 1;
            *nt2 = nthread;
        }
        return;
    }

    tci_prime_factors factors;
    tci_prime_factorization(nthread, &factors);

#if !TCI_USE_EXPENSIVE_PARTITION

    unsigned num1 = 1;
    unsigned num2 = 1;

    unsigned f;
    while ((f = tci_next_prime_factor(&factors)) > 1)
    {
        /* Give f to side 2 when it has at least as much work left (or side 1
         * is capped) and side 2 can still accept it; otherwise side 1. */
        if ((work2 >= work1 || num1*f > max1) && num2*f <= max2)
        {
            work2 /= f;
            num2 *= f;
        }
        else
        {
            work1 /= f;
            num1 *= f;
        }
    }

    *nt1 = num1;
    *nt2 = num2;

#else

    /*
     * Eight distinct prime factors handles all numbers up to 223092870
     */
    int fact[8];
    int mult[8];
    int nfact = 1;

    fact[0] = tci_next_prime_factor(&factors);
    mult[0] = 1;

    /* Collect distinct factors and multiplicities (factors arrive sorted). */
    int f;
    while ((f = tci_next_prime_factor(&factors)) > 1)
    {
        if (f == fact[nfact-1])
        {
            mult[nfact-1]++;
        }
        else
        {
            nfact++;
            fact[nfact-1] = f;
            mult[nfact-1] = 1;
        }
    }

    /* Enumerate all divisor splits (mixed-radix counter in ntake) and keep
     * the one minimizing |x*work2 - y*work1|, i.e. the most balanced. */
    int ntake[8] = {0};
    int64_t min_diff = INT64_MAX;
    bool done = false;
    while (!done)
    {
        int x = 1;
        int y = 1;
        for (int i = 0;i < nfact;i++)
        {
            x *= ipow(fact[i], ntake[i]);
            y *= ipow(fact[i], mult[i]-ntake[i]);
        }

        int64_t diff = llabs(x*work2 - y*work1);
        if (diff < min_diff)
        {
            min_diff = diff;
            *nt1 = x;
            *nt2 = y;
        }

        /* Increment the mixed-radix counter; overflow past the last digit
         * terminates the enumeration. */
        for (int i = 0;i < nfact;i++)
        {
            if (++ntake[i] > mult[i])
            {
                ntake[i] = 0;
                if (i == nfact-1) done = true;
                else continue;
            }
            break;
        }
    }

#endif

    assert((*nt1)*(*nt2) == nthread);
}
quicksort.c
#include <stdio.h> #include <stdlib.h> #include "ctimer.h" #include <omp.h> void quicksort(int *array, int start, int end); int divide(int *array, int start, int end); void main() { const int n = 9; const int p = 4; int a[] = { 7, 12, 1, -2, 0, 15, 4, 11, 9}; int i; printf("\n\nVector desordenado: "); for(i = 0; i < n; ++i) printf(" %d ", a[i]); printf("\n"); #pragma omp parallel #pragma omp single quicksort( a, 0, n-1); printf("\n\nVector ordenado: "); for(i = 0; i < n; ++i) printf(" %d ", a[i]); printf("\n"); } // Función para dividir el array y hacer los intercambios int divide(int *array, int start, int end) { int left; int right; int pivot; int temp; pivot = array[start]; left = start; right = end; // Mientras no se cruzen los índices while (left < right) { while (array[right] > pivot) { right--; } while ((left < right) && (array[left] <= pivot)) { left++; } // Si todavía no se cruzan los indices seguimos intercambiando if (left < right) { temp = array[left]; array[left] = array[right]; array[right] = temp; } } // Los índices ya se han cruzado, ponemos el pivot en el lugar que le corresponde temp = array[right]; array[right] = array[start]; array[start] = temp; // La nueva posición del pivot return right; } // Función recursiva para hacer el ordenamiento void quicksort(int *array, int start, int end) { int pivot; if (start < end) { pivot = divide(array, start, end); // Ordeno la lista de los menores #pragma omp task quicksort(array, start, pivot - 1); // Ordeno la lista de los mayores #pragma omp task quicksort(array, pivot + 1, end); } }
DRB097-target-teams-distribute-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>

#define min(x, y) (((x) < (y)) ? (x) : (y))

/* use of omp target + teams + distribute + parallel for */
/*
 * DataRaceBench kernel DRB097 ("no race" variant): computes the dot product
 * of a and b twice — once with a blocked, nested parallel-for reduction and
 * once with a flat reference reduction — and prints both sums for comparison.
 * NOTE(review): this looks like a host-transformed version of the original
 * target/teams/distribute kernel; the pragma shapes are part of the benchmark
 * and must not be altered.
 */
int main(int argc, char* argv[])
{
  int i, i2;
  int len = 2560;
  double sum =0.0, sum2=0.0;
  double a[len], b[len];

  /*Initialize with some values*/
#pragma omp parallel for private(i )
  for (i=0; i<len; i++)
  {
    a[i]= ((double)i)/2.0;
    b[i]= ((double)i)/3.0;
  }

  /* Blocked reduction: outer loop over 256-element chunks, inner (nested)
   * parallel loop over each chunk; both levels reduce into sum. */
#pragma omp parallel for private(i ,i2 ) reduction(+:sum)
  for (i2=0; i2< len; i2+=256)
#pragma omp parallel for private(i ) reduction(+:sum)
    for (i=i2;i< min(i2+256, len); i++)
      sum += a[i]*b[i];

  /* CPU reference computation */
#pragma omp parallel for private(i ) reduction(+:sum2)
  for (i=0;i< len; i++)
    sum2 += a[i]*b[i];

  printf ("sum=%f sum2=%f\n", sum, sum2);
  return 0;
}
IPB2_fmt_plug.c
/*
 * IPB2_fmt.c (version 4)
 *
 * Invision Power Board 2.x salted MD5 module for Solar Designer's JtR
 * Uses Solar Designer's MD5 implementation.
 * regenrecht at o2.pl, Jan 2006
 *
 * Hashes list should have form of username:$IPB2$salt$hash
 * Values to be taken from IPB database, where:
 * salt = bin2hex(ibf_members_converge.converge_pass_salt)
 * hash = ibf_members_converge.converge_pass_hash
 *
 * The computed hash is md5(md5hex(salt) . md5hex(password)): both inner
 * digests are expanded to lowercase hex before the outer MD5.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_IPB2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_IPB2);
#else

#include <string.h>

#include "arch.h"
#include "misc.h"
#include "md5.h"
#include "johnswap.h"
#include "common.h"
#include "formats.h"
#include "simd-intrinsics.h"

#if defined(_OPENMP)
#include <omp.h>
static unsigned int omp_t = 1;
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 512 // Tuned K8-dual HT
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#endif
#else
#define omp_t 1
#endif

#include "memdbg.h"

#define FORMAT_LABEL "ipb2"
#define FORMAT_NAME "Invision Power Board 2.x"
#define FORMAT_TAG "$IPB2$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME

#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0

#define BINARY_ALIGN 4
#define BINARY_SIZE 16
#define MD5_HEX_SIZE (BINARY_SIZE * 2)
#define SALT_SIZE MD5_HEX_SIZE
#define SALT_ALIGN 4

#define SALT_LENGTH 5
#define PLAINTEXT_LENGTH 31
#define CIPHERTEXT_LENGTH (1 + 4 + 1 + SALT_LENGTH * 2 + 1 + MD5_HEX_SIZE)

#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
/* GETPOS/GETOUTPOS map (byte offset, key index) to the interleaved SIMD
 * buffer layout (64-byte input blocks / 16-byte output digests per lane);
 * the big-endian variants flip the byte within each 32-bit word. */
#if ARCH_LITTLE_ENDIAN==1
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 )
#define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32 )
#else
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 )
#define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32 )
#endif
#else
#define NBKEYS 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

static struct fmt_tests tests[] = {
	{"$IPB2$2e75504633$d891f03a7327639bc632d62a7f302604", "welcome"},
	{"$IPB2$735a213a4e$4f23de7bb115139660db5e953153f28a", "enter"},
	{"$IPB2$5d75343455$de98ba8ca7bb16f43af05e9e4fb8afee", "matrix"},
	{"$IPB2$556c576c39$16d4f29c71b05bd75e61d0254800bfa3", "123456"},
	{NULL}
};

/* Nibble-to-hex lookup tables indexed by the whole byte: one keyed on the
 * high nibble (value >> 4 selects the row) and one on the low nibble. */
static const char itoa16_shr_04[] =
	"0000000000000000"
	"1111111111111111"
	"2222222222222222"
	"3333333333333333"
	"4444444444444444"
	"5555555555555555"
	"6666666666666666"
	"7777777777777777"
	"8888888888888888"
	"9999999999999999"
	"aaaaaaaaaaaaaaaa"
	"bbbbbbbbbbbbbbbb"
	"cccccccccccccccc"
	"dddddddddddddddd"
	"eeeeeeeeeeeeeeee"
	"ffffffffffffffff";

static const char itoa16_and_0f[] =
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef"
	"0123456789abcdef";

/* Plaintext candidates as typed by the user. */
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];

#if SIMD_COEF_32
/* Interleaved SIMD buffers: saved_key holds hex(salt-md5)||hex(key-md5),
 * key_buf the raw keys, crypt_key the digests; dirty flags avoid rebuilding
 * unchanged halves of the input. */
static unsigned char *saved_key;
static unsigned char *key_buf;
static unsigned char *empty_key;
static unsigned char *crypt_key;
static uint32_t *cur_salt;
static int new_salt;
static int new_key;
#else
static char (*saved_key)[2*MD5_HEX_SIZE];
static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)];
#endif

/* Allocate the per-candidate buffers, scaled for OpenMP when enabled. */
static void init(struct fmt_main *self)
{
#if SIMD_COEF_32
	unsigned int i;
#endif
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	// these 2 lines of change, allows the format to work with
	// [Options] FormatBlockScaleTuneMultiplier= without other format change
	omp_t *= self->params.max_keys_per_crypt;
	omp_t /= NBKEYS;
	self->params.max_keys_per_crypt = (omp_t*NBKEYS);
#endif
#if SIMD_COEF_32
	key_buf = mem_calloc_align(self->params.max_keys_per_crypt,
	                           64, MEM_ALIGN_SIMD);
	/* Pre-built "second block": padding bit plus the fixed 64-byte message
	 * length, reused for every candidate in the final MD5 call. */
	empty_key = mem_calloc_align(64 * NBKEYS,
	                             sizeof(empty_key), MEM_ALIGN_SIMD);
	for (i = 0; i < NBKEYS; ++i) {
		empty_key[GETPOS(0, i)] = 0x80;
		((unsigned int*)empty_key)[14*SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32*16*SIMD_COEF_32] = (2 * MD5_HEX_SIZE)<<3;
	}
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             BINARY_SIZE, MEM_ALIGN_SIMD);
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             64, MEM_ALIGN_SIMD);
#else
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt,
	                         sizeof(*saved_plain));
}

/* Release everything allocated by init(). */
static void done(void)
{
	MEM_FREE(saved_plain);
	MEM_FREE(saved_key);
	MEM_FREE(crypt_key);
#if SIMD_COEF_32
	MEM_FREE(empty_key);
	MEM_FREE(key_buf);
#endif
}

/* Syntactic validation of a "$IPB2$<10 hex>$<32 hex>" ciphertext line. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;

	if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH)
		return 0;

	if (ciphertext[16] != '$')
		return 0;

	if (strspn(ciphertext+6, HEXCHARS_lc) != SALT_LENGTH*2)
		return 0;

	if (strspn(ciphertext+17, HEXCHARS_lc) != MD5_HEX_SIZE)
		return 0;

	return 1;
}

/* Decode the 32 hex digits after the second '$' into the 16 binary bytes. */
static void *get_binary(char *ciphertext)
{
	static uint32_t out[BINARY_SIZE/4];
	unsigned char *binary_cipher = (unsigned char*)out;
	int i;

	ciphertext += 17;
	for (i = 0; i < BINARY_SIZE; ++i)
		binary_cipher[i] =
			(atoi16[ARCH_INDEX(ciphertext[i*2])] << 4)
			+ atoi16[ARCH_INDEX(ciphertext[i*2+1])];

#if !ARCH_LITTLE_ENDIAN && defined (SIMD_COEF_32)
	alter_endianity(out, BINARY_SIZE);
#endif
	return (void*)out;
}

/* The stored "salt" is md5hex(raw 5-byte salt) — the first half of the
 * outer MD5's message — computed once per ciphertext here. */
static void *get_salt(char *ciphertext)
{
	static uint32_t hex_salt[MD5_HEX_SIZE/4];
	unsigned char binary_salt[SALT_LENGTH];
	unsigned char salt_hash[BINARY_SIZE];
	static MD5_CTX ctx;
	int i;

	ciphertext += FORMAT_TAG_LEN;
	for (i = 0; i < SALT_LENGTH; ++i)
		binary_salt[i] =
			(atoi16[ARCH_INDEX(ciphertext[i*2])] << 4)
			+ atoi16[ARCH_INDEX(ciphertext[i*2+1])];

	MD5_Init(&ctx);
	MD5_Update(&ctx, binary_salt, SALT_LENGTH);
	MD5_Final(salt_hash, &ctx);

	for (i = 0; i < BINARY_SIZE; ++i) {
		((char*)hex_salt)[i*2] = itoa16[ARCH_INDEX(salt_hash[i] >> 4)];
		((char*)hex_salt)[i*2+1] = itoa16[ARCH_INDEX(salt_hash[i] & 0x0f)];
	}

	return (void*)hex_salt;
}

/* SIMD: defer the copy to crypt_all via new_salt; scalar: paste the hex salt
 * into the front half of every candidate's buffer now. */
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
	cur_salt = salt;
	new_salt = 1;
#else
	int index;

	for (index = 0; index < omp_t * MAX_KEYS_PER_CRYPT; index++)
		memcpy(saved_key[index], salt, MD5_HEX_SIZE);
#endif
}

/* Store the candidate; scalar path also precomputes md5hex(key) into the
 * back half of saved_key (SIMD defers via new_key). */
static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_32
	strnzcpy(saved_plain[index], key, sizeof(*saved_plain));
	new_key = 1;
#else
	unsigned char key_hash[BINARY_SIZE];
	unsigned char *kh = key_hash;
	unsigned char *key_ptr = (unsigned char*)saved_key[index] + MD5_HEX_SIZE;
	unsigned char v;
	int i, len;
	MD5_CTX ctx;

	len = strnzcpyn(saved_plain[index], key, sizeof(*saved_plain));

	MD5_Init(&ctx);
	MD5_Update(&ctx, key, len);
	MD5_Final(key_hash, &ctx);

	for (i = 0; i < BINARY_SIZE; ++i) {
		v = *kh++;
		*key_ptr++ = itoa16_shr_04[ARCH_INDEX(v)];
		*key_ptr++ = itoa16_and_0f[ARCH_INDEX(v)];
	}
#endif
}

static char *get_key(int index)
{
	return saved_plain[index];
}

/* Compute md5(md5hex(salt) . md5hex(key)) for all queued candidates.
 * SIMD path: (1) refresh salt halves if new_salt, (2) pack/pad raw keys into
 * key_buf if new_key, (3) MD5 the keys, hex-expand the digests into the back
 * halves of saved_key, then run the two-block outer MD5 (saved_key block +
 * precomputed empty_key padding block). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
#ifdef SIMD_COEF_32
#if defined(_OPENMP)
	int t;
#pragma omp parallel for
	for (t = 0; t < omp_t; t++)
#define ti (t*NBKEYS+index)
#else
#define t 0
#define ti index
#endif
	{
		unsigned int index, i;

		if (new_salt)
		for (index = 0; index < NBKEYS; index++) {
			const uint32_t *sp = cur_salt;
#if ARCH_LITTLE_ENDIAN
			uint32_t *kb = (uint32_t*)&saved_key[GETPOS(0, ti)];

			for (i = 0; i < MD5_HEX_SIZE / 4; i++, kb += SIMD_COEF_32)
				*kb = *sp++;
#else
			uint32_t *kb = (uint32_t*)&saved_key[GETPOS(3, ti)];

			for (i = 0; i < MD5_HEX_SIZE / 4; i++, kb += SIMD_COEF_32)
				*kb = JOHNSWAP(*sp++);
#endif
		}

		if (new_key)
		for (index = 0; index < NBKEYS; index++) {
			const uint32_t *key = (uint32_t*)saved_plain[ti];
			int len = 0, temp;

			/* Copy the NUL-terminated key a word at a time, placing the
			 * 0x80 MD5 padding byte right after the last key byte. */
#if ARCH_LITTLE_ENDIAN
			uint32_t *kb = (uint32_t*)&key_buf[GETPOS(0, ti)];
			uint32_t *keybuffer = kb;

			while((unsigned char)(temp = *key++)) {
				if (!(temp & 0xff00))
				{
					*kb = (unsigned char)temp | (0x80 << 8);
					len++;
					goto key_cleaning;
				}
				if (!(temp & 0xff0000))
				{
					*kb = (unsigned short)temp | (0x80 << 16);
					len+=2;
					goto key_cleaning;
				}
				if (!(temp & 0xff000000))
				{
					*kb = temp | (0x80U << 24);
					len+=3;
					goto key_cleaning;
				}
				*kb = temp;
#else
			uint32_t *kb = (uint32_t*)&key_buf[GETPOS(3, ti)];
			uint32_t *keybuffer = kb;

			while((temp = *key++) & 0xff000000) {
				if (!(temp & 0xff0000))
				{
					*kb = JOHNSWAP((temp & 0xff000000) | (0x80 << 16));
					len++;
					goto key_cleaning;
				}
				if (!(temp & 0xff00))
				{
					*kb = JOHNSWAP((temp & 0xffff0000) | (0x80 << 8));
					len+=2;
					goto key_cleaning;
				}
				if (!(temp & 0xff))
				{
					*kb = JOHNSWAP(temp | 0x80U);
					len+=3;
					goto key_cleaning;
				}
				*kb = JOHNSWAP(temp);
#endif
				len += 4;
				kb += SIMD_COEF_32;
			}
			*kb = 0x00000080;

key_cleaning:
			/* Zero the rest of this lane's block (stale longer keys). */
			kb += SIMD_COEF_32;
			while(*kb) {
				*kb = 0;
				kb += SIMD_COEF_32;
			}
			/* Message length in bits goes into word 14 of the block. */
			keybuffer[14*SIMD_COEF_32] = len << 3;
		}

		/* Inner MD5 of the raw keys. */
		SIMDmd5body(&key_buf[t*NBKEYS*64], (unsigned int*)&crypt_key[t*NBKEYS*16], NULL, SSEi_MIXED_IN);

		for (index = 0; index < NBKEYS; index++) {
			// Somehow when I optimised this it got faster in Valgrind but slower IRL
			for (i = 0; i < BINARY_SIZE; i++) {
				unsigned char v = crypt_key[GETOUTPOS(i, ti)];
				saved_key[GETPOS(MD5_HEX_SIZE + 2 * i, ti)] = itoa16_shr_04[ARCH_INDEX(v)];
				saved_key[GETPOS(MD5_HEX_SIZE + 2 * i + 1, ti)] = itoa16_and_0f[ARCH_INDEX(v)];
			}
		}

		/* Outer MD5: 64-byte hex block, then the fixed padding block. */
		SIMDmd5body(&saved_key[t*NBKEYS*64], (unsigned int*)&crypt_key[t*NBKEYS*16], NULL, SSEi_MIXED_IN);
		SIMDmd5body(empty_key, (unsigned int*)&crypt_key[t*NBKEYS*16], (unsigned int*)&crypt_key[t*NBKEYS*16], SSEi_RELOAD|SSEi_MIXED_IN);
	}
	//dump_stuff_mmx_msg("\nfinal ", saved_key, 64, count-1);
	//dump_out_mmx_msg("result", crypt_key, 16, count-1);
	new_salt = new_key = 0;
#else
#ifdef _OPENMP
	int index;
#pragma omp parallel for
	for (index = 0; index < count; index++)
#else
#define index 0
#endif
	{
		MD5_CTX ctx;

		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[index], MD5_HEX_SIZE * 2);
		MD5_Final((unsigned char*)crypt_key[index], &ctx);
	}
#undef index
#endif
	return count;
}

/* Quick reject: does any lane's first digest word match the binary? */
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
	unsigned int x,y=0;
#ifdef _OPENMP
	for (;y<SIMD_PARA_MD5*omp_t;y++)
#else
	for (;y<SIMD_PARA_MD5;y++)
#endif
		for (x = 0; x < SIMD_COEF_32; x++)
		{
			if ( ((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+x] )
				return 1;
		}
	return 0;
#else
	int index;

	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
			return 1;
	return 0;
#endif
}

/* cmp_one already compares the full digest, so nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Full 16-byte comparison for one candidate (de-interleaved in SIMD mode). */
static int cmp_one(void * binary, int index)
{
#ifdef SIMD_COEF_32
	unsigned int i,x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	for (i=0;i<(BINARY_SIZE/4);i++)
		if ( ((uint32_t*)binary)[i] != ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+i*SIMD_COEF_32+x] )
			return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}

#define COMMON_GET_HASH_SIMD32 4
#define COMMON_GET_HASH_VAR crypt_key
#include "common-get-hash.h"

static int salt_hash(void *salt)
{
	return *(uint32_t*)salt & (SALT_HASH_SIZE - 1);
}

struct fmt_main fmt_IPB2 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
LCC04Traversal.h
/**
 * @file LCC04Traversal.h
 * @author C.Menges, based on tchipevn (original source:
 * ls1-mardyn/src/particleContainer/LinkedCellTraversals/C04CellPairTraversal.h)
 * @date 15.06.2019
 */

#pragma once

#include "autopas/containers/cellPairTraversals/C08BasedTraversal.h"
#include "autopas/containers/linkedCells/traversals/LCC08CellHandler.h"
#include "autopas/containers/linkedCells/traversals/LCTraversalInterface.h"
#include "autopas/pairwiseFunctors/CellFunctor.h"
#include "autopas/utils/ArrayUtils.h"
#include "autopas/utils/ThreeDimensionalMapping.h"
#include "autopas/utils/WrapOpenMP.h"

namespace autopas {

/**
 * This class provides the c04 traversal.
 *
 * The traversal uses the c04 base step performed on every single cell. Since
 * these steps overlap a domain coloring with four colors is applied.
 *
 * @tparam ParticleCell the type of cells
 * @tparam PairwiseFunctor The functor that defines the interaction of two particles.
 * @tparam useSoA
 * @tparam useNewton3
 */
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
class LCC04Traversal : public C08BasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>,
                       public LCTraversalInterface<ParticleCell> {
 public:
  /**
   * Constructor of the c04 traversal.
   * @param dims The dimensions of the cellblock, i.e. the number of cells in x,
   * y and z direction.
   * @param pairwiseFunctor The functor that defines the interaction of two particles.
   * @param interactionLength Interaction length.
   * @param cellLength cell length.
   */
  LCC04Traversal(const std::array<unsigned long, 3> &dims, PairwiseFunctor *pairwiseFunctor,
                 const double interactionLength, const std::array<double, 3> &cellLength)
      : C08BasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>(dims, pairwiseFunctor,
                                                                                interactionLength, cellLength),
        _cellOffsets32Pack(computeOffsets32Pack()),
        _cellHandler(pairwiseFunctor, this->_cellsPerDimension, interactionLength, cellLength, this->_overlap),
        // _end is the last valid 3D cell index per dimension (cellsPerDimension - 1).
        _end(utils::ArrayMath::subScalar(utils::ArrayUtils::static_cast_array<long>(this->_cellsPerDimension), 1l)) {}

  void traverseParticlePairs() override;

  [[nodiscard]] TraversalOption getTraversalType() const override { return TraversalOption::lc_c04; }

  [[nodiscard]] DataLayoutOption getDataLayout() const override { return dataLayout; }

  [[nodiscard]] bool getUseNewton3() const override { return useNewton3; }

  /**
   * C04 traversals are usable, if cellSizeFactor >= 1.0 and there are more than 3 cells
   * (i.e. at least 4, matching the `minDim > 3` check below) for each dimension.
   * @return information about applicability
   */
  [[nodiscard]] bool isApplicable() const override {
    if (dataLayout == DataLayoutOption::cuda) {
      return false;
    }
    // The cellsize cannot be smaller then the cutoff, if OpenMP is used.
    // Also see: https://github.com/AutoPas/AutoPas/issues/464
    const double minLength = *std::min_element(this->_cellLength.cbegin(), this->_cellLength.cend());
    const unsigned long minDim = *std::min_element(this->_cellsPerDimension.cbegin(), this->_cellsPerDimension.cend());

    return minLength >= this->_interactionLength and minDim > 3;
  }

 private:
  // Processes all blocks of one color; called once per color from traverseParticlePairs.
  void traverseSingleColor(std::vector<ParticleCell> &cells, int color);

  // Runs the c08 base step on every valid cell of the 32-cell pack anchored at base3DIndex.
  void processBasePack32(std::vector<ParticleCell> &cells, const std::array<long, 3> &base3DIndex);

  constexpr auto computeOffsets32Pack() const;

  // Color of a block origin; +24 keeps the dividend non-negative for negative start indices.
  [[nodiscard]] constexpr long parity(long x, long y, long z) const { return (x + y + z + 24) % 8; }

  // Relative cell offsets forming one 32-cell block (a 4x4x4 cube minus its corner columns).
  std::array<std::array<long, 3>, 32> _cellOffsets32Pack;

  LCC08CellHandler<ParticleCell, PairwiseFunctor, dataLayout, useNewton3> _cellHandler;

  // Exclusive upper bound of valid base-cell indices per dimension.
  const std::array<long, 3> _end;
};

/**
 * Computes the barriers of the aggregation of cells for each color
 *
 * @tparam ParticleCell
 * @tparam PairwiseFunctor
 * @tparam dataLayout
 * @tparam useNewton3
 */
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
constexpr auto LCC04Traversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::computeOffsets32Pack() const {
  using std::make_pair;
  using utils::ThreeDimensionalMapping::threeToOneD;

  std::array<std::array<long, 3>, 32> cellOffsets32Pack = {};
  unsigned int i = 0;

  // Bottom layer (z == 0): only the central 2x2 cells.
  long z = 0l;
  cellOffsets32Pack[i++] = {1l, 1l, z};
  cellOffsets32Pack[i++] = {1l, 2l, z};
  cellOffsets32Pack[i++] = {2l, 1l, z};
  cellOffsets32Pack[i++] = {2l, 2l, z};

  // z = 1ul; z = 2ul
  // Middle layers: full 4x4 minus the four corners (12 cells each).
  for (z = 1l; z < 3l; ++z) {
    for (long y = 0l; y < 4l; y++) {
      for (long x = 0l; x < 4l; x++) {
        if ((x == 0l and y == 0l) or (x == 3l and y == 0l) or (x == 0l and y == 3l) or (x == 3l and y == 3l)) {
          continue;
        }
        cellOffsets32Pack[i++] = {x, y, z};
      }
    }
  }

  // Top layer (z == 3): central 2x2 again, for a total of 4+12+12+4 = 32 cells.
  z = 3ul;
  cellOffsets32Pack[i++] = {1l, 1l, z};
  cellOffsets32Pack[i++] = {1l, 2l, z};
  cellOffsets32Pack[i++] = {2l, 1l, z};
  cellOffsets32Pack[i++] = {2l, 2l, z};

  /// @todo C++20: mark as unlikely
  if (i != 32) {
    utils::ExceptionHandler::exception("Internal error: Wrong number of offsets (expected: 32, actual: {})", i);
  }
  return cellOffsets32Pack;
}

/**
 * Goes through the cells aggregated by one color and processes the particles in each cell that is part of the
 * aggregation by using the barriers saved in _cellOffset32Pack.
 *
 * @tparam ParticleCell
 * @tparam PairwiseFunctor
 * @tparam dataLayout
 * @tparam useNewton3
 * @param cells
 * @param base3DIndex
 */
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
void LCC04Traversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::processBasePack32(
    std::vector<ParticleCell> &cells, const std::array<long, 3> &base3DIndex) {
  using utils::ThreeDimensionalMapping::threeToOneD;
  std::array<long, 3> index;
  const std::array<long, 3> signedDims = utils::ArrayUtils::static_cast_array<long>(this->_cellsPerDimension);

  for (auto Offset32Pack : _cellOffsets32Pack) {
    // compute 3D index
    bool isIn = true;
    for (int d = 0; d < 3; ++d) {
      index[d] = base3DIndex[d] + Offset32Pack[d];
      // Blocks at the domain border may stick out; skip out-of-range cells.
      isIn &= (index[d] >= 0l) and (index[d] < _end[d]);
    }

    if (isIn) {
      const unsigned long ulIndex = threeToOneD(index, signedDims);
      _cellHandler.processBaseCell(cells, ulIndex);
    }
  }
}

/**
 * Go through one color and search for blocks belonging to the specified color.
 * Uses two cartesian grids that are overlapping gridwise but not blockwise.
 *
 * @tparam ParticleCell
 * @tparam PairwiseFunctor
 * @tparam dataLayout
 * @tparam useNewton3
 */
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
void LCC04Traversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::traverseParticlePairs() {
  auto &cells = *(this->_cells);
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel
#endif
  {
    // Colors are processed strictly in order; the barrier between colors
    // prevents concurrent processing of overlapping blocks.
    for (int color = 0; color < 4; ++color) {
      traverseSingleColor(cells, color);

      if (color < 3) {
#if defined(AUTOPAS_OPENMP)
#pragma omp barrier
#endif
      }
    }
  }  // close parallel region
}

template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
void LCC04Traversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::traverseSingleColor(
    std::vector<ParticleCell> &cells, int color) {
  // we need to traverse one body-centered cubic (BCC) grid, which consists of two cartesian grids
  // colors 0 and 2 form one cartesian grid
  // colors 1 and 3 form another cartesian grid, whose origin is shifted by (2,2,2)

  // determine a starting point of one of the grids
  std::array<long, 3> startOfThisColor{};

  switch (color % 2) {
    case 0:
      // colours 0 and 2
      startOfThisColor = {-2l, -2l, -2l};
      break;
    case 1:
      // colours 1 and 3
      startOfThisColor = {0l, 0l, 0l};
      break;
  }

  // calculate whether the calculated starting point is part of the color
  long correctParity = parity(startOfThisColor[0], startOfThisColor[1], startOfThisColor[2]);
  if (color >= 2) {
    correctParity += 4;
  }

  // to fix intel64 icpc compiler complaints about perfectly nested loop (tested with version 19.0.4.20190416).
  const long startX = startOfThisColor[0], endX = _end[0];
  const long startY = startOfThisColor[1], endY = _end[1];
  const long startZ = startOfThisColor[2], endZ = _end[2];

  // first cartesian grid
  // grids are interlinked: one grid fills the gaps in the other grid
#if defined(AUTOPAS_OPENMP)
#pragma omp for schedule(dynamic, 1) collapse(3) nowait
#endif
  for (long z = startZ; z < endZ; z += 4) {
    for (long y = startY; y < endY; y += 4) {
      for (long x = startX; x < endX; x += 4) {
        // Only block origins whose parity matches this color are processed.
        const long par = parity(x, y, z);

        if (par != correctParity) {
          continue;
        }

        const std::array<long, 3> base3DIndex = {x, y, z};
        processBasePack32(cells, base3DIndex);
      }
    }
  }
}

}  // namespace autopas
SingleEndLink.c
/* NOTE(review): this appears to be a minimal compiler/analysis test fixture
 * (two orphaned `omp single` regions, a shadowing declaration, and a
 * side-effect-free expression statement); its exact shape is likely what is
 * under test, so the code is intentionally left untouched. */

int x;

int main() {
#pragma omp single
  {
    /* Shadows the global x inside the single region. */
    int x;
  }
#pragma omp single
  {
    /* Deliberate no-op expression statement. */
    11;
  }
}
r_numint.c
/*
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <complex.h>
#include "cint.h"
#include "gto/grid_ao_drv.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
#include <assert.h>

/* Column-block width used to skip AO blocks screened out as zero. */
#define BOXSIZE 56

int VXCao_empty_blocks(char *empty, unsigned char *non0table, int *shls_slice,
                       int *ao_loc);

/*
 * Single-block kernel: vm[nocc,bgrids] = ao[i,bgrids] * dm[i,nocc] for one
 * grid batch. When the screening table marks some BOXSIZE-wide AO blocks as
 * empty, only the non-empty blocks are multiplied; beta starts at 0 so the
 * first gemm overwrites vm, then switches to 1 to accumulate. If every block
 * was empty (beta still 0), vm is zeroed explicitly.
 */
static void dot_ao_dm(double complex *vm, double complex *ao, double complex *dm,
                      int nao, int nocc, int ngrids, int bgrids,
                      unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        int nbox = (nao+BOXSIZE-1) / BOXSIZE;
        char empty[nbox];
        int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);

        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        const double complex Z1 = 1;
        double complex beta = 0;

        if (has0) {
                int box_id, bas_id, b0, blen, i, j;
                for (box_id = 0; box_id < nbox; box_id++) {
                        if (!empty[box_id]) {
                                b0 = box_id * BOXSIZE;
                                blen = MIN(nao-b0, BOXSIZE);
                                zgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen,
                                       &Z1, ao+b0*ngrids, &ngrids, dm+b0*nocc, &nocc,
                                       &beta, vm, &ngrids);
                                beta = 1.0;
                        }
                }
                if (beta == 0) { // all empty
                        for (i = 0; i < nocc; i++) {
                        for (j = 0; j < bgrids; j++) {
                                vm[i*ngrids+j] = 0;
                        } }
                }
        } else {
                zgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao,
                       &Z1, ao, &ngrids, dm, &nocc, &beta, vm, &ngrids);
        }
}

/* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc] */
/* Parallel driver: grid points are split into BLKSIZE batches; each batch
 * writes a disjoint slice of vm, so no reduction is needed. */
void VXCzdot_ao_dm(double complex *vm, double complex *ao, double complex *dm,
                   int nao, int nocc, int ngrids, int nbas,
                   unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
#pragma omp parallel default(none) \
        shared(vm, ao, dm, nao, nocc, ngrids, nbas, \
               non0table, shls_slice, ao_loc)
{
        int ip, ib;
#pragma omp for nowait schedule(static)
        for (ib = 0; ib < nblk; ib++) {
                ip = ib * BLKSIZE;
                dot_ao_dm(vm+ip, ao+ip, dm, nao, nocc, ngrids,
                          MIN(ngrids-ip, BLKSIZE),
                          non0table+ib*nbas, shls_slice, ao_loc);
        }
}
}

/* conj(vv[n,m]) = ao1[n,ngrids] * conj(ao2[m,ngrids]) */
/*
 * Single-batch kernel accumulating into vv (beta is always 1, so the caller
 * must pre-zero vv). With screening, only non-empty block pairs are
 * multiplied; when hermi is set, only the lower block triangle is computed.
 */
static void dot_ao_ao(double complex *vv, double complex *ao1, double complex *ao2,
                      int nao, int ngrids, int bgrids, int hermi,
                      unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        int nbox = (nao+BOXSIZE-1) / BOXSIZE;
        char empty[nbox];
        int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);

        const char TRANS_C = 'C';
        const char TRANS_N = 'N';
        const double complex Z1 = 1;

        if (has0) {
                int ib, jb, b0i, b0j, leni, lenj;
                int j1 = nbox;
                for (ib = 0; ib < nbox; ib++) {
                if (!empty[ib]) {
                        b0i = ib * BOXSIZE;
                        leni = MIN(nao-b0i, BOXSIZE);
                        if (hermi) {
                                j1 = ib + 1;
                        }
                        for (jb = 0; jb < j1; jb++) {
                        if (!empty[jb]) {
                                b0j = jb * BOXSIZE;
                                lenj = MIN(nao-b0j, BOXSIZE);
                                zgemm_(&TRANS_C, &TRANS_N, &lenj, &leni, &bgrids,
                                       &Z1, ao2+b0j*ngrids, &ngrids,
                                       ao1+b0i*ngrids, &ngrids,
                                       &Z1, vv+b0i*nao+b0j, &nao);
                        } }
                } }
        } else {
                zgemm_(&TRANS_C, &TRANS_N, &nao, &nao, &bgrids,
                       &Z1, ao2, &ngrids, ao1, &ngrids, &Z1, vv, &nao);
        }
}

/* vv[nao,nao] = conj(ao1[i,nao]) * ao2[i,nao] */
/*
 * Parallel driver: each thread accumulates its grid batches into a private
 * buffer (the gemm above produces conj(vv)), then the conjugated partials are
 * merged into vv under a critical section. The triangular result is
 * symmetrized afterwards when hermi is set.
 */
void VXCzdot_ao_ao(double complex *vv, double complex *ao1, double complex *ao2,
                   int nao, int ngrids, int nbas, int hermi,
                   unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
        memset(vv, 0, sizeof(double complex) * nao * nao);
#pragma omp parallel default(none) \
        shared(vv, ao1, ao2, nao, ngrids, nbas, hermi, \
               non0table, shls_slice, ao_loc)
{
        int ip, ib;
        double complex *v_priv = calloc(nao*nao, sizeof(double complex));
#pragma omp for nowait schedule(static)
        for (ib = 0; ib < nblk; ib++) {
                ip = ib * BLKSIZE;
                dot_ao_ao(v_priv, ao1+ip, ao2+ip,
                          nao, ngrids, MIN(ngrids-ip, BLKSIZE), hermi,
                          non0table+ib*nbas, shls_slice, ao_loc);
        }
#pragma omp critical
        {
                for (ip = 0; ip < nao*nao; ip++) {
                        vv[ip] += conj(v_priv[ip]);
                }
        }
        free(v_priv);
}
        if (hermi != 0) {
                NPzhermi_triu(nao, vv, hermi);
        }
}
single.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt #include "callback.h" #include <omp.h> int main() { int x = 0; #pragma omp parallel num_threads(2) { #pragma omp single { x++; } } printf("x=%d\n", x); // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_work' // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_single_in_block_begin: parallel_id=[[PARALLEL_ID:[0-9]+]], parent_task_id=[[TASK_ID:[0-9]+]], workshare_function={{0x[0-f]+}}, count=1 // CHECK: {{^}}[[THREAD_ID]]: ompt_event_single_in_block_end: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], workshare_function={{0x[0-f]+}}, count=1 // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_single_others_begin: parallel_id=[[PARALLEL_ID:[0-9]+]], task_id=[[TASK_ID:[0-9]+]], workshare_function={{0x[0-f]+}}, count=1 // CHECK: {{^}}[[THREAD_ID]]: ompt_event_single_others_end: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], workshare_function={{0x[0-f]+}}, count=1 return 0; }
clansy.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlansy.c, normal z -> c, Fri Sep 28 17:38:08 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" /***************************************************************************//** * * @ingroup plasma_lansy * * Returns the norm of a symmetric matrix as * * clansy = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm * ( * ( norm1(A), NORM = PlasmaOneNorm * ( * ( normI(A), NORM = PlasmaInfNorm * ( * ( normF(A), NORM = PlasmaFrobeniusNorm * * where norm1 denotes the one norm of a matrix (maximum column sum), * normI denotes the infinity norm of a matrix (maximum row sum) and * normF denotes the Frobenius norm of a matrix (square root of sum * of squares). Note that max(abs(A(i,j))) is not a consistent matrix * norm. * ******************************************************************************* * * @param[in] norm * - PlasmaMaxNorm: Max norm * - PlasmaOneNorm: One norm * - PlasmaInfNorm: Infinity norm * - PlasmaFrobeniusNorm: Frobenius norm * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] n * The order of the matrix A. n >= 0. * * @param[in,out] A * On entry, the symmetric matrix A. * If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A * contains the upper triangular part of the matrix A, and the strictly * lower triangular part of A is not referenced. * If uplo = PlasmaLower, the leading N-by-N lower triangular part of A * contains the lower triangular part of the matrix A, and the strictly * upper triangular part of A is not referenced. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). 
 *
 *******************************************************************************
 *
 * @retval float
 *         The specified norm of the symmetric matrix A.
 *
 *******************************************************************************
 *
 * @sa plasma_omp_clansy
 * @sa plasma_clansy
 * @sa plasma_slansy
 * @sa plasma_slansy
 *
 ******************************************************************************/
float plasma_clansy(plasma_enum_t norm, plasma_enum_t uplo,
                    int n,
                    plasma_complex32_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm) ) {
        plasma_error("illegal value of norm");
        return -1;
    }
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -5;
    }

    // quick return
    if (n == 0)
        return 0.0;

    // Tune parameters
    if (plasma->tuning)
        plasma_tune_lansy(plasma, PlasmaComplexFloat, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Allocate workspace (size depends on the requested norm; see the
    // plasma_omp_clansy documentation for the exact formulas).
    float *work = NULL;
    switch (norm) {
    case PlasmaMaxNorm:
        work = (float*)malloc((size_t)A.mt*A.nt*sizeof(float));
        break;
    case PlasmaOneNorm:
    case PlasmaInfNorm:
        work = (float*)malloc(((size_t)A.mt*A.n+A.n)*sizeof(float));
        break;
    case PlasmaFrobeniusNorm:
        work = (float*)malloc((size_t)2*A.mt*A.nt*sizeof(float));
        break;
    }
    if (work == NULL) {
        plasma_error("malloc() failed");
        // NOTE(review): returning here leaks the tile descriptor A created
        // above — plasma_desc_destroy(&A) should precede this return.
        return PlasmaErrorOutOfMemory;
    }

    // Initialize sequence.
    // NOTE(review): retval from plasma_sequence_init/plasma_request_init is
    // assigned but never checked.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    float value;
    // asynchronous block: a single (master) task region submits the layout
    // translation and the norm computation; all spawned tasks complete at
    // the end of the parallel region.
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);

        // Call tile async function.
        plasma_omp_clansy(norm, uplo, A, work, &value, &sequence, &request);
    }
    // implicit synchronization

    free(work);

    // Free matrix in tile layout.
    plasma_desc_destroy(&A);

    // Return the norm.
    return value;
}

/***************************************************************************//**
 *
 * @ingroup plasma_lansy
 *
 * Calculates the max, one, infinity or Frobenius norm of a symmetric matrix.
 * Non-blocking equivalent of plasma_clansy().  May return before the
 * computation is finished.  Operates on matrices stored by tiles.  All
 * matrices are passed through descriptors.  All dimensions are taken from the
 * descriptors.  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: Max norm
 *          - PlasmaOneNorm: One norm
 *          - PlasmaInfNorm: Infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] A
 *          The descriptor of matrix A.
 *
 * @param[out] work
 *          Workspace of size:
 *          - PlasmaMaxNorm: A.mt*A.nt
 *          - PlasmaOneNorm: A.mt*A.n + A.n
 *          - PlasmaInfNorm: A.mt*A.n + A.n
 *          - PlasmaFrobeniusNorm: 2*A.mt*A.nt
 *
 * @param[out] value
 *          The calculated value of the norm requested.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.  The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_clansy
 * @sa plasma_omp_clansy
 * @sa plasma_omp_slansy
 * @sa plasma_omp_slansy
 *
 ******************************************************************************/
void plasma_omp_clansy(plasma_enum_t norm, plasma_enum_t uplo, plasma_desc_t A,
                       float *work, float *value,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
        plasma_error("illegal value of norm");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): the NULL checks below still pass the (possibly NULL)
    // sequence/request pointers into plasma_request_fail — presumably that
    // helper tolerates NULL; verify against its implementation.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return (A is n-by-n, so a zero row count means an empty matrix)
    if (A.m == 0) {
        *value = 0.0;
        return;
    }

    // Call the parallel function.
    plasma_pclansy(norm, uplo, A, work, value, sequence, request);
}
reduction2.c
/* Test multiple reduction clauses with different reduction operations.
 *
 * `res` is combined with `+`, while `yy` and `zz` share a `*` reduction
 * clause.  Each iteration nets res -= i (add i, then subtract 2*i), and the
 * multiplicative reductions are deliberately no-ops (*= 1) so the final
 * values are easy to predict regardless of thread count. */
#include <stdio.h>
#include <omp.h>

#define NUM_THREADS 4

int main () {
    const int total = 1000000;
    int yy = 10000;
    int zz = 10000;
    double res = 0.0;
    int i;

    omp_set_num_threads(NUM_THREADS);

#pragma omp parallel for reduction(+:res) reduction(*:yy,zz)
    for (i = 0; i <= total; i++) {
        /* net effect of the pair: res -= i */
        res += i;
        res -= 2 * i;
        /* exercise the '*' reduction without changing the value */
        yy *= 1;
        zz *= 1;
    }

    printf("the sum of 1000000 is :%.0f\n", res);
    return 0;
}
pooling_2x2.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 2x2 max pooling with stride 2, one output per 2x2 input window, computed
// channel-by-channel.  NEON paths (ARMv7/AArch64 inline asm) process four
// output columns (eight input columns from each of the two rows) per
// iteration; the scalar tail handles the remainder.
// Assumes top_blob has already been sized to (outw, outh, inch) by the
// caller — TODO confirm against the calling Pooling layer.
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // After consuming one output row, r0/r1 sit at the end of the first input
    // row of the pair; advance past the leftover columns (w - 2*outw) plus one
    // whole row (w) so they land on the next row pair.
    const int tailstep = w - 2*outw + w;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q=0; q<inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // r0/r1 walk the two input rows feeding the current output row.
        const float* r0 = img0;
        const float* r1 = img0 + w;

        for (int i = 0; i < outh; i++)
        {
#if __ARM_NEON
            int nn = outw >> 2;             // 4-output vector iterations
            int remain = outw - (nn << 2);  // scalar leftovers
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                // Per iteration: load 8 floats from each row, take the
                // element-wise max of the rows (fmax), then the pairwise max
                // across columns (fmaxp) to produce 4 pooled outputs.
                asm volatile(
                    "0: \n"
                    "prfm pldl1keep, [%1, #256] \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v0.4s, v1.4s}, [%1], #32 \n"
                    "ld1 {v2.4s, v3.4s}, [%2], #32 \n"
                    "fmax v0.4s, v0.4s, v2.4s \n"
                    "fmax v1.4s, v1.4s, v3.4s \n"
                    "fmaxp v2.4s, v0.4s, v1.4s \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v2.4s}, [%3], #16 \n"
                    "bne 0b \n"
                    : "=r"(nn),     // %0
                      "=r"(r0),     // %1
                      "=r"(r1),     // %2
                      "=r"(outptr)  // %3
                    : "0"(nn),
                      "1"(r0),
                      "2"(r1),
                      "3"(outptr)
                    : "cc", "memory", "v0", "v1", "v2", "v3"
                );
            }
#else
            if (nn > 0)
            {
                // ARMv7 variant of the same scheme: vmax for the row-wise max,
                // vpmax for the column-pairwise max, 4 outputs per iteration.
                asm volatile(
                    "0: \n"
                    "pld [%1, #256] \n"
                    "pld [%2, #256] \n"
                    "vld1.f32 {d0-d3}, [%1]! \n"
                    "vld1.f32 {d4-d7}, [%2]! \n"
                    "vmax.f32 q0, q0, q2 \n"
                    "vmax.f32 q1, q1, q3 \n"
                    "vpmax.f32 d4, d0, d1 \n"
                    "vpmax.f32 d5, d2, d3 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d4-d5}, [%3]! \n"
                    "bne 0b \n"
                    : "=r"(nn),     // %0
                      "=r"(r0),     // %1
                      "=r"(r1),     // %2
                      "=r"(outptr)  // %3
                    : "0"(nn),
                      "1"(r0),
                      "2"(r1),
                      "3"(outptr)
                    : "cc", "memory", "q0", "q1", "q2", "q3"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Scalar tail: max over the 2x2 window.
            for (; remain>0; remain--)
            {
                float max0 = std::max(r0[0], r0[1]);
                float max1 = std::max(r1[0], r1[1]);

                *outptr = std::max(max0, max1);

                r0 += 2;
                r1 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
        }
    }
}
exchange_boundary_hoist_omp.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// perform a (intra-level) ghost zone exchange on vector id
//  NOTE exchange_boundary() only exchanges the boundary.
//  It will not enforce any boundary conditions
//  BC's are either the responsibility of a separate function or should be fused into the stencil
// The argument shape indicates which of faces, edges, and corners on each box must be exchanged
// If the specified shape exceeds the range of defined shapes, the code will default to STENCIL_SHAPE_BOX (i.e. exchange faces, edges, and corners)
//
// Thread-role scheme inside the single parallel region:
//   thread 0        — drives all MPI traffic (Irecv, Isend, Waitall)
//   threads 1..N-1  — pack send buffers and do the local (on-node) block
//                     copies, manually strided by (num_threads-1) so thread 0
//                     is never stalled by copy work
//   all threads     — unpack receive buffers via a regular omp for
// blocks[0] = pack list, blocks[1] = local exchange list, blocks[2] = unpack list.
void exchange_boundary(level_type * level, int id, int shape){
  double _timeCommunicationStart = getTime();
  if( level->exchange_ghosts[shape].num_blocks[0] ||
      level->exchange_ghosts[shape].num_blocks[1] ||
      level->exchange_ghosts[shape].num_blocks[2] ){
  #pragma omp parallel
  {
    int threadID = omp_get_thread_num();
    double _timeStart;
    if(shape>=STENCIL_MAX_SHAPES)shape=STENCIL_SHAPE_BOX; // shape must be < STENCIL_MAX_SHAPES in order to safely index into exchange_ghosts[]
    int my_tag = (level->tag<<4) | shape;  // tag encodes both level and shape so concurrent exchanges don't collide
    int buffer=0;
    int n;

    #ifdef USE_MPI
    int nMessages = level->exchange_ghosts[shape].num_recvs + level->exchange_ghosts[shape].num_sends;
    // requests[] holds recvs first, then sends.
    MPI_Request *recv_requests = level->exchange_ghosts[shape].requests;
    MPI_Request *send_requests = level->exchange_ghosts[shape].requests + level->exchange_ghosts[shape].num_recvs;

    // loop through packed list of MPI receives and prepost Irecv's...
    if(level->exchange_ghosts[shape].num_recvs>0){
      if(threadID==0){
        _timeStart = getTime();
        for(n=0;n<level->exchange_ghosts[shape].num_recvs;n++){
          MPI_Irecv(level->exchange_ghosts[shape].recv_buffers[n],
                    level->exchange_ghosts[shape].recv_sizes[n],
                    MPI_DOUBLE,
                    level->exchange_ghosts[shape].recv_ranks[n],
                    my_tag,
                    MPI_COMM_WORLD,
                    &recv_requests[n]
          );
        }
        level->timers.ghostZone_recv += (getTime()-_timeStart);
    }}

    // pack MPI send buffers...
    // (only thread 1 times this section, so timings assume >= 2 threads)
    if(level->exchange_ghosts[shape].num_blocks[0]){
      if(threadID==1)_timeStart = getTime();
      if(threadID>0) // let thread 0 keep churning on Irecv's
      for(buffer=threadID-1;buffer<level->exchange_ghosts[shape].num_blocks[0];buffer+=(level->num_threads-1)){
      //#pragma omp for schedule(static,1)
      //for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[0];buffer++){
        CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[0][buffer]);
      }
      if(threadID==1)level->timers.ghostZone_pack += (getTime()-_timeStart);
    }

    // loop through MPI send buffers and post Isend's...
    if(level->exchange_ghosts[shape].num_sends>0){
      #pragma omp barrier // wait for threads to finish packing...
      if(threadID==0){
        _timeStart = getTime();
        for(n=0;n<level->exchange_ghosts[shape].num_sends;n++){
          MPI_Isend(level->exchange_ghosts[shape].send_buffers[n],
                    level->exchange_ghosts[shape].send_sizes[n],
                    MPI_DOUBLE,
                    level->exchange_ghosts[shape].send_ranks[n],
                    my_tag,
                    MPI_COMM_WORLD,
                    &send_requests[n]
          );
        }
        level->timers.ghostZone_send += (getTime()-_timeStart);
      }
    }
    #endif

    // exchange locally... try and hide within Isend latency...
    if(level->exchange_ghosts[shape].num_blocks[1]){
      if(threadID==1)_timeStart = getTime();
      if(threadID>0)
      for(buffer=threadID-1;buffer<level->exchange_ghosts[shape].num_blocks[1];buffer+=(level->num_threads-1)){
      //#pragma omp for schedule(static,1)
      //for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[1];buffer++){
        CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[1][buffer]);
      }
      if(threadID==1)level->timers.ghostZone_local += (getTime()-_timeStart);
    }

    // wait for MPI to finish...
    #ifdef USE_MPI
    if(nMessages){
      if(threadID==0){
        _timeStart = getTime();
        MPI_Waitall(nMessages,level->exchange_ghosts[shape].requests,level->exchange_ghosts[shape].status);
        level->timers.ghostZone_wait += (getTime()-_timeStart);
    }}

    // unpack MPI receive buffers
    if(level->exchange_ghosts[shape].num_blocks[2]){
      #pragma omp barrier // wait for thread 0 to finish WaitAll
      if(threadID==0)_timeStart = getTime();
      #pragma omp for schedule(static,1)
      for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[2];buffer++){
        CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[2][buffer]);
      }
      if(threadID==0)level->timers.ghostZone_unpack += (getTime()-_timeStart);
    }
    #endif
  }}

  level->timers.ghostZone_total += (double)(getTime()-_timeCommunicationStart);
}
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/resize.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *orient_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  orient_image=(Image *) NULL;
  /*
    Each EXIF orientation maps to the flip/flop/rotate that undoes it; the
    result is always a new image (a plain clone for already-upright input).
  */
  switch(orientation)
  {
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
    {
      orient_image=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
    case TopRightOrientation:
    {
      orient_image=FlopImage(image,exception);
      break;
    }
    case BottomRightOrientation:
    {
      orient_image=RotateImage(image,180.0,exception);
      break;
    }
    case BottomLeftOrientation:
    {
      orient_image=FlipImage(image,exception);
      break;
    }
    case LeftTopOrientation:
    {
      orient_image=TransposeImage(image,exception);
      break;
    }
    case RightTopOrientation:
    {
      orient_image=RotateImage(image,90.0,exception);
      break;
    }
    case RightBottomOrientation:
    {
      orient_image=TransverseImage(image,exception);
      break;
    }
    case LeftBottomOrientation:
    {
      orient_image=RotateImage(image,270.0,exception);
      break;
    }
  }
  /* The pixels are now upright, so record the neutral orientation. */
  if (orient_image != (Image *) NULL)
    orient_image->orientation=TopLeftOrientation;
  return(orient_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C h o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ChopImage() removes a region of an image and collapses the image to occupy
%  the removed portion.
%
%  The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o chop_info: Define the region of the image to chop.
%
%    o exception: return any errors or warnings in this structure.
%
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag  "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Clip the chop rectangle to the image bounds so extent is fully interior.
  */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /*
    The result shrinks by the chopped band in each dimension.
  */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Extract chop image: first copy the rows above the chopped band,
    skipping the chopped columns.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Copy only pixels outside the chopped column range; q advances only
         when a pixel is written, p advances for every source pixel. */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Extract chop image: now copy the rows below the chopped band, shifted up
    by extent.height in the destination.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* NOTE(review): total is image->rows but only the retained rows are
           iterated, so progress never reaches 100% here — confirm intended. */
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s o l i d a t e C M Y K I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into a % single image. % % The format of the ConsolidateCMYKImage method is: % % Image *ConsolidateCMYKImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConsolidateCMYKImages(const Image *images, ExceptionInfo *exception) { CacheView *cmyk_view, *image_view; Image *cmyk_image, *cmyk_images; ssize_t j; ssize_t y; /* Consolidate separate C, M, Y, and K planes into a single image. */ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); cmyk_images=NewImageList(); for (j=0; j < (ssize_t) GetImageListLength(images); j+=4) { ssize_t i; assert(images != (Image *) NULL); cmyk_image=CloneImage(images,0,0,MagickTrue, exception); if (cmyk_image == (Image *) NULL) break; if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse) break; (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception); for (i=0; i < 4; i++) { image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { Quantum pixel; pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)); switch (i) { case 
0: SetPixelCyan(cmyk_image,pixel,q); break; case 1: SetPixelMagenta(cmyk_image,pixel,q); break; case 2: SetPixelYellow(cmyk_image,pixel,q); break; case 3: SetPixelBlack(cmyk_image,pixel,q); break; default: break; } p+=GetPixelChannels(images); q+=GetPixelChannels(cmyk_image); } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; } AppendImageToList(&cmyk_images,cmyk_image); } return(cmyk_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImage() extracts a region of the image starting at the offset defined % by geometry. Region must be fully defined, and no special handling of % geometry flags is performed. % % The format of the CropImage method is: % % Image *CropImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to crop with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry, ExceptionInfo *exception) { #define CropImageTag "Crop/Image" CacheView *crop_view, *image_view; Image *crop_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; RectangleInfo bounding_box, page; ssize_t y; /* Check crop geometry. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); bounding_box=image->page; if ((bounding_box.width == 0) || (bounding_box.height == 0)) { bounding_box.width=image->columns; bounding_box.height=image->rows; } page=(*geometry); if (page.width == 0) page.width=bounding_box.width; if (page.height == 0) page.height=bounding_box.height; if (((bounding_box.x-page.x) >= (ssize_t) page.width) || ((bounding_box.y-page.y) >= (ssize_t) page.height) || ((page.x-bounding_box.x) > (ssize_t) image->columns) || ((page.y-bounding_box.y) > (ssize_t) image->rows)) { /* Crop is not within virtual canvas, return 1 pixel transparent image. */ (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","(\"%.20gx%.20g%+.20g%+.20g\") `%s'", (double) geometry->width,(double) geometry->height, (double) geometry->x,(double) geometry->y,image->filename); crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.alpha_trait=BlendPixelTrait; crop_image->background_color.alpha=(MagickRealType) TransparentAlpha; (void) SetImageBackgroundColor(crop_image,exception); crop_image->page=bounding_box; crop_image->page.x=(-1); crop_image->page.y=(-1); if (crop_image->dispose == BackgroundDispose) crop_image->dispose=NoneDispose; return(crop_image); } if ((page.x < 0) && (bounding_box.x >= 0)) { page.width+=page.x-bounding_box.x; page.x=0; } else { page.width-=bounding_box.x-page.x; page.x-=bounding_box.x; if (page.x < 0) page.x=0; } if ((page.y < 0) && (bounding_box.y >= 0)) { page.height+=page.y-bounding_box.y; page.y=0; } else { page.height-=bounding_box.y-page.y; 
page.y-=bounding_box.y; if (page.y < 0) page.y=0; } if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns) page.width=image->columns-page.x; if ((geometry->width != 0) && (page.width > geometry->width)) page.width=geometry->width; if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows) page.height=image->rows-page.y; if ((geometry->height != 0) && (page.height > geometry->height)) page.height=geometry->height; bounding_box.x+=page.x; bounding_box.y+=page.y; if ((page.width == 0) || (page.height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return((Image *) NULL); } /* Initialize crop image attributes. */ crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->page.width=image->page.width; crop_image->page.height=image->page.height; offset.x=(ssize_t) (bounding_box.x+bounding_box.width); offset.y=(ssize_t) (bounding_box.y+bounding_box.height); if ((offset.x > (ssize_t) image->page.width) || (offset.y > (ssize_t) image->page.height)) { crop_image->page.width=bounding_box.width; crop_image->page.height=bounding_box.height; } crop_image->page.x=bounding_box.x; crop_image->page.y=bounding_box.y; /* Crop image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); crop_view=AcquireAuthenticCacheView(crop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,crop_image,crop_image->rows,1) #endif for (y=0; y < (ssize_t) crop_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns, 1,exception); q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) crop_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel); if ((traits == UndefinedPixelTrait) || (crop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(crop_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(crop_image); } if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CropImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } crop_view=DestroyCacheView(crop_view); image_view=DestroyCacheView(image_view); crop_image->type=image->type; if (status == MagickFalse) crop_image=DestroyImage(crop_image); return(crop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e T o T i l e s % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImageToTiles() crops a single image, into a possible list of tiles.
%  This may include a single sub-region of the image.  This basically applies
%  all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,
%         const RectangleInfo *crop_geometry, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Round a fractional pixel offset to the nearest integer (ties resolve to
  ceil(), since floor() wins only when strictly closer).
*/
static inline ssize_t PixelRoundOffset(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(CastDoubleToLong(floor(x)));
  return(CastDoubleToLong(ceil(x)));
}

MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag): geometry.width x geometry.height gives
        the tile count; delta is the (fractional) tile size.
      */
      crop_image=NewImageList();
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;  /* increment now to find width */
            crop.height=(size_t) PixelRoundOffset((double) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;  /* increment now to find width */
            /*
              Fixed: the comparison was "geometry.y < -1", which silently
              dropped the offset when geometry.y == -1; the parallel x-axis
              code below uses "geometry.x < 0".
            */
            crop.height=(size_t) PixelRoundOffset((double) (offset.y+
              (geometry.y < 0 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH; a failed crop aborts both loops.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=(Image *) NULL;
      crop_image=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /*
    Image is already no larger than the requested geometry: return a clone.
  */
  return(CloneImage(image,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x c e r p t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
%  The format of the ExcerptImage method is:
%
%      Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to excerpt with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag  "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row: copy the source region row-by-row; geometry->x/y may
    reference virtual pixels outside the canvas (virtual cache view).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip remaining iterations */
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) excerpt_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Copy only channels that are defined in both source and destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (excerpt_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(excerpt_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(excerpt_image);
    }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x t e n t I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExtentImage() extends the image as defined by the geometry, gravity, and
%  image background color.  Set the (x,y) offset of the geometry to move the
%  original image relative to the extended image.
%
%  The format of the ExtentImage method is:
%
%      Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  MagickBooleanType
    status;

  /*
    Allocate extent image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Fill the extended canvas with the background color, then composite the
    original at the negated geometry offset.
  */
  status=SetImageBackgroundColor(extent_image,exception);
  if (status == MagickFalse)
    {
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  status=CompositeImage(extent_image,image,image->compose,MagickTrue,
    -geometry->x,-geometry->y,exception);
  if (status != MagickFalse)
    Update8BIMClipPath(extent_image,image->columns,image->rows,geometry);
  return(extent_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l i p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlipImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis.
%
%  The format of the FlipImage method is:
%
%      Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag  "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flip_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: source row y is written to destination row (rows-y-1).
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip remaining iterations */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) flip_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Copy only channels that are defined in both source and destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flip_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flip_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(flip_image);
    }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlipImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /*
    Mirror the virtual-canvas y offset as well so the page stays consistent.
  */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlopImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis.
%
%  The format of the FlopImage method is:
%
%      Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag  "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flop_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: read left-to-right, write right-to-left.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip remaining iterations */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      q starts one pixel past the end of the row and is decremented before
      each write, mirroring the row horizontally.
    */
    q+=GetPixelChannels(flop_image)*flop_image->columns;
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      ssize_t
        i;

      q-=GetPixelChannels(flop_image);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /*
    Mirror the virtual-canvas x offset as well so the page stays consistent.
  */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o l l I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RollImage() offsets an image as defined by x_offset and y_offset.
%
%  The format of the RollImage method is:
%
%      Image *RollImage(const Image *image,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x_offset: the number of columns to roll in the horizontal direction.
%
%    o y_offset: the number of rows to roll in the vertical direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Copy a columns x rows region of source at (sx,sy) into destination at
  (dx,dy).  Returns MagickFalse if any scanline could not be transferred.
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
  const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  if (columns == 0)
    return(MagickTrue);  /* empty region: nothing to transfer */
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      /*
        Copy only channels that are defined in both source and destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait source_traits=GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((source_traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],q);
      }
      p+=GetPixelChannels(source);
      q+=GetPixelChannels(destination);
    }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag  "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  roll_image=CloneImage(image,0,0,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the offsets into [0, columns) and [0, rows).
  */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: the wrap-around splits the image into four quadrants, each
    copied separately.
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShaveImage() shaves pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
% % The format of the ShaveImage method is: % % Image *ShaveImage(const Image *image,const RectangleInfo *shave_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o shave_image: Method ShaveImage returns a pointer to the shaved % image. A null image is returned if there is a memory shortage or % if the image width or height is zero. % % o image: the image. % % o shave_info: Specifies a pointer to a RectangleInfo which defines the % region of the image to crop. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShaveImage(const Image *image, const RectangleInfo *shave_info,ExceptionInfo *exception) { Image *shave_image; RectangleInfo geometry; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (((2*shave_info->width) >= image->columns) || ((2*shave_info->height) >= image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); SetGeometry(image,&geometry); geometry.width-=2*shave_info->width; geometry.height-=2*shave_info->height; geometry.x=(ssize_t) shave_info->width+image->page.x; geometry.y=(ssize_t) shave_info->height+image->page.y; shave_image=CropImage(image,&geometry,exception); if (shave_image == (Image *) NULL) return((Image *) NULL); shave_image->page.width-=2*shave_info->width; shave_image->page.height-=2*shave_info->height; shave_image->page.x-=(ssize_t) shave_info->width; shave_image->page.y-=(ssize_t) shave_info->height; return(shave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p l i c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpliceImage() splices a solid color into the image as defined by the % geometry. 
% % The format of the SpliceImage method is: % % Image *SpliceImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to splice with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse) { splice_image=DestroyImage(splice_image); return((Image *) NULL); } if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) && (IsGrayColorspace(splice_image->colorspace) != MagickFalse)) (void) SetImageColorspace(splice_image,sRGBColorspace,exception); if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) && (splice_image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlpha(splice_image,OpaqueAlpha,exception); (void) SetImageBackgroundColor(splice_image,exception); /* Respect image geometry. 
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. 
*/ status=MagickTrue; progress=0; columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns); image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_geometry.y,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) 
continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_image->rows,2) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); 
SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. 
If the operation fails, the original image handle % is left as is. % % This should only be used for single images. % % This function destroys what it assumes to be a single image list. % If the input image is part of a larger list, all other images in that list % will be simply 'lost', not destroyed. % % Also if the crop generates a list of images only the first image is resized. % And finally if the crop succeeds and the resize failed, you will get a % cropped image, as well as a 'false' or 'failed' report. % % This function and should probably be deprecated in favor of direct calls % to CropImageToTiles() or ResizeImage(), as appropriate. % % The format of the TransformImage method is: % % MagickBooleanType TransformImage(Image **image,const char *crop_geometry, % const char *image_geometry,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType TransformImage(Image **image, const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception) { Image *resize_image, *transform_image; RectangleInfo geometry; assert(image != (Image **) NULL); assert((*image)->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); transform_image=(*image); if (crop_geometry != (const char *) NULL) { Image *crop_image; /* Crop image to a user specified size. 
*/ crop_image=CropImageToTiles(*image,crop_geometry,exception); if (crop_image == (Image *) NULL) transform_image=CloneImage(*image,0,0,MagickTrue,exception); else { transform_image=DestroyImage(transform_image); transform_image=GetFirstImageInList(crop_image); } *image=transform_image; } if (image_geometry == (const char *) NULL) return(MagickTrue); /* Scale image to a user specified size. */ (void) ParseRegionGeometry(transform_image,image_geometry,&geometry, exception); if ((transform_image->columns == geometry.width) && (transform_image->rows == geometry.height)) return(MagickTrue); resize_image=ResizeImage(transform_image,geometry.width,geometry.height, transform_image->filter,exception); if (resize_image == (Image *) NULL) return(MagickFalse); transform_image=DestroyImage(transform_image); transform_image=resize_image; *image=transform_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p o s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransposeImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis while rotating them by 90 degrees. % % The format of the TransposeImage method is: % % Image *TransposeImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag  "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* output image has swapped dimensions: rows x columns */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* read source row (rows-y-1); write it as the single column x=(rows-y-1)
       of the transpose (width 1, height transpose_image->rows) */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      /* copy only channels defined in both images */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transpose_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transpose_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(transpose_image);
    }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /* swap page geometry to match the swapped axes */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s v e r s e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransverseImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis while rotating them by 270 degrees.
%
%  The format of the TransverseImage method is:
%
%      Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag  "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* output image has swapped dimensions: rows x columns */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* read source row y; write it reversed into column x=(rows-y-1) of the
       transverse image (width 1, height transverse_image->rows) */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
      0,1,transverse_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* start q past the end of the column and walk it backwards */
    q+=GetPixelChannels(transverse_image)*image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      q-=GetPixelChannels(transverse_image);
      /* copy only channels defined in both images */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transverse_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transverse_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /* swap page geometry and reflect the offsets to match the 270-degree turn */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r i m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TrimImage() trims pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the TrimImage method is:
%
%      Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  const char
    *artifact;

  Image
    *trim_image;

  RectangleInfo
    geometry,
    page;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  geometry=GetImageBoundingBox(image,exception);
  if ((geometry.width == 0) || (geometry.height == 0))
    {
      Image
        *crop_image;

      /*
        Fully blank image: return a 1x1 transparent placeholder instead of an
        empty crop.
      */
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.alpha_trait=BlendPixelTrait;
      crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
      (void) SetImageBackgroundColor(crop_image,exception);
      crop_image->page=image->page;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      return(crop_image);
    }
  page=geometry;
  artifact=GetImageArtifact(image,"trim:minSize");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&page);
  if ((geometry.width < page.width) && (geometry.height < page.height))
    {
      /*
        Limit trim to a minimum size.
        NOTE(review): each gravity case shifts the crop origin opposite to the
        naive reading (e.g. NorthWestGravity pins the bottom-right corner);
        confirm this mapping is intentional before changing it.
      */
      switch (image->gravity)
      {
        case CenterGravity:
        {
          geometry.x-=((ssize_t) page.width-geometry.width)/2;
          geometry.y-=((ssize_t) page.height-geometry.height)/2;
          break;
        }
        case NorthWestGravity:
        {
          geometry.x-=((ssize_t) page.width-geometry.width);
          geometry.y-=((ssize_t) page.height-geometry.height);
          break;
        }
        case NorthGravity:
        {
          geometry.x-=((ssize_t) page.width-geometry.width)/2;
          geometry.y-=((ssize_t) page.height-geometry.height);
          break;
        }
        case NorthEastGravity:
        {
          geometry.y-=((ssize_t) page.height-geometry.height);
          break;
        }
        case EastGravity:
        {
          geometry.y-=((ssize_t) page.height-geometry.height)/2;
          break;
        }
        case SouthEastGravity:
          break;
        case SouthGravity:
        {
          geometry.x-=((ssize_t) page.width-geometry.width)/2;
          break;
        }
        case SouthWestGravity:
        {
          geometry.x-=((ssize_t) page.width-geometry.width);
          break;
        }
        case WestGravity:
        {
          geometry.x-=((ssize_t) page.width-geometry.width);
          geometry.y-=((ssize_t) page.height-geometry.height)/2;
          break;
        }
        default:
          break;
      }
      geometry.width=page.width;
      geometry.height=page.height;
    }
  /* express the crop origin in page (canvas) coordinates */
  geometry.x+=image->page.x;
  geometry.y+=image->page.y;
  trim_image=CropImage(image,&geometry,exception);
  if (trim_image != (Image *) NULL)
    Update8BIMClipPath(trim_image,image->columns,image->rows,&geometry);
  return(trim_image);
}
mpush2.c
/* C Library for Skeleton 2D Electrostatic OpenMP PIC Code */ /* written by Viktor K. Decyk, UCLA */ #include <stdlib.h> #include <stdio.h> #include <complex.h> #include <math.h> #include "mpush2.h" /*--------------------------------------------------------------------*/ double ranorm() { /* this program calculates a random number y from a gaussian distribution with zero mean and unit variance, according to the method of mueller and box: y(k) = (-2*ln(x(k)))**1/2*sin(2*pi*x(k+1)) y(k+1) = (-2*ln(x(k)))**1/2*cos(2*pi*x(k+1)), where x is a random number uniformly distributed on (0,1). written for the ibm by viktor k. decyk, ucla local data */ static int r1 = 885098780, r2 = 1824280461; static int r4 = 1396483093, r5 = 55318673; static int iflg = 0; static double h1l = 65531.0, h1u = 32767.0, h2l = 65525.0; static double r0 = 0.0; int isc, i1; double ranorm, r3, asc, bsc, temp; if (iflg==1) { ranorm = r0; r0 = 0.0; iflg = 0; return ranorm; } isc = 65536; asc = (double) isc; bsc = asc*asc; i1 = r1 - (r1/isc)*isc; r3 = h1l*(double) r1 + asc*h1u*(double) i1; i1 = r3/bsc; r3 -= ((double) i1)*bsc; bsc = 0.5*bsc; i1 = r2/isc; isc = r2 - i1*isc; r0 = h1l*(double) r2 + asc*h1u*(double) isc; asc = 1.0/bsc; isc = r0*asc; r2 = r0 - ((double) isc)*bsc; r3 += (double) isc + 2.0*h1u*(double) i1; isc = r3*asc; r1 = r3 - ((double) isc)*bsc; temp = sqrt(-2.0*log((((double) r1) + ((double) r2)*asc)*asc)); isc = 65536; asc = (double) isc; bsc = asc*asc; i1 = r4 - (r4/isc)*isc; r3 = h2l*(double) r4 + asc*h1u*(double) i1; i1 = r3/bsc; r3 -= ((double) i1)*bsc; bsc = 0.5*bsc; i1 = r5/isc; isc = r5 - i1*isc; r0 = h2l*(double) r5 + asc*h1u*(double) isc; asc = 1.0/bsc; isc = r0*asc; r5 = r0 - ((double) isc)*bsc; r3 += (double) isc + 2.0*h1u*(double) i1; isc = r3*asc; r4 = r3 - ((double) isc)*bsc; r0 = 6.28318530717959*((((double) r4) + ((double) r5)*asc)*asc); ranorm = temp*sin(r0); r0 = temp*cos(r0); iflg = 1; return ranorm; } 
/*--------------------------------------------------------------------*/
void cdistr2(float part[], float vtx, float vty, float vdx, float vdy,
             int npx, int npy, int idimp, int nop, int nx, int ny,
             int ipbc) {
/* for 2d code, this subroutine calculates initial particle co-ordinates
   and velocities with uniform density and maxwellian velocity with drift
   part[n][0] = position x of particle n
   part[n][1] = position y of particle n
   part[n][2] = velocity vx of particle n
   part[n][3] = velocity vy of particle n
   vtx/vty = thermal velocity of electrons in x/y direction
   vdx/vdy = drift velocity of beam electrons in x/y direction
   npx/npy = initial number of particles distributed in x/y direction
   idimp = size of phase space = 4
   nop = number of particles
   nx/ny = system length in x/y direction
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
   ranorm = gaussian random number with zero mean and unit variance
local data */
   int j, k, k1, npxy;
   float edgelx, edgely, at1, at2, at3, sum1, sum2;
   double dsum1, dsum2;
   npxy = npx*npy;
/* set boundary values: reflecting walls exclude one guard cell per side */
   edgelx = 0.0;
   edgely = 0.0;
   at1 = (float) nx/(float) npx;
   at2 = (float) ny/(float) npy;
   if (ipbc==2) {
      edgelx = 1.0;
      edgely = 1.0;
      at1 = (float) (nx-2)/(float) npx;
      at2 = (float) (ny-2)/(float) npy;
   }
   else if (ipbc==3) {
      edgelx = 1.0;
      at1 = (float) (nx-2)/(float) npx;
   }
/* uniform density profile: particles placed at cell centers of an
   npx x npy lattice */
   for (k = 0; k < npy; k++) {
      k1 = idimp*npx*k;
      at3 = edgely + at2*(((float) k) + 0.5);
      for (j = 0; j < npx; j++) {
         part[idimp*j+k1] = edgelx + at1*(((float) j) + 0.5);
         part[1+idimp*j+k1] = at3;
      }
   }
/* maxwellian velocity distribution (ranorm has static state: serial only) */
   for (j = 0; j < npxy; j++) {
      part[2+idimp*j] = vtx*ranorm();
      part[3+idimp*j] = vty*ranorm();
   }
/* add correct drift: subtract the sampled mean, then add vdx/vdy */
   dsum1 = 0.0;
   dsum2 = 0.0;
   for (j = 0; j < npxy; j++) {
      dsum1 += part[2+idimp*j];
      dsum2 += part[3+idimp*j];
   }
   sum1 = dsum1;
   sum2 = dsum2;
   at1 = 1.0/(float) npxy;
   sum1 = at1*sum1 - vdx;
   sum2 = at1*sum2 - vdy;
   for (j = 0; j < npxy; j++) {
      part[2+idimp*j] -= sum1;
      part[3+idimp*j] -= sum2;
   }
   return;
}

/*--------------------------------------------------------------------*/
void cdblkp2l(float part[], int kpic[], int *nppmx, int idimp, int nop,
              int mx, int my, int mx1, int mxy1, int *irc) {
/* this subroutine finds the maximum number of particles in each tile of
   mx, my to calculate size of segmented particle array ppart
   linear interpolation
   part = input particle array
   part[n][0] = position x of particle n
   part[n][1] = position y of particle n
   kpic = output number of particles per tile
   nppmx = return maximum number of particles in tile
   idimp = size of phase space = 4
   nop = number of particles
   mx/my = number of grids in sorting cell in x and y
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int j, k, n, m, isum, ist, npx, ierr;
   ierr = 0;
/* clear counter array */
   for (k = 0; k < mxy1; k++) {
      kpic[k] = 0;
   }
/* find how many particles in each tile; track the worst tile-index
   overflow in ierr instead of writing out of bounds */
   for (j = 0; j < nop; j++) {
      n = part[idimp*j];
      m = part[1+idimp*j];
      n = n/mx;
      m = m/my;
      m = n + mx1*m;
      if (m < mxy1) {
         kpic[m] += 1;
      }
      else {
         ierr = ierr > (m - mxy1 + 1) ? ierr : (m - mxy1 + 1);
      }
   }
/* find maximum occupancy over all tiles */
   isum = 0;
   npx = 0;
   for (k = 0; k < mxy1; k++) {
      ist = kpic[k];
      npx = npx > ist ?
npx : ist;
      isum += ist;
   }
   *nppmx = npx;
/* check for errors: isum != nop means some particles were out of range */
   if (ierr > 0) {
      *irc = ierr;
   }
   else if (isum != nop) {
      *irc = -1;
   }
   return;
}

/*--------------------------------------------------------------------*/
void cppmovin2l(float part[], float ppart[], int kpic[], int nppmx,
                int idimp, int nop, int mx, int my, int mx1, int mxy1,
                int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my and
   copies to segmented array ppart
   linear interpolation
   input: all except ppart, kpic, output: ppart, kpic
   part/ppart = input/output particle arrays
   part[n][0] = position x of particle n in partition
   part[n][1] = position y of particle n in partition
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = velocity vx of particle n in tile m
   ppart[m][n][3] = velocity vy of particle n in tile m
   kpic = output number of particles per tile
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 4
   nop = number of particles
   mx/my = number of grids in sorting cell in x and y
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int i, j, k, n, m, ip, ierr;
   ierr = 0;
/* clear counter array */
   for (k = 0; k < mxy1; k++) {
      kpic[k] = 0;
   }
/* find addresses of particles at each tile and reorder particles;
   on tile overflow record the worst excess in ierr rather than writing
   past the end of ppart */
   for (j = 0; j < nop; j++) {
      n = part[idimp*j];
      m = part[1+idimp*j];
      n = n/mx;
      m = m/my;
      m = n + mx1*m;
      ip = kpic[m];
      if (ip < nppmx) {
         for (i = 0; i < idimp; i++) {
            ppart[i+idimp*(ip+nppmx*m)] = part[i+idimp*j];
         }
      }
      else {
         ierr = ierr > ip-nppmx+1 ? ierr : ip-nppmx+1;
      }
      kpic[m] = ip + 1;
   }
   if (ierr > 0)
      *irc = ierr;
   return;
}

/*--------------------------------------------------------------------*/
void cppcheck2l(float ppart[], int kpic[], int idimp, int nppmx, int nx,
                int ny, int mx, int my, int mx1, int my1, int *irc) {
/* this subroutine performs a sanity check to make sure particles sorted
   by x,y grid in tiles of mx, my, are all within bounds.
   tiles are assumed to be arranged in 2D linear memory
   input: all except irc
   output: irc
   ppart[k][n][0] = position x of particle n in tile k
   ppart[k][n][1] = position y of particle n in tile k
   kpic[k] = number of reordered output particles in tile k
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   irc = particle error, returned only if error occurs, when irc > 0
   note: *irc is written concurrently by multiple threads without
   synchronization; any nonzero result indicates some offending tile
local data */
   int mxy1, noff, moff, npp, j, k, ist, nn, mm;
   float edgelx, edgely, edgerx, edgery, dx, dy;
   mxy1 = mx1*my1;
/* loop over tiles */
#pragma omp parallel for \
private(j,k,noff,moff,npp,nn,mm,ist,edgelx,edgely,edgerx,edgery,dx,dy)
   for (k = 0; k < mxy1; k++) {
/* (noff,moff) = grid origin of tile k; (nn,mm) = tile extent clipped
   to the system boundary */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ?
my : mm;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
         dx = ppart[idimp*(j+nppmx*k)];
         dy = ppart[1+idimp*(j+nppmx*k)];
/* find particles going out of bounds; ist encodes the offending side(s) */
         ist = 0;
         if (dx < edgelx)
            ist = 1;
         if (dx >= edgerx)
            ist = 2;
         if (dy < edgely)
            ist += 3;
         if (dy >= edgery)
            ist += 6;
         if (ist > 0)
            *irc = k + 1;
      }
   }
   return;
}

/*--------------------------------------------------------------------*/
void cgppush2l(float ppart[], float fxy[], int kpic[], float qbm,
               float dt, float *ek, int idimp, int nppmx, int nx,
               int ny, int mx, int my, int nxv, int nyv, int mx1,
               int mxy1, int ipbc) {
/* for 2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, with various boundary conditions.
   OpenMP version using guard cells
   data read in tiles
   particles stored segmented array
   44 flops/particle, 12 loads, 4 stores
   input: all, output: ppart, ek
   equations used are:
   vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t))*dt,
   vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t))*dt,
   where q/m is charge/mass, and
   x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt
   fx(x(t),y(t)) and fy(x(t),y(t)) are approximated by interpolation from
   the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m))
      + dy*((1-dx)*fx(n,m+1) + dx*fx(n+1,m+1))
   fy(x,y) = (1-dy)*((1-dx)*fy(n,m)+dx*fy(n+1,m))
      + dy*((1-dx)*fy(n,m+1) + dx*fy(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = velocity vx of particle n in tile m
   ppart[m][n][3] = velocity vy of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   kpic = number of particles per tile
   qbm = particle charge/mass
   dt = time interval between successive calculations
   kinetic energy/mass at time t is also calculated, using
   ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2)
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of field arrays, must be >= nx+1
   nyv = second dimension of field arrays, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
#define MXV             33
#define MYV             33
   int noff, moff, npoff, npp;
   int i, j, k, nn, mm, mxv;
   float qtm, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy;
/* per-thread copy of the tile's field, including one guard row/column */
   float sfxy[2*MXV*MYV];
/* float sfxy[2*(mx+1)*(my+1)]; */
   double sum1, sum2;
/* mxv = MXV; */
   mxv = mx+1;
   qtm = qbm*dt;
   sum2 = 0.0;
/* set boundary values */
   edgelx = 0.0f;
   edgely = 0.0f;
   edgerx = (float) nx;
   edgery = (float) ny;
   if (ipbc==2) {
      edgelx = 1.0f;
      edgely = 1.0f;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
   }
   else if (ipbc==3) {
      edgelx = 1.0f;
      edgerx = (float) (nx-1);
   }
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,x,y,dxp,dyp,amx,amy,dx,dy, \
vx,vy,sum1,sfxy) \
reduction(+:sum2)
   for (k = 0; k < mxy1; k++) {
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* load local fields from global array (tile extent + 1 guard point) */
      nn = (mx < nx-noff ? mx : nx-noff) + 1;
      mm = (my < ny-moff ? my : ny-moff) + 1;
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[2*(i+mxv*j)] = fxy[2*(i+noff+nxv*(j+moff))];
            sfxy[1+2*(i+mxv*j)] = fxy[1+2*(i+noff+nxv*(j+moff))];
         }
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
         nn = 2*(nn - noff) + 2*mxv*(mm - moff);
         amx = 1.0f - dxp;
         amy = 1.0f - dyp;
/* find acceleration: bilinear interpolation of the 4 surrounding points */
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dx = amy*(dxp*sfxy[nn+2] + dx);
         dy = amy*(dxp*sfxy[nn+3] + dy);
         nn += 2*mxv;
         vx = amx*sfxy[nn];
         vy = amx*sfxy[nn+1];
         dx += dyp*(dxp*sfxy[nn+2] + vx);
         dy += dyp*(dxp*sfxy[nn+3] + vy);
/* new velocity */
         vx = ppart[2+idimp*(j+npoff)];
         vy = ppart[3+idimp*(j+npoff)];
         dx = vx + qtm*dx;
         dy = vy + qtm*dy;
/* average kinetic energy: uses (v(t-dt/2)+v(t+dt/2)) squared */
         vx += dx;
         vy += dy;
         sum1 += vx*vx + vy*vy;
         ppart[2+idimp*(j+npoff)] = dx;
         ppart[3+idimp*(j+npoff)] = dy;
/* new position */
         dx = x + dx*dt;
         dy = y + dy*dt;
/* reflecting boundary conditions */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = ppart[1+idimp*(j+npoff)];
               ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)];
            }
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
      }
      sum2 += sum1;
   }
/* normalize kinetic energy */
   *ek += 0.125f*sum2;
   return;
#undef MXV
#undef MYV
}

/*--------------------------------------------------------------------*/
void cgppushf2l(float ppart[], float fxy[], int kpic[], int ncl[],
                int ihole[], float qbm, float dt, float *ek, int idimp,
                int nppmx, int nx, int ny, int mx, int my, int nxv,
                int nyv, int mx1, int mxy1, int
ntmax, int *irc) {
/* for 2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, with periodic boundary conditions.
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells
   data read in tiles
   particles stored segmented array
   44 flops/particle, 12 loads, 4 stores
   input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc
   equations used are:
   vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t))*dt,
   vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t))*dt,
   where q/m is charge/mass, and
   x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt
   fx(x(t),y(t)) and fy(x(t),y(t)) are approximated by interpolation from
   the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m))
      + dy*((1-dx)*fx(n,m+1) + dx*fx(n+1,m+1))
   fy(x,y) = (1-dy)*((1-dx)*fy(n,m)+dx*fy(n+1,m))
      + dy*((1-dx)*fy(n,m+1) + dx*fy(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = velocity vx of particle n in tile m
   ppart[m][n][3] = velocity vy of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qbm = particle charge/mass
   dt = time interval between successive calculations
   kinetic energy/mass at time t is also calculated, using
   ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2)
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of field arrays, must be >= nx+1
   nyv = second dimension of field arrays, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
local data */
#define MXV             33
#define MYV             33
   int noff, moff, npoff, npp;
   int i, j, k, ih, nh, nn, mm, mxv;
   float qtm, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy;
   float anx, any, edgelx, edgely, edgerx, edgery;
/* per-thread copy of the tile's field, including one guard row/column */
   float sfxy[2*MXV*MYV];
/* float sfxy[2*(mx+1)*(my+1)]; */
   double sum1, sum2;
   mxv = mx + 1;
   qtm = qbm*dt;
   anx = (float) nx;
   any = (float) ny;
   sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,ih,nh,x,y,dxp,dyp,amx,amy, \
dx,dy,vx,vy,edgelx,edgely,edgerx,edgery,sum1,sfxy) \
reduction(+:sum2)
   for (k = 0; k < mxy1; k++) {
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* tile edges in grid units, clipped to the system boundary */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
      ih = 0;
      nh = 0;
      nn += 1;
      mm += 1;
/* load local fields from global array */
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[2*(i+mxv*j)] = fxy[2*(i+noff+nxv*(j+moff))];
            sfxy[1+2*(i+mxv*j)] = fxy[1+2*(i+noff+nxv*(j+moff))];
         }
      }
/* clear counters (8 possible departure directions per tile) */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
         nn = 2*(nn - noff) + 2*mxv*(mm - moff);
         amx = 1.0f - dxp;
         amy = 1.0f - dyp;
/* find acceleration: bilinear interpolation of the 4 surrounding points */
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dx = amy*(dxp*sfxy[nn+2] + dx);
         dy = amy*(dxp*sfxy[nn+3] + dy);
         nn += 2*mxv;
         vx = amx*sfxy[nn];
         vy = amx*sfxy[nn+1];
         dx += dyp*(dxp*sfxy[nn+2] + vx);
         dy += dyp*(dxp*sfxy[nn+3] + vy);
/* new velocity */
         vx = ppart[2+idimp*(j+npoff)];
         vy = ppart[3+idimp*(j+npoff)];
         dx = vx + qtm*dx;
         dy = vy + qtm*dy;
/* average kinetic energy */
         vx += dx;
         vy += dy;
         sum1 += (vx*vx + vy*vy);
         ppart[2+idimp*(j+npoff)] = dx;
         ppart[3+idimp*(j+npoff)] = dy;
/* new position */
         dx = x + dx*dt;
         dy = y + dy*dt;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going                              */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
            if (ih
<= ntmax) { ihole[2*(ih+(ntmax+1)*k)] = j + 1; ihole[1+2*(ih+(ntmax+1)*k)] = mm; } else { nh = 1; } } } sum2 += sum1; /* set error and end of file flag */ /* ihole overflow */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*k] = ih; } /* normalize kinetic energy */ *ek += 0.125f*sum2; return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void cgppost2l(float ppart[], float q[], int kpic[], float qm, int nppmx, int idimp, int mx, int my, int nxv, int nyv, int mx1, int mxy1) { /* for 2d code, this subroutine calculates particle charge density using first-order linear interpolation, periodic boundaries OpenMP version using guard cells data deposited in tiles particles stored segmented array 17 flops/particle, 6 loads, 4 stores input: all, output: q charge density is approximated by values at the nearest grid points q(n,m)=qm*(1.-dx)*(1.-dy) q(n+1,m)=qm*dx*(1.-dy) q(n,m+1)=qm*(1.-dx)*dy q(n+1,m+1)=qm*dx*dy where n,m = leftmost grid points and dx = x-n, dy = y-m ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m q[k][j] = charge density at grid point j,k kpic = number of particles per tile qm = charge on particle, in units of e nppmx = maximum number of particles in tile idimp = size of phase space = 4 mx/my = number of grids in sorting cell in x/y nxv = first dimension of charge array, must be >= nx+1 nyv = second dimension of charge array, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 local data */ #define MXV 33 #define MYV 33 int noff, moff, npoff, npp, mxv; int i, j, k, nn, mm; float x, y, dxp, dyp, amx, amy; float sq[MXV*MYV]; /* float sq[(mx+1)*(my+1)]; */ mxv = mx + 1; /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ 
private(i,j,k,noff,moff,npp,npoff,nn,mm,x,y,dxp,dyp,amx,amy,sq) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[k]; npoff = nppmx*k; /* zero out local accumulator */ for (j = 0; j < mxv*(my+1); j++) { sq[j] = 0.0f; } /* loop over particles in tile */ for (j = 0; j < npp; j++) { /* find interpolation weights */ x = ppart[idimp*(j+npoff)]; y = ppart[1+idimp*(j+npoff)]; nn = x; mm = y; dxp = qm*(x - (float) nn); dyp = y - (float) mm; nn = nn - noff + mxv*(mm - moff); amx = qm - dxp; amy = 1.0f - dyp; /* deposit charge within tile to local accumulator */ x = sq[nn] + amx*amy; y = sq[nn+1] + dxp*amy; sq[nn] = x; sq[nn+1] = y; nn += mxv; x = sq[nn] + amx*dyp; y = sq[nn+1] + dxp*dyp; sq[nn] = x; sq[nn+1] = y; } /* deposit charge to interior points in global array */ nn = nxv - noff; mm = nyv - moff; nn = mx < nn ? mx : nn; mm = my < mm ? my : mm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { q[i+noff+nxv*(j+moff)] += sq[i+mxv*j]; } } /* deposit charge to edge points in global array */ mm = nyv - moff; mm = my+1 < mm ? my+1 : mm; for (i = 1; i < nn; i++) { #pragma omp atomic q[i+noff+nxv*moff] += sq[i]; if (mm > my) { #pragma omp atomic q[i+noff+nxv*(mm+moff-1)] += sq[i+mxv*(mm-1)]; } } nn = nxv - noff; nn = mx+1 < nn ? mx+1 : nn; for (j = 0; j < mm; j++) { #pragma omp atomic q[noff+nxv*(j+moff)] += sq[mxv*j]; if (nn > mx) { #pragma omp atomic q[nn+noff-1+nxv*(j+moff)] += sq[nn-1+mxv*j]; } } } return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void cpporder2l(float ppart[], float ppbuff[], int kpic[], int ncl[], int ihole[], int idimp, int nppmx, int nx, int ny, int mx, int my, int mx1, int my1, int npbmx, int ntmax, int *irc) { /* this subroutine sorts particles by x,y grid in tiles of mx, my linear interpolation, with periodic boundary conditions tiles are assumed to be arranged in 2D linear memory algorithm has 3 steps. 
first, one finds particles leaving tile and stores their number in each directon, location, and destination in ncl and ihole. second, a prefix scan of ncl is performed and departing particles are buffered in ppbuff in direction order. finally, we copy the incoming particles from other tiles into ppart. input: all except ppbuff, ncl, ihole, irc output: ppart, ppbuff, kpic, ncl, ihole, irc ppart[k][n][0] = position x of particle n in tile k ppart[k][n][1] = position y of particle n in tile k ppbuff[k][n][i] = i co-ordinate of particle n in tile k kpic[k] = number of particles in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = direction destination of particle leaving hole all for tile k ihole[k][0][0] = ih, number of holes left (error, if negative) idimp = size of phase space = 4 nppmx = maximum number of particles in tile nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 local data */ int mxy1, noff, moff, npp, ncoff; int i, j, k, ii, kx, ky, ih, nh, ist, nn, mm, isum; int ip, j1, j2, kxl, kxr, kk, kl, kr; float anx, any, edgelx, edgely, edgerx, edgery, dx, dy; int ks[8]; mxy1 = mx1*my1; anx = (float) nx; any = (float) ny; /* find and count particles leaving tiles and determine destination */ /* update ppart, ihole, ncl */ /* loop over tiles */ #pragma omp parallel for \ private(j,k,noff,moff,npp,nn,mm,ih,nh,ist,dx,dy,edgelx,edgely,edgerx, \ edgery) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[k]; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? 
my : mm; ih = 0; nh = 0; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; /* clear counters */ for (j = 0; j < 8; j++) { ncl[j+8*k] = 0; } /* loop over particles in tile */ for (j = 0; j < npp; j++) { dx = ppart[idimp*(j+nppmx*k)]; dy = ppart[1+idimp*(j+nppmx*k)]; /* find particles going out of bounds */ ist = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* ist = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) ppart[idimp*(j+nppmx*k)] = dx - anx; ist = 2; } else if (dx < edgelx) { if (dx < 0.0) { dx += anx; if (dx < anx) ist = 1; else dx = 0.0; ppart[idimp*(j+nppmx*k)] = dx; } else { ist = 1; } } if (dy >= edgery) { if (dy >= any) ppart[1+idimp*(j+nppmx*k)] = dy - any; ist += 6; } else if (dy < edgely) { if (dy < 0.0) { dy += any; if (dy < any) ist += 3; else dy = 0.0; ppart[1+idimp*(j+nppmx*k)] = dy; } else { ist += 3; } } if (ist > 0) { ncl[ist+8*k-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*k)] = j + 1; ihole[1+2*(ih+(ntmax+1)*k)] = ist; } else { nh = 1; } } } /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*k] = ih; } /* ihole overflow */ if (*irc > 0) return; /* buffer particles that are leaving tile: update ppbuff, ncl */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,isum,ist,nh,ip,j1,ii) for (k = 0; k < mxy1; k++) { /* find address offset for ordered ppbuff array */ isum = 0; for (j = 0; j < 8; j++) { ist = ncl[j+8*k]; ncl[j+8*k] = isum; isum += ist; } nh = ihole[2*(ntmax+1)*k]; ip = 0; /* loop over particles leaving tile */ for (j = 0; j < nh; j++) { /* buffer particles that are leaving tile, in direction order */ j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1; ist = ihole[1+2*(j+1+(ntmax+1)*k)]; ii = ncl[ist+8*k-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[i+idimp*(ii+npbmx*k)] = 
ppart[i+idimp*(j1+nppmx*k)]; } } else { ip = 1; } ncl[ist+8*k-1] = ii + 1; } /* set error */ if (ip > 0) *irc = ncl[7+8*k]; } /* ppbuff overflow */ if (*irc > 0) return; /* copy incoming particles from buffer into ppart: update ppart, kpic */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,ii,kk,npp,kx,ky,kl,kr,kxl,kxr,ih,nh,ncoff,ist,j1,j2,ip,ks) for (k = 0; k < mxy1; k++) { npp = kpic[k]; ky = k/mx1; /* loop over tiles in y, assume periodic boundary conditions */ kk = ky*mx1; /* find tile above */ kl = ky - 1; if (kl < 0) kl += my1; kl = kl*mx1; /* find tile below */ kr = ky + 1; if (kr >= my1) kr -= my1; kr = kr*mx1; /* loop over tiles in x, assume periodic boundary conditions */ kx = k - ky*mx1; kxl = kx - 1; if (kxl < 0) kxl += mx1; kxr = kx + 1; if (kxr >= mx1) kxr -= mx1; /* find tile number for different directions */ ks[0] = kxr + kk; ks[1] = kxl + kk; ks[2] = kx + kr; ks[3] = kxr + kr; ks[4] = kxl + kr; ks[5] = kx + kl; ks[6] = kxr + kl; ks[7] = kxl + kl; /* loop over directions */ nh = ihole[2*(ntmax+1)*k]; ncoff = 0; ih = 0; ist = 0; j1 = 0; for (ii = 0; ii < 8; ii++) { if (ii > 0) ncoff = ncl[ii-1+8*ks[ii]]; /* ip = number of particles coming from direction ii */ ip = ncl[ii+8*ks[ii]] - ncoff; for (j = 0; j < ip; j++) { ih += 1; /* insert incoming particles into holes */ if (ih <= nh) { j1 = ihole[2*(ih+(ntmax+1)*k)] - 1; } /* place overflow at end of array */ else { j1 = npp; npp += 1; } if (j1 < nppmx) { for (i = 0; i < idimp; i++) { ppart[i+idimp*(j1+nppmx*k)] = ppbuff[i+idimp*(j+ncoff+npbmx*ks[ii])]; } } else { ist = 1; } } } /* set error */ if (ist > 0) *irc = j1+1; /* fill up remaining holes in particle array with particles from bottom */ if (ih < nh) { ip = nh - ih; for (j = 0; j < ip; j++) { j1 = npp - j - 1; j2 = ihole[2*(nh-j+(ntmax+1)*k)] - 1; if (j1 > j2) { /* move particle only if it is below current hole */ for (i = 0; i < idimp; i++) { ppart[i+idimp*(j2+nppmx*k)] = ppart[i+idimp*(j1+nppmx*k)]; } } } npp -= ip; } kpic[k] = 
npp; } return; } /*--------------------------------------------------------------------*/ void cpporderf2l(float ppart[], float ppbuff[], int kpic[], int ncl[], int ihole[], int idimp, int nppmx, int mx1, int my1, int npbmx, int ntmax, int *irc) { /* this subroutine sorts particles by x,y grid in tiles of mx, my linear interpolation, with periodic boundary conditions tiles are assumed to be arranged in 2D linear memory the algorithm has 2 steps. first, a prefix scan of ncl is performed and departing particles are buffered in ppbuff in direction order. then we copy the incoming particles from other tiles into ppart. it assumes that the number, location, and destination of particles leaving a tile have been previously stored in ncl and ihole by the cgppushf2l procedure. input: all except ppbuff, irc output: ppart, ppbuff, kpic, ncl, irc ppart[k][n][0] = position x of particle n in tile k ppart[k][n][1] = position y of particle n in tile k ppbuff[k][n][i] = i co-ordinate of particle n in tile k kpic[k] = number of particles in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = direction destination of particle leaving hole all for tile k ihole[k][0][0] = ih, number of holes left (error, if negative) idimp = size of phase space = 4 nppmx = maximum number of particles in tile mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 local data */ int mxy1, npp, ncoff; int i, j, k, ii, kx, ky, ih, nh, ist, isum; int ip, j1, j2, kxl, kxr, kk, kl, kr; int ks[8]; mxy1 = mx1*my1; /* buffer particles that are leaving tile: update ppbuff, ncl */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,isum,ist,nh,ip,j1,ii) for (k = 0; k < mxy1; k++) { /* find address offset 
for ordered ppbuff array */ isum = 0; for (j = 0; j < 8; j++) { ist = ncl[j+8*k]; ncl[j+8*k] = isum; isum += ist; } nh = ihole[2*(ntmax+1)*k]; ip = 0; /* loop over particles leaving tile */ for (j = 0; j < nh; j++) { /* buffer particles that are leaving tile, in direction order */ j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1; ist = ihole[1+2*(j+1+(ntmax+1)*k)]; ii = ncl[ist+8*k-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[i+idimp*(ii+npbmx*k)] = ppart[i+idimp*(j1+nppmx*k)]; } } else { ip = 1; } ncl[ist+8*k-1] = ii + 1; } /* set error */ if (ip > 0) *irc = ncl[7+8*k]; } /* ppbuff overflow */ if (*irc > 0) return; /* copy incoming particles from buffer into ppart: update ppart, kpic */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,ii,kk,npp,kx,ky,kl,kr,kxl,kxr,ih,nh,ncoff,ist,j1,j2,ip,ks) for (k = 0; k < mxy1; k++) { npp = kpic[k]; ky = k/mx1; /* loop over tiles in y, assume periodic boundary conditions */ kk = ky*mx1; /* find tile above */ kl = ky - 1; if (kl < 0) kl += my1; kl = kl*mx1; /* find tile below */ kr = ky + 1; if (kr >= my1) kr -= my1; kr = kr*mx1; /* loop over tiles in x, assume periodic boundary conditions */ kx = k - ky*mx1; kxl = kx - 1; if (kxl < 0) kxl += mx1; kxr = kx + 1; if (kxr >= mx1) kxr -= mx1; /* find tile number for different directions */ ks[0] = kxr + kk; ks[1] = kxl + kk; ks[2] = kx + kr; ks[3] = kxr + kr; ks[4] = kxl + kr; ks[5] = kx + kl; ks[6] = kxr + kl; ks[7] = kxl + kl; /* loop over directions */ nh = ihole[2*(ntmax+1)*k]; ncoff = 0; ih = 0; ist = 0; j1 = 0; for (ii = 0; ii < 8; ii++) { if (ii > 0) ncoff = ncl[ii-1+8*ks[ii]]; /* ip = number of particles coming from direction ii */ ip = ncl[ii+8*ks[ii]] - ncoff; for (j = 0; j < ip; j++) { ih += 1; /* insert incoming particles into holes */ if (ih <= nh) { j1 = ihole[2*(ih+(ntmax+1)*k)] - 1; } /* place overflow at end of array */ else { j1 = npp; npp += 1; } if (j1 < nppmx) { for (i = 0; i < idimp; i++) { ppart[i+idimp*(j1+nppmx*k)] = 
ppbuff[i+idimp*(j+ncoff+npbmx*ks[ii])];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
      if (ih < nh) {
         ip = nh - ih;
         for (j = 0; j < ip; j++) {
            j1 = npp - j - 1;
            j2 = ihole[2*(nh-j+(ntmax+1)*k)] - 1;
            if (j1 > j2) {
/* move particle only if it is below current hole */
               for (i = 0; i < idimp; i++) {
                  ppart[i+idimp*(j2+nppmx*k)] = ppart[i+idimp*(j1+nppmx*k)];
               }
            }
         }
         npp -= ip;
      }
/* store updated particle count for this tile */
      kpic[k] = npp;
   }
   return;
}

/*--------------------------------------------------------------------*/
void ccguard2l(float fxy[], int nx, int ny, int nxe, int nye) {
/* replicate extended periodic vector field fxy
   linear interpolation
   copies column 0 into guard column nx and row 0 into guard row ny
   (plus the single corner point) so that linear interpolation near the
   upper edges wraps around periodically
   nx/ny = system length in x/y direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
local data */
   int j, k;
/* copy edges of extended field */
/* guard column: fxy(:,nx,k) = fxy(:,0,k), both vector components */
   for (k = 0; k < ny; k++) {
      fxy[2*nx+2*nxe*k] = fxy[2*nxe*k];
      fxy[1+2*nx+2*nxe*k] = fxy[1+2*nxe*k];
   }
/* guard row: fxy(:,j,ny) = fxy(:,j,0) */
   for (j = 0; j < nx; j++) {
      fxy[2*j+2*nxe*ny] = fxy[2*j];
      fxy[1+2*j+2*nxe*ny] = fxy[1+2*j];
   }
/* corner guard point */
   fxy[2*nx+2*nxe*ny] = fxy[0];
   fxy[1+2*nx+2*nxe*ny] = fxy[1];
   return;
}

/*--------------------------------------------------------------------*/
void caguard2l(float q[], int nx, int ny, int nxe, int nye) {
/* accumulate extended periodic scalar field q
   linear interpolation
   inverse of guard-cell replication: charge deposited into the guard
   column/row is folded back into the interior and the guards zeroed
   nx/ny = system length in x/y direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
local data */
   int j, k;
/* accumulate edges of extended field */
/* fold guard column nx back into column 0 */
   for (k = 0; k < ny; k++) {
      q[nxe*k] += q[nx+nxe*k];
      q[nx+nxe*k] = 0.0;
   }
/* fold guard row ny back into row 0 */
   for (j = 0; j < nx; j++) {
      q[j] += q[j+nxe*ny];
      q[j+nxe*ny] = 0.0;
   }
/* corner guard point */
   q[0] += q[nx+nxe*ny];
   q[nx+nxe*ny] = 0.0;
   return;
}

/*--------------------------------------------------------------------*/
void cmpois22(float complex q[], float complex fxy[], int isign,
              float complex ffc[], float ax, float ay, float affp,
              float *we, int nx,
int ny, int nxvh, int nyv, int nxhd, int nyhd) { /* this subroutine solves 2d poisson's equation in fourier space for force/charge (or convolution of electric field over particle shape) with periodic boundary conditions. for isign = 0, input: isign,ax,ay,affp,nx,ny,nxvh,nyhd, output: ffc for isign /= 0, input: q,ffc,isign,nx,ny,nxvh,nyhd, output: fxy,we approximate flop count is: 26*nxc*nyc + 12*(nxc + nyc) where nxc = nx/2 - 1, nyc = ny/2 - 1 equation used is: fx[ky][kx] = -sqrt(-1)*kx*g[ky][kx]*s[ky][kx]*q[ky][kx], fy[ky][kx] = -sqrt(-1)*ky*g[ky][kx]*s[ky][kx]*q[ky][kx], where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers, g[ky][kx] = (affp/(kx**2+ky**2))*s(kx,ky), s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for fx(kx=pi) = fy(kx=pi) = fx(ky=pi) = fy(ky=pi) = 0, and fx(kx=0,ky=0) = fy(kx=0,ky=0) = 0. q[k][j] = complex charge density for fourier mode (j,k) fxy[k][j][0] = x component of complex force/charge, fxy[k][j][1] = y component of complex force/charge, all for fourier mode (j,k) if isign = 0, form factor array is prepared if isign is not equal to 0, force/charge is calculated cimag(ffc[k][j]) = finite-size particle shape factor s for fourier mode (j,k) creal(ffc[k][j]) = potential green's function g for fourier mode (j,k) ax/ay = half-width of particle in x/y direction affp = normalization constant = nx*ny/np, where np=number of particles electric field energy is also calculated, using we = nx*ny*sum((affp/(kx**2+ky**2))*|q[ky][kx]*s[ky][kx]|**2) nx/ny = system length in x/y direction nxvh = first dimension of field arrays, must be >= nxh nyv = second dimension of field arrays, must be >= ny nxhd = first dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh local data */ int nxh, nyh, j, k, k1, kk, kj; float dnx, dny, dkx, dky, at1, at2, at3, at4; float complex zero, zt1, zt2; double wp, sum1; nxh = nx/2; nyh = 1 > ny/2 ? 
1 : ny/2; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; zero = 0.0 + 0.0*_Complex_I; if (isign != 0) goto L30; /* prepare form factor array */ for (k = 0; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; at1 = dky*dky; at2 = pow((dky*ay),2); for (j = 0; j < nxh; j++) { dkx = dnx*(float) j; at3 = dkx*dkx + at1; at4 = exp(-0.5*(pow((dkx*ax),2) + at2)); if (at3==0.0) { ffc[j+kk] = affp + 1.0*_Complex_I; } else { ffc[j+kk] = (affp*at4/at3) + at4*_Complex_I; } } } return; /* calculate force/charge and sum field energy */ L30: sum1 = 0.0; #pragma omp parallel for \ private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp) \ reduction(+:sum1) /* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */ for (k = 1; k < nyh; k++) { k1 = ny - k; dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; wp = 0.0; for (j = 1; j < nxh; j++) { at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]); at2 = at1*dnx*(float) j; at3 = dky*at1; zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I; zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I; fxy[2*j+2*kj] = at2*zt1; fxy[1+2*j+2*kj] = at3*zt1; fxy[2*j+2*k1] = at2*zt2; fxy[1+2*j+2*k1] = -at3*zt2; wp += at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1])); } at1 = crealf(ffc[kk])*cimagf(ffc[kk]); at3 = at1*dny*(float) k; zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I; fxy[2*kj] = zero; fxy[1+2*kj] = at3*zt1; fxy[2*k1] = zero; fxy[1+2*k1] = zero; wp += at1*(q[kj]*conjf(q[kj])); sum1 += wp; } wp = 0.0; /* mode numbers ky = 0, ny/2 */ k1 = 2*nxvh*nyh; for (j = 1; j < nxh; j++) { at1 = crealf(ffc[j])*cimagf(ffc[j]); at2 = at1*dnx*(float) j; zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I; fxy[2*j] = at2*zt1; fxy[1+2*j] = zero; fxy[2*j+k1] = zero; fxy[1+2*j+k1] = zero; wp += at1*(q[j]*conjf(q[j])); } fxy[0] = zero; fxy[1] = zero; fxy[k1] = zero; fxy[1+k1] = zero; sum1 += wp; *we = sum1*(float) (nx*ny); return; } /*--------------------------------------------------------------------*/ void cwfft2rinit(int mixup[], float complex sct[], int 
indx, int indy, int nxhyd, int nxyhd) { /* this subroutine calculates tables needed by a two dimensional real to complex fast fourier transform and its inverse. input: indx, indy, nxhyd, nxyhd output: mixup, sct mixup = array of bit reversed addresses sct = sine/cosine table indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy nxhyd = maximum of (nx/2,ny) nxyhd = one half of maximum of (nx,ny) written by viktor k. decyk, ucla local data */ int indx1, indx1y, nx, ny, nxy, nxhy, nxyh; int j, k, lb, ll, jb, it; float dnxy, arg; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; ny = 1L<<indy; nxy = nx > ny ? nx : ny; nxhy = 1L<<indx1y; /* bit-reverse index table: mixup[j] = 1 + reversed bits of j */ for (j = 0; j < nxhy; j++) { lb = j; ll = 0; for (k = 0; k < indx1y; k++) { jb = lb/2; it = lb - 2*jb; lb = jb; ll = 2*ll + it; } mixup[j] = ll + 1; } /* sine/cosine table for the angles 2*n*pi/nxy */ nxyh = nxy/2; dnxy = 6.28318530717959/(float) nxy; for (j = 0; j < nxyh; j++) { arg = dnxy*(float) j; sct[j] = cosf(arg) - sinf(arg)*_Complex_I; } return; } /*--------------------------------------------------------------------*/ void cfft2rmxx(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int nyi, int nyp, int nxhd, int nyd, int nxhyd, int nxyhd) { /* this subroutine performs the x part of a two dimensional real to complex fast fourier transform and its inverse, for a subset of y, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, an inverse fourier transform in x is performed f[m][n] = (1/nx*ny)*sum(f[k][j]*exp(-sqrt(-1)*2pi*n*j/nx)) if isign = 1, a forward fourier transform in x is performed f[k][j] = 
sum(f[m][n]*exp(sqrt(-1)*2pi*n*j/nx)) mixup = array of bit reversed addresses sct = sine/cosine table nyi = initial y index used nyp = number of y indices used nxhd = first dimension of f >= nx/2 nyd = second dimension of f >= ny nxhyd = maximum of (nx/2,ny) nxyhd = maximum of (nx,ny)/2 fourier coefficients are stored as follows: f[k][j] = real, imaginary part of mode j,k, where 0 <= j < nx/2 and 0 <= k < ny, except for f[k][1] = real, imaginary part of mode nx/2,k, where ny/2+1 <= k < ny, and imag(f[0][0]) = real part of mode nx/2,0 and imag(f[0][ny/2]) = real part of mode nx/2,ny/2 written by viktor k. decyk, ucla local data */ int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt; int nrx, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, nrxb, joff; float ani; float complex t1, t2, t3; if (isign==0) return; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nxy = nx > ny ? nx : ny; nxhy = 1L<<indx1y; nyt = nyi + nyp - 1; if (isign > 0) goto L70; /* inverse fourier transform */ nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,joff,ani,t1,t2,t3) for (i = nyi-1; i < nyt; i++) { joff = nxhd*i; /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { t1 = f[j1+joff]; f[j1+joff] = f[j+joff]; f[j+joff] = t1; } } /* then transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; t2 = t1*f[j2+joff]; f[j2+joff] = f[j1+joff] - t2; f[j1+joff] += t2; } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxy/nx; ani = 0.5/(((float) nx)*((float) ny)); for (j = 1; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; t2 = conjf(f[nxh-j+joff]); t1 = f[j+joff] + t2; t2 = (f[j+joff] - t2)*t3; f[j+joff] = ani*(t1 + t2); f[nxh-j+joff] = 
ani*conjf(t1 - t2); } ani = 2.0*ani; f[nxhh+joff] = ani*conjf(f[nxhh+joff]); f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I); } return; /* forward fourier transform */ L70: nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,joff,t1,t2,t3) for (i = nyi-1; i < nyt; i++) { joff = nxhd*i; /* scramble coefficients */ kmr = nxy/nx; for (j = 1; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; t2 = conjf(f[nxh-j+joff]); t1 = f[j+joff] + t2; t2 = (f[j+joff] - t2)*t3; f[j+joff] = t1 + t2; f[nxh-j+joff] = conjf(t1 - t2); } f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]); f[joff] = (crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I; /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { t1 = f[j1+joff]; f[j1+joff] = f[j+joff]; f[j+joff] = t1; } } /* then transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; t1 = conjf(sct[kmr*j]); t2 = t1*f[j2+joff]; f[j2+joff] = f[j1+joff] - t2; f[j1+joff] += t2; } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void cfft2rmxy(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int nxi, int nxp, int nxhd, int nyd, int nxhyd, int nxyhd) { /* this subroutine performs the y part of a two dimensional real to complex fast fourier transform and its inverse, for a subset of x, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, an inverse fourier 
transform in y is performed f[m][n] = sum(f[k][j]*exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, a forward fourier transform in y is performed f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*m*k/ny)) mixup = array of bit reversed addresses sct = sine/cosine table nxi = initial x index used nxp = number of x indices used nxhd = first dimension of f >= nx/2 nyd = second dimension of f >= ny nxhyd = maximum of (nx/2,ny) nxyhd = maximum of (nx,ny)/2 fourier coefficients are stored as follows: f[k][j] = real, imaginary part of mode j,k, where 0 <= j < nx/2 and 0 <= k < ny, except for f[k][1] = real, imaginary part of mode nx/2,k, where ny/2+1 <= k < ny, and imag(f[0][0]) = real part of mode nx/2,0 and imag(f[0][ny/2]) = real part of mode nx/2,ny/2 written by viktor k. decyk, ucla local data */ int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt; int nry, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, nryb, koff; float complex t1, t2; if (isign==0) return; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; ny = 1L<<indy; nyh = ny/2; nxy = nx > ny ? 
nx : ny;
   nxhy = 1L<<indx1y;
   nxt = nxi + nxp - 1;
   if (isign > 0)
      goto L70;
/* inverse fourier transform */
/* decimation-in-time: bit-reverse reorder, then indy butterfly stages */
   nryb = nxhy/ny;
   nry = nxy/ny;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,koff,t1,t2)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
/* swap each pair only once (k < k1) */
         if (k < k1) {
            k1 = nxhd*k1;
            t1 = f[i+k1];
            f[i+k1] = f[i+koff];
            f[i+koff] = t1;
         }
      }
/* then transform in y */
/* radix-2 butterflies; ns = half-span doubles each stage */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhd*(j + k1);
               j2 = nxhd*(j + k2);
               t1 = sct[kmr*j];
               t2 = t1*f[i+j2];
               f[i+j2] = f[i+j1] - t2;
               f[i+j1] += t2;
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
/* only done when this call covers column 0 (nxi==1): separates the two */
/* real sequences packed into the kx=0 column per the storage convention */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = nxhd*k;
         k1 = nxhd*ny - koff;
         t1 = f[k1];
         f[k1] = 0.5*(cimagf(f[koff] + t1)
                      + crealf(f[koff] - t1)*_Complex_I);
         f[koff] = 0.5*(crealf(f[koff] + t1)
                        + cimagf(f[koff] - t1)*_Complex_I);
      }
   }
   return;
/* forward fourier transform */
L70: nryb = nxhy/ny;
   nry = nxy/ny;
/* scramble modes kx = 0, nx/2 */
/* inverse of the unscramble step above; again only for the kx=0 column */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = nxhd*k;
         k1 = nxhd*ny - koff;
         t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I;
         f[k1] = conjf(f[koff] - t1);
         f[koff] += t1;
      }
   }
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,koff,t1,t2)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = nxhd*k1;
            t1 = f[i+k1];
            f[i+k1] = f[i+koff];
            f[i+koff] = t1;
         }
      }
/* then transform in y */
/* identical butterfly network; conjugated twiddles give the forward sign */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhd*(j + k1);
               j2 = nxhd*(j + k2);
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[i+j2];
               f[i+j2] = f[i+j1] - t2;
               f[i+j1] += t2;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/ void cfft2rm2x(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int nyi, int nyp, int nxhd, int nyd, int nxhyd, int nxyhd) { /* this subroutine performs the x part of 2 two dimensional real to complex fast fourier transforms, and their inverses, for a subset of y, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, two inverse fourier transforms in x are performed f[m][n][0:1] = (1/nx*ny)*sum(f[k][j][0:1]*exp(-sqrt(-1)*2pi*n*j/nx)) if isign = 1, two forward fourier transforms in x are performed f[k][j][0:1] = sum(f[m][n][0:1]*exp(sqrt(-1)*2pi*n*j/nx)) mixup = array of bit reversed addresses sct = sine/cosine table nyi = initial y index used nyp = number of y indices used nxhd = second dimension of f >= nx/2 nyd = third dimension of f >= ny nxhyd = maximum of (nx/2,ny) nxyhd = maximum of (nx,ny)/2 fourier coefficients are stored as follows: f[k][j][0:1] = real, imaginary part of mode j,k, where 0 <= j < nx/2 and 0 <= k < ny, except for f[k][1][0:1] = real, imaginary part of mode nx/2,k, where ny/2+1 <= k < ny, and imag(f[0][0][0:1]) = real part of mode nx/2,0 and imag(f[0][ny/2][0:1]) = real part of mode nx/2,ny/2 written by viktor k. decyk, ucla local data */ int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt; int nrx, i, j, k, l, jj, j1, j2, k1, k2, ns, ns2, km, kmr, joff; int nrxb; float at1, ani; float complex t1, t2, t3; if (isign==0) return; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nxy = nx > ny ? 
nx : ny; nxhy = 1L<<indx1y; nyt = nyi + nyp - 1; if (isign > 0) goto L100; /* inverse fourier transform */ nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,joff,at1,ani,t1,t2,t3) for (i = nyi-1; i < nyt; i++) { joff = 2*nxhd*i; /* swap complex components */ for (j = 0; j < nxh; j++) { at1 = cimagf(f[2*j+joff]); f[2*j+joff] = crealf(f[2*j+joff]) + crealf(f[1+2*j+joff])*_Complex_I; f[1+2*j+joff] = at1 + cimagf(f[1+2*j+joff])*_Complex_I; } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { t1 = f[2*j1+joff]; t2 = f[1+2*j1+joff]; f[2*j1+joff] = f[2*j+joff]; f[1+2*j1+joff] = f[1+2*j+joff]; f[2*j+joff] = t1; f[1+2*j+joff] = t2; } } /* then transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; t2 = t1*f[2*j2+joff]; t3 = t1*f[1+2*j2+joff]; f[2*j2+joff] = f[2*j1+joff] - t2; f[1+2*j2+joff] = f[1+2*j1+joff] - t3; f[2*j1+joff] += t2; f[1+2*j1+joff] += t3; } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxy/nx; ani = 0.5/(((float) nx)*((float) ny)); for (j = 1; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; for (jj = 0; jj < 2; jj++) { t2 = conjf(f[jj+2*(nxh-j)+joff]); t1 = f[jj+2*j+joff] + t2; t2 = (f[jj+2*j+joff] - t2)*t3; f[jj+2*j+joff] = ani*(t1 + t2); f[jj+2*(nxh-j)+joff] = ani*conjf(t1 - t2); } } ani = 2.0*ani; for (jj = 0; jj < 2; jj++) { f[jj+2*nxhh+joff] = ani*conjf(f[jj+2*nxhh+joff]); f[jj+joff] = ani*((crealf(f[jj+joff]) + cimagf(f[jj+joff])) + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I); } } return; /* forward fourier transform */ L100: nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,joff,at1,t1,t2,t3) for (i = nyi-1; i < nyt; i++) { joff = 2*nxhd*i; /* scramble coefficients */ kmr = 
nxy/nx; for (j = 1; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; for (jj = 0; jj < 2; jj++) { t2 = conjf(f[jj+2*(nxh-j)+joff]); t1 = f[jj+2*j+joff] + t2; t2 = (f[jj+2*j+joff] - t2)*t3; f[jj+2*j+joff] = t1 + t2; f[jj+2*(nxh-j)+joff] = conjf(t1 - t2); } } for (jj = 0; jj < 2; jj++) { f[jj+2*nxhh+joff] = 2.0*conjf(f[jj+2*nxhh+joff]); f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff])) + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I; } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { t1 = f[2*j1+joff]; t2 = f[1+2*j1+joff]; f[2*j1+joff] = f[2*j+joff]; f[1+2*j1+joff] = f[1+2*j+joff]; f[2*j+joff] = t1; f[1+2*j+joff] = t2; } } /* then transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; t1 = conjf(sct[kmr*j]); t2 = t1*f[2*j2+joff]; t3 = t1*f[1+2*j2+joff]; f[2*j2+joff] = f[2*j1+joff] - t2; f[1+2*j2+joff] = f[1+2*j1+joff] - t3; f[2*j1+joff] += t2; f[1+2*j1+joff] += t3; } } ns = ns2; } /* swap complex components */ for (j = 0; j < nxh; j++) { at1 = cimagf(f[2*j+joff]); f[2*j+joff] = crealf(f[2*j+joff]) + crealf(f[1+2*j+joff])*_Complex_I; f[1+2*j+joff] = at1 + cimagf(f[1+2*j+joff])*_Complex_I; } } return; } /*--------------------------------------------------------------------*/ void cfft2rm2y(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int nxi, int nxp, int nxhd, int nyd, int nxhyd, int nxyhd) { /* this subroutine performs the y part of 2 two dimensional real to complex fast fourier transforms, and their inverses, for a subset of x, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny indx/indy = exponent which determines length 
in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, two inverse fourier transforms in y are performed f[m][n][0:1] = *sum(f[k][j][0:1]*exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, two forward fourier transforms in y are performed f[k][j][0:1] = sum(f[m][n][0:1]*exp(sqrt(-1)*2pi*n*j/nx)) mixup = array of bit reversed addresses sct = sine/cosine table nxi = initial x index used nxp = number of x indices used nxhd = second dimension of f >= nx/2 nyd = third dimension of f >= ny nxhyd = maximum of (nx/2,ny) nxyhd = maximum of (nx,ny)/2 fourier coefficients are stored as follows: f[k][j][0:1] = real, imaginary part of mode j,k, where 0 <= j < nx/2 and 0 <= k < ny, except for f[k][1][0:1] = real, imaginary part of mode nx/2,k, where ny/2+1 <= k < ny, and imag(f[0][0][0:1]) = real part of mode nx/2,0 and imag(f[0][ny/2][0:1]) = real part of mode nx/2,ny/2 written by viktor k. decyk, ucla local data */ int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt; int nry, i, j, k, l, jj, j1, j2, k1, k2, ns, ns2, km, kmr, koff; int nryb; float complex t1, t2, t3; if (isign==0) return; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; ny = 1L<<indy; nyh = ny/2; nxy = nx > ny ? 
nx : ny; nxhy = 1L<<indx1y; nxt = nxi + nxp - 1; if (isign > 0) goto L80; /* inverse fourier transform */ nryb = nxhy/ny; nry = nxy/ny; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,koff,t1,t2,t3) for (i = nxi-1; i < nxt; i++) { /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { koff = 2*nxhd*k; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = 2*nxhd*k1; t1 = f[2*i+k1]; t2 = f[1+2*i+k1]; f[2*i+k1] = f[2*i+koff]; f[1+2*i+k1] = f[1+2*i+koff]; f[2*i+koff] = t1; f[1+2*i+koff] = t2; } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = 2*nxhd*(j + k1); j2 = 2*nxhd*(j + k2); t1 = sct[kmr*j]; t2 = t1*f[2*i+j2]; t3 = t1*f[1+2*i+j2]; f[2*i+j2] = f[2*i+j1] - t2; f[1+2*i+j2] = f[1+2*i+j1] - t3; f[2*i+j1] += t2; f[1+2*i+j1] += t3; } } ns = ns2; } } /* unscramble modes kx = 0, nx/2 */ if (nxi==1) { for (k = 1; k < nyh; k++) { koff = 2*nxhd*k; k1 = 2*nxhd*ny - koff; for (jj = 0; jj < 2; jj++) { t1 = f[jj+k1]; f[jj+k1] = 0.5*(cimagf(f[jj+koff] + t1) + crealf(f[jj+koff] - t1)*_Complex_I); f[jj+koff] = 0.5*(crealf(f[jj+koff] + t1) + cimagf(f[jj+koff] - t1)*_Complex_I); } } } return; /* forward fourier transform */ L80: nryb = nxhy/ny; nry = nxy/ny; /* scramble modes kx = 0, nx/2 */ if (nxi==1) { for (k = 1; k < nyh; k++) { koff = 2*nxhd*k; k1 = 2*nxhd*ny - koff; for (jj = 0; jj < 2; jj++) { t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I; f[jj+k1] = conjf(f[jj+koff] - t1); f[jj+koff] += t1; } } } #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,koff,t1,t2,t3) for (i = nxi-1; i < nxt; i++) { /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { koff = 2*nxhd*k; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = 2*nxhd*k1; t1 = f[2*i+k1]; t2 = f[1+2*i+k1]; f[2*i+k1] = f[2*i+koff]; f[1+2*i+k1] = f[1+2*i+koff]; f[2*i+koff] = t1; f[1+2*i+koff] = t2; } } /* then transform in y */ ns 
= 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = 2*nxhd*(j + k1); j2 = 2*nxhd*(j + k2); t1 = conjf(sct[kmr*j]); t2 = t1*f[2*i+j2]; t3 = t1*f[1+2*i+j2]; f[2*i+j2] = f[2*i+j1] - t2; f[1+2*i+j2] = f[1+2*i+j1] - t3; f[2*i+j1] += t2; f[1+2*i+j1] += t3; } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void cwfft2rmx(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int nxhd, int nyd, int nxhyd, int nxyhd) { /* wrapper function for real to complex fft, with packed data */ /* parallelized with OpenMP */ /* local data */ int nxh, ny; static int nxi = 1, nyi = 1; /* calculate range of indices */ nxh = 1L<<(indx - 1); ny = 1L<<indy; /* inverse fourier transform */ if (isign < 0) { /* perform x fft */ cfft2rmxx(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd, nxyhd); /* perform y fft */ cfft2rmxy(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd, nxyhd); } /* forward fourier transform */ else if (isign > 0) { /* perform y fft */ cfft2rmxy(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd, nxyhd); /* perform x fft */ cfft2rmxx(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd, nxyhd); } return; } /*--------------------------------------------------------------------*/ void cwfft2rm2(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int nxhd, int nyd, int nxhyd, int nxyhd) { /* wrapper function for 2 2d real to complex ffts, with packed data */ /* parallelized with OpenMP */ /* local data */ int nxh, ny; static int nxi = 1, nyi = 1; /* calculate range of indices */ nxh = 1L<<(indx - 1); ny = 1L<<indy; /* inverse fourier transform */ if (isign < 0) { /* perform x fft */ cfft2rm2x(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd, nxyhd); /* perform y fft */ cfft2rm2y(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd, nxyhd); } /* forward fourier 
transform */ else if (isign > 0) { /* perform y fft */ cfft2rm2y(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd, nxyhd); /* perform x fft */ cfft2rm2x(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd, nxyhd); } return; } /* Interfaces to Fortran */ /*--------------------------------------------------------------------*/ void cdistr2_(float *part, float *vtx, float *vty, float *vdx, float *vdy, int *npx, int *npy, int *idimp, int *nop, int *nx, int *ny, int *ipbc) { cdistr2(part,*vtx,*vty,*vdx,*vdy,*npx,*npy,*idimp,*nop,*nx,*ny,*ipbc); return; } /*--------------------------------------------------------------------*/ void cdblkp2l_(float *part, int *kpic, int *nppmx, int *idimp, int *nop, int *mx, int *my, int *mx1, int *mxy1, int *irc) { cdblkp2l(part,kpic,nppmx,*idimp,*nop,*mx,*my,*mx1,*mxy1,irc); return; } /*--------------------------------------------------------------------*/ void cppmovin2l_(float *part, float *ppart, int *kpic, int *nppmx, int *idimp, int *nop, int *mx, int *my, int *mx1, int *mxy1, int *irc) { cppmovin2l(part,ppart,kpic,*nppmx,*idimp,*nop,*mx,*my,*mx1,*mxy1, irc); return; } /*--------------------------------------------------------------------*/ void cppcheck2l_(float *ppart, int *kpic, int *idimp, int *nppmx, int *nx, int *ny, int *mx, int *my, int *mx1, int *my1, int *irc) { cppcheck2l(ppart,kpic,*idimp,*nppmx,*nx,*ny,*mx,*my,*mx1,*my1,irc); return; } /*--------------------------------------------------------------------*/ void cgppush2l_(float *ppart, float *fxy, int *kpic, float *qbm, float *dt, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *mx, int *my, int *nxv, int *nyv, int *mx1, int *mxy1, int *ipbc) { cgppush2l(ppart,fxy,kpic,*qbm,*dt,ek,*idimp,*nppmx,*nx,*ny,*mx,*my, *nxv,*nyv,*mx1,*mxy1,*ipbc); return; } /*--------------------------------------------------------------------*/ void cgppushf2l_(float *ppart, float *fxy, int *kpic, int *ncl, int *ihole, float *qbm, float *dt, float *ek, int *idimp, int *nppmx, int 
*nx, int *ny, int *mx, int *my, int *nxv, int *nyv, int *mx1, int *mxy1, int *ntmax, int *irc) { cgppushf2l(ppart,fxy,kpic,ncl,ihole,*qbm,*dt,ek,*idimp,*nppmx,*nx, *ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void cgppost2l_(float *ppart, float *q, int *kpic, float *qm, int *nppmx, int *idimp, int *mx, int *my, int *nxv, int *nyv, int *mx1, int *mxy1) { cgppost2l(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*nxv,*nyv,*mx1, *mxy1); return; } /*--------------------------------------------------------------------*/ void cpporder2l_(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int *idimp, int *nppmx, int *nx, int *ny, int *mx, int *my, int *mx1, int *my1, int *npbmx, int *ntmax, int *irc) { cpporder2l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*mx,*my, *mx1,*my1,*npbmx,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void cpporderf2l_(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int *idimp, int *nppmx, int *mx1, int *my1, int *npbmx, int *ntmax, int *irc) { cpporderf2l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1, *npbmx,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void ccguard2l_(float *fxy, int *nx, int *ny, int *nxe, int *nye) { ccguard2l(fxy,*nx,*ny,*nxe,*nye); return; } /*--------------------------------------------------------------------*/ void caguard2l_(float *q, int *nx, int *ny, int *nxe, int *nye) { caguard2l(q,*nx,*ny,*nxe,*nye); return; } /*--------------------------------------------------------------------*/ void cmpois22_(float complex *q, float complex *fxy, int *isign, float complex *ffc, float *ax, float *ay, float *affp, float *we, int *nx, int *ny, int *nxvh, int *nyv, int *nxhd, int *nyhd) { cmpois22(q,fxy,*isign,ffc,*ax,*ay,*affp,we,*nx,*ny,*nxvh,*nyv,*nxhd, *nyhd); return; } 
/*--------------------------------------------------------------------*/
/* Fortran-callable interface wrappers for the FFT routines above.
   Fortran passes every argument by reference, so each wrapper simply
   dereferences the scalar arguments and forwards array arguments
   unchanged to the corresponding C implementation. */

/* initialize the bit-reversal table mixup and sine/cosine table sct */
void cwfft2rinit_(int *mixup, float complex *sct, int *indx, int *indy,
                  int *nxhyd, int *nxyhd) {
   cwfft2rinit(mixup,sct,*indx,*indy,*nxhyd,*nxyhd);
   return;
}

/*--------------------------------------------------------------------*/
/* x part of a single 2d real-to-complex FFT, for a subset of y rows */
void cfft2rmxx_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nyi,
                int *nyp, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) {
   cfft2rmxx(f,*isign,mixup,sct,*indx,*indy,*nyi,*nyp,*nxhd,*nyd,*nxhyd,
             *nxyhd);
   return;
}

/*--------------------------------------------------------------------*/
/* y part of a single 2d real-to-complex FFT, for a subset of x columns */
void cfft2rmxy_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nxi,
                int *nxp, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) {
   cfft2rmxy(f,*isign,mixup,sct,*indx,*indy,*nxi,*nxp,*nxhd,*nyd,*nxhyd,
             *nxyhd);
   return;
}

/*--------------------------------------------------------------------*/
/* x part of 2 simultaneous 2d real-to-complex FFTs (2-component data) */
void cfft2rm2x_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nyi,
                int *nyp, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) {
   cfft2rm2x(f,*isign,mixup,sct,*indx,*indy,*nyi,*nyp,*nxhd,*nyd,*nxhyd,
             *nxyhd);
   return;
}

/*--------------------------------------------------------------------*/
/* y part of 2 simultaneous 2d real-to-complex FFTs (2-component data) */
void cfft2rm2y_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nxi,
                int *nxp, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) {
   cfft2rm2y(f,*isign,mixup,sct,*indx,*indy,*nxi,*nxp,*nxhd,*nyd,*nxhyd,
             *nxyhd);
   return;
}

/*--------------------------------------------------------------------*/
/* full 2d FFT wrapper: isign < 0 inverse, isign > 0 forward */
void cwfft2rmx_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nxhd,
                int *nyd, int *nxhyd, int *nxyhd) {
   cwfft2rmx(f,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,*nxyhd);
   return;
}

/*--------------------------------------------------------------------*/
/* full 2d FFT wrapper for 2-component data */
void cwfft2rm2_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nxhd,
                int *nyd, int *nxhyd, int *nxyhd) {
   cwfft2rm2(f,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,*nxyhd);
   return;
}
copy.c
#include "copy.h"

/* copy.c: array-copy kernels used to compare the memory bandwidth of
 * scalar, string-instruction, and SIMD load/store variants.  Every
 * kernel copies n doubles from a to b; a and b must not overlap
 * (RESTRICT).  The SIMD variants stride by 2/4/8 elements with no
 * remainder loop, so n is assumed to be a multiple of the vector
 * width, and the aligned-load variants (_mm_load_pd et al.) assume
 * suitably aligned pointers.
 * NOTE(review): both assumptions must be guaranteed by the allocation
 * site in the benchmark harness - confirm. */

/* Reference version: plain assignment loop; the compiler may vectorize. */
void copy_ref(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i++) {
        b[i] = a[i];
    }
}

/* Scalar copy through explicit 64-bit mov instructions, keeping the
 * compiler from vectorizing the loads and stores. */
void copy_mov(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i++) {
        double t;
        //t = a[i];
        asm ("mov %1, %0" : "=r" (t) : "m" (a[i]));
        //b[i] = t;
        asm ("mov %1, %0" : "=m" (b[i]) : "r" (t));
    }
}

/* Copy with the x86 string instruction "rep movsq" (one quadword per
 * iteration); with OpenMP each thread copies one contiguous chunk. */
void copy_rep_movsq(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    /* It might make more sense to do rep-movsq a page at a time
     * and make the alignment nicer... */
#ifdef _OPENMP
#pragma omp parallel
    {
        int me = omp_get_thread_num();
        int nt = omp_get_num_threads();
        size_t chunk = 1+(n-1)/nt;               /* ceil(n/nt) */
        size_t start = me*chunk;
        size_t end = (me+1)*chunk;
        if (end>n) end = n;                      /* clamp the last chunk */
        size_t tn = (end>start) ? end-start : 0; /* elements for this thread */
        const double * RESTRICT ta = a+start;
        double * RESTRICT tb = b+start;
#ifdef __INTEL_COMPILER
        /* rep movsq: RCX = quadword count, RSI = src, RDI = dst;
         * all three registers are clobbered, hence the in/out ties. */
        asm("rep movsq" : "=D" (tb), "=S" (ta), "=c" (tn)
                        : "0" (tb), "1" (ta), "2" (tn)
                        : "memory");
#else
        tn *= sizeof(double);
        memcpy(tb,ta,tn);
#endif
    }
#else
    {
#if HAS_GNU_EXTENDED_ASM
        asm("rep movsq" : "=D" (b), "=S" (a), "=c" (n)
                        : "0" (b), "1" (a), "2" (n)
                        : "memory");
#else
        /* Fix: a stray "tn *= sizeof(double);" used to precede this call.
         * tn is declared only in the OpenMP branch, so it did not compile
         * here, and memcpy computes the byte count itself anyway. */
        memcpy(b,a,n*sizeof(double));
#endif
    }
#endif
}

#ifdef __SSE__
#if 0
/* BROKEN */
void copy_movntq(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i++) {
        double t;
        //t = a[i];
        asm ("mov %1, %0" : "=r" (t) : "m" (a[i]));
        //b[i] = t;
        // movntq does not work here...
        asm ("movntq %1, %0" : "=m" (b[i]) : "r" (t));
    }
    asm ("sfence" ::: "memory");
}
#endif

#ifdef __INTEL_COMPILER
/* Non-temporal 64-bit MMX store (movntq) via intrinsics: the store
 * bypasses the cache; sfence makes the streaming stores visible. */
void copy_movntq64(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    //_mm_empty();
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i++) {
        __m64 t = _m_from_int64( *(__int64*)&(a[i]) );
        _mm_stream_pi( (__m64*)&(b[i]), (__m64)t);
    }
    _mm_sfence();
}
#endif /* ICC */
#endif /* SSE */

#ifdef __SSE2__
/* Non-temporal 64-bit integer store (movnti) from a GP register. */
void copy_movnti(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i++) {
        double t;
        //t = a[i];
        asm ("mov %1, %0" : "=r" (t) : "m" (a[i]));
        //b[i] = t;
        asm ("movnti %1, %0" : "=m" (b[i]) : "r" (t));
    }
    asm ("sfence" ::: "memory");
}

#ifdef __INTEL_COMPILER
/* Same as copy_movnti but through the _mm_stream_si64 intrinsic. */
void copy_movnti64(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    //_mm_empty();
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i++) {
        __m64 t = _m_from_int64( *(__int64*)&(a[i]) );
        _mm_stream_si64( (__int64*)&(b[i]), *(__int64*)&t);
    }
    _mm_sfence();
}
#endif /* ICC */

/* 128-bit aligned load/store (movapd): two doubles per iteration. */
void copy_movapd128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=2) {
        __m128d t = _mm_load_pd( &(a[i]) );
        _mm_store_pd( &(b[i]), t);
    }
}

/* 128-bit aligned load + non-temporal store (movntpd). */
void copy_movntpd128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=2) {
        __m128d t = _mm_load_pd( &(a[i]) );
        _mm_stream_pd( &(b[i]), t);
    }
    _mm_sfence();
}
#endif /* SSE2 */

#ifdef __SSE4_1__
/* 128-bit non-temporal load (movntdqa) + non-temporal store. */
void copy_movntdqa128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=2) {
        __m128i t = _mm_stream_load_si128( (__m128i*)&(a[i]) );
        _mm_stream_si128 ( (__m128i*)&(b[i]), t);
    }
    _mm_sfence();
}
#endif /* SSE4.1 */

#ifdef __AVX__
/* 256-bit aligned load/store (vmovapd): four doubles per iteration. */
void copy_vmovapd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=4) {
        __m256d t = _mm256_load_pd( &(a[i]) );
        _mm256_store_pd( &(b[i]), t);
    }
}

/* 256-bit aligned load + non-temporal store (vmovntpd). */
void copy_vmovntpd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=4) {
        __m256d t = _mm256_load_pd( &(a[i]) );
        _mm256_stream_pd( &(b[i]), t);
    }
    _mm_sfence();
}
#endif /* AVX */

#ifdef __AVX2__
/* 256-bit non-temporal load (vmovntdqa) + non-temporal store. */
void copy_vmovntdqa256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=4) {
        __m256i t = _mm256_stream_load_si256( (__m256i*)&(a[i]) );
        _mm256_stream_si256 ( (__m256i*)&(b[i]), t);
    }
    _mm_sfence();
}

/* 128-bit gather with 32-bit indices; only the low two lanes of the
 * index vector are used for a 2-double gather. */
void copy_vgatherdpd128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m128i vindex = _mm_set_epi32(-1,-1,1,0); // start from the right...
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=2) {
        __m128d t = _mm_i32gather_pd( &(a[i]), vindex, 8 /* scale */ );
        _mm_storel_pd( &(b[i  ]), t);
        _mm_storeh_pd( &(b[i+1]), t);
    }
}

/* 128-bit gather with 64-bit indices. */
void copy_vgatherqpd128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m128i vindex = _mm_set_epi64x(1,0); // works
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=2) {
        __m128d t = _mm_i64gather_pd( &(a[i]), vindex, 8 /* scale */ );
        _mm_storel_pd( &(b[i  ]), t);
        _mm_storeh_pd( &(b[i+1]), t);
    }
}

/* 256-bit gather with 32-bit indices; stores unpacked via 128-bit halves. */
void copy_vgatherdpd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m128i vindex = _mm_set_epi32(3,2,1,0); // start from the right...
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=4) {
        __m256d t = _mm256_i32gather_pd( &(a[i]), vindex, 8 /* scale */ );
        __m128d l = _mm256_extractf128_pd(t,0);
        __m128d u = _mm256_extractf128_pd(t,1);
        _mm_storel_pd( &(b[i  ]), l);
        _mm_storeh_pd( &(b[i+1]), l);
        _mm_storel_pd( &(b[i+2]), u);
        _mm_storeh_pd( &(b[i+3]), u);
    }
}

/* 256-bit gather with 64-bit indices. */
void copy_vgatherqpd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m256i vindex = _mm256_set_epi64x(3,2,1,0); // works
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=4) {
        __m256d t = _mm256_i64gather_pd( &(a[i]), vindex, 8 /* scale */ );
        __m128d l = _mm256_extractf128_pd(t,0);
        __m128d u = _mm256_extractf128_pd(t,1);
        _mm_storel_pd( &(b[i  ]), l);
        _mm_storeh_pd( &(b[i+1]), l);
        _mm_storel_pd( &(b[i+2]), u);
        _mm_storeh_pd( &(b[i+3]), u);
    }
}

/* Masked 256-bit gather with 64-bit indices (all lanes enabled). */
void copy_mvgatherqpd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m256i vindex = _mm256_set_epi64x(3,2,1,0); // works
    // O in OQ means ordered, i.e. AND.  unordered is OR.  Q means quiet i.e. non-signaling.
    __m256d src = _mm256_cmp_pd(_mm256_setzero_pd(),_mm256_setzero_pd(),_CMP_EQ_OQ); // sets all bits to 1
    __m256d mask = src;
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=4) {
        __m256d t = _mm256_mask_i64gather_pd( src, &(a[i]), vindex, mask, 8 /* scale */ );
        __m128d l = _mm256_extractf128_pd(t,0);
        __m128d u = _mm256_extractf128_pd(t,1);
        _mm_storel_pd( &(b[i  ]), l);
        _mm_storeh_pd( &(b[i+1]), l);
        _mm_storel_pd( &(b[i+2]), u);
        _mm_storeh_pd( &(b[i+3]), u);
    }
}
#endif /* AVX2 */

#ifdef __AVX512F__
/* 512-bit aligned load/store: eight doubles per iteration. */
void copy_vmovapd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_load_pd( &(a[i]) );
        _mm512_store_pd( &(b[i]), t);
    }
}

/* 512-bit unaligned load/store. */
void copy_vmovupd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_loadu_pd( &(a[i]) );
        _mm512_storeu_pd( &(b[i]), t);
    }
}

/* Masked 512-bit aligned load/store with a full (all-ones) mask. */
void copy_mvmovapd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    __m512d src = {0};
    __mmask8 k = 255;
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_mask_load_pd( src, k, &(a[i]) );
        _mm512_mask_store_pd( &(b[i]), k, t);
    }
}

/* Masked 512-bit unaligned load/store with a full mask. */
void copy_mvmovupd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    __m512d src = {0};
    __mmask8 k = 255;
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_mask_loadu_pd( src, k, &(a[i]) );
        _mm512_mask_storeu_pd( &(b[i]), k, t);
    }
}

/* 512-bit aligned load + non-temporal store. */
void copy_vmovntpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_load_pd( &(a[i]) );
        _mm512_stream_pd( &(b[i]), t);
    }
    _mm_sfence();
}

/* 512-bit non-temporal load + non-temporal store. */
void copy_vmovntdqa512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512i t = _mm512_stream_load_si512( (__m512i*)&(a[i]) );
        _mm512_stream_si512 ( (__m512i*)&(b[i]), t);
    }
    _mm_sfence();
}

/* 512-bit gather + scatter with 32-bit indices. */
void copy_vGSdpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m256i vindex = _mm256_set_epi32(7,6,5,4,3,2,1,0); // start from the right...
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_i32gather_pd(vindex, &(a[i]), 8 /* scale */ );
        _mm512_i32scatter_pd( &(b[i]), vindex, t, 8 /* scale */ );
    }
}

/* Masked 512-bit gather + scatter with 32-bit indices (full mask). */
void copy_mvGSdpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    __m512d src = {0};
    __mmask8 k = 255;
    const __m256i vindex = _mm256_set_epi32(7,6,5,4,3,2,1,0); // start from the right...
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_mask_i32gather_pd(src, k, vindex, &(a[i]), 8 /* scale */ );
        _mm512_mask_i32scatter_pd( &(b[i]), k, vindex, t, 8 /* scale */ );
    }
}

/* 512-bit gather + scatter with 64-bit indices. */
void copy_vGSqpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m512i vindex = _mm512_set_epi64(7,6,5,4,3,2,1,0);
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_i64gather_pd(vindex, &(a[i]), 8 /* scale */ );
        _mm512_i64scatter_pd( &(b[i]), vindex, t, 8 /* scale */ );
    }
}

/* Masked 512-bit gather + scatter with 64-bit indices (full mask). */
void copy_mvGSqpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    __m512d src = {0};
    __mmask8 k = 255;
    const __m512i vindex = _mm512_set_epi64(7,6,5,4,3,2,1,0);
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_mask_i64gather_pd(src, k, vindex, &(a[i]), 8 /* scale */ );
        _mm512_mask_i64scatter_pd( &(b[i]), k, vindex, t, 8 /* scale */ );
    }
}
#endif /* AVX-512F */
GB_unop__signum_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__signum_fp32_fp32)
// op(A') function:  GB (_unop_tran__signum_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = GB_signumf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_signumf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = GB_signumf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIGNUM || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = GB_signumf (aij) elementwise to the anz entries of Ax,
// writing the results into Cx, using nthreads OpenMP threads.
GrB_Info GB (_unop_apply__signum_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries of Ax are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_signumf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions whose bitmap entry is not present
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_signumf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose logic lives in the shared template GB_unop_transpose.c,
// which is specialized here by the GB_* macros defined above.
GrB_Info GB (_unop_tran__signum_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__minv_uint64_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint64_fp32
// op(A') function:  GB_tran__minv_uint64_fp32

// C type:   uint64_t
// A type:   float
// cast:     uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 64)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting
#define GB_CASTING(z, aij) \
    uint64_t z ; GB_CAST_UNSIGNED(z,aij,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts each of the anz entries of Ax from float to uint64_t and applies
// the unsigned integer multiplicative-inverse operator, in parallel.
GrB_Info GB_unop__minv_uint64_fp32
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose logic lives in the shared template GB_unaryop_transpose.c,
// specialized by the GB_* macros defined above.
GrB_Info GB_tran__minv_uint64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of pixels % represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/
typedef struct _NodeInfo
{
  /*
    One node of the color-description tree.  A node covers a cube of RGB(A)
    space; child[] has 8 slots for RGB and 16 when alpha is associated
    (ColorToNodeId() forms a 3- or 4-bit child index).
  */
  struct _NodeInfo
    *parent,
    *child[16];

  MagickSizeType
    number_unique;     /* pixels classified exactly at this node ("n2") */

  DoublePixelPacket
    total_color;       /* component sums (Sr, Sg, Sb[, Sa]) of those pixels */

  MagickRealType
    quantize_error;    /* accumulated distance error ("E") for this node */

  size_t
    color_number,      /* colormap index assigned by DefineImageColormap() */
    id,                /* child slot this node occupies in its parent */
    level;             /* depth of this node in the tree */
} NodeInfo;

typedef struct _Nodes
{
  /*
    A slab of pre-allocated NodeInfo structures, chained so DestroyCubeInfo()
    can release them in bulk.
  */
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;

typedef struct _CubeInfo
{
  /*
    Working state shared by the classification, reduction, and assignment
    phases.
  */
  NodeInfo
    *root;               /* root of the color-description tree */

  size_t
    colors,              /* colors currently represented by the tree */
    maximum_colors;      /* requested maximum number of output colors */

  ssize_t
    transparent_index;   /* colormap slot of the transparent color, or -1 */

  MagickSizeType
    transparent_pixels;

  DoublePixelPacket
    target;              /* color sought by ClosestColor() */

  MagickRealType
    distance,            /* best (squared) distance found by ClosestColor() */
    pruning_threshold,
    next_threshold;

  size_t
    nodes,               /* total nodes allocated so far */
    free_nodes,          /* unused nodes remaining in the current slab */
    color_number;        /* colormap index of the closest color found */

  NodeInfo
    *next_node;

  Nodes
    *node_queue;         /* chain of node slabs for bulk deallocation */

  MemoryInfo
    *memory_info;

  ssize_t
    *cache;              /* presumably the dither color cache (see
                            CacheShift) -- confirm against DitherImage() */

  DoublePixelPacket
    error[ErrorQueueLength];   /* recent error terms used when dithering */

  MagickRealType
    weights[ErrorQueueLength];

  QuantizeInfo
    *quantize_info;

  MagickBooleanType
    associate_alpha;     /* quantize in RGBA rather than RGB space */

  ssize_t
    x,
    y;

  size_t
    depth;               /* maximum tree depth currently in use */

  MagickOffsetType
    offset;

  MagickSizeType
    span;
} CubeInfo;

/*
  Method prototypes.
*/
static CubeInfo
  *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);

static NodeInfo
  *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);

static MagickBooleanType
  AssignImageColors(Image *,CubeInfo *),
  ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
  DitherImage(Image *,CubeInfo *),
  SetGrayscaleImage(Image *);

static size_t
  DefineImageColormap(Image *,CubeInfo *,NodeInfo *);

static void
  ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
  DestroyCubeInfo(CubeInfo *),
  PruneLevel(CubeInfo *,const NodeInfo *),
  PruneToCubeDepth(CubeInfo *,const NodeInfo *),
  ReduceImageColors(const Image *,CubeInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e   Q u a n t i z e   I n f o                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
%  The format of the AcquireQuantizeInfo method is:
%
%      QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *quantize_info;

  /*
    Allocate the structure and fill it with library defaults.
  */
  quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info));
  if (quantize_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  /*
    Inherit dither and verbosity settings from the image info; a "dither"
    image option overrides the default dither method.
  */
  quantize_info->dither=image_info->dither;
  {
    const char
      *option;

    option=GetImageOption(image_info,"dither");
    if (option != (const char *) NULL)
      quantize_info->dither_method=(DitherMethod) ParseCommandOption(
        MagickDitherOptions,MagickFalse,option);
  }
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A s s i g n   I m a g e   C o l o r s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AssignImageColors() generates the output image from the pruned tree.  The
%  output image consists of two parts: (1) A color map, which is an array
%  of color descriptions (RGB triples) for each color present in the
%  output image; (2) A pixel array, which represents each pixel as an
%  index into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2 > 0, it divides Sr, Sg, and Sb by n2.  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
%
%  Finally, the assignment phase reclassifies each pixel in the pruned
%  tree to identify the deepest node containing the pixel's color.
The
%  pixel's value in the pixel array becomes the index of this node's mean
%  color in the color map.
%
%  The format of the AssignImageColors() method is:
%
%      MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/*
  AssociateAlphaPixel() converts a PixelPacket into a DoublePixelPacket.
  When alpha is associated and the pixel is not fully opaque, the color
  channels are premultiplied by the pixel's alpha.
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
  const PixelPacket *pixel,DoublePixelPacket *alpha_pixel)
{
  MagickRealType
    alpha;

  alpha_pixel->index=0;
  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->opacity == OpaqueOpacity))
    {
      /*
        Opaque pixel (or alpha is not associated): copy channels verbatim.
      */
      alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
      alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
      alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
      alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
      return;
    }
  /*
    Premultiply the color channels by alpha (alpha = 1 - scaled opacity).
  */
  alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel)));
  alpha_pixel->red=alpha*GetPixelRed(pixel);
  alpha_pixel->green=alpha*GetPixelGreen(pixel);
  alpha_pixel->blue=alpha*GetPixelBlue(pixel);
  alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}

/*
  ColorToNodeId() selects a child slot for a pixel: bit `index' of each
  byte-scaled channel contributes one bit, giving a 3-bit (RGB) or 4-bit
  (RGBA) child id.
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >> index) &
    0x01) | ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >> index) &
    0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >>
    index) & 0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >> index) &
      0x1) << 3;
  return(id);
}

/*
  IsSameColor() returns MagickFalse when two pixels differ in RGB, or in
  opacity when the image has a matte channel.
*/
static inline MagickBooleanType IsSameColor(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  if ((GetPixelRed(p) != GetPixelRed(q)) ||
      (GetPixelGreen(p) != GetPixelGreen(q)) ||
      (GetPixelBlue(p) != GetPixelBlue(q)))
    return(MagickFalse);
  if ((image->matte != MagickFalse) &&
      (GetPixelOpacity(p) != GetPixelOpacity(q)))
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag  "Assign/Image"

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace(image,sRGBColorspace);
  if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    DefineImageColormap() rebuilds image->colors as it walks the tree.
  */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if ((cube_info->quantize_info->dither != MagickFalse) &&
      (cube_info->quantize_info->dither_method != NoDitherMethod))
    (void) DitherImage(image,cube_info);
  else
    {
      CacheView
        *image_view;

      ExceptionInfo
        *exception;

      MagickBooleanType
        status;

      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        register IndexPacket
          *magick_restrict indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        /*
          Each thread works on a private copy of the cube search state.
        */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color; count
            groups a run of identical pixels so it is classified only once.
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
            if (IsSameColor(image,q,q+count) == MagickFalse)
              break;
          AssociateAlphaPixel(&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
            (QuantumRange+1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(indexes+x+i,index);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRgb(q,image->colormap+index);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelOpacity(q,image->colormap[index].opacity);
              }
            q++;
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_AssignImageColors)
#endif
            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image);
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image: force the two colormap entries to black/white,
        keeping the darker entry dark.
      */
      intensity=0.0;
      if ((image->colors > 1) &&
          (GetPixelLuma(image,image->colormap+0) >
           GetPixelLuma(image,image->colormap+1)))
        intensity=(double) QuantumRange;
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y I m a g e C o l o r s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree
%  of sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color
%  description tree in the classification phase for realistic values of
%  Cmax.  If color components in the input image are quantized to k-bit
%  precision, so that Cmax= 2k-1, the tree would need k levels below the
%  root node to allow representing each possible input color in a leaf.
%  This becomes prohibitive because the tree's total number of nodes is
%  1 + sum(i=1,k,8k).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed; (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, classification scans downward from
%  the root of the color description tree.
At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1 : Number of pixels whose color is contained in the RGB cube
%    which this node represents;
%
%    n2 : Number of pixels whose color is not represented in a node at
%    lower depth in the tree; initially, n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb : Sums of the red, green, and blue component values for
%    all pixels not classified at a lower depth.  The combination of
%    these sums and n2 will ultimately characterize the mean color of a
%    set of pixels represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%    within a node and the nodes' center.  This represents the quantization
%    error for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
*/

/*
  SetAssociatedAlpha() enables RGBA-space quantization only when the image
  has a matte channel and the request is not 2-color grayscale.
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->matte;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}

static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  MagickRealType
    bisect;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  /*
    mid starts at the center of the color cube and is bisected toward the
    pixel at each tree level, so (pixel-mid) measures the local error.
  */
  midpoint.red=(MagickRealType) QuantumRange/2.0;
  midpoint.green=(MagickRealType) QuantumRange/2.0;
  midpoint.blue=(MagickRealType) QuantumRange/2.0;
  midpoint.opacity=(MagickRealType) QuantumRange/2.0;
  midpoint.index=(MagickRealType) QuantumRange/2.0;
  error.opacity=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.opacity*error.opacity);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(pixel.opacity);
      else
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(OpaqueOpacity);
      p+=count;
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /*
          Too many colors for the first pass; switch to the shallower
          second pass below.
        */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Second pass: classify the remaining rows at the (possibly reduced)
    cube_info->depth instead of MaxTreeDepth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.opacity*error.opacity);
        if (IsNaN(distance))
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
          pixel.opacity);
      else
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(OpaqueOpacity);
      p+=count;
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  return(y < (ssize_t) image->rows ?
MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneQuantizeInfo() makes a duplicate of the given quantize info structure, % or if quantize info is NULL, a new one. % % The format of the CloneQuantizeInfo method is: % % QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o clone_info: Method CloneQuantizeInfo returns a duplicate of the given % quantize info, or if image info is NULL a new one. % % o quantize_info: a structure of type info. % */ MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) { QuantizeInfo *clone_info; clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info)); if (clone_info == (QuantizeInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetQuantizeInfo(clone_info); if (quantize_info == (QuantizeInfo *) NULL) return(clone_info); clone_info->number_colors=quantize_info->number_colors; clone_info->tree_depth=quantize_info->tree_depth; clone_info->dither=quantize_info->dither; clone_info->dither_method=quantize_info->dither_method; clone_info->colorspace=quantize_info->colorspace; clone_info->measure_error=quantize_info->measure_error; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o s e s t C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClosestColor() traverses the color cube tree at a particular node and % determines which colormap entry best represents the input color. % % The format of the ClosestColor method is: % % void ClosestColor(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. 
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      MagickRealType
        pixel;

      register DoublePixelPacket
        *magick_restrict q;

      register MagickRealType
        alpha,
        beta,
        distance;

      register PixelPacket
        *magick_restrict p;

      /*
        Determine if this color is "closest".  The squared distance is
        accumulated channel by channel so the search can bail out as soon
        as it exceeds the best distance found so far.
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          /*
            Alpha-weight the channels so transparent colors compare fairly.
          */
          alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
        }
      pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=GetPixelAlpha(p)-GetPixelAlpha(q);
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      /*
                        New best match: record it in the cube state.
                      */
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m p r e s s I m a g e C o l o r m a p                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color
entries. % % The format of the CompressImageColormap method is: % % MagickBooleanType CompressImageColormap(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType CompressImageColormap(Image *image) { QuantizeInfo quantize_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsPaletteImage(image,&image->exception) == MagickFalse) return(MagickFalse); GetQuantizeInfo(&quantize_info); quantize_info.number_colors=image->colors; quantize_info.tree_depth=MaxTreeDepth; return(QuantizeImage(&quantize_info,image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineImageColormap() traverses the color cube tree and notes each colormap % entry. A colormap entry is any node in the color cube tree where the % of unique colors is not zero. DefineImageColormap() returns the number of % colors in the image colormap. % % The format of the DefineImageColormap method is: % % size_t DefineImageColormap(Image *image,CubeInfo *cube_info, % NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static size_t DefineImageColormap(Image *image,CubeInfo *cube_info, NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 
    8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register MagickRealType
        alpha;

      register PixelPacket
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          /*
            Opaque colormap entry: mean of the summed channels.
          */
          SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.red)));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.green)));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.blue)));
          SetPixelOpacity(q,OpaqueOpacity);
        }
      else
        {
          MagickRealType
            opacity;

          opacity=(MagickRealType) (alpha*QuantumRange*
            node_info->total_color.opacity);
          SetPixelOpacity(q,ClampToQuantum(opacity));
          if (q->opacity == OpaqueOpacity)
            {
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.blue)));
            }
          else
            {
              double
                gamma;

              /*
                Un-premultiply: classification premultiplied the color by
                alpha, so divide it back out for partially transparent
                entries.
              */
              gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
              gamma=PerceptibleReciprocal(gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.blue)));
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  /*
                    Track the most populated transparent entry.
                  */
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage: walk the chain of node slabs, freeing
    each slab and its link record.
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Trace the call, validate the structure, poison its signature so stale
    pointers are caught, then release the storage and hand back NULL.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  quantize_info->signature=(~MagickCoreSignature);
  return((QuantizeInfo *) RelinquishMagickMemory(quantize_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color-reduced image to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
% */ static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels) { register ssize_t i; assert(pixels != (DoublePixelPacket **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (DoublePixelPacket *) NULL) pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static DoublePixelPacket **AcquirePixelThreadSet(const size_t count) { DoublePixelPacket **pixels; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (DoublePixelPacket **) NULL) return((DoublePixelPacket **) NULL); (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count, 2*sizeof(**pixels)); if (pixels[i] == (DoublePixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static inline ssize_t CacheOffset(CubeInfo *cube_info, const DoublePixelPacket *pixel) { #define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift))) #define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift))) #define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift))) #define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift))) ssize_t offset; offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) | GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) | BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue)))); if (cube_info->associate_alpha != MagickFalse) offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity))); return(offset); } static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info) { #define DitherImageTag "Dither/Image" CacheView *image_view; DoublePixelPacket **pixels; ExceptionInfo *exception; 
MagickBooleanType status; ssize_t y; /* Distribute quantization error using Floyd-Steinberg. */ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); exception=(&image->exception); status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; size_t index; ssize_t v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; register ssize_t i; ssize_t u; u=(y & 0x01) ? 
(ssize_t) image->columns-1-x : x; AssociateAlphaPixel(&cube,q+u,&pixel); if (x > 0) { pixel.red+=7*current[u-v].red/16; pixel.green+=7*current[u-v].green/16; pixel.blue+=7*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=7*current[u-v].opacity/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=previous[u+v].red/16; pixel.green+=previous[u+v].green/16; pixel.blue+=previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=previous[u+v].opacity/16; } pixel.red+=5*previous[u].red/16; pixel.green+=5*previous[u].green/16; pixel.blue+=5*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=5*previous[u].opacity/16; if (x > 0) { pixel.red+=3*previous[u-v].red/16; pixel.green+=3*previous[u-v].green/16; pixel.blue+=3*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=3*previous[u-v].opacity/16; } } pixel.red=(MagickRealType) ClampPixel(pixel.red); pixel.green=(MagickRealType) ClampPixel(pixel.green); pixel.blue=(MagickRealType) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+ 1.0)+1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(indexes+u,index); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRgb(q+u,image->colormap+index); if (cube.associate_alpha != MagickFalse) SetPixelOpacity(q+u,image->colormap[index].opacity); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. */ AssociateAlphaPixel(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].opacity=pixel.opacity-color.opacity; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int); static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info, const size_t level,const unsigned int direction) { if (level == 1) switch (direction) { case WestGravity: { (void) RiemersmaDither(image,image_view,cube_info,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); break; } case EastGravity: { (void) RiemersmaDither(image,image_view,cube_info,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); break; } case NorthGravity: { (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); break; } case SouthGravity: { (void) 
RiemersmaDither(image,image_view,cube_info,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); break; } default: break; } else switch (direction) { case WestGravity: { Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); Riemersma(image,image_view,cube_info,level-1,SouthGravity); break; } case EastGravity: { Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); break; } case NorthGravity: { Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); break; } case SouthGravity: { Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) 
RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); break; } default: break; } } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction) { #define DitherImageTag "Dither/Image" DoublePixelPacket color, pixel; MagickBooleanType proceed; register CubeInfo *p; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { ExceptionInfo *exception; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t i; /* Distribute error. */ exception=(&image->exception); q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickFalse); indexes=GetCacheViewAuthenticIndexQueue(image_view); AssociateAlphaPixel(cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=p->weights[i]*p->error[i].red; pixel.green+=p->weights[i]*p->error[i].green; pixel.blue+=p->weights[i]*p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.opacity+=p->weights[i]*p->error[i].opacity; } pixel.red=(MagickRealType) ClampPixel(pixel.red); pixel.green=(MagickRealType) ClampPixel(pixel.green); pixel.blue=(MagickRealType) ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. 
*/ p->target=pixel; p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) (1*p->cache[i]); if (image->storage_class == PseudoClass) *indexes=(IndexPacket) index; if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRgb(q,image->colormap+index); if (cube_info->associate_alpha != MagickFalse) SetPixelOpacity(q,image->colormap[index].opacity); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. */ (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixel(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t depth; if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info)); /* Distribute quantization error along a Hilbert curve. 
*/ (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength* sizeof(*cube_info->error)); cube_info->x=0; cube_info->y=0; i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows); for (depth=1; i != 0; depth++) i>>=1; if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows)) depth++; cube_info->offset=0; cube_info->span=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,&image->exception); if (depth > 1) Riemersma(image,image_view,cube_info,depth-1,NorthGravity); status=RiemersmaDither(image,image_view,cube_info,ForgetGravity); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCubeInfo() initialize the Cube data structure. % % The format of the GetCubeInfo method is: % % CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info, % const size_t depth,const size_t maximum_colors) % % A description of each parameter follows. % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o depth: Normally, this integer value is zero or one. A zero or % one tells Quantize to choose a optimal tree depth of Log4(number_colors). % A tree of this depth generally allows the best representation of the % reference image with the least amount of memory and the fastest % computational speed. In some cases, such as an image with low color % dispersion (a few number of colors), a value other than % Log4(number_colors) is required. To expand the color tree completely, % use a value of 8. % % o maximum_colors: maximum colors. % */ static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info, const size_t depth,const size_t maximum_colors) { CubeInfo *cube_info; MagickRealType sum, weight; register ssize_t i; size_t length; /* Initialize tree to describe color cube_info. 
*/ cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info)); if (cube_info == (CubeInfo *) NULL) return((CubeInfo *) NULL); (void) ResetMagickMemory(cube_info,0,sizeof(*cube_info)); cube_info->depth=depth; if (cube_info->depth > MaxTreeDepth) cube_info->depth=MaxTreeDepth; if (cube_info->depth < 2) cube_info->depth=2; cube_info->maximum_colors=maximum_colors; /* Initialize root node. */ cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL); if (cube_info->root == (NodeInfo *) NULL) return((CubeInfo *) NULL); cube_info->root->parent=cube_info->root; cube_info->quantize_info=CloneQuantizeInfo(quantize_info); if (cube_info->quantize_info->dither == MagickFalse) return(cube_info); /* Initialize dither resources. */ length=(size_t) (1UL << (4*(8-CacheShift))); cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache)); if (cube_info->memory_info == (MemoryInfo *) NULL) return((CubeInfo *) NULL); cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info); /* Initialize color cache. */ (void) ResetMagickMemory(cube_info->cache,(-1),sizeof(*cube_info->cache)* length); /* Distribute weights along a curve of exponential decay. */ weight=1.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight); weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0)); } /* Normalize the weighting factors. */ weight=0.0; for (i=0; i < ErrorQueueLength; i++) weight+=cube_info->weights[i]; sum=0.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[i]/=weight; sum+=cube_info->weights[i]; } cube_info->weights[0]+=1.0-sum; return(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t N o d e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNodeInfo() allocates memory for a new node in the color cube tree and % presets all fields to zero. 
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  /*
    Nodes are pool-allocated: when the current pool is exhausted, push a
    fresh pool of NodesInAList nodes onto the cube's queue.
  */
  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  /*
    Carve the next node out of the pool and zero-initialize it.
  */
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) ResetMagickMemory(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t i z e E r r o r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    *indexes;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    gamma,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) ResetMagickMemory(&image->error,0,sizeof(image->error));
  /*
    Error is only meaningful against a colormap; DirectClass images have
    none, so report zero error.
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /*
    Three channels contribute per pixel, hence the factor of 3 in the
    normalization area.
  */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /*
      NOTE(review): an Authentic index queue is requested from a virtual
      cache view — looks like it should be the Virtual variant; confirm
      against the cache-view API before changing.
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=1UL*GetPixelIndex(indexes+x);
      if (image->matte != MagickFalse)
        {
          /*
            Weight each side of the comparison by its pixel coverage.
          */
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      distance=fabs((double) (alpha*GetPixelRed(p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t Q u a n t i z e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  /*
    Defaults: 256 colors, Riemersma dithering enabled, colorspace and
    error measurement left to the caller.
  */
  (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither=MagickTrue;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P o s t e r i z e I m a g e C h a n n e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PosterizeImage() reduces the image to a limited number of colors for a
%  "poster" effect.
%
%  The format of the PosterizeImage method is:
%
%      MagickBooleanType PosterizeImage(Image *image,const size_t levels,
%        const MagickBooleanType dither)
%      MagickBooleanType PosterizeImageChannel(Image *image,
%        const ChannelType channel,const size_t levels,
%        const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o levels: Number of color levels allowed in each channel.  Very low
%      values (2, 3, or 4) have the most visible effect.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
*/
static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer (half-way cases round up).
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: posterize every default channel. */
  status=PosterizeImageChannel(image,DefaultChannels,levels,dither);
  return(status);
}

MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
  const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag  "Posterize/Image"
/*
  Snap a quantum to the nearest of `levels` evenly-spaced values; the
  MagickMax guard keeps levels==1 from dividing by zero.
*/
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=PosterizePixel(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=PosterizePixel(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PosterizeImageChannel)
#endif
        proceed=SetImageProgress(image,PosterizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Re-quantize to collapse the posterized values into a colormap of at
    most levels^3 entries.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither=dither;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e C h i l d                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneSubtree method is:
%
%      PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children first so the whole subtree folds upward.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.opacity+=node_info->total_color.opacity;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e L e v e l                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree
%  merging their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
% % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneLevel(cube_info,node_info->child[i]); if (node_info->level == cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e T o C u b e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneToCubeDepth() deletes any nodes at a depth greater than % cube_info->depth while merging their color statistics into their parent % node. % % The format of the PruneToCubeDepth method is: % % PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneToCubeDepth(cube_info,node_info->child[i]); if (node_info->level > cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImage() analyzes the colors within a reference image and chooses a % fixed number of colors to represent the image. 
The goal of the algorithm % is to minimize the color difference between the input and output image while % minimizing the processing time. % % The format of the QuantizeImage method is: % % MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, % Image *image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % */ MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image) { CubeInfo *cube_info; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; if (image->matte == MagickFalse) { if (SetImageGray(image,&image->exception) != MagickFalse) (void) SetGrayscaleImage(image); } if ((image->storage_class == PseudoClass) && (image->colors <= maximum_colors)) { if ((quantize_info->colorspace != UndefinedColorspace) && (quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace(image,quantize_info->colorspace); return(MagickTrue); } depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither != MagickFalse) && (depth > 2)) depth--; if ((image->matte != MagickFalse) && (depth > 5)) depth--; if (SetImageGray(image,&image->exception) != MagickFalse) depth=MaxTreeDepth; } /* Initialize color cube. 
*/ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,&image->exception); if (status != MagickFalse) { /* Reduce the number of colors in the image if it contains more than the maximum, otherwise we can disable dithering to improve the performance. */ if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); else cube_info->quantize_info->dither_method=NoDitherMethod; status=AssignImageColors(image,cube_info); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. % % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. 
% */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; register ssize_t i; size_t depth, maximum_colors, number_images; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. */ status=QuantizeImage(quantize_info,images); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither != MagickFalse) depth--; } /* Initialize color cube. 
*/ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(&images->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,&image->exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. */ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u a n t i z e E r r o r F l a t t e n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeErrorFlatten() traverses the color cube and flattens the quantization % error into a sorted 1D array. This accelerates the color reduction process. % % Contributed by Yoya. 
% % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % MagickRealType *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. % % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset, MagickRealType *quantize_error) { register ssize_t i; size_t n, number_children; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children ; i++) if (node_info->child[i] != (NodeInfo *) NULL) n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n, quantize_error); return(n); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 
8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(cube_info,node_info); else { /* Find minimum pruning threshold. */ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. 
At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static int MagickRealTypeCompare(const void *error_p,const void *error_q) { MagickRealType *p, *q; p=(MagickRealType *) error_p; q=(MagickRealType *) error_q; if (*p > *q) return(1); if (fabs((double) (*q-*p)) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { MagickRealType *quantize_error; /* Enable rapid reduction of the number of unique colors. 
*/ quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (MagickRealType *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType), MagickRealTypeCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(MagickRealType *) RelinquishMagickMemory( quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest color from % a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, % Image *image,const Image *remap_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o remap_image: the reference image. % */ MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, Image *image,const Image *remap_image) { CubeInfo *cube_info; MagickBooleanType status; /* Initialize color cube. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(remap_image != (Image *) NULL); assert(remap_image->signature == MagickCoreSignature); cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,&image->exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; status=AssignImageColors(image,cube_info); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImages() replaces the colors of a sequence of images with the % closest color from a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. % */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. 
*/ status=QuantizeImages(quantize_info,images); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,&image->exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image) % % A description of each parameter follows: % % o image: The image. 
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { PixelPacket *color_1, *color_2; int intensity; color_1=(PixelPacket *) x; color_2=(PixelPacket *) y; intensity=PixelPacketIntensity(color_1)-(int) PixelPacketIntensity(color_2); return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; PixelPacket *colormap; register ssize_t i; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace); colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { ExceptionInfo *exception; (void) ResetMagickMemory(colormap_index,(-1),MaxColormapSize* sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } image->colors=0; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } 
indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=GetPixelRed(q); image->colormap[image->colors].green=GetPixelGreen(q); image->colormap[image->colors].blue=GetPixelBlue(q); image->colors++; } } SetPixelIndex(indexes+x,colormap_index[intensity]); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].opacity=(unsigned short) i; qsort((void *) image->colormap,image->colors,sizeof(PixelPacket), IntensityCompare); colormap=(PixelPacket *) AcquireQuantumMemory(image->colors, sizeof(*colormap)); if (colormap == (PixelPacket *) NULL) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].opacity]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict 
q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex( indexes+x))]); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,&image->exception) != MagickFalse) image->type=BilevelType; return(status); }
blake2sp.c
/* BLAKE2 reference source code package - reference C implementations Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. The terms of these licenses can be found at: - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 More information about the BLAKE2 hash function can be found at https://blake2.net. */ #include <stdlib.h> #include <string.h> #include <stdio.h> #if defined(_OPENMP) #include <omp.h> #endif #include "blake2.h" #include "blake2-impl.h" #include "erl_nif.h" #define PARALLELISM_DEGREE 8 static inline int blake2sp_init_leaf( blake2s_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset, const void *salt, const void *personal, const uint8_t saltlen, const uint8_t personallen ) { blake2s_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; store32( &P->leaf_length, 0 ); store48( P->node_offset, offset ); P->node_depth = 0; P->inner_length = BLAKE2S_OUTBYTES; if (saltlen) memcpy( P->salt, salt, BLAKE2S_SALTBYTES ); else memset(P->salt, 0, sizeof( P->salt )); if (personallen) memcpy( P->personal, personal, BLAKE2S_PERSONALBYTES ); else memset(P->personal, 0, sizeof(P->personal)); return blake2s_init_param( S, P ); } static inline int blake2sp_init_root( blake2s_state *S, uint8_t outlen, uint8_t keylen, const void *salt, const void *personal, const uint8_t saltlen, const uint8_t personallen ) { blake2s_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; store32( &P->leaf_length, 0 ); store48( P->node_offset, 0ULL ); P->node_depth = 1; P->inner_length = BLAKE2S_OUTBYTES; if (saltlen) memcpy( P->salt, salt, BLAKE2S_SALTBYTES ); else memset(P->salt, 0, sizeof( P->salt )); if (personallen) memcpy( 
P->personal, personal, BLAKE2S_PERSONALBYTES ); else memset(P->personal, 0, sizeof(P->personal)); return blake2s_init_param( S, P ); } ERL_NIF_TERM blake2sp_hash(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES]; blake2s_state S[PARALLELISM_DEGREE][1]; blake2s_state FS[1]; ErlNifBinary input, key, salt, personal; uint8_t out[BLAKE2S_OUTBYTES] = {0}; unsigned int outlen; int i; ERL_NIF_TERM tmphash[BLAKE2S_OUTBYTES]; if (argc != 5 || !enif_inspect_binary(env, argv[0], &input) || !enif_inspect_binary(env, argv[1], &key) || !enif_get_uint(env, argv[2], &outlen) || !enif_inspect_binary(env, argv[3], &salt) || !enif_inspect_binary(env, argv[4], &personal)) return enif_make_badarg(env); if (!outlen || outlen > BLAKE2S_OUTBYTES) return -1; if( key.size > BLAKE2S_KEYBYTES ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S[i], outlen, key.size, i, salt.data, personal.data, salt.size, personal.size) < 0 ) return -1; S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node if( key.size > 0 ) { uint8_t block[BLAKE2S_BLOCKBYTES]; memset( block, 0, BLAKE2S_BLOCKBYTES ); memcpy( block, key.data, key.size ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES ); secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */ } #if defined(_OPENMP) #pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE) #else for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ ) #endif { #if defined(_OPENMP) size_t id__ = omp_get_thread_num(); #endif uint64_t inlen__ = input.size; const uint8_t *in__ = ( const uint8_t * )input.data; in__ += id__ * BLAKE2S_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ) { blake2s_update( S[id__], in__, BLAKE2S_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; } if( inlen__ > id__ * BLAKE2S_BLOCKBYTES ) { 
const size_t left = inlen__ - id__ * BLAKE2S_BLOCKBYTES; const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES; blake2s_update( S[id__], in__, len ); } blake2s_final( S[id__], hash[id__], BLAKE2S_OUTBYTES ); } if( blake2sp_init_root( FS, outlen, key.size, salt.data, personal.data, salt.size, personal.size) < 0 ) return -1; FS->last_node = 1; // Mark as last node for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES ); blake2s_final( FS, out, outlen );; for (i = 0; i < outlen; i++) { tmphash[i] = enif_make_uint(env, out[i]); } return enif_make_list_from_array(env, tmphash, outlen); } static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info) { return 0; } static ErlNifFunc blake2sp_nif_funcs[] = { {"hash_nif", 5, blake2sp_hash} }; ERL_NIF_INIT(Elixir.Blake2.Blake2sp, blake2sp_nif_funcs, NULL, NULL, upgrade, NULL)
main.c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>   /* sleep() — was missing, leaving sleep() implicitly declared */
#include <omp.h>

/*
 * OpenMP demo: 16 iterations distributed dynamically over 5 threads.
 * Each iteration sleeps for its own index (so later iterations take
 * longer) and reports which thread completed it.  Output order is
 * nondeterministic by design.  Returns 0 on success.
 */
int main()
{
  int i;

  /* 'private(i)' is redundant (the for-loop index is private by default)
     but harmless; kept for explicitness. */
#pragma omp parallel for private(i) num_threads(5) schedule(dynamic)
  for(i = 0; i < 16; i++)
  {
    sleep(i);
    printf("Il thread %d ha completato iterazione %d.\n", omp_get_thread_num() , i);
  }
  printf("Tutti i thread hanno terminato! \n");
  return 0;
}
cryptsha256_fmt_plug.c
/* * This file is part of John the Ripper password cracker, * based on rawSHA256_fmt.c code and Drepper's spec at * http://www.akkadia.org/drepper/SHA-crypt.txt * * This software is Copyright (c) 2012 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * * Ported to SSE2, May 2013, JimF. A little harder than some, since we have to * group and rearrange passwords based upon length. We must only run passwords * of a specific block group size in 1 SSE_COEF_SHA256 bundle. If we later do * PARA_SHA256, then each bundle of SSE_COEF_SHA256*PARA_SHA256 will have to be * made up of passwords of same block group size. * * Here are the block sizes per password length. To be equal group size, all * numbers for 2 passwords must be equal all the way across. So, password lengths * of 0, 1, 2, 3 are 1 group. 4, 5, 6, 7 are another group. 8,9,10,11 are another, * 12-23 are another and the final is 24-35. So there are 5 'groups' of lengths. We * could skip the length 0,1,2,3 group * * Here is the raw block length data. 
The Len: cp pspc cspp ppc cpp psc csp pc 0 : 1 1 1 1 1 1 1 1 1 : 1 1 1 1 1 1 1 1 2 : 1 1 1 1 1 1 1 1 3 : 1 1 1 1 1 1 1 1 4 : 1 2 2 1 1 1 1 1 5 : 1 2 2 1 1 1 1 1 6 : 1 2 2 1 1 1 1 1 7 : 1 2 2 1 1 1 1 1 8 : 1 2 2 1 1 2 2 1 9 : 1 2 2 1 1 2 2 1 10 : 1 2 2 1 1 2 2 1 11 : 1 2 2 1 1 2 2 1 12 : 1 2 2 2 2 2 2 1 13 : 1 2 2 2 2 2 2 1 14 : 1 2 2 2 2 2 2 1 15 : 1 2 2 2 2 2 2 1 16 : 1 2 2 2 2 2 2 1 17 : 1 2 2 2 2 2 2 1 18 : 1 2 2 2 2 2 2 1 19 : 1 2 2 2 2 2 2 1 20 : 1 2 2 2 2 2 2 1 21 : 1 2 2 2 2 2 2 1 22 : 1 2 2 2 2 2 2 1 23 : 1 2 2 2 2 2 2 1 24 : 2 2 2 2 2 2 2 2 25 : 2 2 2 2 2 2 2 2 26 : 2 2 2 2 2 2 2 2 27 : 2 2 2 2 2 2 2 2 28 : 2 2 2 2 2 2 2 2 29 : 2 2 2 2 2 2 2 2 30 : 2 2 2 2 2 2 2 2 31 : 2 2 2 2 2 2 2 2 32 : 2 2 2 2 2 2 2 2 33 : 2 2 2 2 2 2 2 2 34 : 2 2 2 2 2 2 2 2 35 : 2 2 2 2 2 2 2 2 Source to make above table (made up to 40,but over 35 is 3 limbs) #include <stdio.h> int c=32, s=16; _inline int S(int sz) { if (sz<=55) return 1; else if (sz <= 55+64) return 2; else return 3; } void proc(int p) { int cp=p+c; printf("%-2d : %d %d %d %d %d %d %d %d\n", p,S(cp),S(cp+s+p),S(cp+s+p),S(cp+p),S(cp+p),S(cp+s),S(cp+s),S(cp)); } void main() { int i; printf ("Len: cp pspc cspp ppc cpp psc csp pc\n"); for (i = 0; i < 40; ++i) proc(i); } */ #if FMT_EXTERNS_H extern struct fmt_main fmt_cryptsha256; #elif FMT_REGISTERS_H john_register_one(&fmt_cryptsha256); #else #include "arch.h" // Helpful for debugging (at times). //#if ARCH_BITS==32 && ARCH_LITTLE_ENDIAN == 1 //#define FORCE_GENERIC_SHA2 //#endif #include "sha2.h" #define _GNU_SOURCE 1 #include <string.h> #include "params.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "sse-intrinsics.h" #ifdef MMX_COEF_SHA256 // there are problems with SSE OMP builds. Until found, simply do not allow OMP. //#undef _OPENMP //#undef FMT_OMP //#define FMT_OMP 0 // Well, I tried by turning of OMP, but the run still failed. So, I will simply // leave OMP on, but turn off SSE in an OMP build, until I get this figured out. 
//#undef MMX_COEF_SHA256 #endif #ifdef _OPENMP #define OMP_SCALE 8 #include <omp.h> #endif #include "memdbg.h" // NOTE, in SSE mode, even if NOT in OMP, we may need to scale, quite a bit, due to needing // to 'group' passwords differently, so that we have lengths which 'share' the same number // of crypt block counts for each 'type'. We may want to scale as much as 128 or so, just // to try to have better saturation. If we only had 8 passwords given to us, and they were // one each of these lengths: 3 7 8 12 13 14 15 21, in theory, we could do this // with only 2 SSE calls (MMX_COEF==4 for SHA256). However, length 3 has to to run by itself, // length 7 by itself, 8 by itself, and the rest can run together, but there are 5 of them, // so it takes to runs. So, instead of 2 runs, we have to do 5 runs. Not very efficient. // however, if we have a lot more passwords to work with, we can re-arrange them, to run // them in groups that all 'fit' together, and do so until we exhaust all from a given length // range, then do all in the next range. Thus, until we get to the last set within a length // range, we are doing a fully packed SSE run, and having a LOT less wasted space. This will // get even more interesting, when we start doing OMP, but it should just be the same principal, // preload more passwords, and group them, then run the OMP threads over a single length, then // go to the next length, until done, trying to keep each thread running, and keeping each block // of SSE data full, until the last in a range. We probably can simply build all the rearrangments, // then let the threads go on ALL data, without caring about the length, since each thread will only // be working on passwords in a single MMX buffer that all match, at any given moment. 
// #undef MMX_COEF_SHA256 #ifdef MMX_COEF_SHA256 #ifdef _OPENMP #define MMX_COEF_SCALE (128/MMX_COEF_SHA256) #else #define MMX_COEF_SCALE (256/MMX_COEF_SHA256) #endif #else #define MMX_COEF_SCALE 1 #endif #define FORMAT_LABEL "sha256crypt" #ifdef MMX_COEF_SHA256 #define ALGORITHM_NAME SHA256_ALGORITHM_NAME #else #define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB #endif // 35 character input is MAX password that fits into 2 SHA256 blocks // 35 character input creates a 118 byte buffer, plus 1 for 0x80 and // 1 unused byte and 8 byte bit length. That is max for a 2 block crypt #define PLAINTEXT_LENGTH 35 #define CIPHERTEXT_LENGTH 43 #define BINARY_SIZE 32 #define BINARY_ALIGN 4 #define SALT_LENGTH 16 #define SALT_SIZE sizeof(struct saltstruct) #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #ifdef MMX_COEF_SHA256 #define MAX_KEYS_PER_CRYPT MMX_COEF_SHA256 #else #define MAX_KEYS_PER_CRYPT 1 #endif #include "cryptsha256_common.h" static struct fmt_tests tests[] = { {"$5$LKO/Ute40T3FNF95$U0prpBQd4PloSGU0pnpM4z9wKn4vZ1.jsrzQfPqxph9", "U*U*U*U*"}, {"$5$LKO/Ute40T3FNF95$fdgfoJEBoMajNxCv3Ru9LyQ0xZgv0OBMQoq80LQ/Qd.", "U*U***U"}, {"$5$LKO/Ute40T3FNF95$8Ry82xGnnPI/6HtFYnvPBTYgOL23sdMXn8C29aO.x/A", "U*U***U*"}, // this 35 char PW 'should' work, in 1 & 2 buffer code, but it changes the // benchmark timings, so has been removed. Uncomment, test your build, then re-comment it. // {"$5$mTfUlwguIR0Gp2ed$nX5lzmEGAZQ.1.CcncGnSq/lxSF7t1P.YkVlljQfOC2", "01234567890123456789012345678901234"}, {"$5$9mx1HkCz7G1xho50$O7V7YgleJKLUhcfk9pgzdh3RapEaWqMtEp9UUBAKIPA", "*U*U*U*U"}, {"$5$kc7lRD1fpYg0g.IP$d7CMTcEqJyTXyeq8hTdu/jB/I6DGkoo62NXbHIR7S43", ""}, // A 36 byte PW fails with newest code. It would require 3 block SHA buffering. // We only handle 1 and 2, at the current time. //{"$5$aewWTiO8RzEz5FBF$CZ3I.vdWF4omQXMQOv1g3XarjhH0wwR29Jwzt6/gvV/", "012345678901234567890123456789012345"}, // here is a test case for rounds=50000. 
Works, but slows down self test a lot (but not benchmarks) // so, it is best to uncomment after changes, test that this still works, then comment out before release. //{"$5$rounds=50000$LKO/Ute40T3FNF95$S51z7fjx29wblQAQbkqY7G8ExS18kQva39ur8FG5VS0", "U*U*U*U*"}, {NULL} }; /* This structure is 'pre-loaded' with the keyspace of all possible crypts which */ /* will be performed WITHIN the inner loop. There are 8 possible buffers that */ /* are used. They are cp, pspc, cspp, ppc, cpp, psc, csp, and pc, where p stands */ /* for the 'hash' built from the password (and it is the same length as the */ /* password), s stands for the hash built from the salt (same size as salt), and */ /* c stands for the crypt results from the prior loop. There are 8 possible */ /* buffer layouts listed, but they fall into a pattern that is 42 long (2*3*7) */ /* this structure encapsulates this. we build this buffer, after computing the */ /* s hash, the p hash, and the starting c values. Then, within the inner loop, */ /* we simply spin through this structure, calling the SHA256 code to do the work. */ /* NOTE, most of the time, there will be 1 block and 2 block crypts. As the */ /* the password length grows, the more 2 block crypts there are, thus slower */ /**/ /* for SSE only, but 'could' be done for sha2.c code (jtr sha2) */ /* This keyspace was changed, to be put into BE at the start, and then we never */ /* do any swapping, but keep it in BE format from that point on. To do this, we */ /* changed the pointers to be a pointer to the start of the block, AND an offset */ /* for SSE, we need a pointer to the start of the block[0], and the offset. The */ /* index needed will be known in the crypt_all. This means we need something */ /* similar to out GET_POS macros, but also for oSSL formats. */ /* To do this, we have to use the JtR sha2.c functions, since there is this func: */ /* sha256_hash_block(&CTX, data, int perform_endian_swap). 
So if we set the last */ /* param to 0, we can call this function, and it will avoid the byte swapping */ #ifndef MMX_COEF_SHA256 #define BLKS 1 #else #define BLKS MMX_COEF_SHA256 #endif typedef struct cryptloopstruct_t { unsigned char buf[8*2*64*BLKS]; // will allocate to hold 42 2 block buffers (42 * 2 * 64) Reduced to only requiring 8*2*64 // now, the cryptstructs are on the stack within the crypt for loop, so we avoid allocation. // and to avoid the single static variable, or a static array. unsigned char *bufs[BLKS][42]; // points to the start of each 2 block buffer. #ifdef MMX_COEF_SHA256 int offs[BLKS][42]; #endif unsigned char *cptr[BLKS][42]; // points to where we copy the crypt pointer for next round. // Round 0 points to somewhere in round 1's buffer, etc. int datlen[42]; // if 1, then this is a small, only 1 block crypt. Some rounds for shorter passwords take only 1 crypt block. // NOTE, datlen could be changed to a number, and then we could do > 2 block crypts. Would take a little // more memory (and longer PW's certainly DO take more time), but it should work fine. It may be an issue // especially when doing OMP, that the memory footprint of this 'hot' inner loop simply gets too big, and // things slow down. For now, we are limiting ourselves to 35 byte password, which fits into 2 SHA256 buffers } cryptloopstruct; static int (*saved_key_length); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static int max_crypts; /* these 2 values are used in setup of the cryptloopstruct, AND to do our SHA256_Init() calls, in the inner loop */ static const unsigned char padding[128] = { 0x80, 0 /* 0,0,0,0.... 
*/ };
/* Initial SHA-256 state words H0..H7 (FIPS 180-4). Used to re-seed a JtR
   sha2.c context via a plain memcpy instead of a full SHA256_Init() inside
   the hot inner loop. Not needed when CommonCrypto supplies SHA-256. */
#ifndef JTR_INC_COMMON_CRYPTO_SHA2
static const ARCH_WORD_32 ctx_init[8] =
	{0x6A09E667,0xBB67AE85,0x3C6EF372,0xA54FF53A,0x510E527F,0x9B05688C,0x1F83D9AB,0x5BE0CD19};
#endif

/* The currently active salt, installed by set_salt(). 'len' is the salt
   length in bytes (at most SALT_LENGTH), 'rounds' is the iteration count
   parsed from the ciphertext (or ROUNDS_DEFAULT). */
static struct saltstruct {
	unsigned int len;
	unsigned int rounds;
	unsigned char salt[SALT_LENGTH];
} *cur_salt;

/* One-time format setup: size max_keys_per_crypt for the worst case
   (OMP threads * OMP_SCALE * SSE lane scaling) and allocate the shared
   key/length/result arrays. */
static void init(struct fmt_main *self)
{
	int omp_t = 1;
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	omp_t *= OMP_SCALE;
#endif
	max_crypts = MMX_COEF_SCALE * omp_t * MAX_KEYS_PER_CRYPT;
	self->params.max_keys_per_crypt = max_crypts;
	// we allocate 1 more than needed, and use that 'extra' value as a zero length PW to fill in the
	// tail groups in MMX mode.
	saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) * (1+max_crypts), MEM_ALIGN_WORD);
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * (1+max_crypts), MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * (1+max_crypts), MEM_ALIGN_WORD);
}

/* Partial-hash lookups for the cracker's hash tables: mask the low
   4..27 bits of the first 32-bit word of the computed binary. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

/* Store candidate password 'key' at slot 'index', truncating to
   PLAINTEXT_LENGTH (35 bytes -- the 2-SHA256-block limit). */
static void set_key(char *key, int index)
{
	int len = strlen(key);
	saved_key_length[index] = len;
	if (len > PLAINTEXT_LENGTH)
		len = saved_key_length[index] = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
}

/* Return the stored key, NUL-terminated in place (the buffer has one
   spare byte beyond PLAINTEXT_LENGTH for this terminator). */
static char *get_key(int index)
{
	saved_key[index][saved_key_length[index]] = 0;
	return saved_key[index];
}

/* These are the 8 types of buffers this algorithm uses:
   cp pspc cspp ppc cpp psc csp pc */
/* Build the 42-entry rotating buffer schedule for SSE lane 'idx' of
   candidate 'index'. p_bytes/s_bytes are the P and S byte sequences
   produced in the sha256crypt setup phase of crypt_all(). */
static void LoadCryptStruct(cryptloopstruct *crypt_struct, int index, int idx, char *p_bytes, char *s_bytes)
{
	unsigned len_pc, len_ppsc, len_ppc, len_psc;
// length of 'data' unsigned tot_pc, tot_ppsc, tot_ppc, tot_psc; // length of entire block to crypt (64 or 128) unsigned off_pc, off_pspc, off_ppc, off_psc; // offset to the crypt ptr for these 4 'types'. unsigned dlen_pc, dlen_ppsc, dlen_ppc, dlen_psc; // is this 1 or 2 block (or actual len for CommonCrypto, since it uses SHA256_Final() unsigned plen=saved_key_length[index]; unsigned char *cp = crypt_struct->buf; cryptloopstruct *pstr = crypt_struct; #ifdef MMX_COEF_SHA256 // in SSE mode, we FORCE every buffer to be 2 blocks, even if it COULD fit into 1. // Then we simply use the 2 block SSE code. unsigned char *next_cp; #endif len_pc = plen + BINARY_SIZE; len_ppsc = (plen<<1) + cur_salt->len + BINARY_SIZE; len_ppc = (plen<<1) + BINARY_SIZE; len_psc = plen + cur_salt->len + BINARY_SIZE; #ifdef JTR_INC_COMMON_CRYPTO_SHA2 if (len_pc <=55) tot_pc =64; else tot_pc =128; if (len_ppsc<=55) tot_ppsc=64; else tot_ppsc=128; if (len_ppc <=55) tot_ppc =64; else tot_ppc =128; if (len_psc <=55) tot_psc =64; else tot_psc =128; dlen_pc =len_pc; dlen_ppsc=len_ppsc; dlen_ppc =len_ppc; dlen_psc =len_psc; #else if (len_pc <=55) {tot_pc =64; dlen_pc =64;}else{tot_pc =128; dlen_pc =128; } if (len_ppsc<=55) {tot_ppsc=64; dlen_ppsc=64;}else{tot_ppsc=128; dlen_ppsc=128; } if (len_ppc <=55) {tot_ppc =64; dlen_ppc =64;}else{tot_ppc =128; dlen_ppc =128; } if (len_psc <=55) {tot_psc =64; dlen_psc =64;}else{tot_psc =128; dlen_psc =128; } #endif off_pc = len_pc - BINARY_SIZE; off_pspc = len_ppsc - BINARY_SIZE; off_ppc = len_ppc - BINARY_SIZE; off_psc = len_psc - BINARY_SIZE; // Adjust cp for idx; #ifdef MMX_COEF_SHA256 next_cp = cp + (2*64*BLKS); #endif // pstr->buf[0] is a cp (First of this type) pstr->bufs[idx][0] = pstr->cptr[idx][41] = cp; // For fist element only, we DO copy in the c value. 
memcpy(cp, crypt_out[index], BINARY_SIZE); cp += BINARY_SIZE; memcpy(cp, p_bytes, plen); cp += plen; if (!idx) pstr->datlen[0] = dlen_pc; memcpy(cp, padding, tot_pc-2-len_pc); cp += (tot_pc-len_pc); pstr->bufs[idx][0][tot_pc-2] = (len_pc<<3)>>8; pstr->bufs[idx][0][tot_pc-1] = (len_pc<<3)&0xFF; #ifdef MMX_COEF_SHA256 cp = next_cp; next_cp = cp + (2*64*BLKS); #endif // pstr->buf[1] is a pspc (First of this type) pstr->bufs[idx][1] = cp; pstr->cptr[idx][0] = cp + off_pspc; memcpy(cp, p_bytes, plen); cp += plen; memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len; memcpy(cp, p_bytes, plen); cp += (plen+BINARY_SIZE); if (!idx) pstr->datlen[1] = dlen_ppsc; memcpy(cp, padding, tot_ppsc-2-len_ppsc); cp += (tot_ppsc-len_ppsc); pstr->bufs[idx][1][tot_ppsc-2] = (len_ppsc<<3)>>8; pstr->bufs[idx][1][tot_ppsc-1] = (len_ppsc<<3)&0xFF; #ifdef MMX_COEF_SHA256 cp = next_cp; next_cp = cp + (2*64*BLKS); #endif // pstr->buf[2] is a cspp (First of this type) pstr->bufs[idx][2] = pstr->cptr[idx][1] = cp; cp += BINARY_SIZE; memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len; memcpy(cp, p_bytes, plen); cp += plen; memcpy(cp, p_bytes, plen); cp += plen; if (!idx) pstr->datlen[2] = dlen_ppsc; memcpy(cp, padding, tot_ppsc-2-len_ppsc); cp += (tot_ppsc-len_ppsc); pstr->bufs[idx][2][tot_ppsc-2] = (len_ppsc<<3)>>8; pstr->bufs[idx][2][tot_ppsc-1] = (len_ppsc<<3)&0xFF; #ifdef MMX_COEF_SHA256 cp = next_cp; next_cp = cp + (2*64*BLKS); #endif // pstr->buf[3] is a ppc (First of this type) pstr->bufs[idx][3] = cp; pstr->cptr[idx][2] = cp + off_ppc; memcpy(cp, p_bytes, plen); cp += plen; memcpy(cp, p_bytes, plen); cp +=(plen+BINARY_SIZE); if (!idx) pstr->datlen[3] = dlen_ppc; memcpy(cp, padding, tot_ppc-2-len_ppc); cp += (tot_ppc-len_ppc); pstr->bufs[idx][3][tot_ppc-2] = (len_ppc<<3)>>8; pstr->bufs[idx][3][tot_ppc-1] = (len_ppc<<3)&0xFF; #ifdef MMX_COEF_SHA256 cp = next_cp; next_cp = cp + (2*64*BLKS); #endif // pstr->buf[4] is a cspp (from 2) pstr->bufs[idx][4] = pstr->cptr[idx][3] = 
pstr->bufs[idx][2]; if (!idx) pstr->datlen[4] = dlen_ppsc; // pstr->buf[5] is a pspc (from [1]) pstr->bufs[idx][5] = pstr->bufs[idx][1]; pstr->cptr[idx][4] = pstr->cptr[idx][0]; if (!idx) pstr->datlen[5] = dlen_ppsc; // pstr->buf[6] is a cpp (First of this type) pstr->bufs[idx][6] = pstr->cptr[idx][5] = cp; cp += BINARY_SIZE; memcpy(cp, p_bytes, plen); cp += plen; memcpy(cp, p_bytes, plen); cp += plen; if (!idx) pstr->datlen[6] = dlen_ppc; memcpy(cp, padding, tot_ppc-2-len_ppc); cp += (tot_ppc-len_ppc); pstr->bufs[idx][6][tot_ppc-2] = (len_ppc<<3)>>8; pstr->bufs[idx][6][tot_ppc-1] = (len_ppc<<3)&0xFF; #ifdef MMX_COEF_SHA256 cp = next_cp; next_cp = cp + (2*64*BLKS); #endif // pstr->buf[07] psc (First of this type) pstr->bufs[idx][7] = cp; pstr->cptr[idx][6] = cp + off_psc; memcpy(cp, p_bytes, plen); cp += plen; memcpy(cp, s_bytes, cur_salt->len); cp += (cur_salt->len+BINARY_SIZE); if (!idx) pstr->datlen[7] = dlen_psc; memcpy(cp, padding, tot_psc-2-len_psc); cp += (tot_psc-len_psc); pstr->bufs[idx][7][tot_psc-2] = (len_psc<<3)>>8; pstr->bufs[idx][7][tot_psc-1] = (len_psc<<3)&0xFF; #ifdef MMX_COEF_SHA256 cp = next_cp; next_cp = cp + (2*64*BLKS); #endif // pstr->buf[08] cspp (from 2) pstr->bufs[idx][8] = pstr->cptr[idx][7] = pstr->bufs[idx][2]; if (!idx) pstr->datlen[8] = dlen_ppsc; // pstr->buf[09] ppc (from 3) pstr->bufs[idx][9] = pstr->bufs[idx][3]; pstr->cptr[idx][8] = pstr->cptr[idx][2]; if (!idx) pstr->datlen[9] = dlen_ppc; // pstr->buf[10] cspp (from 2) pstr->bufs[idx][10] = pstr->cptr[idx][9] = pstr->bufs[idx][2]; if (!idx) pstr->datlen[10] = dlen_ppsc; // pstr->buf[11] pspc (from 1) pstr->bufs[idx][11] = pstr->bufs[idx][1]; pstr->cptr[idx][10] = pstr->cptr[idx][0]; if (!idx) pstr->datlen[11] = dlen_ppsc; // pstr->buf[12] cpp (from 6) pstr->bufs[idx][12] = pstr->cptr[idx][11] = pstr->bufs[idx][6]; if (!idx) pstr->datlen[12] = dlen_ppc; // pstr->buf[13] pspc (from 1) pstr->bufs[idx][13] = pstr->bufs[idx][1]; pstr->cptr[idx][12] = pstr->cptr[idx][0]; if (!idx) 
pstr->datlen[13] = dlen_ppsc;

	// pstr->buf[14] csp (First of this type)
	pstr->bufs[idx][14] = pstr->cptr[idx][13] = cp;
	cp += BINARY_SIZE;
	memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
	memcpy(cp, p_bytes, plen); cp += plen;
	if (!idx) pstr->datlen[14] = dlen_psc;
	memcpy(cp, padding, tot_psc-2-len_psc); cp += (tot_psc-len_psc);
	// append the 16-bit big-endian SHA-256 bit-length at the block end
	pstr->bufs[idx][14][tot_psc-2] = (len_psc<<3)>>8;
	pstr->bufs[idx][14][tot_psc-1] = (len_psc<<3)&0xFF;
#ifdef MMX_COEF_SHA256
	cp = next_cp;
	next_cp = cp + (2*64*BLKS);
#endif
	// pstr->buf[15] ppc (from 3)
	pstr->bufs[idx][15] = pstr->bufs[idx][3];
	pstr->cptr[idx][14] = pstr->cptr[idx][2];
	if (!idx) pstr->datlen[15] = dlen_ppc;
	// pstr->buf[16] cspp (from 2)
	pstr->bufs[idx][16] = pstr->cptr[idx][15] = pstr->bufs[idx][2];
	if (!idx) pstr->datlen[16] = dlen_ppsc;
	// pstr->buf[17] pspc (from 1)
	pstr->bufs[idx][17] = pstr->bufs[idx][1];
	pstr->cptr[idx][16] = pstr->cptr[idx][0];
	if (!idx) pstr->datlen[17] = dlen_ppsc;
	// pstr->buf[18] cpp (from 6)
	pstr->bufs[idx][18] = pstr->cptr[idx][17] = pstr->bufs[idx][6];
	if (!idx) pstr->datlen[18] = dlen_ppc;
	// pstr->buf[19] pspc (from 1)
	pstr->bufs[idx][19] = pstr->bufs[idx][1];
	pstr->cptr[idx][18] = pstr->cptr[idx][0];
	if (!idx) pstr->datlen[19] = dlen_ppsc;
	// pstr->buf[20] cspp (from 2)
	pstr->bufs[idx][20] = pstr->cptr[idx][19] = pstr->bufs[idx][2];
	if (!idx) pstr->datlen[20] = dlen_ppsc;
	// pstr->buf[21] pc (First of this type)
	pstr->bufs[idx][21] = cp;
	pstr->cptr[idx][20] = cp + off_pc;
	memcpy(cp, p_bytes, plen); cp += (plen+BINARY_SIZE);
	if (!idx) pstr->datlen[21] = dlen_pc;
	// NOTE(review): 'tot_psc' here looks like a typo for 'tot_pc' -- every
	// other buffer pads with its own tot_* value, and the two length bytes
	// just below use tot_pc. Since tot_psc >= tot_pc this only copies extra
	// zero padding, but verify it cannot overrun this buffer region before
	// changing it. TODO confirm against upstream.
	memcpy(cp, padding, tot_psc-2-len_pc);
	pstr->bufs[idx][21][tot_pc-2] = (len_pc<<3)>>8;
	pstr->bufs[idx][21][tot_pc-1] = (len_pc<<3)&0xFF;
#ifdef MMX_COEF_SHA256
	cp = next_cp;
	next_cp = cp + (2*64*BLKS);
#endif
	// pstr->buf[22] cspp (from 2)
	pstr->bufs[idx][22] = pstr->cptr[idx][21] = pstr->bufs[idx][2];
	if (!idx) pstr->datlen[22] = dlen_ppsc;
	// pstr->buf[23] pspc (from 1)
	pstr->bufs[idx][23] = pstr->bufs[idx][1];
pstr->cptr[idx][22] = pstr->cptr[idx][0]; if (!idx) pstr->datlen[23] = dlen_ppsc; // pstr->buf[24] cpp (from 6) pstr->bufs[idx][24] = pstr->cptr[idx][23] = pstr->bufs[idx][6]; if (!idx) pstr->datlen[24] = dlen_ppc; // pstr->buf[25] pspc (from 1) pstr->bufs[idx][25] = pstr->bufs[idx][1]; pstr->cptr[idx][24] = pstr->cptr[idx][0]; if (!idx) pstr->datlen[25] = dlen_ppsc; // pstr->buf[26] cspp (from 2) pstr->bufs[idx][26] = pstr->cptr[idx][25] = pstr->bufs[idx][2]; if (!idx) pstr->datlen[26] = dlen_ppsc; // pstr->buf[27] ppc (from 3) pstr->bufs[idx][27] = pstr->bufs[idx][3]; pstr->cptr[idx][26] = pstr->cptr[idx][2]; if (!idx) pstr->datlen[27] = dlen_ppc; // pstr->buf[28] csp (from 14) pstr->bufs[idx][28] = pstr->cptr[idx][27] = pstr->bufs[idx][14]; if (!idx) pstr->datlen[28] = dlen_psc; // pstr->buf[29] pspc (from 1) pstr->bufs[idx][29] = pstr->bufs[idx][1]; pstr->cptr[idx][28] = pstr->cptr[idx][0]; if (!idx) pstr->datlen[29] = dlen_ppsc; // pstr->buf[30] cpp (from 6) pstr->bufs[idx][30] = pstr->cptr[idx][29] = pstr->bufs[idx][6]; if (!idx) pstr->datlen[30] = dlen_ppc; // pstr->buf[31] pspc (from 1) pstr->bufs[idx][31] = pstr->bufs[idx][1]; pstr->cptr[idx][30] = pstr->cptr[idx][0]; if (!idx) pstr->datlen[31] = dlen_ppsc; // pstr->buf[32] cspp (from 2) pstr->bufs[idx][32] = pstr->cptr[idx][31] = pstr->bufs[idx][2]; if (!idx) pstr->datlen[32] = dlen_ppsc; // pstr->buf[33] ppc (from 3) pstr->bufs[idx][33] = pstr->bufs[idx][3]; pstr->cptr[idx][32] = pstr->cptr[idx][2]; if (!idx) pstr->datlen[33] = dlen_ppc; // pstr->buf[34] cspp (from 2) pstr->bufs[idx][34] = pstr->cptr[idx][33] = pstr->bufs[idx][2]; if (!idx) pstr->datlen[34] = dlen_ppsc; // pstr->buf[35] psc (from 7) pstr->bufs[idx][35] = pstr->bufs[idx][7]; pstr->cptr[idx][34] = pstr->cptr[idx][6]; if (!idx) pstr->datlen[35] = dlen_psc; // pstr->buf[36] cpp (from 6) pstr->bufs[idx][36] = pstr->cptr[idx][35] = pstr->bufs[idx][6]; if (!idx) pstr->datlen[36] = dlen_ppc; // pstr->buf[37] pspc (from 1) pstr->bufs[idx][37] = 
pstr->bufs[idx][1]; pstr->cptr[idx][36] = pstr->cptr[idx][0]; if (!idx) pstr->datlen[37] = dlen_ppsc; // pstr->buf[38] cspp (from 2) pstr->bufs[idx][38] = pstr->cptr[idx][37] = pstr->bufs[idx][2]; if (!idx) pstr->datlen[38] = dlen_ppsc; // pstr->buf[39] ppc (from 3) pstr->bufs[idx][39] = pstr->bufs[idx][3]; pstr->cptr[idx][38] = pstr->cptr[idx][2]; if (!idx) pstr->datlen[39] = dlen_ppc; // pstr->buf[40] cspp (from 2) pstr->bufs[idx][40] = pstr->cptr[idx][39] = pstr->bufs[idx][2]; if (!idx) pstr->datlen[40] = dlen_ppsc; // pstr->buf[41] pspc (from 1) pstr->bufs[idx][41] = pstr->bufs[idx][1]; pstr->cptr[idx][40] = pstr->cptr[idx][0]; if (!idx) pstr->datlen[41] = dlen_ppsc; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; int *MixOrder, tot_todo; // static int times=-1; // ++times; // if (times==1) { // printf ("\nKey = %*.*s\n", saved_key_length[0], saved_key_length[0], saved_key[0]); // } #ifdef MMX_COEF_SHA256 // group based upon size splits. MixOrder = mem_alloc(sizeof(int)*(count+5*MMX_COEF_SHA256)); { const int lens[6] = {0,4,8,12,24,36}; int j; tot_todo = 0; saved_key_length[count] = 0; // point all 'tail' MMX buffer elements to this location. for (j = 0; j < 5; ++j) { for (index = 0; index < count; ++index) { if (saved_key_length[index] >= lens[j] && saved_key_length[index] < lens[j+1]) MixOrder[tot_todo++] = index; } while (tot_todo & (MMX_COEF_SHA256-1)) MixOrder[tot_todo++] = count; } } printf ("tot_todo=%d count+5*MMX_COEF_SHA256=%d\n", tot_todo, count+5*MMX_COEF_SHA256); #else // no need to mix. just run them one after the next, in any order. MixOrder = mem_alloc(sizeof(int)*count); for (index = 0; index < count; ++index) MixOrder[index] = index; tot_todo = count; #endif #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < tot_todo; index += MAX_KEYS_PER_CRYPT) { // portably align temp_result char * pointer machine word size. 
union xx { unsigned char c[BINARY_SIZE]; ARCH_WORD a[BINARY_SIZE/sizeof(ARCH_WORD)]; } u; unsigned char *temp_result = u.c; SHA256_CTX ctx; SHA256_CTX alt_ctx; size_t cnt; int idx; char *cp; char p_bytes[PLAINTEXT_LENGTH+1]; char s_bytes[PLAINTEXT_LENGTH+1]; JTR_ALIGN(16) cryptloopstruct crypt_struct; #ifdef MMX_COEF_SHA256 JTR_ALIGN(16) ARCH_WORD_32 sse_out[64]; #endif for (idx = 0; idx < MAX_KEYS_PER_CRYPT; ++idx) { /* Prepare for the real work. */ SHA256_Init(&ctx); /* Add the key string. */ SHA256_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_key_length[MixOrder[index+idx]]); /* The last part is the salt string. This must be at most 16 characters and it ends at the first `$' character (for compatibility with existing implementations). */ SHA256_Update(&ctx, cur_salt->salt, cur_salt->len); /* Compute alternate SHA256 sum with input KEY, SALT, and KEY. The final result will be added to the first context. */ SHA256_Init(&alt_ctx); /* Add key. */ SHA256_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_key_length[MixOrder[index+idx]]); /* Add salt. */ SHA256_Update(&alt_ctx, cur_salt->salt, cur_salt->len); /* Add key again. */ SHA256_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_key_length[MixOrder[index+idx]]); /* Now get result of this (32 bytes) and add it to the other context. */ SHA256_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &alt_ctx); /* Add for any character in the key one byte of the alternate sum. */ for (cnt = saved_key_length[MixOrder[index+idx]]; cnt > BINARY_SIZE; cnt -= BINARY_SIZE) SHA256_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE); SHA256_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], cnt); /* Take the binary representation of the length of the key and for every 1 add the alternate sum, for every 0 the key. 
*/ for (cnt = saved_key_length[MixOrder[index+idx]]; cnt > 0; cnt >>= 1) if ((cnt & 1) != 0) SHA256_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE); else SHA256_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_key_length[MixOrder[index+idx]]); /* Create intermediate result. */ SHA256_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &ctx); /* Start computation of P byte sequence. */ SHA256_Init(&alt_ctx); /* For every character in the password add the entire password. */ for (cnt = 0; cnt < saved_key_length[MixOrder[index+idx]]; ++cnt) SHA256_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_key_length[MixOrder[index+idx]]); /* Finish the digest. */ SHA256_Final(temp_result, &alt_ctx); /* Create byte sequence P. */ cp = p_bytes; for (cnt = saved_key_length[MixOrder[index+idx]]; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE) cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE; memcpy (cp, temp_result, cnt); /* Start computation of S byte sequence. */ SHA256_Init(&alt_ctx); /* For every character in the password add the entire password. */ for (cnt = 0; cnt < 16 + ((unsigned char*)crypt_out[MixOrder[index+idx]])[0]; ++cnt) SHA256_Update(&alt_ctx, cur_salt->salt, cur_salt->len); /* Finish the digest. */ SHA256_Final(temp_result, &alt_ctx); /* Create byte sequence S. */ cp = s_bytes; for (cnt = cur_salt->len; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE) cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE; memcpy (cp, temp_result, cnt); /* Repeatedly run the collected hash value through SHA256 to burn CPU cycles. 
*/ LoadCryptStruct(&crypt_struct, MixOrder[index+idx], idx, p_bytes, s_bytes); } //dump_stuff(&crypt_struct, 2*64*8*BLKS); idx = 0; #ifdef MMX_COEF_SHA256 for (cnt = 1; ; ++cnt) { // printf ("SHA #%d\n", cnt); if (crypt_struct.datlen[idx]==128) { unsigned char *cp = crypt_struct.bufs[0][idx]; SSESHA256body((__m128i *)cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK); // dump_stuff_mmx(sse_out, 32, 0); SSESHA256body((__m128i *)&cp[64], sse_out, sse_out, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK|SSEi_RELOAD); // if (!index && times == 1) { // printf("SHA1 : #%d\n", cnt); // dump_stuff_mmx(sse_out, 32, 0); // } } else { unsigned char *cp = crypt_struct.bufs[0][idx]; SSESHA256body((__m128i *)cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK); // if (!index && times == 1) { // printf("SHA1 : #%d\n", cnt); // dump_stuff_mmx(sse_out, 32, 0); // } } if (cnt == cur_salt->rounds) break; { int j, k; for (k = 0; k < MMX_COEF_SHA256; ++k) { ARCH_WORD_32 *o = (ARCH_WORD_32 *)crypt_struct.cptr[k][idx]; for (j = 0; j < 8; ++j) *o++ = JOHNSWAP(sse_out[(j<<(MMX_COEF_SHA256>>1))+k]); } } if (++idx == 42) idx = 0; } { int j, k; for (k = 0; k < MMX_COEF_SHA256; ++k) { ARCH_WORD_32 *o = (ARCH_WORD_32 *)crypt_out[MixOrder[index+k]]; for (j = 0; j < 8; ++j) *o++ = JOHNSWAP(sse_out[(j<<(MMX_COEF_SHA256>>1))+k]); } } #else SHA256_Init(&ctx); for (cnt = 1; ; ++cnt) { // calling with 64 byte, or 128 byte always, will force the update to properly crypt the data. // NOTE the data is fully formed. It ends in a 0x80, is padded with nulls, AND has bit appended. 
SHA256_Update(&ctx, crypt_struct.bufs[0][idx], crypt_struct.datlen[idx]); // if (times == 1) { // printf("SHA1 : #%d\n", cnt); // dump_stuff(ctx.h, 32); // } if (cnt == cur_salt->rounds) break; #ifdef JTR_INC_COMMON_CRYPTO_SHA2 SHA256_Final(crypt_struct.cptr[0][idx], &ctx); #else // !defined JTR_INC_COMMON_CRYPTO_SHA2, so it is oSSL, or generic #if ARCH_LITTLE_ENDIAN { int j; ARCH_WORD_32 *o = (ARCH_WORD_32 *)crypt_struct.cptr[0][idx]; for (j = 0; j < 8; ++j) *o++ = JOHNSWAP(ctx.h[j]); } #else memcpy(crypt_struct.cptr[0][idx], ctx.h, BINARY_SIZE); #endif #endif if (++idx == 42) idx = 0; #ifdef JTR_INC_COMMON_CRYPTO_SHA2 SHA256_Init(&ctx); #else // this memcpy is 'good enough', used instead of SHA256_Init() memcpy(ctx.h, ctx_init, sizeof(ctx_init)); #endif } #ifdef JTR_INC_COMMON_CRYPTO_SHA2 SHA256_Final((unsigned char*)crypt_out[MixOrder[index]], &ctx); #else #if ARCH_LITTLE_ENDIAN { int j; ARCH_WORD_32 *o = (ARCH_WORD_32 *)crypt_out[MixOrder[index]]; for (j = 0; j < 8; ++j) *o++ = JOHNSWAP(ctx.h[j]); } #else memcpy(crypt_out[MixOrder[index]], ctx.h, BINARY_SIZE); #endif #endif #endif // MMX_COEF_SHA256 // if (!index && times==1) { // printf ("crypt_out[%d] MixOrder[%d]\n", MixOrder[index], index); // dump_stuff(crypt_out[MixOrder[index]], 32); // } } MEM_FREE(MixOrder); // if (!index && times==1) // exit(0); return count; } static void set_salt(void *salt) { cur_salt = salt; } static void *get_salt(char *ciphertext) { static struct saltstruct out; int len; out.rounds = ROUNDS_DEFAULT; ciphertext += 3; if (!strncmp(ciphertext, ROUNDS_PREFIX, sizeof(ROUNDS_PREFIX) - 1)) { const char *num = ciphertext + sizeof(ROUNDS_PREFIX) - 1; char *endp; unsigned long int srounds = strtoul(num, &endp, 10); if (*endp == '$') { ciphertext = endp + 1; srounds = srounds < ROUNDS_MIN ? ROUNDS_MIN : srounds; out.rounds = srounds > ROUNDS_MAX ? 
ROUNDS_MAX : srounds;
		}
	}
	// the salt proper runs up to the next '$' separator
	for (len = 0; ciphertext[len] != '$'; len++);
	memcpy(out.salt, ciphertext, len);
	out.len = len;
	return &out;
}

/* Any computed result matching 'binary' among the first 'count' slots? */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
			return 1;
	return 0;
}

/* Exact 32-byte comparison for one candidate slot. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Full binary was already compared in cmp_one; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

#if FMT_MAIN_VERSION > 11
/* Expose the salt's rounds value as the format's tunable-cost metric. */
static unsigned int iteration_count(void *salt)
{
	struct saltstruct *sha256crypt_salt;
	sha256crypt_salt = salt;
	return (unsigned int)sha256crypt_salt->rounds;
}
#endif

// Public domain hash function by DJ Bernstein
// We are hashing the entire struct
static int salt_hash(void *salt)
{
	unsigned char *s = salt;
	unsigned int hash = 5381;
	unsigned int i;
	for (i = 0; i < SALT_SIZE; i++)
		hash = ((hash << 5) + hash) ^ s[i];
	return hash & (SALT_HASH_SIZE - 1);
}

/* Format registration: descriptor block plus the method table the JtR
   core drives (init/valid/salt/key handling, crypt_all, comparisons). */
struct fmt_main fmt_cryptsha256 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		"SHA256 " ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{
			"iteration count",
		},
#endif
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{
			iteration_count,
		},
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_binop__div_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__div_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint8) // A*D function (colscale): GB (_AxD__div_uint8) // D*A function (rowscale): GB (_DxB__div_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__div_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__div_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint8) // C=scalar+B GB (_bind1st__div_uint8) // C=scalar+B' GB (_bind1st_tran__div_uint8) // C=A+scalar GB (_bind2nd__div_uint8) // C=A'+scalar GB (_bind2nd_tran__div_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 
0 // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 8) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_UNSIGNED (x, y, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT8 || GxB_NO_DIV_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    // GrB_NO_VALUE tells the caller to fall back to the generic kernel
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__div_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the return above always fires); kept by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__div_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // for eWiseUnion, alpha/beta replace missing entries of A and B
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__div_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for div_uint8 (see the macro above), so only the
    // #else branch below is compiled for this operator.
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__div_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__div_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (Bb NULL means all present)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (x, bij, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__div_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (aij, y, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 8) ;    \
}

GrB_Info GB (_bind1st_tran__div_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template (same type here)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 8) ;    \
}

GrB_Info GB (_bind2nd_tran__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
logit_loss.h
/** * Copyright (c) 2015 by Contributors */ #ifndef DIFACTO_LOSS_LOGIT_LOSS_H_ #define DIFACTO_LOSS_LOGIT_LOSS_H_ #include <vector> #include <cmath> #include "difacto/base.h" #include "difacto/loss.h" #include "dmlc/data.h" #include "dmlc/omp.h" #include "common/spmv.h" namespace difacto { /** * \brief the logistic loss * * :math:`\ell(x,y,w) = log(1 + exp(- y <w, x>))` * */ class LogitLoss : public Loss { public: LogitLoss() {} virtual ~LogitLoss() {} KWArgs Init(const KWArgs& kwargs) override { return kwargs; } /** * \brief perform prediction * * pred += X * w * * @param data the data X * @param param input parameters * - param[0], real_t vector, the weights * - param[1], optional int vector, the weight positions * @param pred predict output, should be pre-allocated */ void Predict(const dmlc::RowBlock<unsigned>& data, const std::vector<SArray<char>>& param, SArray<real_t>* pred) override { CHECK_EQ(param.size(), 3); Predict(data, SArray<real_t>(param[0]), SArray<int>(param[1]), pred); } void Predict(const dmlc::RowBlock<unsigned>& data, const SArray<real_t>& weights, const SArray<int>& w_pos, SArray<real_t>* pred) { SArray<real_t> w = weights; SpMV::Times(data, w, pred, nthreads_, w_pos, {}); } /*! 
* \brief compute the gradients * * p = - y ./ (1 + exp (y .* pred)); * grad += X' * p; * * @param data the data X * @param param input parameters * - param[0], real_t vector, the predict output * - param[1], optional int vector, the gradient positions * @param grad the results, should be pre-allocated */ void CalcGrad(const dmlc::RowBlock<unsigned>& data, const std::vector<SArray<char>>& param, SArray<real_t>* grad) override { CHECK_EQ(param.size(), 4); CalcGrad(data, SArray<real_t>(param[0]), SArray<int>(param[1]), SArray<real_t>(param[3]), grad); } void CalcGrad(const dmlc::RowBlock<unsigned>& data, const SArray<real_t>& weights, const SArray<int>& w_pos, const SArray<real_t>& pred, SArray<real_t>* grad) { SArray<real_t> p; p.CopyFrom(pred); CHECK_EQ(p.size(), data.size); // p = ... CHECK_NOTNULL(data.label); #pragma omp parallel for num_threads(nthreads_) for (size_t i = 0; i < p.size(); ++i) { real_t y = data.label[i] > 0 ? 1 : -1; if (data.weight) { p[i] = - y / (1 + std::exp(y * p[i])) * data.weight[i]; } else { p[i] = - y / (1 + std::exp(y * p[i])); } } // grad += ... SpMV::TransTimes(data, p, grad, nthreads_, {}, w_pos); } }; } // namespace difacto #endif // DIFACTO_LOSS_LOGIT_LOSS_H_
convolution_3x3_pack8to1_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_neon(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt) { // winograd42 transform kernel Mat kernel_tm(6 * 6, inch, outch, (size_t)2u); const short ktm[6][3] = { {6, 0, 0}, {-4, -4, -4}, {-4, 4, -4}, {1, 2, 4}, {1, -2, 4}, {0, 0, 6} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { short* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = 8a-inch/8a-36-outch kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 
8 + outch % 8, (size_t)2u * 8, 8); int p = 0; for (; p + 7 < outch; p += 8) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); const Mat k4 = kernel_tm.channel(p + 4); const Mat k5 = kernel_tm.channel(p + 5); const Mat k6 = kernel_tm.channel(p + 6); const Mat k7 = kernel_tm.channel(p + 7); Mat g0 = kernel_tm_pack8to1.channel(p / 8); for (int k = 0; k < 36; k++) { short* g00 = g0.row<short>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = k0.row<const short>(q + i)[k]; g00[1] = k1.row<const short>(q + i)[k]; g00[2] = k2.row<const short>(q + i)[k]; g00[3] = k3.row<const short>(q + i)[k]; g00[4] = k4.row<const short>(q + i)[k]; g00[5] = k5.row<const short>(q + i)[k]; g00[6] = k6.row<const short>(q + i)[k]; g00[7] = k7.row<const short>(q + i)[k]; g00 += 8; } } } } for (; p < outch; p++) { const Mat k0 = kernel_tm.channel(p); Mat g0 = kernel_tm_pack8to1.channel(p / 8 + p % 8); for (int k = 0; k < 36; k++) { short* g00 = g0.row<short>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = k0.row<const short>(q + i)[k]; g00 += 1; } } } } } static void conv3x3s1_winograd42_pack8to1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; // size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; 
bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); short tmp[6][6][8]; // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { int8x8_t _r00 = vld1_s8(r0); int8x8_t _r01 = vld1_s8(r0 + 8); int8x8_t _r02 = vld1_s8(r0 + 16); int8x8_t _r03 = vld1_s8(r0 + 24); int8x8_t _r04 = vld1_s8(r0 + 32); int8x8_t _r05 = vld1_s8(r0 + 40); int8x8_t _v4s8 = vdup_n_s8(4); int8x8_t _v5s8 = vdup_n_s8(5); int16x8_t _v2 = vdupq_n_s16(2); int16x8_t _v4 = vdupq_n_s16(4); // int16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f); int16x8_t _tmp0m = vsubq_s16(vaddw_s8(vmull_s8(_r00, _v4s8), _r04), vmull_s8(_r02, _v5s8)); // int16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f); int16x8_t _tmp1m = vmlsq_s16(vaddl_s8(_r04, _r03), vaddl_s8(_r01, _r02), _v4); // int16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f); int16x8_t _tmp2m = vmlaq_s16(vsubl_s8(_r04, _r03), vsubl_s8(_r01, _r02), _v4); // int16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f); int16x8_t _tmp3m = vmlsq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2); // int16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, 
_r02), vsubq_f16(_r01, _r03), 2.f); int16x8_t _tmp4m = vmlaq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2); // int16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f); int16x8_t _tmp5m = vsubq_s16(vaddw_s8(vmull_s8(_r01, _v4s8), _r05), vmull_s8(_r03, _v5s8)); vst1q_s16(tmp[0][m], _tmp0m); vst1q_s16(tmp[1][m], _tmp1m); vst1q_s16(tmp[2][m], _tmp2m); vst1q_s16(tmp[3][m], _tmp3m); vst1q_s16(tmp[4][m], _tmp4m); vst1q_s16(tmp[5][m], _tmp5m); r0 += w * 8; } short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8; short* r0_tm_1 = r0_tm_0 + tiles * 8; short* r0_tm_2 = r0_tm_0 + tiles * 16; short* r0_tm_3 = r0_tm_0 + tiles * 24; short* r0_tm_4 = r0_tm_0 + tiles * 32; short* r0_tm_5 = r0_tm_0 + tiles * 40; for (int m = 0; m < 6; m++) { int16x8_t _tmp00 = vld1q_s16(tmp[m][0]); int16x8_t _tmp01 = vld1q_s16(tmp[m][1]); int16x8_t _tmp02 = vld1q_s16(tmp[m][2]); int16x8_t _tmp03 = vld1q_s16(tmp[m][3]); int16x8_t _tmp04 = vld1q_s16(tmp[m][4]); int16x8_t _tmp05 = vld1q_s16(tmp[m][5]); int16x8_t _v2 = vdupq_n_s16(2); int16x8_t _v4 = vdupq_n_s16(4); int16x8_t _v5 = vdupq_n_s16(5); int16x8_t _r0tm0 = vmlsq_s16(vmlaq_s16(_tmp04, _tmp00, _v4), _tmp02, _v5); int16x8_t _r0tm1 = vmlsq_s16(vaddq_s16(_tmp04, _tmp03), vaddq_s16(_tmp01, _tmp02), _v4); int16x8_t _r0tm2 = vmlaq_s16(vsubq_s16(_tmp04, _tmp03), vsubq_s16(_tmp01, _tmp02), _v4); int16x8_t _r0tm3 = vmlsq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2); int16x8_t _r0tm4 = vmlaq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2); int16x8_t _r0tm5 = vmlsq_s16(vmlaq_s16(_tmp05, _tmp01, _v4), _tmp03, _v5); vst1q_s16(r0_tm_0, _r0tm0); vst1q_s16(r0_tm_1, _r0tm1); vst1q_s16(r0_tm_2, _r0tm2); vst1q_s16(r0_tm_3, _r0tm3); vst1q_s16(r0_tm_4, _r0tm4); vst1q_s16(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 48; r0_tm_1 += tiles * 48; r0_tm_2 += tiles * 48; r0_tm_3 += tiles * 48; r0_tm_4 += tiles * 48; r0_tm_5 += tiles * 48; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat 
top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #endif // __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 7 < tiles; i += 8) { short* tm2p = tm2.row<short>(i / 8); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } #endif for (; i + 3 < tiles; 
i += 4) { #if __aarch64__ short* tm2p = tm2.row<short>(i / 8 + (i % 8) / 4); #else short* tm2p = tm2.row<short>(i / 4); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x4 #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vswp d1, d2 \n" "vswp d5, d6 \n" "vswp q1, q2 \n" "vst4.s16 {d0-d3}, [%1 :64]! \n" "vst4.s16 {d4-d7}, [%1 :64]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { #if __aarch64__ short* tm2p = tm2.row<short>(i / 8 + (i % 8) / 4 + i % 4); #else short* tm2p = tm2.row<short>(i / 4 + i % 4); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.s16 {d0-d1}, [%0 :64] \n" "vst1.s16 {d0-d1}, [%1 :64]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); int* output2_tm = top_blob_tm.channel(p + 2); int* output3_tm = top_blob_tm.channel(p + 3); int* output4_tm = top_blob_tm.channel(p + 4); int* output5_tm = top_blob_tm.channel(p + 5); int* output6_tm = top_blob_tm.channel(p + 6); int* output7_tm = top_blob_tm.channel(p + 7); const Mat kernel01_tm = kernel_tm.channel(p / 8); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 7 < tiles; i += 8) { const short* r0 = bb2.row<const short>(i / 8); const short* kptr = kernel01_tm.row<const short>(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n" "smlal v16.4s, v8.4h, v0.h[0] \n" "smlal2 v17.4s, v8.8h, v0.h[0] \n" "smlal v18.4s, v8.4h, v0.h[1] \n" "smlal2 v19.4s, v8.8h, v0.h[1] \n" 
"smlal v20.4s, v8.4h, v0.h[2] \n" "smlal2 v21.4s, v8.8h, v0.h[2] \n" "smlal v22.4s, v8.4h, v0.h[3] \n" "smlal2 v23.4s, v8.8h, v0.h[3] \n" "smlal v24.4s, v8.4h, v0.h[4] \n" "smlal2 v25.4s, v8.8h, v0.h[4] \n" "smlal v26.4s, v8.4h, v0.h[5] \n" "smlal2 v27.4s, v8.8h, v0.h[5] \n" "smlal v28.4s, v8.4h, v0.h[6] \n" "smlal2 v29.4s, v8.8h, v0.h[6] \n" "smlal v30.4s, v8.4h, v0.h[7] \n" "smlal2 v31.4s, v8.8h, v0.h[7] \n" "smlal v16.4s, v9.4h, v1.h[0] \n" "smlal2 v17.4s, v9.8h, v1.h[0] \n" "smlal v18.4s, v9.4h, v1.h[1] \n" "smlal2 v19.4s, v9.8h, v1.h[1] \n" "smlal v20.4s, v9.4h, v1.h[2] \n" "smlal2 v21.4s, v9.8h, v1.h[2] \n" "smlal v22.4s, v9.4h, v1.h[3] \n" "smlal2 v23.4s, v9.8h, v1.h[3] \n" "smlal v24.4s, v9.4h, v1.h[4] \n" "smlal2 v25.4s, v9.8h, v1.h[4] \n" "smlal v26.4s, v9.4h, v1.h[5] \n" "smlal2 v27.4s, v9.8h, v1.h[5] \n" "smlal v28.4s, v9.4h, v1.h[6] \n" "smlal2 v29.4s, v9.8h, v1.h[6] \n" "smlal v30.4s, v9.4h, v1.h[7] \n" "smlal2 v31.4s, v9.8h, v1.h[7] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%9], #64 \n" "smlal v16.4s, v10.4h, v2.h[0] \n" "smlal2 v17.4s, v10.8h, v2.h[0] \n" "smlal v18.4s, v10.4h, v2.h[1] \n" "smlal2 v19.4s, v10.8h, v2.h[1] \n" "smlal v20.4s, v10.4h, v2.h[2] \n" "smlal2 v21.4s, v10.8h, v2.h[2] \n" "smlal v22.4s, v10.4h, v2.h[3] \n" "smlal2 v23.4s, v10.8h, v2.h[3] \n" "smlal v24.4s, v10.4h, v2.h[4] \n" "smlal2 v25.4s, v10.8h, v2.h[4] \n" "smlal v26.4s, v10.4h, v2.h[5] \n" "smlal2 v27.4s, v10.8h, v2.h[5] \n" "smlal v28.4s, v10.4h, v2.h[6] \n" "smlal2 v29.4s, v10.8h, v2.h[6] \n" "smlal v30.4s, v10.4h, v2.h[7] \n" "smlal2 v31.4s, v10.8h, v2.h[7] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n" "smlal v16.4s, v11.4h, v3.h[0] \n" "smlal2 v17.4s, v11.8h, v3.h[0] \n" "smlal v18.4s, v11.4h, v3.h[1] \n" "smlal2 v19.4s, v11.8h, v3.h[1] \n" "smlal v20.4s, v11.4h, v3.h[2] \n" "smlal2 v21.4s, v11.8h, v3.h[2] \n" "smlal v22.4s, v11.4h, v3.h[3] \n" "smlal2 v23.4s, v11.8h, v3.h[3] \n" "smlal 
v24.4s, v11.4h, v3.h[4] \n" "smlal2 v25.4s, v11.8h, v3.h[4] \n" "smlal v26.4s, v11.4h, v3.h[5] \n" "smlal2 v27.4s, v11.8h, v3.h[5] \n" "smlal v28.4s, v11.4h, v3.h[6] \n" "smlal2 v29.4s, v11.8h, v3.h[6] \n" "smlal v30.4s, v11.4h, v3.h[7] \n" "smlal2 v31.4s, v11.8h, v3.h[7] \n" "smlal v16.4s, v12.4h, v4.h[0] \n" "smlal2 v17.4s, v12.8h, v4.h[0] \n" "smlal v18.4s, v12.4h, v4.h[1] \n" "smlal2 v19.4s, v12.8h, v4.h[1] \n" "smlal v20.4s, v12.4h, v4.h[2] \n" "smlal2 v21.4s, v12.8h, v4.h[2] \n" "smlal v22.4s, v12.4h, v4.h[3] \n" "smlal2 v23.4s, v12.8h, v4.h[3] \n" "smlal v24.4s, v12.4h, v4.h[4] \n" "smlal2 v25.4s, v12.8h, v4.h[4] \n" "smlal v26.4s, v12.4h, v4.h[5] \n" "smlal2 v27.4s, v12.8h, v4.h[5] \n" "smlal v28.4s, v12.4h, v4.h[6] \n" "smlal2 v29.4s, v12.8h, v4.h[6] \n" "smlal v30.4s, v12.4h, v4.h[7] \n" "smlal2 v31.4s, v12.8h, v4.h[7] \n" "smlal v16.4s, v13.4h, v5.h[0] \n" "smlal2 v17.4s, v13.8h, v5.h[0] \n" "smlal v18.4s, v13.4h, v5.h[1] \n" "smlal2 v19.4s, v13.8h, v5.h[1] \n" "smlal v20.4s, v13.4h, v5.h[2] \n" "smlal2 v21.4s, v13.8h, v5.h[2] \n" "smlal v22.4s, v13.4h, v5.h[3] \n" "smlal2 v23.4s, v13.8h, v5.h[3] \n" "smlal v24.4s, v13.4h, v5.h[4] \n" "smlal2 v25.4s, v13.8h, v5.h[4] \n" "smlal v26.4s, v13.4h, v5.h[5] \n" "smlal2 v27.4s, v13.8h, v5.h[5] \n" "smlal v28.4s, v13.4h, v5.h[6] \n" "smlal2 v29.4s, v13.8h, v5.h[6] \n" "smlal v30.4s, v13.4h, v5.h[7] \n" "smlal2 v31.4s, v13.8h, v5.h[7] \n" "smlal v16.4s, v14.4h, v6.h[0] \n" "smlal2 v17.4s, v14.8h, v6.h[0] \n" "smlal v18.4s, v14.4h, v6.h[1] \n" "smlal2 v19.4s, v14.8h, v6.h[1] \n" "smlal v20.4s, v14.4h, v6.h[2] \n" "smlal2 v21.4s, v14.8h, v6.h[2] \n" "smlal v22.4s, v14.4h, v6.h[3] \n" "smlal2 v23.4s, v14.8h, v6.h[3] \n" "smlal v24.4s, v14.4h, v6.h[4] \n" "smlal2 v25.4s, v14.8h, v6.h[4] \n" "smlal v26.4s, v14.4h, v6.h[5] \n" "smlal2 v27.4s, v14.8h, v6.h[5] \n" "smlal v28.4s, v14.4h, v6.h[6] \n" "smlal2 v29.4s, v14.8h, v6.h[6] \n" "smlal v30.4s, v14.4h, v6.h[7] \n" "smlal2 v31.4s, v14.8h, v6.h[7] \n" "subs %w0, %w0, #1 
\n" "smlal v16.4s, v15.4h, v7.h[0] \n" "smlal2 v17.4s, v15.8h, v7.h[0] \n" "smlal v18.4s, v15.4h, v7.h[1] \n" "smlal2 v19.4s, v15.8h, v7.h[1] \n" "smlal v20.4s, v15.4h, v7.h[2] \n" "smlal2 v21.4s, v15.8h, v7.h[2] \n" "smlal v22.4s, v15.4h, v7.h[3] \n" "smlal2 v23.4s, v15.8h, v7.h[3] \n" "smlal v24.4s, v15.4h, v7.h[4] \n" "smlal2 v25.4s, v15.8h, v7.h[4] \n" "smlal v26.4s, v15.4h, v7.h[5] \n" "smlal2 v27.4s, v15.8h, v7.h[5] \n" "smlal v28.4s, v15.4h, v7.h[6] \n" "smlal2 v29.4s, v15.8h, v7.h[6] \n" "smlal v30.4s, v15.4h, v7.h[7] \n" "smlal2 v31.4s, v15.8h, v7.h[7] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" "st1 {v20.4s, v21.4s}, [%3], #32 \n" "st1 {v22.4s, v23.4s}, [%4], #32 \n" "st1 {v24.4s, v25.4s}, [%5], #32 \n" "st1 {v26.4s, v27.4s}, [%6], #32 \n" "st1 {v28.4s, v29.4s}, [%7], #32 \n" "st1 {v30.4s, v31.4s}, [%8], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } #endif for (; i + 3 < tiles; i += 4) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4); #else const short* r0 = bb2.row<const short>(i / 4); #endif const short* k0 = kernel01_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); 
int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _val2 = vld1q_s16(r0 + 16); int16x8_t _val3 = vld1q_s16(r0 + 24); int16x8_t _w0 = vld1q_s16(k0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val0), vget_low_s16(_w0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val0), vget_low_s16(_w0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val0), vget_low_s16(_w0), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val0), vget_low_s16(_w0), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val0), vget_high_s16(_w0), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val0), vget_high_s16(_w0), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val0), vget_high_s16(_w0), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val0), vget_high_s16(_w0), 3); int16x8_t _w1 = vld1q_s16(k0 + 8); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val0), vget_low_s16(_w1), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val0), vget_low_s16(_w1), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val0), vget_low_s16(_w1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val0), vget_low_s16(_w1), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val0), vget_high_s16(_w1), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val0), vget_high_s16(_w1), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val0), vget_high_s16(_w1), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val0), vget_high_s16(_w1), 3); int16x8_t _w2 = vld1q_s16(k0 + 16); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val1), vget_low_s16(_w2), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val1), vget_low_s16(_w2), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val1), vget_low_s16(_w2), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val1), vget_low_s16(_w2), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val1), vget_high_s16(_w2), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val1), vget_high_s16(_w2), 1); _sum6 
= vmlal_lane_s16(_sum6, vget_low_s16(_val1), vget_high_s16(_w2), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val1), vget_high_s16(_w2), 3); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val1), vget_low_s16(_w3), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val1), vget_low_s16(_w3), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val1), vget_low_s16(_w3), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val1), vget_low_s16(_w3), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val1), vget_high_s16(_w3), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val1), vget_high_s16(_w3), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val1), vget_high_s16(_w3), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val1), vget_high_s16(_w3), 3); int16x8_t _w4 = vld1q_s16(k0 + 32); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val2), vget_low_s16(_w4), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val2), vget_low_s16(_w4), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val2), vget_low_s16(_w4), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val2), vget_low_s16(_w4), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val2), vget_high_s16(_w4), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val2), vget_high_s16(_w4), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val2), vget_high_s16(_w4), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val2), vget_high_s16(_w4), 3); int16x8_t _w5 = vld1q_s16(k0 + 40); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val2), vget_low_s16(_w5), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val2), vget_low_s16(_w5), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val2), vget_low_s16(_w5), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val2), vget_low_s16(_w5), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val2), vget_high_s16(_w5), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val2), vget_high_s16(_w5), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val2), vget_high_s16(_w5), 2); _sum7 = 
vmlal_lane_s16(_sum7, vget_high_s16(_val2), vget_high_s16(_w5), 3); int16x8_t _w6 = vld1q_s16(k0 + 48); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val3), vget_low_s16(_w6), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val3), vget_low_s16(_w6), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val3), vget_low_s16(_w6), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val3), vget_low_s16(_w6), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val3), vget_high_s16(_w6), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val3), vget_high_s16(_w6), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val3), vget_high_s16(_w6), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val3), vget_high_s16(_w6), 3); int16x8_t _w7 = vld1q_s16(k0 + 56); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val3), vget_low_s16(_w7), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val3), vget_low_s16(_w7), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val3), vget_low_s16(_w7), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val3), vget_low_s16(_w7), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val3), vget_high_s16(_w7), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val3), vget_high_s16(_w7), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val3), vget_high_s16(_w7), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val3), vget_high_s16(_w7), 3); r0 += 32; k0 += 64; } vst1q_s32(output0_tm, _sum0); vst1q_s32(output1_tm, _sum1); vst1q_s32(output2_tm, _sum2); vst1q_s32(output3_tm, _sum3); vst1q_s32(output4_tm, _sum4); vst1q_s32(output5_tm, _sum5); vst1q_s32(output6_tm, _sum6); vst1q_s32(output7_tm, _sum7); output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; output4_tm += 4; output5_tm += 4; output6_tm += 4; output7_tm += 4; } for (; i < tiles; i++) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4 + i % 4); #else const short* r0 = bb2.row<const short>(i / 4 + i % 4); #endif const short* k0 = kernel01_tm.row<const short>(r); int nn = inch; // inch always 
> 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _w0 = vld1q_s16(k0); int16x8_t _w1 = vld1q_s16(k0 + 8); int16x8_t _w2 = vld1q_s16(k0 + 16); int16x8_t _w3 = vld1q_s16(k0 + 24); int16x8_t _w4 = vld1q_s16(k0 + 32); int16x8_t _w5 = vld1q_s16(k0 + 40); int16x8_t _w6 = vld1q_s16(k0 + 48); int16x8_t _w7 = vld1q_s16(k0 + 56); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val0), 2); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3); r0 += 8; k0 += 64; } output0_tm[0] = vgetq_lane_s32(_sum0, 0); output1_tm[0] = vgetq_lane_s32(_sum0, 1); output2_tm[0] = vgetq_lane_s32(_sum0, 2); output3_tm[0] = vgetq_lane_s32(_sum0, 3); output4_tm[0] = vgetq_lane_s32(_sum1, 0); output5_tm[0] = vgetq_lane_s32(_sum1, 1); output6_tm[0] = vgetq_lane_s32(_sum1, 2); output7_tm[0] = vgetq_lane_s32(_sum1, 3); output0_tm += 1; 
output1_tm += 1; output2_tm += 1; output3_tm += 1; output4_tm += 1; output5_tm += 1; output6_tm += 1; output7_tm += 1; } } } remain_outch_start += nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 8 + p % 8); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 7 < tiles; i += 8) { const short* r0 = bb2.row<const short>(i / 8); const short* kptr = kernel0_tm.row<const short>(r); int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int q = 0; q < inch; q++) { int16x8_t _r0 = vld1q_s16(r0); int16x8_t _r1 = vld1q_s16(r0 + 8); int16x8_t _r2 = vld1q_s16(r0 + 16); int16x8_t _r3 = vld1q_s16(r0 + 24); int16x8_t _r4 = vld1q_s16(r0 + 32); int16x8_t _r5 = vld1q_s16(r0 + 40); int16x8_t _r6 = vld1q_s16(r0 + 48); int16x8_t _r7 = vld1q_s16(r0 + 56); int16x8_t _k0 = vld1q_s16(kptr); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r0), vget_low_s16(_k0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r0), vget_low_s16(_k0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r1), vget_low_s16(_k0), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r1), vget_low_s16(_k0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r2), vget_low_s16(_k0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r2), vget_low_s16(_k0), 2); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r3), vget_low_s16(_k0), 3); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r3), vget_low_s16(_k0), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r4), vget_high_s16(_k0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r4), vget_high_s16(_k0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r5), vget_high_s16(_k0), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r5), vget_high_s16(_k0), 1); _sum0 = vmlal_lane_s16(_sum0, 
vget_low_s16(_r6), vget_high_s16(_k0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r6), vget_high_s16(_k0), 2); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r7), vget_high_s16(_k0), 3); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r7), vget_high_s16(_k0), 3); kptr += 8; r0 += 64; } _sum0 = vaddq_s32(_sum0, _sum2); _sum1 = vaddq_s32(_sum1, _sum3); vst1q_s32(output0_tm, _sum0); vst1q_s32(output0_tm + 4, _sum1); output0_tm += 8; } #endif for (; i + 3 < tiles; i += 4) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4); #else const short* r0 = bb2.row<const short>(i / 4); #endif const short* kptr = kernel0_tm.row<const short>(r); int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int q = 0; q < inch; q++) { int16x8_t _r0 = vld1q_s16(r0); int16x8_t _r1 = vld1q_s16(r0 + 8); int16x8_t _r2 = vld1q_s16(r0 + 16); int16x8_t _r3 = vld1q_s16(r0 + 24); int16x8_t _k0 = vld1q_s16(kptr); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r0), vget_low_s16(_k0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r0), vget_low_s16(_k0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r1), vget_low_s16(_k0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r1), vget_low_s16(_k0), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r2), vget_high_s16(_k0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r2), vget_high_s16(_k0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r3), vget_high_s16(_k0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r3), vget_high_s16(_k0), 3); kptr += 8; r0 += 32; } int32x4_t _sum01 = vaddq_s32(_sum0, _sum1); vst1q_s32(output0_tm, _sum01); output0_tm += 4; } for (; i < tiles; i++) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4 + i % 4); #else const short* r0 = bb2.row<const short>(i / 4 + i % 4); #endif const short* kptr = kernel0_tm.row<const short>(r); int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int q = 0; q < inch; q++) { int16x8_t _r0 = vld1q_s16(r0); 
int16x8_t _k0 = vld1q_s16(kptr); _sum0 = vmlal_s16(_sum0, vget_low_s16(_r0), vget_low_s16(_k0)); _sum1 = vmlal_s16(_sum1, vget_high_s16(_r0), vget_high_s16(_k0)); kptr += 8; r0 += 8; } int32x4_t _sum = vaddq_s32(_sum0, _sum1); #if __aarch64__ int sum = vaddvq_s32(_sum); // dot #else int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum)); _ss = vpadd_s32(_ss, _ss); int sum = vget_lane_s32(_ss, 0); #endif output0_tm[0] = sum; output0_tm++; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); int tmp[4][6]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator); const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 1; const int* output0_tm_1 = output0_tm_0 + tiles * 1; const int* output0_tm_2 = output0_tm_0 + tiles * 2; const int* output0_tm_3 = output0_tm_0 + tiles * 3; const int* output0_tm_4 = output0_tm_0 + tiles * 4; const int* output0_tm_5 = output0_tm_0 + tiles * 5; int* output0 = out0.row<int>(i * 4) + j * 4; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = 
r05 + (r01 - r02) + (r03 - r04) * 8 // TODO neon optimize for (int m = 0; m < 5; m++) { int tmp02a = output0_tm_1[0] + output0_tm_2[0]; int tmp13a = output0_tm_1[0] - output0_tm_2[0]; int tmp02b = output0_tm_3[0] + output0_tm_4[0]; int tmp13b = output0_tm_3[0] - output0_tm_4[0]; tmp[0][m] = output0_tm_0[0] + tmp02a + tmp02b; tmp[1][m] = tmp13a + tmp13b * 2; tmp[2][m] = tmp02a + tmp02b * 4; tmp[3][m] = output0_tm_5[0] * 4 + tmp13a + tmp13b * 8; output0_tm_0 += tiles * 6; output0_tm_1 += tiles * 6; output0_tm_2 += tiles * 6; output0_tm_3 += tiles * 6; output0_tm_4 += tiles * 6; output0_tm_5 += tiles * 6; } for (int m = 5; m < 6; m++) { int tmp02a = output0_tm_1[0] + output0_tm_2[0]; int tmp13a = output0_tm_1[0] - output0_tm_2[0]; int tmp02b = output0_tm_3[0] + output0_tm_4[0]; int tmp13b = output0_tm_3[0] - output0_tm_4[0]; tmp[0][m] = (output0_tm_0[0] + tmp02a + tmp02b) * 4; tmp[1][m] = (tmp13a + tmp13b * 2) * 4; tmp[2][m] = (tmp02a + tmp02b * 4) * 4; tmp[3][m] = (output0_tm_5[0] * 4 + tmp13a + tmp13b * 8) * 4; output0_tm_0 += tiles * 6; output0_tm_1 += tiles * 6; output0_tm_2 += tiles * 6; output0_tm_3 += tiles * 6; output0_tm_4 += tiles * 6; output0_tm_5 += tiles * 6; } for (int m = 0; m < 4; m++) { const int* tmp0 = tmp[m]; int tmp02a = tmp0[1] + tmp0[2]; int tmp13a = tmp0[1] - tmp0[2]; int tmp02b = tmp0[3] + tmp0[4]; int tmp13b = tmp0[3] - tmp0[4]; output0[0] = (tmp0[0] + tmp02a + tmp02b) / 576; output0[1] = (tmp13a + tmp13b * 2) / 576; output0[2] = (tmp02a + tmp02b * 4) / 576; output0[3] = (tmp0[5] + tmp13a + tmp13b * 8) / 576; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
GB_unaryop__minv_int32_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_int32_bool
// op(A') function: GB_tran__minv_int32_bool

// C type: int32_t
// A type: bool
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 32)

// type of the A matrix entries
#define GB_ATYPE \
    bool

// type of the C matrix entries
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// access the pth entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator: signed integer "multiplicative inverse" (MINV) on 32 bits
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 32) ;

// casting: typecast a bool entry of A to the int32_t type of C
#define GB_CASTING(z, x) \
    int32_t z = (int32_t) x ;

// cij = op (cast (aij)): fetch, typecast, then apply the unary operator
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the unary operator entrywise to the anz entries of Ax, writing the
// results to Cx.  The loop is embarrassingly parallel; work is split
// statically across nthreads OpenMP threads.  Returns GrB_NO_VALUE when the
// operator/type combination is disabled at compile time (GB_DISABLE), in
// which case the caller falls back to the generic kernel.
GrB_Info GB_unop__minv_int32_bool
(
    int32_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transpose variant: the actual loop body lives in the shared template
// GB_unaryop_transpose.c, specialized here via the GB_* macros above.
// Rowcounts/Iter/A_slice/naslice carry the precomputed slicing of A across
// naslice tasks (see the template for details).
GrB_Info GB_tran__minv_int32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
learntopicmat.h
#ifndef _LEARNTOPICMAT_H // Include guard
#define _LEARNTOPICMAT_H

#include <iostream> // cout,cerr,etc.
#include <stdio.h> // printf, etc.
#include <stdexcept> // Standard exceptions
#include <omp.h>
#include <cmath>
// Eigen
#include <Eigen/Dense>
#include <Eigen/SparseCore>
#include "utils.h"
// NOTE(review): `assert` and `lgamma` are used below but <cassert> is not
// included here — presumably pulled in via "utils.h"; confirm.

typedef Eigen::VectorXd VecType;
typedef Eigen::MatrixXd MatType;

// Lightweight view over the parameters of one topic model.  Holds only
// references: thetaNode (per-word node parameters), thetaEdge (sparse edge
// parameter matrix), plus precomputed per-length tables _logP and _modL that
// are looked up by total count L in logP()/modL().  The caller owns all four
// and must keep them alive for the lifetime of this object.
template<typename ThetaEdgeType>
struct LPMRF
{
private:
    const VecType& _logP; // precomputed log-partition-like table, indexed by L
    const VecType& _modL; // precomputed per-length modifier table, indexed by L

public:
    const VecType& thetaNode;
    const ThetaEdgeType& thetaEdge;

    LPMRF(const VecType& thetaNode, const ThetaEdgeType& thetaEdge,
          const VecType& logP, const VecType& modL)
        : thetaNode(thetaNode), thetaEdge(thetaEdge), _logP(logP), _modL(modL) {}

    // Needed because of reference fields
    // NOTE(review): this "assignment" returns a fresh copy *by value* and
    // leaves *this unmodified — it does not actually assign.  It only exists
    // so containers of LPMRF compile; verify no caller relies on real
    // assignment semantics.
    LPMRF operator=(const LPMRF& other)
    {
        return LPMRF(other.thetaNode, other.thetaEdge, other._logP, other._modL);
    }

    // Table lookups by total count L (no bounds checking — assumes L is
    // within the precomputed table range; TODO confirm).
    double logP(const size_t L) const { return _logP(L); }
    double modL(const size_t L) const { return _modL(L); }
};

// Dual coordinate-descent step engine for one document's topic-count matrix.
// ZtFilt is the k x pFilt dense matrix of per-topic counts restricted to the
// nonzero columns listed in filtIdx (filtIdx maps filtered column index ->
// original column index in Zt).  The class maintains two incremental terms so
// each candidate step can be scored in O(1):
//   quadTerm(j)        = z_j' * thetaEdge{j} * z_j
//   crossTerm(j, s)    = z_j' * thetaEdge{j}(:, s)   (restricted to filtIdx)
template<typename ZType, template<typename> class ProbModel, typename ThetaEdgeType, typename PriorType>
class DualCoordinateStep
{
private:
    const std::vector< ProbModel<ThetaEdgeType> >& modelArray; // one model per topic
    const PriorType& prior; // currently unused in stepDiff (negPrior == 0, see TODO)
    const std::vector<size_t>& filtIdx; // filtered column -> original column

    // Maintenance variables
    VecType quadTerm; // Quad term (i.e. z_j'*thetaEdge{j}*z_j)
    MatType crossTerm; // Cross product term for step (i.e. z_j'*thetaEdge{j}*e_s = z_j'*thetaEdge{j}(:,s)
    //std::vector< MatType > thetaEdgeFiltArray;

public:
    // Lmax is accepted but not used by the constructor or any member below —
    // NOTE(review): confirm whether it is vestigial.
    DualCoordinateStep(const std::vector< ProbModel<ThetaEdgeType> >& modelArray,
                       const PriorType& prior, const ZType& ZtFilt,
                       const std::vector<size_t>& filtIdx, const size_t Lmax)
        : modelArray(modelArray), prior(prior), filtIdx(filtIdx)
    {
        initMaintenanceVars(ZtFilt);
    }

    // Build quadTerm and crossTerm from scratch for the given ZtFilt.
    // The inner merge walks the sparse column thetaEdge(:, filtIdx[sFilt])
    // and filtIdx in lockstep, relying on both being sorted ascending by row
    // index (Eigen InnerIterator yields rows in order for a column-major
    // sparse matrix — assumed here; TODO confirm storage order).
    void initMaintenanceVars(const ZType& ZtFilt)
    {
        // Initialize maintenance variables
        size_t k = modelArray.size();
        size_t pFilt = filtIdx.size();

        // Compute cross term thetaEdge[j](filt)*ZtFilt
        // Hard to decide but updating is probably less frequent and probably
        // needs to load a lot in memory since ell and m might be far apart
        crossTerm = MatType::Zero(k, pFilt);
        for(size_t j = 0; j < k; ++j) {
            for(size_t sFilt = 0; sFilt < pFilt; ++sFilt) {
                size_t sFilt2 = 0;
                for(typename ThetaEdgeType::InnerIterator it(modelArray[j].thetaEdge, filtIdx[sFilt]); it; ++it) {
                    // Advance sFilt2 until filtIdx[sFilt2] catches up to the
                    // current sparse row; skip entries whose row is not in
                    // the filtered set.
                    while(sFilt2 < pFilt && it.row() > filtIdx[sFilt2]) ++sFilt2;
                    if(sFilt2 >= pFilt) break;
                    if(it.row() == filtIdx[sFilt2])
                        crossTerm(j,sFilt) += ZtFilt(j,sFilt2)*it.value();
                }
            }
        }

        // Compute quadratic term
        quadTerm = VecType::Zero(k, 1);
        for(size_t j = 0; j < k; ++j) {
            quadTerm(j) = ZtFilt.row(j).dot( crossTerm.row(j) );
        }
    }

    // Update for a particular step: try every integer transfer `step` of word
    // rFilt between topic rows ell and m (step > 0 moves mass from m to ell),
    // keep the one minimizing stepDiff, apply it to ZtFilt, and incrementally
    // patch quadTerm/crossTerm.  Returns bestDiff - diff0, i.e. the objective
    // change relative to not moving (0 if the best step is no movement).
    double update(const size_t rFilt, const size_t ell, const size_t m, ZType& ZtFilt)
    {
        // Check different sizes (a)
        double diff, diff0, bestDiff = 1e100;
        int bestStep;
        // NOTE(review): bestStep and diff0 are only assigned inside this loop.
        // That is safe only if the range always contains step == 0, i.e. the
        // counts in ZtFilt are nonnegative — TODO confirm that invariant.
        for(int step = -ZtFilt(ell,rFilt); step <= ZtFilt(m,rFilt); ++step) {
            diff = stepDiff( step, rFilt, ell, m, ZtFilt );
            if(step == 0) diff0 = diff;
            // Track minimum
            if(diff < bestDiff) { bestDiff = diff; bestStep = step; }
        }

        // If best step is non-zero, then update Zt and update maintenance variables
        if(bestStep != 0) {
            double bestStepD = bestStep;
            //std::cout << " Moving " << bestStep << " to coord " << ell+1 << " from coord " << m+1 << std::endl;
            // NOTE(review): ZtFiltBefore/quadTermBefore/crossTermBefore below
            // are copied but never read — they look like leftover debugging
            // state and cost a full matrix copy per accepted step.
            MatType ZtFiltBefore = ZtFilt;

            // Update ZtFilt
            ZtFilt(ell,rFilt) += bestStep;
            ZtFilt(m,rFilt) -= bestStep;

            VecType quadTermBefore = quadTerm; // Quad term (i.e. z_j'*thetaEdge{j}*z_j)
            MatType crossTermBefore = crossTerm; // Cross product term for step (i.e. z_j'*thetaEdge{j}*e_s = z_j'*thetaEdge{j}(:,s)

            // Update maintenance variables (rank-1 corrections; only rows
            // ell and m changed, by +/- bestStep in column rFilt)
            quadTerm(ell) += 2*bestStepD*crossTerm(ell, rFilt);
            quadTerm(m) -= 2*bestStepD*crossTerm(m, rFilt);

            // Update crossTerm: add/subtract bestStep * thetaEdge(:, r)
            // restricted to the filtered columns (same sorted-merge pattern
            // as initMaintenanceVars).
            size_t r = filtIdx[rFilt];
            size_t pFilt = filtIdx.size();
            size_t sFilt = 0;
            for(typename ThetaEdgeType::InnerIterator it(modelArray[ell].thetaEdge, r); it; ++it) {
                while(sFilt < pFilt && it.row() > filtIdx[sFilt]) ++sFilt;
                if(sFilt >= pFilt) break;
                if(it.row() == filtIdx[sFilt])
                    crossTerm(ell,sFilt) += bestStepD*it.value();
            }
            sFilt = 0;
            for(typename ThetaEdgeType::InnerIterator it(modelArray[m].thetaEdge, r); it; ++it) {
                while(sFilt < pFilt && it.row() > filtIdx[sFilt]) ++sFilt;
                if(sFilt >= pFilt) break;
                if(it.row() == filtIdx[sFilt])
                    crossTerm(m,sFilt) -= bestStepD*it.value();
            }
        }

        double bestStepDiff = bestDiff - diff0;
        if( bestStep == 0 ) { bestStepDiff = 0; }
        return bestStepDiff;
    }

protected:
    // Compute step difference from moving a words from ell to m of word r
    // (note: as written, positive `a` *increases* row ell and decreases row
    // m — the comment's direction appears reversed relative to the code).
    // Scores the candidate objective: quadratic edge term + linear node term
    // + log-factorial (lgamma) multinomial term + length log-probability.
    double stepDiff(const int a, const size_t rFilt, const size_t ell, const size_t m, const ZType& ZtFilt) const
    {
        // Setup some variables
        VecType ZsumNew = ZtFilt.rowwise().sum();
        ZsumNew(ell) += a;
        ZsumNew(m) -= a;
        double modLell = modelArray[ell].modL( ZsumNew(ell) );
        double modLm = modelArray[m].modL( ZsumNew(m) );
        double logPell = modelArray[ell].logP( ZsumNew(ell) );
        double logPm = modelArray[m].logP( ZsumNew(m) );
        size_t r = filtIdx[rFilt];

        // Compute each term
        double quad = -( modLell*(quadTerm(ell) + 2*a*crossTerm(ell, rFilt)) + modLm*(quadTerm(m) - 2*a*crossTerm(m, rFilt)) );
        double lin = -a*( modelArray[ell].thetaNode(r) - modelArray[m].thetaNode(r) );
        double bm = lgamma( ZtFilt(ell,rFilt)+a+1 ) + lgamma( ZtFilt(m,rFilt)-a+1 );
        double logP = logPell + logPm;
        double negPrior = 0; //TODO implement prior specification
        double stepDiff = lin + quad + bm + logP + negPrior;
        /*
        std::cout.precision(5);
        std::cout << "step = " << a;
        std::cout << " lin = " << std::fixed << lin;
        std::cout << " quad = " << std::fixed << quad;
        std::cout << " bm = " << std::fixed << bm;
        std::cout << " logP = " << std::fixed << logP;
        std::cout << " negPrior = " << std::fixed << negPrior;
        std::cout << " stepDiff = " << std::fixed << stepDiff;
        std::cout << std::endl;
        */
        return stepDiff;
    }
};

// Optimize one sparse topic-count matrix Zt (k topics x vocabulary) in place
// by repeated pairwise dual coordinate steps over its nonzero columns.
// Returns the accumulated objective change (cumStepDiff, expected <= 0 since
// each accepted step minimizes stepDiff relative to no movement).
template<typename ZType, template<typename> class ProbModel, typename ThetaEdgeType, typename PriorType>
double learntopicmat( const std::vector< ProbModel< ThetaEdgeType > >& modelArray, const PriorType& prior, ZType& Zt )
{
    size_t k = modelArray.size();
    assert( k == Zt.rows() && "Zt is not the right size");

    // Convert Zt to MatType Zt only on non-zero rows
    // NOTE(review): `size_t Lmax = -1` wraps to SIZE_MAX, so the comparison
    // `ceil(sum) > Lmax` below can never be true and Lmax stays SIZE_MAX.
    // Lmax is only forwarded to DualCoordinateStep (which ignores it), so
    // this is currently harmless — but confirm the intent.
    size_t Lmax = -1;
    std::vector<size_t> filtIdx;
    for(size_t c = 0; c < Zt.outerSize(); ++c) {
        double sum = 0;
        for (typename ZType::InnerIterator it(Zt,c); it; ++it) { sum += it.value(); }
        if(sum != 0) filtIdx.push_back(c);
        if(ceil(sum) > Lmax) Lmax = ceil(sum);
    }
    // Densify the surviving columns into ZtFilt (k x |filtIdx|).
    MatType ZtFilt( k, filtIdx.size() );
    for(size_t i = 0; i < filtIdx.size(); ++i)
        ZtFilt.col(i) = Zt.col( filtIdx[i] );

    // Setup dual coordinate step object
    DualCoordinateStep<MatType, ProbModel, ThetaEdgeType, PriorType> dualStep(modelArray, prior, ZtFilt, filtIdx, Lmax);

    // Bunch of loops: outer sweeps x columns x inner sweeps x all (ell, m)
    // topic pairs; stop early once a full pass produces no movement.
    size_t outerMaxIter = 10, innerMaxIter = 2;
    double cumStepDiff = 0;
    for(size_t outerIter = 0; outerIter < outerMaxIter; ++outerIter) {
        bool outerNoMovement = true;
        // Loop over all nonzero columns of Zt
        for(size_t sFilt = 0; sFilt < filtIdx.size(); ++sFilt) {
            //varprint(filtIdx[sFilt])
            for(size_t innerIter = 0; innerIter < innerMaxIter; ++innerIter) {
                bool noMovement = true;
                for(size_t ell = 0; ell < k; ++ell) {
                    for(size_t m = ell+1; m < k; ++m) {
                        //std::cout << outerIter << " " << innerIter << " " << ell << " " << m << std::endl;
                        // Nothing to move between two all-zero rows.
                        if(ZtFilt(ell, sFilt) == 0 && ZtFilt(m, sFilt) == 0) continue;
                        double stepDiff = dualStep.update(sFilt, ell, m, ZtFilt);
                        // Debug guard: a tiny positive diff would indicate a
                        // numerical inconsistency (steps should not increase
                        // the objective).
                        if(stepDiff > 0 && stepDiff < 1e-100) { std::cout << stepDiff << std::endl; }
                        cumStepDiff += stepDiff;
                        bool movement = (stepDiff != 0);
                        noMovement = noMovement && !movement ;
                    }
                }
                //std::cout << " Completed inner iter " << innerIter << std::endl;
                outerNoMovement = outerNoMovement && noMovement;
                if(noMovement) break;
            }
        }
        //std::cout << " Completed outer iter " << outerIter << std::endl;
        if(outerNoMovement) break;
    }
    //std::cout << "Done with iterations, saving output" << std::endl;

    // Extract results and update Zt: rebuild the sparse matrix from the
    // optimized dense columns, mapping filtered indices back to originals.
    std::vector< Eigen::Triplet<double> > triplets;
    for(size_t rFilt = 0; rFilt < ZtFilt.cols(); ++rFilt)
        for(size_t j = 0; j < ZtFilt.rows(); ++j)
            if(ZtFilt(j,rFilt) != 0) {
                triplets.push_back( Eigen::Triplet<double>(j, filtIdx[rFilt], ZtFilt(j, rFilt)) );
            }
    Zt.setFromTriplets( triplets.begin(), triplets.end() );
    //std::cout << "From returning function" << cumStepDiff << std::endl;
    return cumStepDiff;
}

// For learning all the topic matrices by simply looping through them.
// Each ZtArray[i] is optimized independently, so the loop parallelizes over
// documents with OpenMP.  modelArray/prior are read-only shared state.
template<typename ZType, template<typename> class ProbModel, typename ThetaEdgeType, typename PriorType>
void learnalltopicmats(const std::vector< ProbModel<ThetaEdgeType> >& modelArray, const PriorType& prior, std::vector<ZType>& ZtArray)
{
    size_t n = ZtArray.size();
    // NOTE(review): variable-length array — a GCC/Clang extension, not
    // standard C++; threadNums is also only written, never read outside the
    // commented-out debug dump below.
    int threadNums[n];

    // Run parallel loop
    #pragma omp parallel for
    for(size_t i = 0; i < n; ++i) {
        double cumStepDiff = learntopicmat( modelArray, prior, ZtArray[i] );
        threadNums[i] = omp_get_thread_num();
        if(cumStepDiff < 0) {
            //std::cout.precision(10);
            //std::cout << " i = " << i+1 << "thread_id = " << omp_get_thread_num() << ", cumStepDiff = " << std::fixed << cumStepDiff << std::endl;
        }
        // A positive total would mean the objective got worse — treated as a
        // (currently silent) error condition.
        if(cumStepDiff > 0) {
            //std::cout << "Error? i = " << i << ", cumStepDiff = " << cumStepDiff << std::endl;
            //exit(1);
        }
    }
    /*
    for(size_t i = 0; i < n; ++i) {
        std::cout << "Num thread = " << threadNums[i] << std::endl;
    }
    */
}

#endif
omp_test.c
#include <stdio.h>
#include <omp.h>

/*
 * Minimal OpenMP smoke test: open a parallel region and let every thread
 * in the team print the same greeting once.  If OpenMP is disabled, the
 * pragma is ignored and the message is printed exactly once.
 */
int main(void)
{
#pragma omp parallel
    {
        /* Executed independently by each thread in the team. */
        printf("Hello World! \n");
    }

    return 0;
}
pooling_2x2.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// Max pooling with a 2x2 kernel and stride 2 (no padding handling here —
// the caller is expected to have sized top_blob so that 2*outw <= w).
// Channels are processed independently in parallel; within a row pair the
// NEON path produces 4 outputs per iteration, with a scalar tail loop for
// the remainder (and for non-NEON builds).
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // After consuming 2*outw input pixels of a row pair, advance past the
    // unused right edge (w - 2*outw) plus one full extra row (w) so r0/r1
    // land on the next pair of input rows.
    const int tailstep = w - 2*outw + w;

    #pragma omp parallel for
    for (int q=0; q<inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // r0/r1 walk the top and bottom rows of each 2x2 window.
        const float* r0 = img0;
        const float* r1 = img0 + w;
        for (int i = 0; i < outh; i++)
        {
#if __ARM_NEON
            int nn = outw >> 2;            // 4 outputs (8 input floats per row) per NEON iteration
            int remain = outw - (nn << 2); // leftover outputs for the scalar tail
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            // Per iteration: load 8 floats from each row, take the vertical
            // max (fmax), then the horizontal pairwise max (fmaxp) to yield
            // 4 pooled outputs.
            if (nn > 0)
            {
                asm volatile(
                    "0: \n"
                    "prfm pldl1keep, [%1, #256] \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v0.4s, v1.4s}, [%1], #32 \n"
                    "ld1 {v2.4s, v3.4s}, [%2], #32 \n"
                    "fmax v0.4s, v0.4s, v2.4s \n"
                    "fmax v1.4s, v1.4s, v3.4s \n"
                    "fmaxp v2.4s, v0.4s, v1.4s \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v2.4s}, [%3], #16 \n"
                    "bne 0b \n"
                    : "=r"(nn), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(outptr) // %3
                    : "0"(nn),
                    "1"(r0),
                    "2"(r1),
                    "3"(outptr)
                    : "cc", "memory", "v0", "v1", "v2", "v3"
                );
            }
#else
            // ARMv7 variant of the same scheme: vmax for the vertical max,
            // vpmax for the horizontal pairwise max.
            if (nn > 0)
            {
                asm volatile(
                    "0: \n"
                    "pld [%1, #256] \n"
                    "pld [%2, #256] \n"
                    "vld1.f32 {d0-d3}, [%1]! \n"
                    "vld1.f32 {d4-d7}, [%2]! \n"
                    "vmax.f32 q0, q0, q2 \n"
                    "vmax.f32 q1, q1, q3 \n"
                    "vpmax.f32 d4, d0, d1 \n"
                    "vpmax.f32 d5, d2, d3 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d4-d5}, [%3]! \n"
                    "bne 0b \n"
                    : "=r"(nn), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(outptr) // %3
                    : "0"(nn),
                    "1"(r0),
                    "2"(r1),
                    "3"(outptr)
                    : "cc", "memory", "q0", "q1", "q2", "q3"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON

            // Scalar tail: one 2x2 window at a time.
            for (; remain>0; remain--)
            {
                float max0 = std::max(r0[0], r0[1]);
                float max1 = std::max(r1[0], r1[1]);

                *outptr = std::max(max0, max1);

                r0 += 2;
                r1 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
        }
    }
}
mg_single.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - MG This benchmark is an OpenMP C version of the NPB MG code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: E. Barszcz P. Frederickson A. Woo M. Yarrow OpenMP C version: S. Satoh --------------------------------------------------------------------*/ /* * #include "npb-C.h" */ #include <stdio.h> #include <stdlib.h> #include <math.h> #if defined(_OPENMP) #include <omp.h> #endif /* _OPENMP */ #include <sys/time.h> typedef int boolean; typedef struct { double real; double imag; } dcomplex; #define TRUE 1 #define FALSE 0 #define max(a,b) (((a) > (b)) ? (a) : (b)) #define min(a,b) (((a) < (b)) ? 
(a) : (b)) #define pow2(a) ((a)*(a)) #define get_real(c) c.real #define get_imag(c) c.imag #define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag) #define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag) #define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \ c.imag = a.real * b.imag + a.imag * b.real) #define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b) extern double randlc(double *, double); extern void vranlc(int, double *, double, double *); extern void timer_clear(int); extern void timer_start(int); extern void timer_stop(int); extern double timer_read(int); extern void c_print_results(char *name, char cclass, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand); /* #include "globals.h" #include "npbparams.h" */ #define CLASS 'B' /******************/ /* default values */ /******************/ #ifndef CLASS #define CLASS 'S' #endif #if CLASS == 'S' /* CLASS = S */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. */ #define NX_DEFAULT 32 #define NY_DEFAULT 32 #define NZ_DEFAULT 32 #define NIT_DEFAULT 4 #define LM 5 #define LT_DEFAULT 5 #define DEBUG_DEFAULT 0 #define NDIM1 5 #define NDIM2 5 #define NDIM3 5 #define CONVERTDOUBLE FALSE #define COMPILETIME "13 Mar 2013" #define NPBVERSION "2.3" #define CS1 "gcc" #define CS2 "$(CC)" #define CS3 "(none)" #define CS4 "-I../common" #define CS5 "-fopenmp -O3" #define CS6 "-lm -fopenmp" #define CS7 "randdp" #endif #if CLASS == 'W' /* CLASS = W */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. 
*/ #define NX_DEFAULT 64 #define NY_DEFAULT 64 #define NZ_DEFAULT 64 #define NIT_DEFAULT 40 #define LM 6 #define LT_DEFAULT 6 #define DEBUG_DEFAULT 0 #define NDIM1 6 #define NDIM2 6 #define NDIM3 6 #define CONVERTDOUBLE FALSE #define COMPILETIME "13 Mar 2013" #define NPBVERSION "2.3" #define CS1 "gcc" #define CS2 "$(CC)" #define CS3 "(none)" #define CS4 "-I../common" #define CS5 "-fopenmp -O3" #define CS6 "-lm -fopenmp" #define CS7 "randdp" #endif #if CLASS == 'A' /* CLASS = A */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. */ #define NX_DEFAULT 256 #define NY_DEFAULT 256 #define NZ_DEFAULT 256 #define NIT_DEFAULT 4 #define LM 8 #define LT_DEFAULT 8 #define DEBUG_DEFAULT 0 #define NDIM1 8 #define NDIM2 8 #define NDIM3 8 #define CONVERTDOUBLE FALSE #define COMPILETIME "07 Mar 2013" #define NPBVERSION "2.3" #define CS1 "identityTranslator " #define CS2 "$(CC)" #define CS3 "/export/tmp.liao6/workspace/thrifty/build64..." #define CS4 "-I../common" #define CS5 "-rose:openmp:lowering " #define CS6 "-lm" #define CS7 "randdp" #endif #if CLASS == 'B' /* CLASS = B */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. 
*/ #define NX_DEFAULT 256 #define NY_DEFAULT 256 #define NZ_DEFAULT 256 #define NIT_DEFAULT 20 #define LM 8 #define LT_DEFAULT 8 #define DEBUG_DEFAULT 0 #define NDIM1 8 #define NDIM2 8 #define NDIM3 8 #define CONVERTDOUBLE FALSE #define COMPILETIME "03 May 2013" #define NPBVERSION "2.3" #define CS1 "gcc" #define CS2 "$(CC)" #define CS3 "(none)" #define CS4 "-I../common" #define CS5 "-fopenmp -O3" #define CS6 "-lm -fopenmp" #define CS7 "randdp" #endif /* parameters */ /* actual dimension including ghost cells for communications */ #define NM (2+(2<<(LM-1))) /* size of rhs array */ #define NV (2+(2<<(NDIM1-1))*(2+(2<<(NDIM2-1)))*(2+(2<<(NDIM3-1)))) /* size of residual array */ #define NR ((8*(NV+(NM*NM)+5*NM+7*LM))/7) /* size of communication buffer */ #define NM2 (2*NM*NM) /* maximum number of levels */ #define MAXLEVEL 11 /*---------------------------------------------------------------------*/ /* common /mg3/ */ static int nx[MAXLEVEL+1], ny[MAXLEVEL+1], nz[MAXLEVEL+1]; /* common /ClassType/ */ static char Class; /* common /my_debug/ */ static int debug_vec[8]; /* common /fap/ */ /*static int ir[MAXLEVEL], m1[MAXLEVEL], m2[MAXLEVEL], m3[MAXLEVEL];*/ static int m1[MAXLEVEL+1], m2[MAXLEVEL+1], m3[MAXLEVEL+1]; static int lt, lb; /*c--------------------------------------------------------------------- c Set at m=1024, can handle cases up to 1024^3 case c---------------------------------------------------------------------*/ #define M 1037 /* common /buffer/ */ /*static double buff[4][NM2];*/ /* parameters */ #define T_BENCH 1 #define T_INIT 2 /* global variables */ /* common /grid/ */ static int is1, is2, is3, ie1, ie2, ie3; /* functions prototypes */ static void setup(int *n1, int *n2, int *n3, int lt); static void mg3P(double ****u, double ***v, double ****r, double a[4], double c[4], int n1, int n2, int n3, int k); static void psinv( double ***r, double ***u, int n1, int n2, int n3, double c[4], int k); static void resid( double ***u, double ***v, double ***r, int 
n1, int n2, int n3, double a[4], int k ); static void rprj3( double ***r, int m1k, int m2k, int m3k, double ***s, int m1j, int m2j, int m3j, int k ); static void interp( double ***z, int mm1, int mm2, int mm3, double ***u, int n1, int n2, int n3, int k ); static void norm2u3(double ***r, int n1, int n2, int n3, double *rnm2, double *rnmu, int nx, int ny, int nz); static void rep_nrm(double ***u, int n1, int n2, int n3, char *title, int kk); static void comm3(double ***u, int n1, int n2, int n3, int kk); static void zran3(double ***z, int n1, int n2, int n3, int nx, int ny, int k); static void showall(double ***z, int n1, int n2, int n3); static double power( double a, int n ); static void bubble( double ten[M][2], int j1[M][2], int j2[M][2], int j3[M][2], int m, int ind ); static void zero3(double ***z, int n1, int n2, int n3); static void nonzero(double ***z, int n1, int n2, int n3); /*-------------------------------------------------------------------- program mg c-------------------------------------------------------------------*/ int main(int argc, char *argv[]) { /*------------------------------------------------------------------------- c k is the current level. It is passed down through subroutine args c and is NOT global. it is the current iteration c------------------------------------------------------------------------*/ int k, it; double t, tinit, mflops; int nthreads = 1; /*------------------------------------------------------------------------- c These arrays are in common because they are quite large c and probably shouldn't be allocated on the stack. They c are always passed as subroutine args. 
c------------------------------------------------------------------------*/ double ****u, ***v, ****r; /* Dynamically allocated arrays, not linear storage across dimensions */ double a[4], c[4]; double rnm2, rnmu; double epsilon = 1.0e-8; int n1, n2, n3, nit; double verify_value; boolean verified; int i, j, l; FILE *fp; timer_clear(T_BENCH); timer_clear(T_INIT); timer_start(T_INIT); /*---------------------------------------------------------------------- c Read in and broadcast input data c---------------------------------------------------------------------*/ printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - MG Benchmark\n\n"); fp = fopen("mg.input", "r"); if (fp != NULL) { printf(" Reading from input file mg.input\n"); fscanf(fp, "%d", &lt); while(fgetc(fp) != '\n'); fscanf(fp, "%d%d%d", &nx[lt], &ny[lt], &nz[lt]); while(fgetc(fp) != '\n'); fscanf(fp, "%d", &nit); while(fgetc(fp) != '\n'); for (i = 0; i <= 7; i++) { fscanf(fp, "%d", &debug_vec[i]); } fclose(fp); } else { printf(" No input file. 
Using compiled defaults\n"); lt = LT_DEFAULT; nit = NIT_DEFAULT; nx[lt] = NX_DEFAULT; ny[lt] = NY_DEFAULT; nz[lt] = NZ_DEFAULT; for (i = 0; i <= 7; i++) { debug_vec[i] = DEBUG_DEFAULT; } } if ( (nx[lt] != ny[lt]) || (nx[lt] != nz[lt]) ) { Class = 'U'; } else if( nx[lt] == 32 && nit == 4 ) { Class = 'S'; } else if( nx[lt] == 64 && nit == 40 ) { Class = 'W'; } else if( nx[lt] == 256 && nit == 20 ) { Class = 'B'; } else if( nx[lt] == 512 && nit == 20 ) { Class = 'C'; } else if( nx[lt] == 256 && nit == 4 ) { Class = 'A'; } else { Class = 'U'; } /*-------------------------------------------------------------------- c Use these for debug info: c--------------------------------------------------------------------- c debug_vec(0) = 1 !=> report all norms c debug_vec(1) = 1 !=> some setup information c debug_vec(1) = 2 !=> more setup information c debug_vec(2) = k => at level k or below, show result of resid c debug_vec(3) = k => at level k or below, show result of psinv c debug_vec(4) = k => at level k or below, show result of rprj c debug_vec(5) = k => at level k or below, show result of interp c debug_vec(6) = 1 => (unused) c debug_vec(7) = 1 => (unused) c-------------------------------------------------------------------*/ a[0] = -8.0/3.0; a[1] = 0.0; a[2] = 1.0/6.0; a[3] = 1.0/12.0; if (Class == 'A' || Class == 'S' || Class =='W') { /*-------------------------------------------------------------------- c Coefficients for the S(a) smoother c-------------------------------------------------------------------*/ c[0] = -3.0/8.0; c[1] = 1.0/32.0; c[2] = -1.0/64.0; c[3] = 0.0; } else { /*-------------------------------------------------------------------- c Coefficients for the S(b) smoother c-------------------------------------------------------------------*/ c[0] = -3.0/17.0; c[1] = 1.0/33.0; c[2] = -1.0/61.0; c[3] = 0.0; } lb = 1; setup(&n1,&n2,&n3,lt); u = (double ****)malloc((lt+1)*sizeof(double ***)); for (l = lt; l >=1; l--) { u[l] = (double 
***)malloc(m3[l]*sizeof(double **)); for (k = 0; k < m3[l]; k++) { u[l][k] = (double **)malloc(m2[l]*sizeof(double *)); for (j = 0; j < m2[l]; j++) { u[l][k][j] = (double *)malloc(m1[l]*sizeof(double)); } } } v = (double ***)malloc(m3[lt]*sizeof(double **)); for (k = 0; k < m3[lt]; k++) { v[k] = (double **)malloc(m2[lt]*sizeof(double *)); for (j = 0; j < m2[lt]; j++) { v[k][j] = (double *)malloc(m1[lt]*sizeof(double)); } } r = (double ****)malloc((lt+1)*sizeof(double ***)); for (l = lt; l >=1; l--) { r[l] = (double ***)malloc(m3[l]*sizeof(double **)); for (k = 0; k < m3[l]; k++) { r[l][k] = (double **)malloc(m2[l]*sizeof(double *)); for (j = 0; j < m2[l]; j++) { r[l][k][j] = (double *)malloc(m1[l]*sizeof(double)); } } } #pragma omp parallel { zero3(u[lt],n1,n2,n3); } zran3(v,n1,n2,n3,nx[lt],ny[lt],lt); #pragma omp parallel { norm2u3(v,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); #pragma omp single { /* printf("\n norms of random v are\n"); printf(" %4d%19.12e%19.12e\n", 0, rnm2, rnmu); printf(" about to evaluate resid, k= %d\n", lt);*/ printf(" Size: %3dx%3dx%3d (class %1c)\n", nx[lt], ny[lt], nz[lt], Class); printf(" Iterations: %3d\n", nit); } resid(u[lt],v,r[lt],n1,n2,n3,a,lt); norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); /*c--------------------------------------------------------------------- c One iteration for startup c---------------------------------------------------------------------*/ mg3P(u,v,r,a,c,n1,n2,n3,lt); resid(u[lt],v,r[lt],n1,n2,n3,a,lt); #pragma omp single setup(&n1,&n2,&n3,lt); zero3(u[lt],n1,n2,n3); } /* pragma omp parallel */ zran3(v,n1,n2,n3,nx[lt],ny[lt],lt); timer_stop(T_INIT); timer_start(T_BENCH); #pragma omp parallel firstprivate(nit) private(it) { resid(u[lt],v,r[lt],n1,n2,n3,a,lt); norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); for ( it = 1; it <= nit; it++) { mg3P(u,v,r,a,c,n1,n2,n3,lt); resid(u[lt],v,r[lt],n1,n2,n3,a,lt); } norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]); #if defined(_OPENMP) 
#pragma omp master nthreads = omp_get_num_threads(); #endif } /* pragma omp parallel */ timer_stop(T_BENCH); t = timer_read(T_BENCH); tinit = timer_read(T_INIT); verified = FALSE; verify_value = 0.0; printf(" Initialization time: %15.3f seconds\n", tinit); printf(" Benchmark completed\n"); if (Class != 'U') { if (Class == 'S') { verify_value = 0.530770700573e-04; } else if (Class == 'W') { verify_value = 0.250391406439e-17; /* 40 iterations*/ /* 0.183103168997d-044 iterations*/ } else if (Class == 'A') { verify_value = 0.2433365309e-5; } else if (Class == 'B') { verify_value = 0.180056440132e-5; } else if (Class == 'C') { verify_value = 0.570674826298e-06; } if ( fabs( rnm2 - verify_value ) <= epsilon ) { verified = TRUE; printf(" VERIFICATION SUCCESSFUL\n"); printf(" L2 Norm is %20.12e\n", rnm2); printf(" Error is %20.12e\n", rnm2 - verify_value); } else { verified = FALSE; printf(" VERIFICATION FAILED\n"); printf(" L2 Norm is %20.12e\n", rnm2); printf(" The correct L2 Norm is %20.12e\n", verify_value); } } else { verified = FALSE; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if ( t != 0.0 ) { int nn = nx[lt]*ny[lt]*nz[lt]; mflops = 58.*nit*nn*1.0e-6 / t; } else { mflops = 0.0; } c_print_results("MG", Class, nx[lt], ny[lt], nz[lt], nit, nthreads, t, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /* set up a series of grid sizes */ static void setup(int *n1, int *n2, int *n3, int lt) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int k; /* decreasing from the coarse grid (last iteration?) 
*/
    /* Halve each grid dimension from the finest level lt down to level 1. */
    for ( k = lt-1; k >= 1; k--) {
        nx[k] = nx[k+1]/2;
        ny[k] = ny[k+1]/2;
        nz[k] = nz[k+1]/2;
    }

    /* Per-level array extents including the two ghost planes.
       NOTE(review): m2 is derived from nz and m3 from ny (not m2<-ny,
       m3<-nz); for every class this file supports nx==ny==nz, so the
       apparent swap is benign — confirm intent before generalizing to
       anisotropic grids. */
    for (k = 1; k <= lt; k++) {
        m1[k] = nx[k]+2;
        m2[k] = nz[k]+2;
        m3[k] = ny[k]+2;
    }

    /* Index bounds of the owned (non-ghost) region on the finest grid,
       and the allocated extents *n1..*n3 returned to the caller. */
    is1 = 1;
    ie1 = nx[lt];
    *n1 = nx[lt]+2;
    is2 = 1;
    ie2 = ny[lt];
    *n2 = ny[lt]+2;
    is3 = 1;
    ie3 = nz[lt];
    *n3 = nz[lt]+2;

    if (debug_vec[1] >= 1 ) {
        printf(" in setup, \n");
        printf(" lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n");
        printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n",
               lt,nx[lt],ny[lt],nz[lt],*n1,*n2,*n3,is1,is2,is3,ie1,ie2,ie3);
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c mg3P performs one multigrid V-cycle on the grid hierarchy:
c   down-cycle : restrict the residual to successively coarser grids
c   bottom     : approximate solve on the coarsest grid lb
c   up-cycle   : prolongate the correction, recompute the residual,
c                and smooth, back up to the finest grid lt.
c Note: the incoming value of k (lt at every call site) is immediately
c overwritten by the down-cycle loop; it serves only as scratch.
c-------------------------------------------------------------------*/
static void mg3P(double ****u, double ***v, double ****r, double a[4],
                 double c[4], int n1, int n2, int n3, int k)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c multigrid V-cycle routine
c-------------------------------------------------------------------*/
    int j;

/*--------------------------------------------------------------------
c down cycle.
c restrict the residual from the fine grid to the coarse
c-------------------------------------------------------------------*/
    for (k = lt; k >= lb+1; k--) {
        j = k-1;
        rprj3(r[k], m1[k], m2[k], m3[k],
              r[j], m1[j], m2[j], m3[j], k);
    }

    k = lb;
/*--------------------------------------------------------------------
c compute an approximate solution on the coarsest grid
c-------------------------------------------------------------------*/
    zero3(u[k], m1[k], m2[k], m3[k]);
    psinv(r[k], u[k], m1[k], m2[k], m3[k], c, k);

    for (k = lb+1; k <= lt-1; k++) {
        j = k-1;
/*--------------------------------------------------------------------
c prolongate from level k-1 to k
c-------------------------------------------------------------------*/
        zero3(u[k], m1[k], m2[k], m3[k]);
        interp(u[j], m1[j], m2[j], m3[j],
               u[k], m1[k], m2[k], m3[k], k);
/*--------------------------------------------------------------------
c compute residual for level k
c (r[k] is passed both as the right-hand side and as the output,
c so the residual at this level is updated in place)
c-------------------------------------------------------------------*/
        resid(u[k], r[k], r[k], m1[k], m2[k], m3[k], a, k);
/*--------------------------------------------------------------------
c apply smoother
c-------------------------------------------------------------------*/
        psinv(r[k], u[k], m1[k], m2[k], m3[k], c, k);
    }

    /* Final prolongation to the finest level, residual against the
       original right-hand side v, and one last smoothing pass. */
    j = lt - 1;
    k = lt;
    interp(u[j], m1[j], m2[j], m3[j], u[lt], n1, n2, n3, k);
    resid(u[lt], v, r[lt], n1, n2, n3, a, k);
    psinv(r[lt], u[lt], n1, n2, n3, c, k);
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/* similar to stencil computation */
static void psinv( double ***r, double ***u, int n1, int n2, int n3,
                   double c[4], int k)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c psinv applies an approximate inverse as smoother: u = u + Cr
c
c This implementation
costs 15A + 4M per result, where
c A and M denote the costs of Addition and Multiplication.
c Presuming coefficient c(3) is zero (the NPB assumes this,
c but it is thus not a general case), 2A + 1M may be eliminated,
c resulting in 13A + 3M.
c Note that this vectorizes, and is also fine for cache
c based machines.
c-------------------------------------------------------------------*/
    int i3, i2, i1;
    double r1[M], r2[M];   /* pencil buffers for precomputed plane sums */

    /* Orphaned worksharing construct: callers run this from inside an
       enclosing "#pragma omp parallel" region, which splits the i3
       loop across the team. */
#pragma omp for
    for (i3 = 1; i3 < n3-1; i3++) {
        for (i2 = 1; i2 < n2-1; i2++) {
            /* precompute the 4- and 8-neighbour plane sums for the
               whole i1 pencil */
            for (i1 = 0; i1 < n1; i1++) {
                r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]
                    + r[i3-1][i2][i1] + r[i3+1][i2][i1];
                r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]
                    + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];
            }
            for (i1 = 1; i1 < n1-1; i1++) {
                u[i3][i2][i1] = u[i3][i2][i1]
                    + c[0] * r[i3][i2][i1]
                    + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]
                               + r1[i1] )
                    + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );
/*--------------------------------------------------------------------
c Assume c(3) = 0 (Enable line below if c(3) not= 0)
c---------------------------------------------------------------------
c > + c(3) * ( r2(i1-1) + r2(i1+1) )
c-------------------------------------------------------------------*/
            }
        }
    }

/*--------------------------------------------------------------------
c exchange boundary points
c-------------------------------------------------------------------*/
    comm3(u,n1,n2,n3,k);

    if (debug_vec[0] >= 1 ) {
#pragma omp single
        rep_nrm(u,n1,n2,n3," psinv",k);
    }

    if ( debug_vec[3] >= k ) {
#pragma omp single
        showall(u,n1,n2,n3);
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void resid( double ***u, double ***v, double ***r,
                   int n1, int n2, int n3, double a[4], int k )
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c resid computes the residual: r = v - Au
c
c This implementation costs 15A + 4M per result, where
c A and M denote the costs of Addition (or Subtraction) and
c Multiplication, respectively.
c Presuming coefficient a(1) is zero (the NPB assumes this,
c but it is thus not a general case), 3A + 1M may be eliminated,
c resulting in 12A + 3M.
c Note that this vectorizes, and is also fine for cache
c based machines.
c-------------------------------------------------------------------*/
    int i3, i2, i1;
    double u1[M], u2[M];   /* pencil buffers for precomputed plane sums */

    /* Orphaned worksharing construct — see psinv above. */
#pragma omp for
    for (i3 = 1; i3 < n3-1; i3++) {
        for (i2 = 1; i2 < n2-1; i2++) {
            for (i1 = 0; i1 < n1; i1++) {
                u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]
                    + u[i3-1][i2][i1] + u[i3+1][i2][i1];
                u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]
                    + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];
            }
            for (i1 = 1; i1 < n1-1; i1++) {
                r[i3][i2][i1] = v[i3][i2][i1]
                    - a[0] * u[i3][i2][i1]
/*--------------------------------------------------------------------
c Assume a(1) = 0 (Enable 2 lines below if a(1) not= 0)
c---------------------------------------------------------------------
c > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3)
c > + u1(i1) )
c-------------------------------------------------------------------*/
                    - a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )
                    - a[3] * ( u2[i1-1] + u2[i1+1] );
            }
        }
    }

/*--------------------------------------------------------------------
c exchange boundary data
c--------------------------------------------------------------------*/
    comm3(r,n1,n2,n3,k);

    if (debug_vec[0] >= 1 ) {
#pragma omp single
        rep_nrm(r,n1,n2,n3," resid",k);
    }

    if ( debug_vec[2] >= k ) {
#pragma omp single
        showall(r,n1,n2,n3);
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void rprj3( double ***r, int m1k, int m2k, int m3k,
                   double ***s, int m1j, int m2j, int m3j, int k )
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c rprj3 projects onto the next coarser grid, c using a trilinear Finite Element projection: s = r' = P r c c This implementation costs 20A + 4M per result, where c A and M denote the costs of Addition and Multiplication. c Note that this vectorizes, and is also fine for cache c based machines. c-------------------------------------------------------------------*/ int j3, j2, j1, i3, i2, i1, d1, d2, d3; double x1[M], y1[M], x2, y2; if (m1k == 3) { d1 = 2; } else { d1 = 1; } if (m2k == 3) { d2 = 2; } else { d2 = 1; } if (m3k == 3) { d3 = 2; } else { d3 = 1; } #pragma omp for for (j3 = 1; j3 < m3j-1; j3++) { i3 = 2*j3-d3; /*C i3 = 2*j3-1*/ for (j2 = 1; j2 < m2j-1; j2++) { i2 = 2*j2-d2; /*C i2 = 2*j2-1*/ for (j1 = 1; j1 < m1j; j1++) { i1 = 2*j1-d1; /*C i1 = 2*j1-1*/ x1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1] + r[i3][i2+1][i1] + r[i3+2][i2+1][i1]; y1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1] + r[i3][i2+2][i1] + r[i3+2][i2+2][i1]; } for (j1 = 1; j1 < m1j-1; j1++) { i1 = 2*j1-d1; /*C i1 = 2*j1-1*/ y2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1] + r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1]; x2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1] + r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1]; s[j3][j2][j1] = 0.5 * r[i3+1][i2+1][i1+1] + 0.25 * ( r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2) + 0.125 * ( x1[i1] + x1[i1+2] + y2) + 0.0625 * ( y1[i1] + y1[i1+2] ); } } } comm3(s,m1j,m2j,m3j,k-1); if (debug_vec[0] >= 1 ) { #pragma omp single rep_nrm(s,m1j,m2j,m3j," rprj3",k-1); } if (debug_vec[4] >= k ) { #pragma omp single showall(s,m1j,m2j,m3j); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void interp( double ***z, int mm1, int mm2, int mm3, double ***u, int n1, int n2, int n3, int k ) { /*-------------------------------------------------------------------- 
c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c interp adds the trilinear interpolation of the correction c from the coarser grid to the current approximation: u = u + Qu' c c Observe that this implementation costs 16A + 4M, where c A and M denote the costs of Addition and Multiplication. c Note that this vectorizes, and is also fine for cache c based machines. Vector machines may get slightly better c performance however, with 8 separate "do i1" loops, rather than 4. c-------------------------------------------------------------------*/ int i3, i2, i1, d1, d2, d3, t1, t2, t3; /* c note that m = 1037 in globals.h but for this only need to be c 535 to handle up to 1024^3 c integer m c parameter( m=535 ) */ double z1[M], z2[M], z3[M]; if ( n1 != 3 && n2 != 3 && n3 != 3 ) { #pragma omp for for (i3 = 0; i3 < mm3-1; i3++) { for (i2 = 0; i2 < mm2-1; i2++) { for (i1 = 0; i1 < mm1; i1++) { z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1]; z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1]; z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1]; } for (i1 = 0; i1 < mm1-1; i1++) { u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1] +z[i3][i2][i1]; u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1] +0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]); } for (i1 = 0; i1 < mm1-1; i1++) { u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1] +0.5 * z1[i1]; u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1] +0.25*( z1[i1] + z1[i1+1] ); } for (i1 = 0; i1 < mm1-1; i1++) { u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1] +0.5 * z2[i1]; u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1] +0.25*( z2[i1] + z2[i1+1] ); } for (i1 = 0; i1 < mm1-1; i1++) { u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1] +0.25* z3[i1]; u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1] +0.125*( z3[i1] + z3[i1+1] ); } } } } else { if (n1 == 3) { d1 = 2; t1 = 1; } else { d1 = 1; t1 = 0; } if (n2 == 3) { d2 = 2; t2 = 1; } else { d2 = 1; t2 = 0; } if (n3 == 3) { d3 = 2; t3 = 
1; } else { d3 = 1; t3 = 0; } #pragma omp for for ( i3 = d3; i3 <= mm3-1; i3++) { for ( i2 = d2; i2 <= mm2-1; i2++) { for ( i1 = d1; i1 <= mm1-1; i1++) { u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] = u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] +z[i3-1][i2-1][i1-1]; } for ( i1 = 1; i1 <= mm1-1; i1++) { u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] = u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] +0.5*(z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]); } } for ( i2 = 1; i2 <= mm2-1; i2++) { for ( i1 = d1; i1 <= mm1-1; i1++) { u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] = u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] +0.5*(z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]); } for ( i1 = 1; i1 <= mm1-1; i1++) { u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] = u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] +0.25*(z[i3-1][i2][i1]+z[i3-1][i2-1][i1] +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]); } } } #pragma omp for for ( i3 = 1; i3 <= mm3-1; i3++) { for ( i2 = d2; i2 <= mm2-1; i2++) { for ( i1 = d1; i1 <= mm1-1; i1++) { u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] = u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] +0.5*(z[i3][i2-1][i1-1]+z[i3-1][i2-1][i1-1]); } for ( i1 = 1; i1 <= mm1-1; i1++) { u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] = u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] +0.25*(z[i3][i2-1][i1]+z[i3][i2-1][i1-1] +z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]); } } for ( i2 = 1; i2 <= mm2-1; i2++) { for ( i1 = d1; i1 <= mm1-1; i1++) { u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] = u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] +0.25*(z[i3][i2][i1-1]+z[i3][i2-1][i1-1] +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]); } for ( i1 = 1; i1 <= mm1-1; i1++) { u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] = u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] +0.125*(z[i3][i2][i1]+z[i3][i2-1][i1] +z[i3][i2][i1-1]+z[i3][i2-1][i1-1] +z[i3-1][i2][i1]+z[i3-1][i2-1][i1] +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]); } } } } #pragma omp single { if (debug_vec[0] >= 1 ) { rep_nrm(z,mm1,mm2,mm3,"z: inter",k-1); rep_nrm(u,n1,n2,n3,"u: inter",k); } if ( debug_vec[5] >= k ) { showall(z,mm1,mm2,mm3); showall(u,n1,n2,n3); } } /* pragma omp single */ } 
/*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void norm2u3(double ***r, int n1, int n2, int n3, double *rnm2, double *rnmu, int nx, int ny, int nz) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c norm2u3 evaluates approximations to the L2 norm and the c uniform (or L-infinity or Chebyshev) norm, under the c assumption that the boundaries are periodic or zero. Add the c boundaries in with half weight (quarter weight on the edges c and eighth weight at the corners) for inhomogeneous boundaries. c-------------------------------------------------------------------*/ static double s = 0.0; double tmp; int i3, i2, i1, n; double p_s = 0.0, p_a = 0.0; n = nx*ny*nz; #pragma omp for for (i3 = 1; i3 < n3-1; i3++) { for (i2 = 1; i2 < n2-1; i2++) { for (i1 = 1; i1 < n1-1; i1++) { p_s = p_s + r[i3][i2][i1] * r[i3][i2][i1]; tmp = fabs(r[i3][i2][i1]); if (tmp > p_a) p_a = tmp; } } } #pragma omp critical { s += p_s; if (p_a > *rnmu) *rnmu = p_a; } #pragma omp barrier #pragma omp single { *rnm2 = sqrt(s/(double)n); s = 0.0; } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void rep_nrm(double ***u, int n1, int n2, int n3, char *title, int kk) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c report on norm c-------------------------------------------------------------------*/ double rnm2, rnmu; norm2u3(u,n1,n2,n3,&rnm2,&rnmu,nx[kk],ny[kk],nz[kk]); printf(" Level%2d in %8s: norms =%21.14e%21.14e\n", kk, title, rnm2, rnmu); } 
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/* Exchange boundary information */
static void comm3(double ***u, int n1, int n2, int n3, int kk)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c comm3 organizes the communication on all borders: it applies the
c periodic boundary condition by copying each face of the interior
c into the ghost layer on the opposite side, one axis at a time.
c kk (the grid level) is unused in this shared-memory version —
c presumably a leftover of the message-passing variant; confirm.
c The "omp for" constructs are orphaned: callers invoke comm3 from
c inside an enclosing parallel region (e.g. "#pragma omp parallel
c comm3(...)" in zran3).
c-------------------------------------------------------------------*/
    int i1, i2, i3;

    /* axis = 1 : wrap the i1 (unit-stride) direction for interior planes */
#pragma omp for
    for ( i3 = 1; i3 < n3-1; i3++) {
        for ( i2 = 1; i2 < n2-1; i2++) {
            u[i3][i2][n1-1] = u[i3][i2][1];
            u[i3][i2][0] = u[i3][i2][n1-2];
        }
    }

    /* axis = 2 : wrap i2, now including the freshly filled i1 ghosts */
#pragma omp for
    for ( i3 = 1; i3 < n3-1; i3++) {
        for ( i1 = 0; i1 < n1; i1++) {
            u[i3][n2-1][i1] = u[i3][1][i1];
            u[i3][0][i1] = u[i3][n2-2][i1];
        }
    }

    /* axis = 3 : wrap i3 over the full i1/i2 planes */
#pragma omp for
    for ( i2 = 0; i2 < n2; i2++) {
        for ( i1 = 0; i1 < n1; i1++) {
            u[n3-1][i2][i1] = u[1][i2][i1];
            u[0][i2][i1] = u[n3-2][i2][i1];
        }
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void zran3(double ***z, int n1, int n2, int n3, int nx, int ny, int k)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c zran3 loads +1 at ten randomly chosen points,
c loads -1 at a different ten random points,
c and zero elsewhere.
c-------------------------------------------------------------------*/
#define MM 10
#define A pow(5.0,13)
#define X 314159265.e0

    /* NOTE(review): the locals m0/m1 shadow the file-scope m1[] array
       (and i0/i1 shadow nothing but read similarly) — legal C, but easy
       to misread. */
    int i0, m0, m1;
    int i1, i2, i3, d1, e1, e2, e3;
    double xx, x0, x1, a1, a2, ai;
    double ten[MM][2], best;
    int i, j1[MM][2], j2[MM][2], j3[MM][2];
    int jg[4][MM][2];
    double rdummy;   /* randlc's return value, deliberately ignored */

    a1 = power( A, nx );
    a2 = power( A, nx*ny );

#pragma omp parallel
    {
        zero3(z,n1,n2,n3);
    }

    /* linear offset of this (single) process's subgrid origin in the
       global random stream */
    i = is1-1+nx*(is2-1+ny*(is3-1));

    ai = power( A, i );
    d1 = ie1 - is1 + 1;
    e1 = ie1 - is1 + 2;
    e2 = ie2 - is2 + 2;
    e3 = ie3 - is3 + 2;
    x0 = X;
    rdummy = randlc( &x0, ai );

    /* fill each i1-pencil of the interior with the next d1 values of
       the LCG stream; x1/x0 advance the seed by one row / one plane */
    for (i3 = 1; i3 < e3; i3++) {
        x1 = x0;
        for (i2 = 1; i2 < e2; i2++) {
            xx = x1;
            vranlc( d1, &xx, A, &(z[i3][i2][0]));
            rdummy = randlc( &x1, a1 );
        }
        rdummy = randlc( &x0, a2 );
    }

/*--------------------------------------------------------------------
c call comm3(z,n1,n2,n3)
c call showall(z,n1,n2,n3)
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c each processor looks for twenty candidates
c (ten largest in column [.][1], ten smallest in column [.][0];
c slot 0 holds the current worst candidate of each list)
c-------------------------------------------------------------------*/
    for (i = 0; i < MM; i++) {
        ten[i][1] = 0.0;
        j1[i][1] = 0;
        j2[i][1] = 0;
        j3[i][1] = 0;
        ten[i][0] = 1.0;
        j1[i][0] = 0;
        j2[i][0] = 0;
        j3[i][0] = 0;
    }

    for (i3 = 1; i3 < n3-1; i3++) {
        for (i2 = 1; i2 < n2-1; i2++) {
            for (i1 = 1; i1 < n1-1; i1++) {
                if ( z[i3][i2][i1] > ten[0][1] ) {
                    ten[0][1] = z[i3][i2][i1];
                    j1[0][1] = i1;
                    j2[0][1] = i2;
                    j3[0][1] = i3;
                    bubble( ten, j1, j2, j3, MM, 1 );
                }
                if ( z[i3][i2][i1] < ten[0][0] ) {
                    ten[0][0] = z[i3][i2][i1];
                    j1[0][0] = i1;
                    j2[0][0] = i2;
                    j3[0][0] = i3;
                    bubble( ten, j1, j2, j3, MM, 0 );
                }
            }
        }
    }

/*--------------------------------------------------------------------
c Now which of these are globally best?
c-------------------------------------------------------------------*/
    i1 = MM - 1;
    i0 = MM - 1;
    for (i = MM - 1 ; i >= 0; i--) {
        /* NOTE(review): "best" is loaded from z and immediately compared
           against the identical expression, so both conditions are always
           true and the else branches are dead code — a degenerate
           single-process form of the distributed reduction; confirm
           before "simplifying". */
        best = z[j3[i1][1]][j2[i1][1]][j1[i1][1]];
        if (best == z[j3[i1][1]][j2[i1][1]][j1[i1][1]]) {
            jg[0][i][1] = 0;
            jg[1][i][1] = is1 - 1 + j1[i1][1];
            jg[2][i][1] = is2 - 1 + j2[i1][1];
            jg[3][i][1] = is3 - 1 + j3[i1][1];
            i1 = i1-1;
        } else {
            jg[0][i][1] = 0;
            jg[1][i][1] = 0;
            jg[2][i][1] = 0;
            jg[3][i][1] = 0;
        }
        ten[i][1] = best;
        best = z[j3[i0][0]][j2[i0][0]][j1[i0][0]];
        if (best == z[j3[i0][0]][j2[i0][0]][j1[i0][0]]) {
            jg[0][i][0] = 0;
            jg[1][i][0] = is1 - 1 + j1[i0][0];
            jg[2][i][0] = is2 - 1 + j2[i0][0];
            jg[3][i][0] = is3 - 1 + j3[i0][0];
            i0 = i0-1;
        } else {
            jg[0][i][0] = 0;
            jg[1][i][0] = 0;
            jg[2][i][0] = 0;
            jg[3][i][0] = 0;
        }
        ten[i][0] = best;
    }
    m1 = i1+1;
    m0 = i0+1;

    /* (a lengthy commented-out diagnostic dump of the charge positions
       and the ten smallest/largest random values lived here in the
       original source) */

    /* clear the whole array, then deposit the -1/+1 charges at the
       selected extreme points */
#pragma omp parallel for private(i2, i1)
    for (i3 = 0; i3 < n3; i3++) {
        for (i2 = 0; i2 < n2; i2++) {
            for (i1 = 0; i1 < n1; i1++) {
                z[i3][i2][i1] = 0.0;
            }
        }
    }
    for (i = MM-1; i >= m0; i--) {
        z[j3[i][0]][j2[i][0]][j1[i][0]] = -1.0;
    }
    for (i = MM-1; i >= m1; i--) {
        z[j3[i][1]][j2[i][1]][j1[i][1]] = 1.0;
    }
    /* wrap the ghost layers; comm3's worksharing loops need an
       enclosing parallel region, supplied here */
#pragma omp parallel
    comm3(z,n1,n2,n3,k);

/*--------------------------------------------------------------------
c call
showall(z,n1,n2,n3)
c-------------------------------------------------------------------*/
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c showall prints (at most) an 18 x 14 x 18 corner of z for debugging.
c Note the transposed print order: i1 indexes the printed rows and i2
c the printed columns within each i3 slab.
c-------------------------------------------------------------------*/
static void showall(double ***z, int n1, int n2, int n3)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
    int i1,i2,i3;
    int m1, m2, m3;   /* clamped extents (shadow the file-scope arrays) */

    m1 = min(n1,18);
    m2 = min(n2,14);
    m3 = min(n3,18);

    printf("\n");
    for (i3 = 0; i3 < m3; i3++) {
        for (i1 = 0; i1 < m1; i1++) {
            for (i2 = 0; i2 < m2; i2++) {
                printf("%6.3f", z[i3][i2][i1]);
            }
            printf("\n");
        }
        printf(" - - - - - - - \n");
    }
    printf("\n");
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static double power( double a, int n )
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c power raises an integer, disguised as a double
c precision real, to an integer power: classic square-and-multiply,
c with each multiply carried out by randlc (which presumably folds its
c second argument into the first under the generator's modular
c arithmetic — randlc's body is defined elsewhere; confirm).
c-------------------------------------------------------------------*/
    double aj;
    int nj;
    double rdummy;   /* randlc's return value, deliberately ignored */
    double power;    /* local result shadows the function name — legal C */

    power = 1.0;
    nj = n;
    aj = a;
    while (nj != 0) {
        if( (nj%2) == 1 ) rdummy = randlc( &power, aj );   /* fold in current square */
        rdummy = randlc( &aj, aj );                        /* square aj */
        nj = nj/2;
    }

    return (power);
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void bubble( double ten[M][2], int j1[M][2], int j2[M][2],
                    int j3[M][2], int m, int ind )
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c bubble does a bubble sort in direction dir
c-------------------------------------------------------------------*/ double temp; int i, j_temp; if ( ind == 1 ) { for (i = 0; i < m-1; i++) { if ( ten[i][ind] > ten[i+1][ind] ) { temp = ten[i+1][ind]; ten[i+1][ind] = ten[i][ind]; ten[i][ind] = temp; j_temp = j1[i+1][ind]; j1[i+1][ind] = j1[i][ind]; j1[i][ind] = j_temp; j_temp = j2[i+1][ind]; j2[i+1][ind] = j2[i][ind]; j2[i][ind] = j_temp; j_temp = j3[i+1][ind]; j3[i+1][ind] = j3[i][ind]; j3[i][ind] = j_temp; } else { return; } } } else { for (i = 0; i < m-1; i++) { if ( ten[i][ind] < ten[i+1][ind] ) { temp = ten[i+1][ind]; ten[i+1][ind] = ten[i][ind]; ten[i][ind] = temp; j_temp = j1[i+1][ind]; j1[i+1][ind] = j1[i][ind]; j1[i][ind] = j_temp; j_temp = j2[i+1][ind]; j2[i+1][ind] = j2[i][ind]; j2[i][ind] = j_temp; j_temp = j3[i+1][ind]; j3[i+1][ind] = j3[i][ind]; j3[i][ind] = j_temp; } else { return; } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void zero3(double ***z, int n1, int n2, int n3) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i1, i2, i3; #pragma omp for for (i3 = 0;i3 < n3; i3++) { for (i2 = 0; i2 < n2; i2++) { for (i1 = 0; i1 < n1; i1++) { z[i3][i2][i1] = 0.0; } } } } /*---- end of program ------------------------------------------------*/ /* cat ./common/c_print_results.c */ /*****************************************************************/ /****** C _ P R I N T _ R E S U L T S ******/ /*****************************************************************/ void c_print_results( char *name, char cclass, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand) { char *evalue="1000"; printf( "\n\n %s Benchmark 
Completed\n", name ); printf( " Class = %c\n", cclass ); if( n2 == 0 && n3 == 0 ) printf( " Size = %12d\n", n1 ); /* as in IS */ else printf( " Size = %3dx%3dx%3d\n", n1,n2,n3 ); printf( " Iterations = %12d\n", niter ); printf( " Threads = %12d\n", nthreads ); printf( " Time in seconds = %12.2f\n", t ); printf( " Mop/s total = %12.2f\n", mops ); printf( " Operation type = %24s\n", optype); if( passed_verification ) printf( " Verification = SUCCESSFUL\n" ); else printf( " Verification = UNSUCCESSFUL\n" ); printf( " Version = %12s\n", npbversion ); printf( " Compile date = %12s\n", compiletime ); printf( "\n Compile options:\n" ); printf( " CC = %s\n", cc ); printf( " CLINK = %s\n", clink ); printf( " C_LIB = %s\n", c_lib ); printf( " C_INC = %s\n", c_inc ); printf( " CFLAGS = %s\n", cflags ); printf( " CLINKFLAGS = %s\n", clinkflags ); printf( " RAND = %s\n", rand ); #ifdef SMP evalue = getenv("MP_SET_NUMTHREADS"); printf( " MULTICPUS = %s\n", evalue ); #endif /* printf( "\n\n" ); printf( " Please send the results of this run to:\n\n" ); printf( " NPB Development Team\n" ); printf( " Internet: npb@nas.nasa.gov\n \n" ); printf( " If email is not available, send this to:\n\n" ); printf( " MS T27A-1\n" ); printf( " NASA Ames Research Center\n" ); printf( " Moffett Field, CA 94035-1000\n\n" ); printf( " Fax: 415-604-3957\n\n" );*/ } /* cat ./common/c_randdp.c */ #if defined(USE_POW) #define r23 pow(0.5, 23.0) #define r46 (r23*r23) #define t23 pow(2.0, 23.0) #define t46 (t23*t23) #else #define r23 (0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5) #define r46 (r23*r23) #define t23 (2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0) #define t46 (t23*t23) #endif /*c--------------------------------------------------------------------- c---------------------------------------------------------------------*/ double randlc (double *x, double a) { 
/*c--------------------------------------------------------------------- c---------------------------------------------------------------------*/ /*c--------------------------------------------------------------------- c c This routine returns a uniform pseudorandom double precision number in the c range (0, 1) by using the linear congruential generator c c x_{k+1} = a x_k (mod 2^46) c c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers c before repeating. The argument A is the same as 'a' in the above formula, c and X is the same as x_0. A and X must be odd double precision integers c in the range (1, 2^46). The returned value RANDLC is normalized to be c between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain c the new seed x_1, so that subsequent calls to RANDLC using the same c arguments will generate a continuous sequence. c c This routine should produce the same results on any computer with at least c 48 mantissa bits in double precision floating point data. On 64 bit c systems, double precision should be disabled. c c David H. Bailey October 26, 1990 c c---------------------------------------------------------------------*/ double t1,t2,t3,t4,a1,a2,x1,x2,z; /*c--------------------------------------------------------------------- c Break A into two parts such that A = 2^23 * A1 + A2. c---------------------------------------------------------------------*/ t1 = r23 * a; a1 = (int)t1; a2 = a - t23 * a1; /*c--------------------------------------------------------------------- c Break X into two parts such that X = 2^23 * X1 + X2, compute c Z = A1 * X2 + A2 * X1 (mod 2^23), and then c X = 2^23 * Z + A2 * X2 (mod 2^46). 
c---------------------------------------------------------------------*/ t1 = r23 * (*x); x1 = (int)t1; x2 = (*x) - t23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int)(r23 * t1); z = t1 - t23 * t2; t3 = t23 * z + a2 * x2; t4 = (int)(r46 * t3); (*x) = t3 - t46 * t4; return (r46 * (*x)); } /*c--------------------------------------------------------------------- c---------------------------------------------------------------------*/ void vranlc (int n, double *x_seed, double a, double* y) { /* void vranlc (int n, double *x_seed, double a, double y[]) { */ /*c--------------------------------------------------------------------- c---------------------------------------------------------------------*/ /*c--------------------------------------------------------------------- c c This routine generates N uniform pseudorandom double precision numbers in c the range (0, 1) by using the linear congruential generator c c x_{k+1} = a x_k (mod 2^46) c c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers c before repeating. The argument A is the same as 'a' in the above formula, c and X is the same as x_0. A and X must be odd double precision integers c in the range (1, 2^46). The N results are placed in Y and are normalized c to be between 0 and 1. X is updated to contain the new seed, so that c subsequent calls to VRANLC using the same arguments will generate a c continuous sequence. If N is zero, only initialization is performed, and c the variables X, A and Y are ignored. c c This routine is the standard version designed for scalar or RISC systems. c However, it should produce the same results on any single processor c computer with at least 48 mantissa bits in double precision floating point c data. On 64 bit systems, double precision should be disabled. 
c c---------------------------------------------------------------------*/ int i; double x,t1,t2,t3,t4,a1,a2,x1,x2,z; /*c--------------------------------------------------------------------- c Break A into two parts such that A = 2^23 * A1 + A2. c---------------------------------------------------------------------*/ t1 = r23 * a; a1 = (int)t1; a2 = a - t23 * a1; x = *x_seed; /*c--------------------------------------------------------------------- c Generate N results. This loop is not vectorizable. c---------------------------------------------------------------------*/ for (i = 1; i <= n; i++) { /*c--------------------------------------------------------------------- c Break X into two parts such that X = 2^23 * X1 + X2, compute c Z = A1 * X2 + A2 * X1 (mod 2^23), and then c X = 2^23 * Z + A2 * X2 (mod 2^46). c---------------------------------------------------------------------*/ t1 = r23 * x; x1 = (int)t1; x2 = x - t23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int)(r23 * t1); z = t1 - t23 * t2; t3 = t23 * z + a2 * x2; t4 = (int)(r46 * t3); x = t3 - t46 * t4; y[i] = r46 * x; } *x_seed = x; } /* cat ./common/c_timers.c */ /* #include "wtime.h" #if defined(IBM) #define wtime wtime #elif defined(CRAY) #define wtime WTIME #else #define wtime wtime_ #endif */ /* Prototype */ void wtime( double * ); /*****************************************************************/ /****** E L A P S E D _ T I M E ******/ /*****************************************************************/ double elapsed_time( void ) { double t; wtime( &t ); return( t ); } double start[64], elapsed[64]; /*****************************************************************/ /****** T I M E R _ C L E A R ******/ /*****************************************************************/ void timer_clear( int n ) { elapsed[n] = 0.0; } /*****************************************************************/ /****** T I M E R _ S T A R T ******/ /*****************************************************************/ void 
timer_start( int n ) { start[n] = elapsed_time(); } /*****************************************************************/ /****** T I M E R _ S T O P ******/ /*****************************************************************/ void timer_stop( int n ) { double t, now; now = elapsed_time(); t = now - start[n]; elapsed[n] += t; } /*****************************************************************/ /****** T I M E R _ R E A D ******/ /*****************************************************************/ double timer_read( int n ) { return( elapsed[n] ); } void wtime(double *t) { static int sec = -1; struct timeval tv; gettimeofday(&tv, (void *)0); // gettimeofday(&tv, (struct timezone *)0); if (sec < 0) sec = tv.tv_sec; *t = (tv.tv_sec - sec) + 1.0e-6*tv.tv_usec; }
func.h
#pragma once

// Print the GPLUM startup banner (rank 0 only).  `version` is padded to
// 16 characters so the ASCII-art frame stays aligned.
void showGplumVersion(std::string version)
{
    if ( PS::Comm::getRank() == 0 ) {
        version.resize(16,' ');
        std::cout << " \n "
                  << " __________________________________ \n"
                  << " / \\ \n"
                  << " | ____ ____ _ _ _ __ __ | \n"
                  << " | / ___| _ \\| | | | | | \\/ | | \n"
                  << " | | | _| |_) | | | | | | |\\/| | | \n"
                  << " | | |_| | __/| |__| |_| | | | | | \n"
                  << " | \\____|_| |_____\\___/|_| |_| | \n"
                  << " | | \n"
                  << " | Global Planetary Simulation Code | \n"
                  << " | with Mass-dependent Cut-off | \n"
                  << " | Version " << version << " | \n"
                  << " \\__________________________________/ \n"
                  << " \n"
                  << " Licence: MIT (see, https://github.com/YotaIshigaki/GPLUM/blob/master/LICENSE) \n"
                  << " \n"
                  << " Copyright (C) 2020 \n"
                  << " Yota Ishigaki, Junko Kominmi, Junichiro Makino, \n"
                  << " Masaki Fujimoto and Masaki Iwasawa \n"
                  << " \n";
    }
}

// Global statistics over all particles: mean mass, maximum mass and
// mean neighbour count.  Local partial sums are reduced across ranks;
// results are returned through the reference parameters.
template <class Tpsys>
void calcMeanMass(Tpsys & pp,
                  PS::F64 & m_mean,
                  PS::F64 & m_max,
                  PS::F64 & nei_mean)
{
    const PS::S32 n_loc = pp.getNumberOfParticleLocal();
    const PS::S32 n_glb = pp.getNumberOfParticleGlobal();
    PS::F64 m_sum_loc = 0.;
    PS::F64 m_max_loc = 0.;
    PS::S32 nei_sum_loc = 0;

    for (PS::S32 i=0; i<n_loc; i++ ){
        m_sum_loc += pp[i].mass;
        if ( pp[i].mass > m_max_loc ) m_max_loc = pp[i].mass;
        nei_sum_loc += pp[i].neighbor;
    }
    m_mean = PS::Comm::getSum(m_sum_loc) / n_glb;
    m_max = PS::Comm::getMaxValue(m_max_loc);
    nei_mean = (PS::F64)PS::Comm::getSum(nei_sum_loc) / n_glb;
}

// Write snapshot file "<dir_name>/snapNNNNNN.dat" (ASCII) with a
// FileHeader carrying particle count, next id, time and energies.
template <class Tpsys>
void makeSnap(Tpsys & pp,
              PS::F64 time_sys,
              Energy e_init,
              Energy e_now,
              const char * dir_name,
              const PS::S32 isnap,
              const PS::S32 id_next)
{
    FileHeader header(pp.getNumberOfParticleGlobal(), id_next, time_sys, e_init, e_now);
    char filename[256];
    sprintf(filename, "%s/snap%06d.dat", dir_name, isnap);
    pp.writeParticleAscii(filename, header);
}

// Per-output-step bookkeeping: optionally writes a snapshot, then (rank 0)
// appends one tab-separated line of diagnostics to fout_eng.  Column set
// grows under OUTPUT_DETAIL (mass stats) and CALC_WTIME (timing breakdown).
// NOTE(review): n_col_tot / n_frag_tot are currently unused here.
template <class Tpsys>
void outputStep(Tpsys & pp,
                PS::F64 time_sys,
                Energy e_init,
                Energy e_now,
                PS::F64 de,
                PS::S32 n_col_tot,
                PS::S32 n_frag_tot,
                const char * dir_name,
                const PS::S32 isnap,
                const PS::S32 id_next,
                std::ofstream & fout_eng,
                Wtime wtime,
                PS::S32 n_largestcluster,
                PS::S32 n_cluster,
                PS::S32 n_isoparticle,
                bool bSnap=true)
{
    const PS::S32 n_tot = pp.getNumberOfParticleGlobal();

    if ( bSnap ) makeSnap(pp, time_sys, e_init, e_now, dir_name, isnap, id_next);

#ifdef OUTPUT_DETAIL
    PS::F64 m_mean = 0.;
    PS::F64 m_max = 0.;
    PS::F64 nei_mean = 0.;
    calcMeanMass(pp, m_mean, m_max, nei_mean);
#endif

    if(PS::Comm::getRank() == 0 && bSnap){
        //PS::F64 de = e_now.calcEnergyError(e_init);
        //PS::F64 de_tmp = sqrt(de*de);
        //if( de_tmp > de_max ) de_max = de_tmp;
        fout_eng << std::fixed<<std::setprecision(8)
                 << time_sys << "\t" << n_tot << "\t"
                 << std::scientific<<std::setprecision(15)
                 << e_now.etot << "\t" << de << "\t"
                 << n_largestcluster << "\t" << n_cluster << "\t" << n_isoparticle
#ifdef OUTPUT_DETAIL
                 << "\t" << m_max << "\t" << m_mean << "\t" << nei_mean
#endif
#ifdef CALC_WTIME
                 << "\t" << wtime.soft_step << "\t" << wtime.hard_step << "\t"
                 << wtime.calc_soft_force_step << "\t" << wtime.neighbor_search_step << "\t"
                 << wtime.calc_hard_force_step << "\t"
                 << wtime.create_cluster_step << "\t" << wtime.communication_step << "\t"
                 << wtime.output_step
#endif
                 << std::endl;
    }
}

// Stamp every local particle with its local index and owning rank,
// reset domain/transfer flags, then rebuild NList's id -> local-index map.
template <class Tpsys>
void inputIDLocalAndMyrank(Tpsys & pp, NeighborList & NList)
{
    const PS::S32 n_loc = pp.getNumberOfParticleLocal();
    PS::S32 myrank = PS::Comm::getRank();
#pragma omp parallel for
    for(PS::S32 i=0; i<n_loc; i++){
        pp[i].id_local = i;
        pp[i].myrank = myrank;
        pp[i].inDomain = true;
        pp[i].isSent = false;
    }
    NList.makeIdMap(pp);
}

// Finalize collisions: for each particle flagged isMerged, fold every
// same-id partner j into i (mass-weighted velocity/potential averages),
// accumulate the kinetic-energy loss into edisp, and remove the dead
// partners.  `n_col` bounds the number of removals (size of `remove`).
// NOTE(review): the "-" OpenMP reduction accumulates like "+" with a 0
// initializer per the OpenMP spec (and is deprecated in OpenMP 5.2).
// NOTE(review): pp[i] is written while other iterations may read pp[j];
// this appears to rely on merged pairs being disjoint -- verify.
template <class Tpsys>
void MergeParticle(Tpsys & pp,
                   PS::S32 n_col,
                   PS::F64 & edisp)
{
    const PS::S32 n_loc = pp.getNumberOfParticleLocal();
    PS::S32 n_remove = 0;
    PS::S32 * remove = new PS::S32[n_col];
    PS::F64 edisp_loc = 0.;

#pragma omp parallel for reduction (-:edisp_loc)
    for ( PS::S32 i=0; i<n_loc; i++ ){
        if ( pp[i].isMerged ) {
            for ( PS::S32 j=0; j<n_loc; j++ ){
                if ( pp[j].id == pp[i].id && i != j ){
                    PS::F64 mi = pp[i].mass;
                    PS::F64 mj = pp[j].mass;
                    PS::F64vec vrel = pp[j].vel - pp[i].vel;
                    pp[i].mass += mj;
                    pp[i].vel = ( mi*pp[i].vel + mj*pp[j].vel )/(mi+mj);
                    //pp[i].acc = ( mi*pp[i].acc + mj*pp[j].acc )/(mi+mj);
#ifdef GAS_DRAG
                    pp[i].acc_gd = ( mi*pp[i].acc_gd + mj*pp[j].acc_gd )/(mi+mj);
#endif
                    pp[i].phi = ( mi*pp[i].phi + mj*pp[j].phi )/(mi+mj);
                    pp[i].phi_d = ( mi*pp[i].phi_d + mj*pp[j].phi_d )/(mi+mj);
                    // two-body binding-energy loss of the merger
                    edisp_loc -= 0.5 * mi*mj/(mi+mj) * vrel*vrel;
                    // shared removal list: index bump must be serialized
#pragma omp critical
                    {
                        remove[n_remove] = j;
                        n_remove ++;
                    }
                    assert ( pp[i].pos == pp[j].pos );
                    assert ( pp[j].isDead );
                }
            }
            pp[i].isMerged = false;
        }
    }
    PS::Comm::barrier();
    edisp += PS::Comm::getSum(edisp_loc);

    if ( n_remove ){
        pp.removeParticle(remove, n_remove);
    }
    delete [] remove;
}

// Remove particles whose radius falls outside [r_min, r_max].  The
// removed particles are gathered on rank 0 (MPI_Gatherv) so their
// kinetic + potential energy (plus mutual pair terms) can be subtracted
// into edisp and logged to fout_rem.  Returns the global removal count.
// NOTE(review): push_back under "omp critical" serializes correctly, but
// element order in remove_list is nondeterministic across runs.
template <class Tpsys>
PS::S32 removeOutOfBoundaryParticle(Tpsys & pp,
                                    PS::F64 & edisp,
                                    const PS::F64 r_max,
                                    const PS::F64 r_min,
                                    std::ofstream & fout_rem)
{
    const PS::F64 rmax2 = r_max*r_max;
    const PS::F64 rmin2 = r_min*r_min;
    PS::F64 edisp_loc = 0.;

    const PS::S32 n_loc = pp.getNumberOfParticleLocal();
    const PS::S32 n_proc = PS::Comm::getNumberOfProc();
    std::vector<PS::S32> remove_list;
    remove_list.clear();
#ifdef INDIRECT_TERM
    PS::F64 e_ind_before = 0.;
    PS::F64 e_ind_after = 0.;
#endif

#pragma omp parallel for
    for ( PS::S32 i=0; i<n_loc; i++ ){
        PS::F64vec posi = pp[i].pos;
        PS::F64 pos2 = posi*posi;
        if ( pos2 > rmax2 || pos2 < rmin2 ){
#pragma omp critical
            {
                remove_list.push_back(i);
            }
        }
    }

    PS::S32 n_remove_loc = remove_list.size();
    PS::S32 n_remove_glb = PS::Comm::getSum(n_remove_loc);

    /*if ( n_remove_glb == 1 ){
        if ( n_remove_loc ) {
            PS::S32 i_remove = remove_list.at(0);
            PS::F64 massi = pp[i_remove].mass;
            PS::F64vec veli = pp[i_remove].vel;
            edisp_loc -= 0.5*massi* veli*veli;
            edisp_loc -= massi * pp[i_remove].phi_s;
            edisp_loc -= massi * pp[i_remove].phi_d;
            edisp_loc -= massi * pp[i_remove].phi;
            std::cerr << "Remove Particle " << pp[i_remove].id << std::endl
                      << "Position : " << std::setprecision(15) << pp[i_remove].pos << std::endl;
            fout_rem << std::fixed<<std::setprecision(8)
                     << pp[i_remove].time << "\t" << pp[i_remove].id << "\t"
                     << std::scientific << std::setprecision(15)
                     << pp[i_remove].mass << "\t"
                     << pp[i_remove].pos.x << "\t" << pp[i_remove].pos.y << "\t" << pp[i_remove].pos.z << "\t"
                     << pp[i_remove].vel.x << "\t" << pp[i_remove].vel.y << "\t" << pp[i_remove].vel.z
                     << std::endl;
        }
    } else if ( n_remove_glb > 1 ){*/
    if ( n_remove_glb ){
        PS::S32 * n_remove_list = nullptr;
        PS::S32 * n_remove_adr = nullptr;
        FPGrav * remove_list_loc = nullptr;
        FPGrav * remove_list_glb = nullptr;
        // rank 0 owns the gather buffers; other ranks keep them null
        if ( PS::Comm::getRank() == 0 ){
            n_remove_list = new PS::S32[n_proc];
            n_remove_adr = new PS::S32[n_proc];
            remove_list_glb = new FPGrav[n_remove_glb];
        }
        remove_list_loc = new FPGrav[n_remove_loc];

#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
        MPI_Gather(&n_remove_loc, 1, PS::GetDataType(n_remove_loc),
                   n_remove_list, 1, PS::GetDataType(*n_remove_list), 0, MPI_COMM_WORLD);
#else
        n_remove_list[0] = n_remove_loc;
#endif
        //PS::Comm::gather(&n_remove_loc, 1, n_remove_list);

        if ( PS::Comm::getRank() == 0 ){
            // prefix sums -> per-rank displacements for Gatherv
            PS::S32 tmp_remove = 0;
            for ( PS::S32 i=0; i<n_proc; i++ ){
                n_remove_adr[i] = tmp_remove;
                tmp_remove += n_remove_list[i];
            }
            assert ( n_remove_glb == tmp_remove );
        }
        for ( PS::S32 i=0; i<n_remove_loc; i++ ) {
            remove_list_loc[i] = pp[remove_list.at(i)];
        }

#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
        MPI_Gatherv(remove_list_loc, n_remove_loc, PS::GetDataType(*remove_list_loc),
                    remove_list_glb, n_remove_list, n_remove_adr,
                    PS::GetDataType(*remove_list_glb), 0, MPI_COMM_WORLD);
#else
        for(PS::S32 i=0; i<n_remove_loc; i++) remove_list_glb[i] = remove_list_loc[i];
#endif
        //PS::Comm::gatherV(remove_list_loc, n_remove_loc, remove_list_glb, n_remove_list, n_remove_adr);

        if ( PS::Comm::getRank() == 0 ){
            for ( PS::S32 i=0; i<n_remove_glb; i++ ) {
                // energy carried away by the removed particle
                PS::F64 massi = remove_list_glb[i].mass;
                PS::F64vec veli = remove_list_glb[i].vel;
                edisp_loc -= 0.5*massi* veli*veli;
                edisp_loc -= massi * remove_list_glb[i].phi_s;
                edisp_loc -= massi * remove_list_glb[i].phi_d;
                edisp_loc -= massi * remove_list_glb[i].phi;

                // pair potentials among removed particles were counted
                // twice above; add them back (softened by eps2)
                for ( PS::S32 j=0; j<i; j++ ) {
                    if ( remove_list_glb[i].id != remove_list_glb[j].id ) {
                        PS::F64 massj = remove_list_glb[j].mass;
                        PS::F64vec posi = remove_list_glb[i].pos;
                        PS::F64vec posj = remove_list_glb[j].pos;
                        PS::F64 eps2 = EPGrav::eps2;
                        PS::F64vec dr = posi - posj;
                        PS::F64 rinv = 1./sqrt(dr*dr + eps2);
                        edisp_loc += - massi * massj * rinv;
                    }
                }

                std::cerr << "Remove Particle " << remove_list_glb[i].id << std::endl
                          << "Position : " << std::setprecision(15) << remove_list_glb[i].pos << std::endl;
                fout_rem << std::fixed<<std::setprecision(8)
                         << remove_list_glb[i].time << "\t" << remove_list_glb[i].id << "\t"
                         << std::scientific << std::setprecision(15)
                         << remove_list_glb[i].mass << "\t"
                         << remove_list_glb[i].pos.x << "\t" << remove_list_glb[i].pos.y << "\t" << remove_list_glb[i].pos.z << "\t"
                         << remove_list_glb[i].vel.x << "\t" << remove_list_glb[i].vel.y << "\t" << remove_list_glb[i].vel.z
                         << std::endl;
            }

            delete [] n_remove_list;
            delete [] n_remove_adr;
            delete [] remove_list_glb;
        }
        delete [] remove_list_loc;

#ifdef INDIRECT_TERM
        e_ind_before = calcIndirectEnergy(pp);
#endif
    }

    if (n_remove_loc) pp.removeParticle(&remove_list[0], n_remove_loc);
    edisp += PS::Comm::getSum(edisp_loc);

#ifdef INDIRECT_TERM
    if (n_remove_glb) {
        e_ind_after = calcIndirectEnergy(pp);
        edisp += e_ind_after - e_ind_before;
    }
#endif
    return n_remove_glb;
}

// Accumulate the work done by gas drag over half a tree step into
// edisp_gd so the energy budget stays closed.  Called twice per step:
// `second` flips the sign of the 0.25*acc_gd*dt correction term
// (trapezoid-like half-step correction).
template <class Tpsys>
void correctEnergyForGas(Tpsys & pp,
                         PS::F64 & edisp_gd,
                         bool second)
{// energy correction for gas drag
    PS::F64 edisp_gd_loc = 0.;
    PS::F64 coef = 0.25;
    if (second) coef *= -1.;

    const PS::S32 n_loc = pp.getNumberOfParticleLocal();
#pragma omp parallel for reduction(+:edisp_gd_loc)
    for(PS::S32 i=0; i<n_loc; i++){
        edisp_gd_loc += pp[i].mass * pp[i].acc_gd
            * (pp[i].vel + coef * pp[i].acc_gd * FPGrav::dt_tree);
    }
    edisp_gd += 0.5 * FPGrav::dt_tree * PS::Comm::getSum(edisp_gd_loc);
}
GB_bitmap_emult_template.c
//------------------------------------------------------------------------------ // GB_bitmap_emult_template: C = A.*B, C<M>=A.*B, and C<!M>=A.*B, C bitmap //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C is bitmap. A and B are bitmap or full. M depends on the method { //-------------------------------------------------------------------------- // get C, A, and B //-------------------------------------------------------------------------- const int8_t *restrict Ab = A->b ; const int8_t *restrict Bb = B->b ; const int64_t vlen = A->vlen ; ASSERT (GB_IS_BITMAP (A) || GB_IS_FULL (A) || GB_as_if_full (A)) ; ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (A) || GB_as_if_full (B)) ; const bool A_iso = A->iso ; const bool B_iso = B->iso ; int8_t *restrict Cb = C->b ; const int64_t cnz = GB_nnz_held (C) ; #ifdef GB_ISO_EMULT ASSERT (C->iso) ; #else ASSERT (!C->iso) ; ASSERT (!(A_iso && B_iso)) ; // one of A or B can be iso, but not both const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ; const GB_BTYPE *restrict Bx = (GB_BTYPE *) B->x ; GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ; #endif //-------------------------------------------------------------------------- // C=A.*B, C<M>=A.*B, or C<!M>=A.*B: C is bitmap //-------------------------------------------------------------------------- // TODO modify this method so it can modify C in-place, and also use the // accum operator. int64_t cnvals = 0 ; if (ewise_method == GB_EMULT_METHOD_05) { //---------------------------------------------------------------------- // C is bitmap, M is not present //---------------------------------------------------------------------- // ------------------------------------------ // C = A .* B // ------------------------------------------ // bitmap . 
bitmap bitmap (method: 05) // bitmap . bitmap full (method: 05) // bitmap . full bitmap (method: 05) int tid ; #pragma omp parallel for num_threads(C_nthreads) schedule(static) \ reduction(+:cnvals) for (tid = 0 ; tid < C_nthreads ; tid++) { int64_t pstart, pend, task_cnvals = 0 ; GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ; for (int64_t p = pstart ; p < pend ; p++) { if (GBB (Ab, p) && GBB (Bb,p)) { // C (i,j) = A (i,j) + B (i,j) #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, p, A_iso) ; GB_GETB (bij, Bx, p, B_iso) ; GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ; #endif Cb [p] = 1 ; task_cnvals++ ; } } cnvals += task_cnvals ; } } else if (ewise_method == GB_EMULT_METHOD_06) { //---------------------------------------------------------------------- // C is bitmap, !M is sparse or hyper //---------------------------------------------------------------------- // ------------------------------------------ // C <!M>= A .* B // ------------------------------------------ // bitmap sparse bitmap bitmap (method: 06) // bitmap sparse bitmap full (method: 06) // bitmap sparse full bitmap (method: 06) // M is sparse and complemented. If M is sparse and not // complemented, then C is constructed as sparse, not bitmap. ASSERT (M != NULL) ; ASSERT (Mask_comp) ; ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ; // C(i,j) = A(i,j) .* B(i,j) can only be computed where M(i,j) is // not present in the sparse pattern of M, and where it is present // but equal to zero. //---------------------------------------------------------------------- // scatter M into the C bitmap //---------------------------------------------------------------------- GB_bitmap_M_scatter_whole (C, M, Mask_struct, GB_BITMAP_M_SCATTER_SET_2, M_ek_slicing, M_ntasks, M_nthreads, Context) ; // C(i,j) has been marked, in Cb, with the value 2 where M(i,j)=1. // These positions will not be computed in C(i,j). C(i,j) can only // be modified where Cb [p] is zero. 
int tid ; #pragma omp parallel for num_threads(C_nthreads) schedule(static) \ reduction(+:cnvals) for (tid = 0 ; tid < C_nthreads ; tid++) { int64_t pstart, pend, task_cnvals = 0 ; GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ; for (int64_t p = pstart ; p < pend ; p++) { if (Cb [p] == 0) { // M(i,j) is zero, so C(i,j) can be computed if (GBB (Ab, p) && GBB (Bb, p)) { // C (i,j) = A (i,j) + B (i,j) #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, p, A_iso) ; GB_GETB (bij, Bx, p, B_iso) ; GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ; #endif Cb [p] = 1 ; task_cnvals++ ; } } else { // M(i,j) == 1, so C(i,j) is not computed Cb [p] = 0 ; } } cnvals += task_cnvals ; } } else // if (ewise_method == GB_EMULT_METHOD_07) { //---------------------------------------------------------------------- // C is bitmap; M is bitmap or full //---------------------------------------------------------------------- // ------------------------------------------ // C <M> = A .* B // ------------------------------------------ // bitmap bitmap bitmap bitmap (method: 07) // bitmap bitmap bitmap full (method: 07) // bitmap bitmap full bitmap (method: 07) // ------------------------------------------ // C <M> = A .* B // ------------------------------------------ // bitmap full bitmap bitmap (method: 07) // bitmap full bitmap full (method: 07) // bitmap full full bitmap (method: 07) // ------------------------------------------ // C <!M> = A .* B // ------------------------------------------ // bitmap bitmap bitmap bitmap (method: 07) // bitmap bitmap bitmap full (method: 07) // bitmap bitmap full bitmap (method: 07) // ------------------------------------------ // C <!M> = A .* B // ------------------------------------------ // bitmap full bitmap bitmap (method: 07) // bitmap full bitmap full (method: 07) // bitmap full full bitmap (method: 07) ASSERT (GB_IS_BITMAP (M) || GB_IS_FULL (M)) ; const int8_t *restrict Mb = M->b ; const GB_void *restrict Mx = (GB_void *) (Mask_struct ? 
NULL : (M->x)) ; size_t msize = M->type->size ; #undef GB_GET_MIJ #define GB_GET_MIJ(p) \ bool mij = GBB (Mb, p) && GB_mcast (Mx, p, msize) ; \ if (Mask_comp) mij = !mij ; /* TODO: use ^ */ int tid ; #pragma omp parallel for num_threads(C_nthreads) schedule(static) \ reduction(+:cnvals) for (tid = 0 ; tid < C_nthreads ; tid++) { int64_t pstart, pend, task_cnvals = 0 ; GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ; for (int64_t p = pstart ; p < pend ; p++) { GB_GET_MIJ (p) ; if (mij) { // M(i,j) is true, so C(i,j) can be computed if (GBB (Ab, p) && GBB (Bb, p)) { // C (i,j) = A (i,j) + B (i,j) #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, p, A_iso) ; GB_GETB (bij, Bx, p, B_iso) ; GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ; #endif Cb [p] = 1 ; task_cnvals++ ; } } else { // M(i,j) == 1, so C(i,j) is not computed Cb [p] = 0 ; } } cnvals += task_cnvals ; } } C->nvals = cnvals ; }
GB_unaryop__identity_int64_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int64_uint16
// op(A') function:  GB_tran__identity_int64_uint16

// C type:   int64_t
// A type:   uint16_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = aij

// C's entry type (identity op: value passes through after the cast)
#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: copy the casted value)
#define GB_OP(z, x)   \
    z = x ;

// casting: widen uint16_t -> int64_t (always exact, no overflow possible)
#define GB_CASTING(z, x)   \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries of Ax; trivially parallel since each
// output position depends on exactly one input position.
GrB_Info GB_unop__identity_int64_uint16
(
    int64_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c; the macros above plug the
// identity cast into that shared transpose skeleton (phase 2 of 2).
GrB_Info GB_tran__identity_int64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
par_mgr.c
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Two-grid system solver
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "par_mgr.h"

#ifdef HYPRE_USING_DSUPERLU
#include "dsuperlu.h"
#endif

#if defined(HYPRE_USING_CUDA)
/* Abort the run when an MGR option without a GPU implementation is
 * selected; `option` names the offending feature in the message. */
void
hypre_NoGPUSupport(char *option)
{
   char msg[256];
   hypre_sprintf(msg, "Error: Chosen %s option is not currently supported on GPU\n\n", option);
   hypre_printf("%s ", msg);
   //hypre_error_w_msg(1, msg);
   hypre_MPI_Abort(hypre_MPI_COMM_WORLD, -1);
}
#endif

/* Create */
/* Allocate a hypre_ParMGRData object and set every field to its default;
 * all array/solver pointers start NULL and are built during MGR setup. */
void *
hypre_MGRCreate()
{
   hypre_ParMGRData  *mgr_data;

   mgr_data = hypre_CTAlloc(hypre_ParMGRData,  1, HYPRE_MEMORY_HOST);

   /* block data */
   (mgr_data -> block_size) = 1;
   (mgr_data -> block_num_coarse_indexes) = NULL;
   (mgr_data -> point_marker_array) = NULL;
   (mgr_data -> block_cf_marker) = NULL;

   /* general data */
   (mgr_data -> max_num_coarse_levels) = 10;
   (mgr_data -> A_array) = NULL;
#if defined(HYPRE_USING_CUDA)
   (mgr_data -> P_FF_array) = NULL;
#endif
   (mgr_data -> P_array) = NULL;
   (mgr_data -> RT_array) = NULL;
   (mgr_data -> RAP) = NULL;
   (mgr_data -> CF_marker_array) = NULL;
   (mgr_data -> coarse_indices_lvls) = NULL;

   /* F-relaxation hierarchy (A_ff systems and their work vectors) */
   (mgr_data -> A_ff_array) = NULL;
   (mgr_data -> F_fine_array) = NULL;
   (mgr_data -> U_fine_array) = NULL;
   (mgr_data -> aff_solver) = NULL;
   (mgr_data -> fine_grid_solver_setup) = NULL;
   (mgr_data -> fine_grid_solver_solve) = NULL;

   (mgr_data -> F_array) = NULL;
   (mgr_data -> U_array) = NULL;
   (mgr_data -> residual) = NULL;
   (mgr_data -> rel_res_norms) = NULL;
   (mgr_data -> Vtemp) = NULL;
   (mgr_data -> Ztemp) = NULL;
   (mgr_data -> Utemp) = NULL;
   (mgr_data -> Ftemp) = NULL;

   (mgr_data -> num_iterations) = 0;
   (mgr_data -> num_interp_sweeps) = 1;
   (mgr_data -> num_restrict_sweeps) = 1;
   (mgr_data -> trunc_factor) = 0.0;
   (mgr_data -> max_row_sum) = 0.9;
   (mgr_data -> strong_threshold) = 0.25;
   (mgr_data -> P_max_elmts) = 0;

   /* coarse-grid solver defaults to an internally-owned BoomerAMG
    * (use_default_cgrid_solver = 1 -> destroyed by hypre_MGRDestroy) */
   (mgr_data -> coarse_grid_solver) = NULL;
   (mgr_data -> coarse_grid_solver_setup) = NULL;
   (mgr_data -> coarse_grid_solver_solve) = NULL;

   (mgr_data -> global_smoother) = NULL;

   (mgr_data -> use_default_cgrid_solver) = 1;
   (mgr_data -> fsolver_mode) = -1; // set to -1 to avoid printing when not used
   (mgr_data -> omega) = 1.;
   (mgr_data -> max_iter) = 20;
   (mgr_data -> tol) = 1.0e-6;
   (mgr_data -> relax_type) = 0;
   (mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms.
   (mgr_data -> interp_type) = NULL;
   (mgr_data -> restrict_type) = NULL;
   (mgr_data -> num_relax_sweeps) = 1;
   (mgr_data -> relax_weight) = 1.0;

   (mgr_data -> logging) = 0;
   (mgr_data -> print_level) = 0;
   (mgr_data -> frelax_print_level) = 0;
   (mgr_data -> cg_print_level) = 0;

   (mgr_data -> l1_norms) = NULL;

   (mgr_data -> reserved_coarse_size) = 0;
   (mgr_data -> reserved_coarse_indexes) = NULL;
   (mgr_data -> reserved_Cpoint_local_indexes) = NULL;

   (mgr_data -> diaginv) = NULL;
   (mgr_data -> global_smooth_iters) = 1;
   (mgr_data -> global_smooth_type) = 0;

   (mgr_data -> set_non_Cpoints_to_F) = 0;
   (mgr_data -> idx_array) = NULL;

   (mgr_data -> Frelax_method) = NULL;
   (mgr_data -> VcycleRelaxVtemp) = NULL;
   (mgr_data -> VcycleRelaxZtemp) = NULL;
   (mgr_data -> FrelaxVcycleData) = NULL;
   (mgr_data -> Frelax_num_functions) = NULL;
   (mgr_data -> max_local_lvls) = 10;

   (mgr_data -> use_non_galerkin_cg) = NULL;

   (mgr_data -> print_coarse_system) = 0;

   (mgr_data -> set_c_points_method) = 0;
   (mgr_data -> lvl_to_keep_cpoints) = 0;
   (mgr_data -> cg_convergence_factor) = 0.0;

   (mgr_data -> truncate_coarse_grid_threshold) = 0.0;

   return (void *) mgr_data;
}
/*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /* Destroy */ HYPRE_Int hypre_MGRDestroy( void *data ) { hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data; HYPRE_Int i; HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels); /* block info data */ if ((mgr_data -> block_cf_marker)) { for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); } } hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if (mgr_data -> block_num_coarse_indexes) { hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* final residual vector */ if ((mgr_data -> residual)) { hypre_ParVectorDestroy( (mgr_data -> residual) ); (mgr_data -> residual) = NULL; } if ((mgr_data -> rel_res_norms)) { hypre_TFree( (mgr_data -> rel_res_norms), HYPRE_MEMORY_HOST); (mgr_data -> rel_res_norms) = NULL; } /* temp vectors for solve phase */ if ((mgr_data -> Vtemp)) { hypre_ParVectorDestroy( (mgr_data -> Vtemp) ); (mgr_data -> Vtemp) = NULL; } if ((mgr_data -> Ztemp)) { hypre_ParVectorDestroy( (mgr_data -> Ztemp) ); (mgr_data -> Ztemp) = NULL; } if ((mgr_data -> Utemp)) { hypre_ParVectorDestroy( (mgr_data -> Utemp) ); (mgr_data -> Utemp) = NULL; } if ((mgr_data -> Ftemp)) { hypre_ParVectorDestroy( (mgr_data -> Ftemp) ); (mgr_data -> Ftemp) = NULL; } /* coarse grid solver */ if ((mgr_data -> use_default_cgrid_solver)) { if ((mgr_data -> coarse_grid_solver)) { hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) ); } (mgr_data -> coarse_grid_solver) = NULL; } /* l1_norms */ if ((mgr_data -> l1_norms)) { for (i = 0; i < (num_coarse_levels); i++) { hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]); } hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST); } /* 
coarse_indices_lvls */ if ((mgr_data -> coarse_indices_lvls)) { for (i = 0; i < (num_coarse_levels); i++) if ((mgr_data -> coarse_indices_lvls)[i]) { hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST); } hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST); } /* linear system and cf marker array */ if (mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array) { for (i = 1; i < num_coarse_levels + 1; i++) { hypre_ParVectorDestroy((mgr_data -> F_array)[i]); hypre_ParVectorDestroy((mgr_data -> U_array)[i]); if ((mgr_data -> P_array)[i - 1]) { hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i - 1]); } if ((mgr_data -> RT_array)[i - 1]) { hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i - 1]); } hypre_IntArrayDestroy(mgr_data -> CF_marker_array[i - 1]); } for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_array)[i]) { hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]); } } } #if defined(HYPRE_USING_CUDA) if (mgr_data -> P_FF_array) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> P_array)[i]) { hypre_ParCSRMatrixDestroy((mgr_data -> P_FF_array)[i]); } } //hypre_TFree(P_FF_array, hypre_HandleMemoryLocation(hypre_handle())); hypre_TFree((mgr_data -> P_FF_array), HYPRE_MEMORY_HOST); } #endif /* AMG for Frelax */ if (mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array) { for (i = 1; i < num_coarse_levels + 1; i++) { if (mgr_data -> F_fine_array[i]) { hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]); } if (mgr_data -> U_fine_array[i]) { hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]); } } for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_ff_array)[i]) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]); } } if (mgr_data -> fsolver_mode > 0) { if ((mgr_data -> A_ff_array)[0]) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]); } } hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> F_fine_array) = 
NULL; hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> U_fine_array) = NULL; hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST); (mgr_data -> A_ff_array) = NULL; } if (mgr_data -> aff_solver) { for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> aff_solver)[i]) { hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]); } } if (mgr_data -> fsolver_mode == 2) { if ((mgr_data -> aff_solver)[0]) { hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]); } } hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST); (mgr_data -> aff_solver) = NULL; } if ((mgr_data -> F_array)) { hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST); (mgr_data -> F_array) = NULL; } if ((mgr_data -> U_array)) { hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST); (mgr_data -> U_array) = NULL; } if ((mgr_data -> A_array)) { hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST); (mgr_data -> A_array) = NULL; } if ((mgr_data -> P_array)) { hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST); (mgr_data -> P_array) = NULL; } if ((mgr_data -> RT_array)) { hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST); (mgr_data -> RT_array) = NULL; } if ((mgr_data -> CF_marker_array)) { hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST); (mgr_data -> CF_marker_array) = NULL; } if ((mgr_data -> reserved_Cpoint_local_indexes)) { hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_Cpoint_local_indexes) = NULL; } if (mgr_data -> restrict_type) { hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } if (mgr_data -> interp_type) { hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } /* Frelax_method */ if (mgr_data -> Frelax_method) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } /* Frelax_num_functions */ if (mgr_data -> Frelax_num_functions) { hypre_TFree(mgr_data -> Frelax_num_functions, 
HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } /* data for V-cycle F-relaxation */ if ((mgr_data -> VcycleRelaxVtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) ); (mgr_data -> VcycleRelaxVtemp) = NULL; } if ((mgr_data -> VcycleRelaxZtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) ); (mgr_data -> VcycleRelaxZtemp) = NULL; } if (mgr_data -> FrelaxVcycleData) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> FrelaxVcycleData)[i]) { hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]); (mgr_data -> FrelaxVcycleData)[i] = NULL; } } hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST); (mgr_data -> FrelaxVcycleData) = NULL; } /* data for reserved coarse nodes */ if (mgr_data -> reserved_coarse_indexes) { hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* index array for setting Cpoints by global block */ if ((mgr_data -> set_c_points_method) == 1) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } /* array for setting option to use non-Galerkin coarse grid */ if (mgr_data -> use_non_galerkin_cg) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } /* coarse level matrix - RAP */ if ((mgr_data -> RAP)) { hypre_ParCSRMatrixDestroy((mgr_data -> RAP)); } if ((mgr_data -> diaginv)) { hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST); } if ((mgr_data -> global_smoother)) { if (mgr_data -> global_smooth_type == 8) { HYPRE_EuclidDestroy((mgr_data -> global_smoother)); } else if (mgr_data -> global_smooth_type == 16) { HYPRE_ILUDestroy((mgr_data -> global_smoother)); } } /* mgr data */ hypre_TFree(mgr_data, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Create data for V-cycle F-relaxtion */ void * hypre_MGRCreateFrelaxVcycleData() { hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST); 
hypre_ParAMGDataAArray(vdata) = NULL; hypre_ParAMGDataPArray(vdata) = NULL; hypre_ParAMGDataFArray(vdata) = NULL; hypre_ParAMGDataCFMarkerArray(vdata) = NULL; hypre_ParAMGDataVtemp(vdata) = NULL; hypre_ParAMGDataAMat(vdata) = NULL; hypre_ParAMGDataBVec(vdata) = NULL; hypre_ParAMGDataZtemp(vdata) = NULL; hypre_ParAMGDataCommInfo(vdata) = NULL; hypre_ParAMGDataUArray(vdata) = NULL; hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL; hypre_ParAMGDataNumLevels(vdata) = 0; hypre_ParAMGDataMaxLevels(vdata) = 10; hypre_ParAMGDataNumFunctions(vdata) = 1; hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0; hypre_ParAMGDataRelaxOrder(vdata) = 1; hypre_ParAMGDataMaxCoarseSize(vdata) = 9; hypre_ParAMGDataMinCoarseSize(vdata) = 0; hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9; return (void *) vdata; } /* Destroy data for V-cycle F-relaxation */ HYPRE_Int hypre_MGRDestroyFrelaxVcycleData( void *data ) { hypre_ParAMGData * vdata = (hypre_ParAMGData*) data; HYPRE_Int i; HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata); MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST); for (i = 1; i < num_levels + 1; i++) { if (hypre_ParAMGDataAArray(vdata)[i]) { hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]); } if (hypre_ParAMGDataPArray(vdata)[i - 1]) { hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i - 1]); } hypre_IntArrayDestroy(hypre_ParAMGDataCFMarkerArray(vdata)[i - 1]); hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]); hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST); } if (num_levels < 1) { hypre_IntArrayDestroy(hypre_ParAMGDataCFMarkerArray(vdata)[0]); } /* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */ //hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata)); hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST); 
hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST); //hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST); /* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */ /* if (hypre_ParAMGDataZtemp(vdata)) hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata)); */ if (hypre_ParAMGDataAMat(vdata)) { hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST); } if (hypre_ParAMGDataBVec(vdata)) { hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST); } if (hypre_ParAMGDataCommInfo(vdata)) { hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST); } if (new_comm != hypre_MPI_COMM_NULL) { hypre_MPI_Comm_free (&new_comm); } hypre_TFree(vdata, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Set C-point variables for each reduction level */ /* Currently not implemented */ HYPRE_Int hypre_MGRSetReductionLevelCpoints( void *mgr_vdata, HYPRE_Int nlevels, HYPRE_Int *num_coarse_points, HYPRE_Int **level_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_coarse_levels) = nlevels; (mgr_data -> num_coarse_per_level) = num_coarse_points; (mgr_data -> level_coarse_indexes) = level_coarse_indexes; return hypre_error_flag; } /* Initialize some data */ /* Set whether non-coarse points on each level should be explicitly tagged as F-points */ HYPRE_Int hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag; return hypre_error_flag; } /* Set whether the reserved C points are reduced before the coarse grid solve */ HYPRE_Int hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; 
(mgr_data -> lvl_to_keep_cpoints) = level; return hypre_error_flag; } /* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ... */ HYPRE_Int hypre_MGRSetCpointsByContiguousBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_BigInt *begin_idx_array, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; if ((mgr_data -> idx_array) != NULL) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } HYPRE_BigInt *index_array = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST); if (begin_idx_array != NULL) { for (i = 0; i < block_size; i++) { index_array[i] = *(begin_idx_array + i); } } hypre_MGRSetCpointsByBlock(mgr_data, block_size, max_num_levels, block_num_coarse_points, block_coarse_indexes); (mgr_data -> idx_array) = index_array; (mgr_data -> set_c_points_method) = 1; return hypre_error_flag; } /* Initialize/ set local block data information */ HYPRE_Int hypre_MGRSetCpointsByBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { HYPRE_Int i, j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; /* free block cf_marker data if not previously destroyed */ if ((mgr_data -> block_cf_marker) != NULL) { for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if ((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = 
hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size * sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for (j = 0; j < block_num_coarse_points[i]; j++) { (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK; } } /* store block_num_coarse_points */ if (max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_num_coarse_indexes[i] = block_num_coarse_points[i]; } } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; (mgr_data -> set_c_points_method) = 0; return hypre_error_flag; } HYPRE_Int hypre_MGRSetCpointsByPointMarkerArray( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *lvl_num_coarse_points, HYPRE_Int **lvl_coarse_indexes, HYPRE_Int *point_marker_array) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i, j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; /* free block cf_marker data if not previously destroyed */ if ((mgr_data -> block_cf_marker) != NULL) { for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if ((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, 
HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size * sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for (j = 0; j < lvl_num_coarse_points[i]; j++) { block_cf_marker[i][j] = lvl_coarse_indexes[i][j]; } } /* store block_num_coarse_points */ if (max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_num_coarse_indexes[i] = lvl_num_coarse_points[i]; } } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; (mgr_data -> point_marker_array) = point_marker_array; (mgr_data -> set_c_points_method) = 2; return hypre_error_flag; } /*Set number of points that remain part of the coarse grid throughout the hierarchy */ HYPRE_Int hypre_MGRSetReservedCoarseNodes(void *mgr_vdata, HYPRE_Int reserved_coarse_size, HYPRE_BigInt *reserved_cpt_index) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_BigInt *reserved_coarse_indexes = NULL; HYPRE_Int i; if (!mgr_data) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! 
MGR object empty!\n"); return hypre_error_flag; } if (reserved_coarse_size < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* free data not previously destroyed */ if ((mgr_data -> reserved_coarse_indexes)) { hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* set reserved coarse nodes */ if (reserved_coarse_size > 0) { reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST); for (i = 0; i < reserved_coarse_size; i++) { reserved_coarse_indexes[i] = reserved_cpt_index[i]; } } (mgr_data -> reserved_coarse_size) = reserved_coarse_size; (mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes; return hypre_error_flag; } /* Set CF marker array */ HYPRE_Int hypre_MGRCoarsen(hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int fixed_coarse_size, HYPRE_Int *fixed_coarse_indexes, HYPRE_Int debug_flag, hypre_IntArray **CF_marker_ptr, HYPRE_Int cflag) { HYPRE_Int *CF_marker = NULL; HYPRE_Int *cindexes = fixed_coarse_indexes; HYPRE_Int i, row, nc; HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* If this is the last level, coarsen onto fixed coarse set */ if (cflag) { if (*CF_marker_ptr != NULL) { hypre_IntArrayDestroy(*CF_marker_ptr); } *CF_marker_ptr = hypre_IntArrayCreate(nloc); hypre_IntArrayInitialize(*CF_marker_ptr); hypre_IntArraySetConstantValues(*CF_marker_ptr, FMRK); CF_marker = hypre_IntArrayData(*CF_marker_ptr); /* first mark fixed coarse set */ nc = fixed_coarse_size; for (i = 0; i < nc; i++) { CF_marker[cindexes[i]] = CMRK; } } else { /* First coarsen to get initial CF splitting. * This is then followed by updating the CF marker to pass * coarse information to the next levels. 
NOTE: It may be * convenient to implement this way (allows the use of multiple * coarsening strategies without changing too much code), * but not necessarily the best option, compared to initializing * CF_marker first and then coarsening on subgraph which excludes * the initialized coarse nodes. */ hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, CF_marker_ptr); CF_marker = hypre_IntArrayData(*CF_marker_ptr); /* Update CF_marker to correct Cpoints marked as Fpoints. */ nc = fixed_coarse_size; for (i = 0; i < nc; i++) { CF_marker[cindexes[i]] = CMRK; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. */ for (row = 0; row < nloc; row++) { if (CF_marker[row] == CMRK) { continue; } CF_marker[row] = FMRK; } #if 0 /* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points * in the next level. */ nc = 0; index_i = 0; for (row = 0; row < nloc; row++) { /* loop through new c-points */ if (CF_marker[row] == CMRK) { nc++; } else if (CF_marker[row] == S_CMRK) { /* previously marked c-point is part of fixed coarse set. Track its current local index */ cindexes[index_i++] = nc; /* reset c-point from S_CMRK to CMRK */ cf_marker[row] = CMRK; nc++; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. 
*/ else { CF_marker[row] = FMRK; } } /* check if this should be last level */ if ( nc == fixed_coarse_size) { last_level = 1; } //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size); #endif } return hypre_error_flag; } HYPRE_Int hypre_ExtendWtoPHost(HYPRE_Int P_nr_of_rows, HYPRE_Int *CF_marker, HYPRE_Int *W_diag_i, HYPRE_Int *W_diag_j, HYPRE_Complex *W_diag_data, HYPRE_Int *P_diag_i, HYPRE_Int *P_diag_j, HYPRE_Complex *P_diag_data, HYPRE_Int *W_offd_i, HYPRE_Int *P_offd_i ) { HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int coarse_counter; HYPRE_Int i, jj; HYPRE_Real one = 1.0; /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ fine_to_coarse = hypre_CTAlloc(HYPRE_Int, P_nr_of_rows, HYPRE_MEMORY_HOST); for (i = 0; i < P_nr_of_rows; i++) { fine_to_coarse[i] = -1; } /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ HYPRE_Int row_counter = 0; coarse_counter = 0; for (i = 0; i < P_nr_of_rows; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { fine_to_coarse[i] = coarse_counter; coarse_counter++; } } /*----------------------------------------------------------------------- * Intialize some stuff. 
*-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; row_counter = 0; for (i = 0; i < P_nr_of_rows; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = W_diag_i[row_counter]; jj < W_diag_i[row_counter + 1]; jj++) { //P_marker[row_counter] = jj_counter; P_diag_j[jj_counter] = W_diag_j[jj]; P_diag_data[jj_counter] = W_diag_data[jj]; jj_counter++; } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_counter_offd += W_offd_i[row_counter + 1] - W_offd_i[row_counter]; row_counter++; } /* update off-diagonal row pointer */ P_offd_i[i + 1] = jj_counter_offd; } P_diag_i[P_nr_of_rows] = jj_counter; hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); return 0; } /* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */ HYPRE_Int hypre_MGRBuildPHost( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int method, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs, my_id; HYPRE_Int A_nr_of_rows = hypre_ParCSRMatrixNumRows(A); hypre_ParCSRMatrix *A_FF = NULL, *A_FC = NULL, *P = NULL; hypre_CSRMatrix *W_diag = NULL, *W_offd = NULL; HYPRE_Int P_diag_nnz, nfpoints; HYPRE_Int *P_diag_i = NULL, *P_diag_j = NULL, *P_offd_i = NULL; HYPRE_Complex *P_diag_data = NULL, *diag = NULL, *diag1 = NULL; HYPRE_BigInt nC_global; HYPRE_Int i; HYPRE_MemoryLocation memory_location_P = 
hypre_ParCSRMatrixMemoryLocation(A); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); nfpoints = 0; for (i = 0; i < A_nr_of_rows; i++) { if (CF_marker[i] == -1) { nfpoints++; } } if (method > 0) { hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, NULL, &A_FC, &A_FF); diag = hypre_CTAlloc(HYPRE_Complex, nfpoints, memory_location_P); if (method == 1) { // extract diag inverse sqrt // hypre_CSRMatrixExtractDiagonalHost(hypre_ParCSRMatrixDiag(A_FF), diag, 3); // L1-Jacobi-type interpolation HYPRE_Complex scal = 1.0; diag1 = hypre_CTAlloc(HYPRE_Complex, nfpoints, memory_location_P); hypre_CSRMatrixExtractDiagonalHost(hypre_ParCSRMatrixDiag(A_FF), diag, 0); hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixDiag(A_FF), NULL, NULL, diag1, 1, 1.0, "set"); hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixDiag(A_FC), NULL, NULL, diag1, 1, 1.0, "add"); hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixOffd(A_FF), NULL, NULL, diag1, 1, 1.0, "add"); hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixOffd(A_FC), NULL, NULL, diag1, 1, 1.0, "add"); for (i = 0; i < nfpoints; i++) { HYPRE_Complex dsum = diag[i] + scal * (diag1[i] - hypre_cabs(diag[i])); diag[i] = 1. 
/ dsum; } hypre_TFree(diag1, memory_location_P); } else if (method == 2) { // extract diag inverse hypre_CSRMatrixExtractDiagonalHost(hypre_ParCSRMatrixDiag(A_FF), diag, 2); } for (i = 0; i < nfpoints; i++) { diag[i] = -diag[i]; } hypre_Vector *D_FF_inv = hypre_SeqVectorCreate(nfpoints); hypre_VectorData(D_FF_inv) = diag; hypre_SeqVectorInitialize_v2(D_FF_inv, memory_location_P); hypre_CSRMatrixDiagScale(hypre_ParCSRMatrixDiag(A_FC), D_FF_inv, NULL); hypre_CSRMatrixDiagScale(hypre_ParCSRMatrixOffd(A_FC), D_FF_inv, NULL); hypre_SeqVectorDestroy(D_FF_inv); W_diag = hypre_ParCSRMatrixDiag(A_FC); W_offd = hypre_ParCSRMatrixOffd(A_FC); nC_global = hypre_ParCSRMatrixGlobalNumCols(A_FC); } else { W_diag = hypre_CSRMatrixCreate(nfpoints, A_nr_of_rows - nfpoints, 0); W_offd = hypre_CSRMatrixCreate(nfpoints, 0, 0); hypre_CSRMatrixInitialize_v2(W_diag, 0, memory_location_P); hypre_CSRMatrixInitialize_v2(W_offd, 0, memory_location_P); if (my_id == (num_procs - 1)) { nC_global = num_cpts_global[1]; } hypre_MPI_Bcast(&nC_global, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); } /* Construct P from matrix product W_diag */ P_diag_nnz = hypre_CSRMatrixNumNonzeros(W_diag) + hypre_CSRMatrixNumCols(W_diag); P_diag_i = hypre_CTAlloc(HYPRE_Int, A_nr_of_rows + 1, memory_location_P); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_nnz, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Complex, P_diag_nnz, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, A_nr_of_rows + 1, memory_location_P); /* Extend W data to P data */ hypre_ExtendWtoPHost( A_nr_of_rows, CF_marker, hypre_CSRMatrixI(W_diag), hypre_CSRMatrixJ(W_diag), hypre_CSRMatrixData(W_diag), P_diag_i, P_diag_j, P_diag_data, hypre_CSRMatrixI(W_offd), P_offd_i ); // finalize P P = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), nC_global, hypre_ParCSRMatrixColStarts(A), num_cpts_global, hypre_CSRMatrixNumCols(W_offd), P_diag_nnz, hypre_CSRMatrixNumNonzeros(W_offd) ); 
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(P)) = memory_location_P; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(P)) = memory_location_P; hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(P)) = P_diag_i; hypre_CSRMatrixJ(hypre_ParCSRMatrixDiag(P)) = P_diag_j; hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(P)) = P_diag_data; hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(P)) = P_offd_i; hypre_CSRMatrixJ(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixJ(W_offd); hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixData(W_offd); hypre_CSRMatrixJ(W_offd) = NULL; hypre_CSRMatrixData(W_offd) = NULL; if (method > 0) { hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(A_FC); hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(A_FC); hypre_ParCSRMatrixColMapOffd(A_FC) = NULL; hypre_ParCSRMatrixColMapOffd(A_FC) = NULL; hypre_ParCSRMatrixNumNonzeros(P) = hypre_ParCSRMatrixNumNonzeros( A_FC) + hypre_ParCSRMatrixGlobalNumCols(A_FC); } else { hypre_ParCSRMatrixNumNonzeros(P) = nC_global; } hypre_ParCSRMatrixDNumNonzeros(P) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(P); hypre_MatvecCommPkgCreate(P); *P_ptr = P; if (A_FF) { hypre_ParCSRMatrixDestroy(A_FF); } if (A_FC) { hypre_ParCSRMatrixDestroy(A_FC); } if (method <= 0) { hypre_CSRMatrixDestroy(W_diag); hypre_CSRMatrixDestroy(W_offd); } return hypre_error_flag; } /* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */ HYPRE_Int hypre_MGRBuildP( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int method, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real 
*A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i, i1; HYPRE_Int j, jl, jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if 
(debug_flag == 4) { wall_time = time_getWallclockSeconds(); } if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. 
*--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if ((CF_marker[i1] >= 0) && (method > 0)) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; if ((CF_marker_offd[i1] >= 0) && (method > 0)) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { coarse_counter[i + 1] += coarse_counter[i]; jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. 
*-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) { coarse_shift = coarse_counter[j - 1]; } size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /* index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) big_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; 
/*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { if (CF_marker[i] < 0) { for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if ( i == i1 ) /* diagonal of A only */ { a_diag[i] = 1.0 / A_diag_data[jj]; } } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) { jj_counter = jj_count[jl - 1]; } jj_counter_offd = 0; if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; } P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } else { P_marker_offd = NULL; } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if ((CF_marker[i1] >= 0) && (method > 0)) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; /* if(method == 0) { P_diag_data[jj_counter] = 0.0; } */ if (method == 1) { P_diag_data[jj_counter] = - A_diag_data[jj]; } else if (method == 2) { P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i]; } jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. 
*-----------------------------------------------------------*/ if ((CF_marker_offd[i1] >= 0) && (method > 0)) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; /* if(method == 0) { P_offd_data[jj_counter_offd] = 0.0; } */ if (method == 1) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]; } else if (method == 2) { P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i]; } jj_counter_offd++; } } } } P_offd_i[i + 1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < num_cols_A_offd; i++) { P_marker[i] = 0; } num_cols_P_offd = 0; for (i = 0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_cols_P_offd; i++) { while (P_marker[index] == 0) { index++; } tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < P_offd_size; i++) P_offd_j[i] = 
hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i = 0; i < n_fine; i++) if (CF_marker[i] == -3) { CF_marker[i] = -1; } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return (0); } /* Interpolation for MGR - Dynamic Row Sum method */ HYPRE_Int hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, 
P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i, i1; HYPRE_Int j, jl, jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } /*-------------------------------------------------------------------- * Set up the indexes for the DRS method *--------------------------------------------------------------------*/ } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { coarse_counter[i + 1] += coarse_counter[i]; jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) { coarse_shift = coarse_counter[j - 1]; } size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if ( i == i1 ) /* diagonal of A only */ { a_diag[i] = 1.0 / A_diag_data[jj]; } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) { jj_counter = jj_count[jl - 1]; } jj_counter_offd = 0; if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; } P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } else { P_marker_offd = NULL; } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i]; jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i]; jj_counter_offd++; } } } } P_offd_i[i + 1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, 
HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < num_cols_A_offd; i++) { P_marker[i] = 0; } num_cols_P_offd = 0; for (i = 0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_cols_P_offd; i++) { while (P_marker[index] == 0) { index++; } tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i = 0; i < n_fine; i++) if (CF_marker[i] == -3) { CF_marker[i] = -1; } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); // hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return (0); } /* Scale ParCSR matrix A = scalar * A * A: the target CSR matrix * vector: array of real numbers */ HYPRE_Int hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector, hypre_ParCSRMatrix *A) { HYPRE_Int i, j, n_local; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = 
hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);

   n_local = hypre_CSRMatrixNumRows(A_diag);

   /* Scale every stored entry of local row i (both diag and offd parts)
      by vector[i], i.e. A := diag(vector) * A on this process's rows. */
   for (i = 0; i < n_local; i++)
   {
      HYPRE_Real factor = vector[i];
      for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
      {
         A_diag_data[j] *= factor;
      }
      for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
      {
         A_offd_data[j] *= factor;
      }
   }

   return (0);
}

/************************************************************
 * Available methods:
 *   0: inv(A_FF) approximated by its diagonal inverse
 *   1: inv(A_FF) approximated by sparse approximate inverse
 *************************************************************/
HYPRE_Int
hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A,
                                      hypre_ParCSRMatrix *P,
                                      hypre_ParCSRMatrix *RT,
                                      HYPRE_Int bsize,
                                      HYPRE_Int ordering,
                                      HYPRE_Int method,
                                      HYPRE_Int Pmax,
                                      HYPRE_Int keep_stencil,
                                      HYPRE_Int *CF_marker,
                                      hypre_ParCSRMatrix **A_h_ptr)
{
   HYPRE_Int *c_marker, *f_marker;
   HYPRE_Int n_local_fine_grid, i, i1, jj;
   hypre_ParCSRMatrix *A_cc;
   hypre_ParCSRMatrix *A_ff;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *A_cf;
   hypre_ParCSRMatrix *A_h;
   hypre_ParCSRMatrix *A_h_correction;
   HYPRE_Int max_elmts = Pmax;
   // HYPRE_Real wall_time = 0.;
   hypre_ParCSRMatrix *P_mod = NULL;

   HYPRE_Int my_id;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_MPI_Comm_rank(comm, &my_id);

   HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);

   n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
   f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
   /* Build complementary C/F masks: CF_marker must be +/-1 here, so the
      F mask is just the negation of the C mask. */
   for (i = 0; i < n_local_fine_grid; i++)
   {
      HYPRE_Int point_type = CF_marker[i];
      hypre_assert(point_type == 1 || point_type == -1);
      c_marker[i] = point_type;
      f_marker[i] = -point_type;
   }

   // get the A_cc sub-block
   hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc);

   if (method == 0)
   {
      if (keep_stencil)
      {
         //wall_time = time_getWallclockSeconds();
         hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); // extract the diagonal of A_ff and compute D_ff_inv hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff); HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag); HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag); HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag); HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag); HYPRE_Real *D_ff_inv; D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i + 1]; jj++) { i1 = A_ff_diag_j[jj]; if ( i == i1 ) { D_ff_inv[i] = -1.0 / A_ff_diag_data[jj]; } } } // extract the diagonal of A_cf hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf); HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag); HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag); HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag); n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag); HYPRE_Real *D_cf; D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { i1 = A_cf_diag_j[A_cf_diag_i[i]]; D_cf[i] = A_cf_diag_data[jj]; } // compute the triple product hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc); hypre_ParCSRMatrixLeftScale(D_cf, A_fc); A_h_correction = A_fc; hypre_TFree(D_cf, HYPRE_MEMORY_HOST); hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_cf); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time); } else { //wall_time = time_getWallclockSeconds(); P_mod = hypre_ParCSRMatrixCompleteClone(P); hypre_ParCSRMatrixCopy(P, P_mod, 1); HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod); hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod); HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag); HYPRE_Real 
*P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag);
/* Zero the first stored entry of every coarse-point row of P_mod */
for (i = 0; i < n_local_rows; i ++)
{
   if (CF_marker[i] >= 0)
   {
      HYPRE_Int ii = P_mod_diag_i[i];
      P_mod_diag_data[ii] = 0.0;
   }
}
/* Triple product: A_h_correction = RT * A * P_mod */
hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction);
//wall_time = time_getWallclockSeconds() - wall_time;
//hypre_printf("Compute triple product time new: %1.5f\n", wall_time);
hypre_ParCSRMatrixDestroy(P_mod);
}
}
else
{
   // Approximate inverse for ideal interpolation:
   // A_h_correction = A_cf * approxinv(A_ff) * A_fc
   hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
   hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc);
   hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff);
   hypre_ParCSRMatrix *A_ff_inv = NULL;
   hypre_ParCSRMatrix *minus_Wp = NULL;
   hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
   minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc);
   A_h_correction = hypre_ParMatmul(A_cf, minus_Wp);
   /* NOTE(review): A_ff_inv does not appear to be destroyed on this path --
    * possible leak; confirm against hypre_MGRApproximateInverse ownership. */
   hypre_ParCSRMatrixDestroy(minus_Wp);
   hypre_ParCSRMatrixDestroy(A_ff);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(A_cf);
}
// perform dropping for A_h_correction
// specific to multiphase poromechanics
// we only keep the diagonal of each block
//wall_time = time_getWallclockSeconds();
HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction));
hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction);
HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag);
HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag);
HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag);
HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag);
hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction);
HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd);
HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd);
HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd);
/* Drop entries of A_h_correction: always keep couplings inside each bsize
 * block, plus (optionally) the max_elmts largest remaining entries per row */
if (Pmax > 0)
{
   if (ordering == 0) // interleaved ordering
   {
      HYPRE_Int *A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints + 1, memory_location);
      HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int, (bsize + max_elmts) * n_local_cpoints, memory_location);
      HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex, (bsize + max_elmts) * n_local_cpoints, memory_location);
      HYPRE_Int num_nonzeros_diag_new = 0;
      HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints + 1, memory_location);
      HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts * n_local_cpoints, memory_location);
      HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex, max_elmts * n_local_cpoints, memory_location);
      HYPRE_Int num_nonzeros_offd_new = 0;
      for (i = 0; i < n_local_cpoints; i++)
      {
         HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i + 1] - A_h_correction_diag_i[i] +
                                      A_h_correction_offd_i[i + 1] - A_h_correction_offd_i[i];
         HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST);
         HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST);
         HYPRE_Int row_start = i - (i % bsize);
         HYPRE_Int row_stop = row_start + bsize - 1;
         HYPRE_Int cnt = 0;
         /* gather the whole row (offd column ids shifted past the diag ones) */
         for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i + 1]; jj++)
         {
            aux_j[cnt] = A_h_correction_offd_j[jj] + ncol_diag;
            aux_data[cnt] = A_h_correction_offd_data[jj];
            cnt++;
         }
         for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i + 1]; jj++)
         {
            aux_j[cnt] = A_h_correction_diag_j[jj];
            aux_data[cnt] = A_h_correction_diag_data[jj];
            cnt++;
         }
         /* sort the gathered row by decreasing absolute value */
         hypre_qsort2_abs(aux_j, aux_data, 0, cnt - 1);
         /* always keep the diag entries that lie inside this row's block */
         for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i + 1]; jj++)
         {
            i1 = A_h_correction_diag_j[jj];
            if (i1 >= row_start && i1 <= row_stop)
            {
               // copy data to new arrays
               A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1;
               A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj];
               ++num_nonzeros_diag_new;
            }
            else
            {
               // Do nothing
            }
         }
         /* additionally keep the max_elmts largest out-of-block entries */
         if (max_elmts > 0)
         {
            for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++)
            {
               HYPRE_Int col_idx = aux_j[jj];
               HYPRE_Real col_value = aux_data[jj];
               if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop))
               {
                  A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx;
                  A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value;
                  ++num_nonzeros_diag_new;
               }
               else if (col_idx >= ncol_diag)
               {
                  A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag;
                  A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value;
                  ++num_nonzeros_offd_new;
               }
            }
         }
         A_h_correction_diag_i_new[i + 1] = num_nonzeros_diag_new;
         A_h_correction_offd_i_new[i + 1] = num_nonzeros_offd_new;
         hypre_TFree(aux_j, HYPRE_MEMORY_HOST);
         hypre_TFree(aux_data, HYPRE_MEMORY_HOST);
      }
      /* swap the filtered CSR arrays into A_h_correction */
      hypre_TFree(A_h_correction_diag_i, memory_location);
      hypre_TFree(A_h_correction_diag_j, memory_location);
      hypre_TFree(A_h_correction_diag_data, memory_location);
      hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new;
      hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new;
      hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new;
      hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new;
      if (A_h_correction_offd_i) { hypre_TFree(A_h_correction_offd_i, memory_location); }
      if (A_h_correction_offd_j) { hypre_TFree(A_h_correction_offd_j, memory_location); }
      if (A_h_correction_offd_data) { hypre_TFree(A_h_correction_offd_data, memory_location); }
      hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new;
      hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new;
      hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new;
      hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new;
   }
   else
   {
      hypre_printf("Error!! Block ordering for non-Galerkin coarse grid is not currently supported\n");
      exit(-1);
   }
}
//hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts);
//wall_time = time_getWallclockSeconds() - wall_time;
//hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time);
//hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered");
// coarse grid / schur complement
hypre_ParCSRMatrixAdd(1.0, A_cc, 1.0, A_h_correction, &A_h);
*A_h_ptr = A_h;
//hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h");
hypre_ParCSRMatrixDestroy(A_cc);
hypre_ParCSRMatrixDestroy(A_h_correction);
hypre_TFree(c_marker, HYPRE_MEMORY_HOST);
hypre_TFree(f_marker, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}

/* Compute an algebraic fixed-stress diagnostic: forms
 *    e4 = A_su * A_uu^{-1} * A_up * 1   and   e5 = A_pu * A_uu^{-1} * A_up * 1
 * and prints them to IJ files "Dsp" / "Dpp".
 * A             : full system matrix
 * mgr_idx_array : global start indices of the U / S / P blocks (U block first;
 *                 asserted to start at this rank's first row)
 * A_ff_solver   : BoomerAMG solver used to (approximately) invert A_uu */
HYPRE_Int
hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A,
                                     HYPRE_BigInt *mgr_idx_array,
                                     HYPRE_Solver A_ff_solver)
{
   HYPRE_Int *U_marker, *S_marker, *P_marker;
   HYPRE_Int n_fine, i;
   HYPRE_BigInt ibegin;
   hypre_ParCSRMatrix *A_up;
   hypre_ParCSRMatrix *A_uu;
   hypre_ParCSRMatrix *A_su;
   hypre_ParCSRMatrix *A_pu;
   hypre_ParVector *e1_vector;
   hypre_ParVector *e2_vector;
   hypre_ParVector *e3_vector;
   hypre_ParVector *e4_vector;
   hypre_ParVector *e5_vector;

   n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   ibegin = hypre_ParCSRMatrixFirstRowIndex(A);
   hypre_assert(ibegin == mgr_idx_array[0]);
   U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      U_marker[i] = -1;
      S_marker[i] = -1;
      P_marker[i] = -1;
   }
   // create C and F markers: local rows [0, idx1) = U, [idx1, idx2) = S, rest = P
   for (i = 0; i < n_fine; i++)
   {
      if (i < mgr_idx_array[1] - ibegin)
      {
         U_marker[i] = 1;
      }
      else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin))
      {
         S_marker[i] = 1;
      }
      else
      {
         P_marker[i] = 1;
      }
   }
   // Get A_up
   hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up);
   // Get A_uu
   hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu);
   // Get A_su
   hypre_MGRGetSubBlock(A, S_marker, U_marker,
0, &A_su); // Get A_pu hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu); e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up), hypre_ParCSRMatrixGlobalNumCols(A_up), hypre_ParCSRMatrixColStarts(A_up)); hypre_ParVectorInitialize(e1_vector); hypre_ParVectorSetConstantValues(e1_vector, 1.0); e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e2_vector); hypre_ParVectorSetConstantValues(e2_vector, 0.0); e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e3_vector); hypre_ParVectorSetConstantValues(e3_vector, 0.0); e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su), hypre_ParCSRMatrixGlobalNumRows(A_su), hypre_ParCSRMatrixRowStarts(A_su)); hypre_ParVectorInitialize(e4_vector); hypre_ParVectorSetConstantValues(e4_vector, 0.0); e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu), hypre_ParCSRMatrixGlobalNumRows(A_pu), hypre_ParCSRMatrixRowStarts(A_pu)); hypre_ParVectorInitialize(e5_vector); hypre_ParVectorSetConstantValues(e5_vector, 0.0); // compute e2 = A_up * e1 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector); // solve e3 = A_uu^-1 * e2 hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // print e4 hypre_ParVectorPrintIJ(e4_vector, 1, "Dsp"); // compute e5 = A_pu * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector); hypre_ParVectorPrintIJ(e5_vector, 1, "Dpp"); hypre_ParVectorDestroy(e1_vector); hypre_ParVectorDestroy(e2_vector); hypre_ParVectorDestroy(e3_vector); hypre_ParCSRMatrixDestroy(A_uu); 
hypre_ParCSRMatrixDestroy(A_up);
   hypre_ParCSRMatrixDestroy(A_pu);
   hypre_ParCSRMatrixDestroy(A_su);
   hypre_TFree(U_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}

/* Build a sparse approximate inverse of A using hypre's NSH
 * (Newton-Schulz-Hotelling) driver with the hard-coded MR/NSH parameters
 * below.
 * A     : matrix to (approximately) invert
 * A_inv : output; newly created approximate inverse (caller owns it) */
HYPRE_Int
hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **A_inv)
{
   HYPRE_Int print_level, mr_max_row_nnz, mr_max_iter, nsh_max_row_nnz, nsh_max_iter, mr_col_version;
   HYPRE_Real mr_tol, nsh_tol;
   HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrix *approx_A_inv = NULL;

   print_level = 0;
   nsh_max_iter = 2;
   nsh_max_row_nnz = 2; // default 1000
   mr_max_iter = 1;
   mr_tol = 1.0e-3;
   mr_max_row_nnz = 2; // default 800
   mr_col_version = 0;
   nsh_tol = 1.0e-3;
   droptol[0] = 1.0e-2;
   droptol[1] = 1.0e-2;
   hypre_ILUParCSRInverseNSH(A, &approx_A_inv, droptol, mr_tol, nsh_tol, DIVIDE_TOL,
                             mr_max_row_nnz, nsh_max_row_nnz, mr_max_iter, nsh_max_iter,
                             mr_col_version, print_level);
   *A_inv = approx_A_inv;

   if (droptol) { hypre_TFree(droptol, HYPRE_MEMORY_HOST); }
   return hypre_error_flag;
}

/* Build the MGR prolongation P from a caller-supplied approximation S of
 * A_ff^{-1}: C-point rows of P are identity rows, F-point rows are
 * -(S * A_fc).
 * A               : fine-grid matrix
 * S               : approximation to A_ff^{-1}
 * CF_marker       : C/F splitting (C-points marked 1)
 * num_cpts_global : global coarse-point partition
 * P_ptr           : output, newly built interpolation operator */
HYPRE_Int
hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *S,
                                          HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global,
                                          HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int *C_marker;
   HYPRE_Int *F_marker;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *minus_Wp;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int coarse_counter;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   // HYPRE_BigInt my_first_cpt;
   HYPRE_Int i, jj;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   // HYPRE_Int num_threads;
   // HYPRE_Real wall_time; /* for debugging instrumentation */

   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   // create C and F markers
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ? -1 : 1;
   }
   // Get A_FC
   hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);
   // compute -Wp (the negation is applied entry-wise in the fill pass below)
   minus_Wp = hypre_ParMatmul(S, A_fc);

   hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
   HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);
   hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
   HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // num_threads = hypre_NumThreads();

   // my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   HYPRE_Int row_counter = 0;
   coarse_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a C-point, interpolation is the identity. Also set up
       * mapping vector.
       * NOTE(review): this counting pass tests CF_marker[i] > 0 while the
       * fill pass below tests >= 0; if CF_marker can be 0 the two passes
       * disagree -- confirm.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] > 0)
      {
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      else
      {
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
          *--------------------------------------------------------------------*/
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            jj_counter++;
         }
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   row_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_i[i] = jj_counter;
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else
      {
         /* Diagonal part of P: the negated row of minus_Wp = S * A_fc */
         P_diag_i[i] = jj_counter;
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            P_diag_j[jj_counter] = minus_Wp_diag_j[jj];
            P_diag_data[jj_counter] = - minus_Wp_diag_data[jj];
            jj_counter++;
         }
         /* Off-Diagonal part of P */
         P_offd_i[i] = jj_counter_offd;
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj];
               P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj];
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
      /* close the offd row for both C- and F-point rows */
      P_offd_i[i + 1] = jj_counter_offd;
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   /* P inherits minus_Wp's off-processor column map */
   num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
   HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp);
   /* NOTE(review): col_map_offd_P is allocated only when P_offd_size != 0 but
    * assigned below whenever num_cols_P_offd != 0 -- confirm the two cannot
    * disagree */
   if (P_offd_size)
   {
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_P_offd; i++)
      {
         col_map_offd_P[i] = col_map_offd_tmp[i];
      }
   }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(minus_Wp);

   return 0;
}

/* Build the MGR prolongation from an internally computed sparse approximate
 * inverse: W = -(approxinv(A_ff) * A_fc) is extended with identity rows on
 * the C-points to form P. */
HYPRE_Int
hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                                       HYPRE_BigInt *num_cpts_global, HYPRE_Int
debug_flag, hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int *C_marker;
   HYPRE_Int *F_marker;
   hypre_ParCSRMatrix *A_ff;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *A_ff_inv;
   hypre_ParCSRMatrix *W;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Int *P_offd_i;
   HYPRE_Int P_diag_nnz;
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int i;
   HYPRE_Real m_one = -1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);

   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   // create C and F markers
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ? -1 : 1;
   }
   // Get A_FF
   hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff);
   // hypre_ParCSRMatrixPrintIJ(A_ff, 1, 1, "A_ff");
   // Get A_FC
   hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);

   hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
   // hypre_ParCSRMatrixPrintIJ(A_ff_inv, 1, 1, "A_ff_inv");
   // hypre_ParCSRMatrixPrintIJ(A_fc, 1, 1, "A_fc");
   /* W = -(A_ff_inv * A_fc) */
   W = hypre_ParMatmul(A_ff_inv, A_fc);
   hypre_ParCSRMatrixScale(W, m_one);
   // hypre_ParCSRMatrixPrintIJ(W, 1, 1, "Wp");
   hypre_CSRMatrix *W_diag = hypre_ParCSRMatrixDiag(W);
   hypre_CSRMatrix *W_offd = hypre_ParCSRMatrixOffd(W);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /* one identity entry per C-point plus all of W's F-point rows */
   P_diag_nnz = hypre_CSRMatrixNumNonzeros(W_diag) + hypre_CSRMatrixNumCols(W_diag);
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_nnz, memory_location_P);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_nnz, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);

   /* Extend W data to P data */
   hypre_ExtendWtoPHost( n_fine, CF_marker,
                         hypre_CSRMatrixI(W_diag),
                         hypre_CSRMatrixJ(W_diag),
                         hypre_CSRMatrixData(W_diag),
                         P_diag_i, P_diag_j, P_diag_data,
                         hypre_CSRMatrixI(W_offd),
                         P_offd_i );

   // final P
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                hypre_CSRMatrixNumCols(W_offd),
                                P_diag_nnz,
                                hypre_CSRMatrixNumNonzeros(W_offd) );
   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(P)) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(P)) = memory_location_P;
   hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(P)) = P_diag_i;
   hypre_CSRMatrixJ(hypre_ParCSRMatrixDiag(P)) = P_diag_j;
   hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(P)) = P_diag_data;
   hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(P)) = P_offd_i;
   /* steal W's offd structure instead of copying it */
   hypre_CSRMatrixJ(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixJ(W_offd);
   hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixData(W_offd);
   hypre_CSRMatrixJ(W_offd) = NULL;
   hypre_CSRMatrixData(W_offd) = NULL;

   num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
   HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(W);
   if (hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(P)))
   {
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_P_offd; i++)
      {
         col_map_offd_P[i] = col_map_offd_tmp[i];
      }
   }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(P)) = num_cols_P_offd;
   }
hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(A_ff);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(A_ff_inv);
   hypre_ParCSRMatrixDestroy(W);

   return 0;
}

/* Setup interpolation operator.
 * Dispatch to one of the MGR interpolation builders according to interp_type:
 *   < 3  : MGR P (host build, or CUDA device build when available)
 *   4    : approximate-inverse-based P (host only), then truncation
 *   5/6/7: BoomerAMG mod-ext / mod-ext+i / mod-ext+e interpolation
 *   else : classical modified interpolation
 * The result is returned in *P. numsweeps is currently unused (see the
 * commented-out Jacobi-sweep block below). */
HYPRE_Int hypre_MGRBuildInterp(hypre_ParCSRMatrix *A,
                               HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S,
                               HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions,
                               HYPRE_Int *dof_func, HYPRE_Int debug_flag,
                               HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
                               hypre_ParCSRMatrix **P, HYPRE_Int interp_type,
                               HYPRE_Int numsweeps)
{
   // HYPRE_Int i;
   hypre_ParCSRMatrix *P_ptr = NULL;
   //HYPRE_Real jac_trunc_threshold = trunc_factor;
   //HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   /* Interpolation for each level */
   if (interp_type < 3)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         // hypre_MGRBuildP(A, CF_marker, num_cpts_global, interp_type, debug_flag, &P_ptr);
         hypre_MGRBuildPHost(A, CF_marker, num_cpts_global, interp_type, &P_ptr);
         //hypre_ParCSRMatrixPrintIJ(P_ptr, 0, 0, "P_host");
      }
#if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_MGRBuildPDevice(A, CF_marker, num_cpts_global, interp_type, &P_ptr);
         //hypre_ParCSRMatrixPrintIJ(P_ptr, 0, 0, "P_device");
      }
#endif
      /* Could do a few sweeps of Jacobi to further improve Jacobi interpolation P */
      /*
      if(interp_type == 2)
      {
         for(i=0; i<numsweeps; i++)
         {
            hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0,
                                        jac_trunc_threshold, jac_trunc_threshold_minus );
         }
         hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
      }
      */
   }
   else if (interp_type == 4)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &P_ptr);
         hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
      }
#if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_NoGPUSupport("interpolation");
      }
#endif
   }
   /* else if (interp_type
== 99) { if (exec == HYPRE_EXEC_HOST) { hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } #if defined(HYPRE_USING_CUDA) else { hypre_NoGPUSupport("interpolation"); } #endif } */ else if (interp_type == 5) { hypre_BoomerAMGBuildModExtInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr); } else if (interp_type == 6) { hypre_BoomerAMGBuildModExtPIInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr); } else if (interp_type == 7) { hypre_BoomerAMGBuildModExtPEInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr); } /* set pointer to P */ *P = P_ptr; return hypre_error_flag; } /* Setup restriction operator */ HYPRE_Int hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Real strong_threshold, HYPRE_Real max_row_sum, hypre_ParCSRMatrix **R, HYPRE_Int restrict_type, HYPRE_Int numsweeps) { // HYPRE_Int i; hypre_ParCSRMatrix *R_ptr = NULL; hypre_ParCSRMatrix *AT = NULL; hypre_ParCSRMatrix *ST = NULL; // HYPRE_Real jac_trunc_threshold = trunc_factor; // HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); /* Build AT (transpose A) */ if (restrict_type > 0) { hypre_ParCSRMatrixTranspose(A, &AT, 1); } /* Restriction for each level */ if (restrict_type == 0) { if (exec == HYPRE_EXEC_HOST) { hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, 
"R_host"); } #if defined(HYPRE_USING_CUDA) else { hypre_MGRBuildPDevice(A, CF_marker, num_cpts_global, restrict_type, &R_ptr); //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_device"); } #endif } else if (restrict_type == 1 || restrict_type == 2) { if (exec == HYPRE_EXEC_HOST) { hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_host"); } #if defined(HYPRE_USING_CUDA) else { hypre_MGRBuildPDevice(AT, CF_marker, num_cpts_global, restrict_type, &R_ptr); //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_device"); } #endif } else if (restrict_type == 3) { /* move diagonal to first entry */ hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(AT)); hypre_MGRBuildInterpApproximateInverse(AT, CF_marker, num_cpts_global, debug_flag, &R_ptr); hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts); } else { /* Build new strength matrix */ hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST); /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &R_ptr); } /* set pointer to P */ *R = R_ptr; /* Free memory */ if (restrict_type > 0) { hypre_ParCSRMatrixDestroy(AT); } if (restrict_type > 5) { hypre_ParCSRMatrixDestroy(ST); } return hypre_error_flag; } void hypre_blas_smat_inv_n4 (HYPRE_Real *a) { const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; const HYPRE_Real M11 = a22 * a33 * a44 + a23 * a34 * a42 + a24 * a32 * a43 - a22 * a34 * a43 - a23 * a32 * a44 - a24 * a33 * a42; const HYPRE_Real M12 = a12 * a34 * a43 + a13 * a32 * a44 + a14 * a33 * a42 - a12 * a33 * a44 - a13 * a34 * a42 - a14 * a32 * a43; const HYPRE_Real M13 = a12 * a23 * a44 + a13 * a24 * a42 + a14 * a22 * a43 - a12 * a24 * 
a43 - a13 * a22 * a44 - a14 * a23 * a42;
   const HYPRE_Real M14 = a12 * a24 * a33 + a13 * a22 * a34 + a14 * a23 * a32 -
                          a12 * a23 * a34 - a13 * a24 * a32 - a14 * a22 * a33;
   const HYPRE_Real M21 = a21 * a34 * a43 + a23 * a31 * a44 + a24 * a33 * a41 -
                          a21 * a33 * a44 - a23 * a34 * a41 - a24 * a31 * a43;
   const HYPRE_Real M22 = a11 * a33 * a44 + a13 * a34 * a41 + a14 * a31 * a43 -
                          a11 * a34 * a43 - a13 * a31 * a44 - a14 * a33 * a41;
   const HYPRE_Real M23 = a11 * a24 * a43 + a13 * a21 * a44 + a14 * a23 * a41 -
                          a11 * a23 * a44 - a13 * a24 * a41 - a14 * a21 * a43;
   const HYPRE_Real M24 = a11 * a23 * a34 + a13 * a24 * a31 + a14 * a21 * a33 -
                          a11 * a24 * a33 - a13 * a21 * a34 - a14 * a23 * a31;
   const HYPRE_Real M31 = a21 * a32 * a44 + a22 * a34 * a41 + a24 * a31 * a42 -
                          a21 * a34 * a42 - a22 * a31 * a44 - a24 * a32 * a41;
   const HYPRE_Real M32 = a11 * a34 * a42 + a12 * a31 * a44 + a14 * a32 * a41 -
                          a11 * a32 * a44 - a12 * a34 * a41 - a14 * a31 * a42;
   const HYPRE_Real M33 = a11 * a22 * a44 + a12 * a24 * a41 + a14 * a21 * a42 -
                          a11 * a24 * a42 - a12 * a21 * a44 - a14 * a22 * a41;
   const HYPRE_Real M34 = a11 * a24 * a32 + a12 * a21 * a34 + a14 * a22 * a31 -
                          a11 * a22 * a34 - a12 * a24 * a31 - a14 * a21 * a32;
   const HYPRE_Real M41 = a21 * a33 * a42 + a22 * a31 * a43 + a23 * a32 * a41 -
                          a21 * a32 * a43 - a22 * a33 * a41 - a23 * a31 * a42;
   const HYPRE_Real M42 = a11 * a32 * a43 + a12 * a33 * a41 + a13 * a31 * a42 -
                          a11 * a33 * a42 - a12 * a31 * a43 - a13 * a32 * a41;
   const HYPRE_Real M43 = a11 * a23 * a42 + a12 * a21 * a43 + a13 * a22 * a41 -
                          a11 * a22 * a43 - a12 * a23 * a41 - a13 * a21 * a42;
   const HYPRE_Real M44 = a11 * a22 * a33 + a12 * a23 * a31 + a13 * a21 * a32 -
                          a11 * a23 * a32 - a12 * a21 * a33 - a13 * a22 * a31;
   /* determinant by cofactor expansion along the first row of the adjugate */
   const HYPRE_Real det = a11 * M11 + a12 * M21 + a13 * M31 + a14 * M41;
   HYPRE_Real det_inv;

   //if ( fabs(det) < 1e-22 ) {
   //hypre_printf("### WARNING: Matrix is nearly singular! det = %e\n", det);
   /*
   printf("##----------------------------------------------\n");
   printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2);
   printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5);
   printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7);
   printf("##----------------------------------------------\n");
   getchar();
   */
   //}

   det_inv = 1.0 / det;

   /* write the inverse back into a (row-major) */
   a[0] = M11 * det_inv;
   a[1] = M12 * det_inv;
   a[2] = M13 * det_inv;
   a[3] = M14 * det_inv;
   a[4] = M21 * det_inv;
   a[5] = M22 * det_inv;
   a[6] = M23 * det_inv;
   a[7] = M24 * det_inv;
   a[8] = M31 * det_inv;
   a[9] = M32 * det_inv;
   a[10] = M33 * det_inv;
   a[11] = M34 * det_inv;
   a[12] = M41 * det_inv;
   a[13] = M42 * det_inv;
   a[14] = M43 * det_inv;
   a[15] = M44 * det_inv;
}

/* Invert a dense n x n row-major matrix in place. Uses the closed-form 4x4
 * routine when n == 4; otherwise performs in-place Gauss-Jordan elimination
 * without pivoting (see the commented-out small-diagonal guard). */
void hypre_blas_mat_inv(HYPRE_Real *a, HYPRE_Int n)
{
   HYPRE_Int i, j, k, l, u, kn, in;
   HYPRE_Real alinv;
   if (n == 4)
   {
      hypre_blas_smat_inv_n4(a);
   }
   else
   {
      for (k = 0; k < n; ++k)
      {
         kn = k * n;
         l = kn + k;
         //if (fabs(a[l]) < SMALLREAL) {
         //   printf("### WARNING: Diagonal entry is close to zero!");
         //   printf("### WARNING: diag_%d=%e\n", k, a[l]);
         //   a[l] = SMALLREAL;
         //}
         alinv = 1.0 / a[l];
         a[l] = alinv;
         /* scale pivot row */
         for (j = 0; j < k; ++j)
         {
            u = kn + j;
            a[u] *= alinv;
         }
         for (j = k + 1; j < n; ++j)
         {
            u = kn + j;
            a[u] *= alinv;
         }
         /* eliminate the pivot column from all other rows */
         for (i = 0; i < k; ++i)
         {
            in = i * n;
            for (j = 0; j < n; ++j)
               if (j != k)
               {
                  u = in + j;
                  a[u] -= a[in + k] * a[kn + j];
               } // end if (j!=k)
         }
         for (i = k + 1; i < n; ++i)
         {
            in = i * n;
            for (j = 0; j < n; ++j)
               if (j != k)
               {
                  u = in + j;
                  a[u] -= a[in + k] * a[kn + j];
               } // end if (j!=k)
         }
         /* update the pivot column itself */
         for (i = 0; i < k; ++i)
         {
            u = i * n + k;
            a[u] *= -alinv;
         }
         for (i = k + 1; i < n; ++i)
         {
            u = i * n + k;
            a[u] *= -alinv;
         }
      } // end for (k=0; k<n; ++k)
   }// end if
}

/* Build B = block-diagonal approximation of inv(A): each blk_size x blk_size
 * diagonal block of A's local diag part is inverted and stored in B.
 * A         : input matrix
 * B_ptr     : output, newly created block-inverse matrix (diag part only)
 * mgr_vdata : MGR data structure (supplies block_size / reserved_coarse_size) */
HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A,
                                     hypre_ParCSRMatrix **B_ptr,
                                     void *mgr_vdata,
                                     HYPRE_Int debug_flag)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int blk_size = (mgr_data -> block_size);
   HYPRE_Int reserved_coarse_size
= (mgr_data -> reserved_coarse_size);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_ParCSRMatrix *B;
   hypre_CSRMatrix *B_diag;
   HYPRE_Real *B_diag_data;
   HYPRE_Int *B_diag_i;
   HYPRE_Int *B_diag_j;
   hypre_CSRMatrix *B_offd;
   HYPRE_Int i, ii;
   HYPRE_Int j, jj;
   HYPRE_Int k;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int n_block, left_size, inv_size;
   // HYPRE_Real wall_time; /* for debugging instrumentation */
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Real * diaginv;
   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int block_scaling_error = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();

   //printf("n = %d\n",n);

   /* NOTE(review): my_id ranges over [0, num_procs - 1], so this branch can
    * never be taken; presumably (num_procs - 1) was intended so that only the
    * last rank excludes the reserved coarse dofs -- confirm. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   inv_size = nb2 * n_block + left_size * left_size;
   //printf("inv_size = %d\n",inv_size);
   hypre_blockRelax_setup(A, blk_size, reserved_coarse_size, &(mgr_data -> diaginv));
   // if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of B and fill in
    *-----------------------------------------------------------------------*/
   B_diag_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST);
   B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST);
   B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   B_diag_i[n] = inv_size;
   //B_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   //B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
   //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
   //B_offd_i[n] = 1;

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST);
   /* NOTE(review): diaginv is not freed before returning -- possible leak;
    * confirm. */
   //printf("n_block = %d\n",n_block);
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;
      bidxp1 = (i + 1) * blk_size;
      /* load the i-th blk_size x blk_size diagonal block into diaginv */
      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = k * blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = k * blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
      /* for (k = 0;k < blk_size; k++) */
      /* { */
      /*    for (j = 0;j < blk_size; j++) */
      /*    { */
      /*       bidx = k*blk_size + j; */
      /*       printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */
      /*    } */
      /* } */
      /* invert the block in place, then scatter it into B's CSR arrays */
      hypre_blas_mat_inv(diaginv, blk_size);
      for (k = 0; k < blk_size; k++)
      {
         B_diag_i[i * blk_size + k] = i * nb2 + k * blk_size;
         //B_offd_i[i*nb2+k] = 0;
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            B_diag_j[bidx] = i * blk_size + j;
            B_diag_data[bidx] = diaginv[k * blk_size + j];
         }
      }
   }

   //printf("Before create\n");
   B = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixGlobalNumCols(A),
                                hypre_ParCSRMatrixRowStarts(A),
                                hypre_ParCSRMatrixColStarts(A),
                                0,
                                inv_size,
                                0);
   //printf("After create\n");
   B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrixData(B_diag) = B_diag_data;
   hypre_CSRMatrixI(B_diag) = B_diag_i;
   hypre_CSRMatrixJ(B_diag) = B_diag_j;
   B_offd = hypre_ParCSRMatrixOffd(B);
   hypre_CSRMatrixData(B_offd) = NULL;
   hypre_CSRMatrixI(B_offd) = NULL;
   hypre_CSRMatrixJ(B_offd) = NULL;

   *B_ptr = B;

   return (block_scaling_error);
}

/* Apply one sweep of block relaxation: the residual of each blk_size block is
 * multiplied by its pre-inverted diagonal block (diaginv) and added to u.
 * method selects how the local diag part is treated (0: Jacobi, 1:
 * Gauss-Seidel, other: Jacobi); the off-processor part is always handled
 * Jacobi-style through Vtemp/Vext. */
HYPRE_Int hypre_blockRelax_solve (hypre_ParCSRMatrix *A,
                                  hypre_ParVector *f,
                                  hypre_ParVector *u,
                                  HYPRE_Real blk_size,
                                  HYPRE_Int n_block,
                                  HYPRE_Int left_size,
                                  HYPRE_Int method,
                                  HYPRE_Real *diaginv,
                                  hypre_ParVector *Vtemp)
{
   MPI_Comm comm =
hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx, bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size * blk_size; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); // HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0; i < n_block; i++) { for (j = 0; j < blk_size; j++) { bidx = i * blk_size + j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx + 1]; jj++) { ii = A_diag_j[jj]; if (method == 0) { // Jacobi for diagonal part res[j] -= A_diag_data[jj] * Vtemp_data[ii]; } else if (method == 1) { // Gauss-Seidel for diagonal part res[j] -= A_diag_data[jj] * u_data[ii]; } else { // Default do Jacobi for diagonal part res[j] -= A_diag_data[jj] * Vtemp_data[ii]; } //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx + 1]; jj++) { // always do Jacobi for off-diagonal part ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0; j < blk_size; j++) { bidx1 = i * blk_size + j; for (k = 0; k < blk_size; k++) { bidx = i * nb2 + j * blk_size + k; u_data[bidx1] += res[k] * diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]); } //printf("u[%d] = %e\n",bidx1,u_data[bidx1]); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); 
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } hypre_TFree(res, HYPRE_MEMORY_HOST); return (relax_error); } HYPRE_Int hypre_block_gs (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Real *diaginv, hypre_ParVector *Vtemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx, bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size * blk_size; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); //HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), 
HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0; i < n_block; i++) { for (j = 0; j < blk_size; j++) { bidx = i * blk_size + j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx + 1]; jj++) { ii = A_diag_j[jj]; //res[j] -= A_diag_data[jj] * Vtemp_data[ii]; //printf("my_id = %d, %d: Au = %e * %e\n",my_id,ii,A_diag_data[jj],Vtemp_data[ii]); res[j] -= A_diag_data[jj] * u_data[ii]; //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx + 1]; jj++) { ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0; j < blk_size; j++) { bidx1 = i * blk_size + j; for (k = 0; k < blk_size; k++) { bidx = i * nb2 + j * blk_size + k; u_data[bidx1] += res[k] * diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = 
/*Block smoother*/
/*--------------------------------------------------------------------------
 * hypre_blockRelax_setup
 *
 * (Re)builds the array of inverted blk_size x blk_size diagonal sub-blocks
 * of A used by the block smoothers.  On return, *diaginvptr points at
 * inv_size = nb2*n_block + left_size^2 reals: n_block dense row-major block
 * inverses followed by storage for the leftover (n mod blk_size) block.
 * Any previous array in *diaginvptr is freed and replaced.
 *
 * NOTE(review): `my_id == num_procs` can never hold (MPI ranks run
 * 0..num_procs-1), so the reserved_coarse_size branch is dead; presumably
 * `my_id == (num_procs - 1)` was intended — confirm against the MGR design.
 * NOTE(review): the leftover block is filled at offset n_block*nb2 with row
 * stride blk_size, but inverted at offset blk_size*nb2 with size left_size —
 * the offsets and strides are inconsistent unless n_block == blk_size and
 * left_size == blk_size; this path looks broken for general sizes.
 * NOTE(review): `jj > n_block * blk_size` likely should be `>=` so the
 * first leftover column is not dropped.
 *
 * Always returns 1.
 *-------------------------------------------------------------------------*/
HYPRE_Int
hypre_blockRelax_setup(hypre_ParCSRMatrix *A,
                       HYPRE_Int           blk_size,
                       HYPRE_Int           reserved_coarse_size,
                       HYPRE_Real        **diaginvptr)
{
   MPI_Comm          comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix  *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real       *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int        *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int        *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int         n = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int         i, j, k;
   HYPRE_Int         ii, jj;
   HYPRE_Int         bidx, bidxm1, bidxp1;
   HYPRE_Int         num_procs, my_id;

   const HYPRE_Int   nb2 = blk_size * blk_size; /* entries per dense block */
   HYPRE_Int         n_block;
   HYPRE_Int         left_size, inv_size;
   HYPRE_Real       *diaginv = *diaginvptr;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   /* NOTE(review): dead branch — see header comment. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   inv_size = nb2 * n_block + left_size * left_size;

   /* Replace any previously built inverse array. */
   if (diaginv != NULL)
   {
      hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
      diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   }
   else
   {
      diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   }

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks: zero each dense block, then
    * scatter the CSR entries that fall inside it.
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;
      bidxp1 = (i + 1) * blk_size;

      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            diaginv[bidx] = 0.0;
         }

         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i * nb2 + k * blk_size + jj - bidxm1;
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* Leftover (n mod blk_size) rows — see header NOTE(review) on the
    * inconsistent indexing here; bidxm1/bidxp1 are computed but unused. */
   for (i = 0; i < left_size; i++)
   {
      bidxm1 = n_block * nb2 + i * blk_size;
      bidxp1 = n_block * nb2 + (i + 1) * blk_size;

      for (j = 0; j < left_size; j++)
      {
         bidx = n_block * nb2 + i * blk_size + j;
         diaginv[bidx] = 0.0;
      }

      for (ii = A_diag_i[n_block * blk_size + i]; ii < A_diag_i[n_block * blk_size + i + 1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj > n_block * blk_size)
         {
            bidx = n_block * nb2 + i * blk_size + jj - n_block * blk_size;
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i * nb2, blk_size);
      }
      /* NOTE(review): offset blk_size*nb2 does not match the fill offset
       * n_block*nb2 above — TODO confirm intended leftover-block layout. */
      hypre_blas_mat_inv(diaginv + (HYPRE_Int)(blk_size * nb2), left_size);
   }
   else
   {
      /* Scalar case: plain reciprocal, with zero pivots mapped to 0. */
      for (i = 0; i < n; i++)
      {
         // FIX-ME: zero-diagonal should be tested previously
         if (fabs(diaginv[i]) < SMALLREAL)
         {
            diaginv[i] = 0.0;
         }
         else
         {
            diaginv[i] = 1.0 / diaginv[i];
         }
      }
   }

   *diaginvptr = diaginv;

   return 1;
}
HYPRE_Int left_size, inv_size; HYPRE_Real *diaginv; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); //HYPRE_Int num_threads = hypre_NumThreads(); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size * n_block; } else { n_block = n / blk_size; left_size = n - blk_size * n_block; } inv_size = nb2 * n_block + left_size * left_size; diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0; i < n_block; i++) { bidxm1 = i * blk_size; bidxp1 = (i + 1) * blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0; k < blk_size; k++) { for (j = 0; j < blk_size; j++) { bidx = i * nb2 + k * blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i * nb2 + k * blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } for (i = 0; i < left_size; i++) { bidxm1 = n_block * nb2 + i * blk_size; bidxp1 = n_block * nb2 + (i + 1) * blk_size; for (j = 0; j < left_size; j++) { bidx = n_block * nb2 + i * blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block * blk_size + i]; ii < A_diag_i[n_block * blk_size + i + 1]; ii++) { jj = A_diag_j[ii]; if (jj > n_block * blk_size) { bidx = n_block * nb2 + i * blk_size + jj - n_block * blk_size; diaginv[bidx] = A_diag_data[ii]; } } } /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ /*----------------------------------------------------------------- * compute the inverses of all the diagonal sub-blocks 
*-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0; i < n_block; i++) { hypre_blas_mat_inv(diaginv + i * nb2, blk_size); } hypre_blas_mat_inv(diaginv + (HYPRE_Int)(blk_size * nb2), left_size); /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ } else { for (i = 0; i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) { diaginv[i] = 0.0; } else { diaginv[i] = 1.0 / diaginv[i]; } } } hypre_blockRelax_solve(A, f, u, blk_size, n_block, left_size, method, diaginv, Vtemp); /*----------------------------------------------------------------- * Free temperary memeory *-----------------------------------------------------------------*/ hypre_TFree(diaginv, HYPRE_MEMORY_HOST); return (relax_error); } /* set coarse grid solver */ HYPRE_Int hypre_MGRSetFSolver( void *mgr_vdata, HYPRE_Int (*fine_grid_solver_solve)(void*, void*, void*, void*), HYPRE_Int (*fine_grid_solver_setup)(void*, void*, void*, void*), void *fsolver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); HYPRE_Solver **aff_solver = (mgr_data -> aff_solver); if (aff_solver == NULL) { aff_solver = hypre_CTAlloc(HYPRE_Solver*, max_num_coarse_levels, HYPRE_MEMORY_HOST); } /* only allow to set F-solver for the first level */ aff_solver[0] = (HYPRE_Solver *) fsolver; (mgr_data -> fine_grid_solver_solve) = fine_grid_solver_solve; (mgr_data -> fine_grid_solver_setup) = fine_grid_solver_setup; (mgr_data -> aff_solver) = aff_solver; (mgr_data -> fsolver_mode) = 0; return hypre_error_flag; } /* set coarse grid solver */ HYPRE_Int hypre_MGRSetCoarseSolver( void *mgr_vdata, HYPRE_Int (*coarse_grid_solver_solve)(void*, void*, void*, void*), 
HYPRE_Int (*coarse_grid_solver_setup)(void*, void*, void*, void*), void *coarse_grid_solver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } (mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve; (mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup; (mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver; (mgr_data -> use_default_cgrid_solver) = 0; return hypre_error_flag; } HYPRE_Int hypre_MGRSetAffInv( void *mgr_vdata, hypre_ParCSRMatrix *A_ff_inv ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> A_ff_inv) = A_ff_inv; return hypre_error_flag; } /* Set the maximum number of coarse levels. * maxcoarselevs = 1 yields the default 2-grid scheme. */ HYPRE_Int hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_num_coarse_levels) = maxcoarselevs; return hypre_error_flag; } /* Set the system block size */ HYPRE_Int hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> block_size) = bsize; return hypre_error_flag; } /* Set the relaxation type for the fine levels of the reduction. * Currently supports the following flavors of relaxation types * as described in the documentation: * relax_types 0 - 8, 13, 14, 18, 19, 98. * See par_relax.c and par_relax_more.c for more details. 
* */ HYPRE_Int hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> relax_type) = relax_type; return hypre_error_flag; } /* Set the number of relaxation sweeps */ HYPRE_Int hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_relax_sweeps) = nsweeps; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method; } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (relax_method != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = 0; } } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Coarse grid method: 0=Galerkin RAP, 1=non-Galerkin with dropping*/ 
HYPRE_Int hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> use_non_galerkin_cg) != NULL) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } HYPRE_Int *use_non_galerkin_cg = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (cg_method != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = cg_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = 0; } } (mgr_data -> use_non_galerkin_cg) = use_non_galerkin_cg; return hypre_error_flag; } /* Set the F-relaxation number of functions for each level */ HYPRE_Int hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> Frelax_num_functions) != NULL) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } HYPRE_Int *Frelax_num_functions = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (num_functions != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = num_functions[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = 1; } } (mgr_data -> Frelax_num_functions) = Frelax_num_functions; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data 
-> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (restrict_type != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = *(restrict_type + i); } } else { for (i = 0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = 0; } } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = restrict_type; } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_restrict_sweeps) = nsweeps; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, 
HYPRE_MEMORY_HOST); for (i = 0; i < max_num_coarse_levels; i++) { level_interp_type[i] = interpType; } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (interpType != NULL) { for (i = 0; i < max_num_coarse_levels; i++) { level_interp_type[i] = *(interpType + i); } } else { for (i = 0; i < max_num_coarse_levels; i++) { level_interp_type[i] = 2; } } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_interp_sweeps) = nsweeps; return hypre_error_flag; } /* Set the threshold to truncate the coarse grid at each * level of reduction */ HYPRE_Int hypre_MGRSetTruncateCoarseGridThreshold( void *mgr_vdata, HYPRE_Real threshold) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> truncate_coarse_grid_threshold) = threshold; return hypre_error_flag; } /* Set print level for F-relaxation solver */ HYPRE_Int hypre_MGRSetFrelaxPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> frelax_print_level) = print_level; return hypre_error_flag; } /* Set print level for coarse grid solver */ HYPRE_Int hypre_MGRSetCoarseGridPrintLevel( void *mgr_vdata, HYPRE_Int 
print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> cg_print_level) = print_level; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> print_level) = print_level; return hypre_error_flag; } /* Set logging level for mgr solver */ HYPRE_Int hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> logging) = logging; return hypre_error_flag; } /* Set max number of iterations for mgr solver */ HYPRE_Int hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_iter) = max_iter; return hypre_error_flag; } /* Set convergence tolerance for mgr solver */ HYPRE_Int hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> tol) = tol; return hypre_error_flag; } /* Set max number of iterations for mgr global smoother */ HYPRE_Int hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_iters) = max_iter; return hypre_error_flag; } /* Set global smoothing type for mgr solver */ HYPRE_Int hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_type) = iter_type; return hypre_error_flag; } /* Set the maximum number of non-zero entries for restriction and interpolation operator if classical AMG interpolation is used */ HYPRE_Int hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> P_max_elmts) = P_max_elmts; return hypre_error_flag; } /* Get number of iterations for MGR solver */ 
HYPRE_Int hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *num_iterations = mgr_data->num_iterations; return hypre_error_flag; } /* Get residual norms for MGR solver */ HYPRE_Int hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *res_norm = mgr_data->final_rel_residual_norm; return hypre_error_flag; } HYPRE_Int hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata, HYPRE_Real *conv_factor ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *conv_factor = (mgr_data -> cg_convergence_factor); return hypre_error_flag; } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A, HYPRE_Int *row_cf_marker, HYPRE_Int *col_cf_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_block_ptr ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); //HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); hypre_IntArray *coarse_dof_func_ptr = NULL; HYPRE_BigInt num_row_cpts_global[2]; HYPRE_BigInt 
num_col_cpts_global[2]; hypre_ParCSRMatrix *Ablock; HYPRE_BigInt *col_map_offd_Ablock; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *Ablock_diag; hypre_CSRMatrix *Ablock_offd; HYPRE_Real *Ablock_diag_data; HYPRE_Int *Ablock_diag_i; HYPRE_Int *Ablock_diag_j; HYPRE_Real *Ablock_offd_data; HYPRE_Int *Ablock_offd_i; HYPRE_Int *Ablock_offd_j; HYPRE_Int Ablock_diag_size, Ablock_offd_size; HYPRE_Int *Ablock_marker; HYPRE_Int ii_counter; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int start_indexing = 0; /* start indexing for Aff_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; HYPRE_Int *col_coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_row_cpts; HYPRE_BigInt total_global_col_cpts; HYPRE_Int num_cols_Ablock_offd; // HYPRE_BigInt my_first_row_cpt, my_first_col_cpt; HYPRE_Int i, i1; HYPRE_Int j, jl, jj; HYPRE_Int start; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); hypre_IntArray *wrap_cf; // HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; /* get the number of coarse rows */ wrap_cf = hypre_IntArrayCreate(local_numrows); hypre_IntArrayMemoryLocation(wrap_cf) = HYPRE_MEMORY_HOST; hypre_IntArrayData(wrap_cf) = row_cf_marker; hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, wrap_cf, &coarse_dof_func_ptr, num_row_cpts_global); hypre_IntArrayDestroy(coarse_dof_func_ptr); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]); // my_first_row_cpt = 
num_row_cpts_global[0]; if (my_id == (num_procs - 1)) { total_global_row_cpts = num_row_cpts_global[1]; } hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /* get the number of coarse rows */ hypre_IntArrayData(wrap_cf) = col_cf_marker; hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, wrap_cf, &coarse_dof_func_ptr, num_col_cpts_global); hypre_IntArrayDestroy(coarse_dof_func_ptr); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]); // my_first_col_cpt = num_col_cpts_global[0]; if (my_id == (num_procs - 1)) { total_global_col_cpts = num_col_cpts_global[1]; } hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); /*----------------------------------------------------------------------- * First Pass: Determine size of Ablock and fill in fine_to_coarse mapping. 
*-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. 
*--------------------------------------------------------------------*/ if (col_cf_marker[i] > 0) { fine_to_coarse[i] = col_coarse_counter[j]; col_coarse_counter[j]++; } if (row_cf_marker[i] > 0) { //fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; coarse_counter[i + 1] += coarse_counter[i]; col_coarse_counter[i + 1] += col_coarse_counter[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; ii_counter = coarse_counter[i]; Ablock_diag_size = jj_counter; Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter + 1, memory_location); Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, memory_location); Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, memory_location); Ablock_diag_i[ii_counter] = jj_counter; Ablock_offd_size = jj_counter_offd; Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter + 1, memory_location); Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, memory_location); Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, memory_location); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; //----------------------------------------------------------------------- // Send and receive fine_to_coarse info. 
//----------------------------------------------------------------------- // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) { coarse_shift = col_coarse_counter[j - 1]; } size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif // for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt; #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) { jj_counter = jj_count[jl - 1]; } jj_counter_offd = 0; if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; } ii_counter = 0; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. 
*--------------------------------------------------------------------*/ if (row_cf_marker[i] > 0) { // Diagonal part of Ablock // Ablock_diag_i[ii_counter] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { Ablock_diag_j[jj_counter] = fine_to_coarse[i1]; Ablock_diag_data[jj_counter] = A_diag_data[jj]; jj_counter++; } } // Off-Diagonal part of Ablock // Ablock_offd_i[ii_counter] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { Ablock_offd_j[jj_counter_offd] = i1; Ablock_offd_data[jj_counter_offd] = A_offd_data[jj]; jj_counter_offd++; } } } ii_counter++; } } Ablock_offd_i[ii_counter] = jj_counter_offd; Ablock_diag_i[ii_counter] = jj_counter; } Ablock = hypre_ParCSRMatrixCreate(comm, total_global_row_cpts, total_global_col_cpts, num_row_cpts_global, num_col_cpts_global, 0, Ablock_diag_i[ii_counter], Ablock_offd_i[ii_counter]); Ablock_diag = hypre_ParCSRMatrixDiag(Ablock); hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data; hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i; hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j; Ablock_offd = hypre_ParCSRMatrixOffd(Ablock); hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data; hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i; hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j; num_cols_Ablock_offd = 0; if (Ablock_offd_size) { Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < num_cols_A_offd; i++) { Ablock_marker[i] = 0; } num_cols_Ablock_offd = 0; for (i = 0; i < Ablock_offd_size; i++) { index = Ablock_offd_j[i]; if (!Ablock_marker[index]) { num_cols_Ablock_offd++; Ablock_marker[index] = 1; } } col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, memory_location); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, 
HYPRE_MEMORY_HOST);
   /* Compress offd columns: tmp_map_offd[i] = index (in A's offd numbering)
      of the i-th column that was marked as belonging to Ablock. */
   index = 0;
   for (i = 0; i < num_cols_Ablock_offd; i++)
   {
      while (Ablock_marker[index] == 0) { index++; }
      tmp_map_offd[i] = index++;
   }

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Remap Ablock's offd column indices from A-offd numbering to the
      compressed 0..num_cols_Ablock_offd-1 numbering. */
   for (i = 0; i < Ablock_offd_size; i++)
      Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                            Ablock_offd_j[i],
                                            num_cols_Ablock_offd);
   hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST);
}

if (num_cols_Ablock_offd)
{
   hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock;
   hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd;
}

hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd);

/* Create the assumed partition */
if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL)
{
   hypre_ParCSRMatrixCreateAssumedPartition(Ablock);
}

*A_block_ptr = Ablock;

/* Release all scratch arrays used to build the sub-block. */
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

return (0);
}

/* Build A_FF matrix from A given a CF_marker array.
 *
 * A         - input matrix.
 * CF_marker - C/F splitting of A's local rows (positive = C-point).
 * debug_flag - passed through to hypre_MGRGetSubBlock.
 * A_ff_ptr  - output: the F/F sub-block of A.
 */
HYPRE_Int
hypre_MGRBuildAff( hypre_ParCSRMatrix *A,
                   HYPRE_Int          *CF_marker,
                   HYPRE_Int           debug_flag,
                   hypre_ParCSRMatrix **A_ff_ptr )
{
   HYPRE_Int i;
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   /* create a copy of the CF_marker array and switch C-points to F-points */
   HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_numrows, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Negating the marker makes the original F-points look like C-points,
      so hypre_MGRGetSubBlock extracts the F-row/F-column block. */
   for (i = 0; i < local_numrows; i++)
   {
      CF_marker_copy[i] = -CF_marker[i];
   }

   hypre_MGRGetSubBlock(A, CF_marker_copy, CF_marker_copy, debug_flag, A_ff_ptr);

   /* Free copy of CF marker */
   hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST);
   return (0);
}
/*********************************************************************************
 * This routine assumes that the 'toVector' is larger than the 'fromVector' and
 * the CF_marker is of the same length as the toVector. There must be n 'point_type'
 * values in the CF_marker, where n is the length of the 'fromVector'.
 * It adds the values of the 'fromVector' to the 'toVector' where the marker is the
 * same as the 'point_type', i.e. for marked positions i (and running index j
 * over fromVector):  toVector[i] = b * toVector[i] + a * fromVector[j].
 *********************************************************************************/
HYPRE_Int
hypre_MGRAddVectorP ( hypre_IntArray  *CF_marker,
                      HYPRE_Int        point_type,
                      HYPRE_Real       a,
                      hypre_ParVector  *fromVector,
                      HYPRE_Real       b,
                      hypre_ParVector **toVector )
{
   hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector);
   HYPRE_Real   *fromVectorData  = hypre_VectorData(fromVectorLocal);
   hypre_Vector *toVectorLocal   = hypre_ParVectorLocalVector(*toVector);
   HYPRE_Real   *toVectorData    = hypre_VectorData(toVectorLocal);
   HYPRE_Int    *CF_marker_data  = hypre_IntArrayData(CF_marker);

   //HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector);
   /* Iterate over the full (larger) vector; the marker has one entry per
      toVector position. */
   HYPRE_Int n = hypre_IntArraySize(CF_marker);
   HYPRE_Int i, j;

   j = 0;
   for (i = 0; i < n; i++)
   {
      if (CF_marker_data[i] == point_type)
      {
         toVectorData[i] = b * toVectorData[i] + a * fromVectorData[j];
         j++;
      }
   }
   return 0;
}

/*************************************************************************************
 * This routine assumes that the 'fromVector' is larger than the 'toVector' and
 * the CF_marker is of the same length as the fromVector. There must be n 'point_type'
 * values in the CF_marker, where n is the length of the 'toVector'.
* It adds the values of the 'fromVector' where the marker is the
 * same as the 'point_type' to the 'toVector', i.e. for marked positions i
 * (and running index j over toVector):  toVector[j] = b * toVector[j] + a * fromVector[i].
 *************************************************************************************/
HYPRE_Int
hypre_MGRAddVectorR ( hypre_IntArray  *CF_marker,
                      HYPRE_Int        point_type,
                      HYPRE_Real       a,
                      hypre_ParVector  *fromVector,
                      HYPRE_Real       b,
                      hypre_ParVector **toVector )
{
   hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector);
   HYPRE_Real   *fromVectorData  = hypre_VectorData(fromVectorLocal);
   hypre_Vector *toVectorLocal   = hypre_ParVectorLocalVector(*toVector);
   HYPRE_Real   *toVectorData    = hypre_VectorData(toVectorLocal);
   HYPRE_Int    *CF_marker_data  = hypre_IntArrayData(CF_marker);

   //HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector);
   /* Iterate over the full (larger) vector; the marker has one entry per
      fromVector position. */
   HYPRE_Int n = hypre_IntArraySize(CF_marker);
   HYPRE_Int i, j;

   j = 0;
   for (i = 0; i < n; i++)
   {
      if (CF_marker_data[i] == point_type)
      {
         toVectorData[j] = b * toVectorData[j] + a * fromVectorData[i];
         j++;
      }
   }
   return 0;
}

/*
HYPRE_Int
hypre_MGRBuildAffRAP( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions,
HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr,
hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr )
{
   HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST);
   HYPRE_Int i;
   for (i = 0; i < local_num_variables; i++)
   {
      CF_marker_copy[i] = -CF_marker[i];
   }
   hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr);
   hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr);
   hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr);
   hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST);
   return 0;
}
*/

/* Get pointer to coarse grid matrix for MGR solver.
 * Fails with hypre_error if the solver data or the RAP matrix is not set up. */
HYPRE_Int
hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP )
{
   hypre_ParMGRData *mgr_data =
(hypre_ParMGRData*) mgr_vdata;
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (mgr_data -> RAP == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        " Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n");
      return hypre_error_flag;
   }
   *RAP = mgr_data->RAP;
   return hypre_error_flag;
}

/* Get pointer to coarse grid solution for MGR solver.
 * Returns the solution vector at the last (coarsest) level. */
HYPRE_Int
hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (mgr_data -> U_array == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        " MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
      return hypre_error_flag;
   }
   *sol = mgr_data->U_array[mgr_data->num_coarse_levels];
   return hypre_error_flag;
}

/* Get pointer to coarse grid RHS for MGR solver.
 * Returns the right-hand-side vector at the last (coarsest) level. */
HYPRE_Int
hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (mgr_data -> F_array == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        " MGR RHS array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
      return hypre_error_flag;
   }
   *rhs = mgr_data->F_array[mgr_data->num_coarse_levels];
   return hypre_error_flag;
}

/* Print coarse grid linear system (for debugging) */
HYPRE_Int
hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* Only records the flag; the actual printing happens elsewhere. */
   mgr_data->print_coarse_system = print_flag;
   return hypre_error_flag;
}

/* Print solver params (setup configuration followed by per-level settings). */
HYPRE_Int
hypre_MGRWriteSolverParams(void *mgr_vdata)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i, j;
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   hypre_printf("MGR Setup parameters: \n");
   hypre_printf("Block size: %d\n", (mgr_data -> block_size));
   hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels));
   hypre_printf("Relax type: %d\n", (mgr_data -> relax_type));
   hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F));
   hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method));
   for (i = 0; i < max_num_coarse_levels; i++)
   {
      hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]);
      hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]);
      hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]);
      hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i, (mgr_data -> use_non_galerkin_cg)[i]);
      HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i];
      hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points);
      hypre_printf("Cpoints indices: ");
      for (j = 0; j < lvl_num_coarse_points; j++)
      {
         if ((mgr_data -> block_cf_marker)[i][j] == 1)
         {
            hypre_printf("%d ", j);
         }
      }
      hypre_printf("\n");
   }
   hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size));
   hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints));
hypre_printf("\n MGR Solver Parameters: \n");
   hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps));
   hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps));
   hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps));
   hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type));
   hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters));
   hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter));
   hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol));
   hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver));
   if ((mgr_data -> fsolver_mode) >= 0)
   {
      hypre_printf("Use AMG solver for full AMG F-relaxation: %d\n", (mgr_data -> fsolver_mode));
   }
   return hypre_error_flag;
}

#ifdef HYPRE_USING_DSUPERLU
/* Allocate an (empty) SuperLU_DIST solver data object; populated by
 * hypre_MGRDirectSolverSetup. */
void *
hypre_MGRDirectSolverCreate()
{
   hypre_DSLUData *dslu_data = hypre_CTAlloc(hypre_DSLUData, 1, HYPRE_MEMORY_HOST);

   return (void *) dslu_data;
}

/* Set up the SuperLU_DIST direct solver for A: converts A to a distributed
 * SuperLU matrix, creates the process grid, and performs the factorization
 * (pdgssvx with nrhs = 0). f and u are unused at setup time. */
HYPRE_Int
hypre_MGRDirectSolverSetup( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u )
{
   /* Par Data Structure variables */
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_local;
   HYPRE_Int num_rows;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int pcols = 1, prows = 1;
   HYPRE_BigInt *big_rowptr = NULL;
   hypre_DSLUData *dslu_data = (hypre_DSLUData *) solver;

   HYPRE_Int info = 0;
   HYPRE_Int nrhs = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* Merge diag and offd into one matrix (global ids) */
   A_local = hypre_MergeDiagAndOffd(A);

   num_rows = hypre_CSRMatrixNumRows(A_local);
   /* Now convert hypre matrix to a SuperMatrix */
#ifdef HYPRE_MIXEDINT
   {
      /* Row pointers must be widened to HYPRE_BigInt for SuperLU. */
      HYPRE_Int *rowptr = NULL;
      HYPRE_Int  i;
      rowptr = hypre_CSRMatrixI(A_local);
      big_rowptr = hypre_CTAlloc(HYPRE_BigInt, (num_rows + 1), HYPRE_MEMORY_HOST);
      for (i = 0; i < (num_rows + 1); i++)
      {
big_rowptr[i] = (HYPRE_BigInt)rowptr[i];
      }
   }
#else
   big_rowptr = hypre_CSRMatrixI(A_local);
#endif

   dCreate_CompRowLoc_Matrix_dist(
      &(dslu_data->A_dslu), global_num_rows, global_num_rows,
      hypre_CSRMatrixNumNonzeros(A_local), num_rows,
      hypre_ParCSRMatrixFirstRowIndex(A),
      hypre_CSRMatrixData(A_local), hypre_CSRMatrixBigJ(A_local),
      big_rowptr, SLU_NR_loc, SLU_D, SLU_GE);

   /* DOK: SuperLU frees assigned data, so set them to null before
    * calling hypre_CSRMatrixdestroy on A_local to avoid memory errors. */
#ifndef HYPRE_MIXEDINT
   hypre_CSRMatrixI(A_local) = NULL;
#endif
   hypre_CSRMatrixData(A_local) = NULL;
   hypre_CSRMatrixBigJ(A_local) = NULL;
   hypre_CSRMatrixDestroy(A_local);

   /* Create process grid: pick prows as the largest divisor-candidate with
    * prows * pcols == num_procs (as close to square as this search allows). */
   while (prows * pcols <= num_procs) { ++prows; }
   --prows;
   pcols = num_procs / prows;
   while (prows * pcols != num_procs)
   {
      prows -= 1;
      pcols = num_procs / prows;
   }
   //hypre_printf(" prows %d pcols %d\n", prows, pcols);

   superlu_gridinit(comm, prows, pcols, &(dslu_data->dslu_data_grid));

   set_default_options_dist(&(dslu_data->dslu_options));

   dslu_data->dslu_options.Fact = DOFACT;
   dslu_data->dslu_options.PrintStat = NO;
   /*dslu_data->dslu_options.IterRefine = SLU_DOUBLE;
   dslu_data->dslu_options.ColPerm = MMD_AT_PLUS_A;
   dslu_data->dslu_options.DiagPivotThresh = 1.0;
   dslu_data->dslu_options.ReplaceTinyPivot = NO; */

   dScalePermstructInit(global_num_rows, global_num_rows, &(dslu_data->dslu_ScalePermstruct));
   dLUstructInit(global_num_rows, &(dslu_data->dslu_data_LU));
   PStatInit(&(dslu_data->dslu_data_stat));

   dslu_data->global_num_rows = global_num_rows;
   dslu_data->berr = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
   dslu_data->berr[0] = 0.0;

   /* nrhs == 0: factorize only; subsequent solves reuse the factorization. */
   pdgssvx(&(dslu_data->dslu_options), &(dslu_data->A_dslu),
           &(dslu_data->dslu_ScalePermstruct), NULL, num_rows, nrhs,
           &(dslu_data->dslu_data_grid), &(dslu_data->dslu_data_LU),
           &(dslu_data->dslu_solve), dslu_data->berr,
           &(dslu_data->dslu_data_stat), &info);

   dslu_data->dslu_options.Fact = FACTORED;
   return hypre_error_flag;
}

HYPRE_Int
hypre_MGRDirectSolverSolve( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u )
{
   /* Triangular solves with the factorization computed in
    * hypre_MGRDirectSolverSetup; A is unused here. */
   hypre_SLUDistSolve(solver, f, u);

   return hypre_error_flag;
}

/* Free the SuperLU_DIST solver data created by hypre_MGRDirectSolverCreate/Setup. */
HYPRE_Int
hypre_MGRDirectSolverDestroy( void *solver )
{
   hypre_SLUDistDestroy(solver);

   return hypre_error_flag;
}
#endif
bli_axpyv_bgq_int.c
/*

   BLIS
   An object-based framework for developing high-performance BLAS-like
   libraries.

   Copyright (C) 2014, The University of Texas at Austin

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:
    - Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    - Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    - Neither the name(s) of the copyright holder(s) nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*/

#include "blis.h"

/* axpyv kernel for BlueGene/Q using QPX intrinsics:
 *   y := y + alpha * x   (double precision)
 *
 * conjx       - ignored for real domain.
 * n           - vector length.
 * alpha       - scalar multiplier.
 * x, incx     - input vector and its stride.
 * y, incy     - input/output vector and its stride.
 * cntx        - context, forwarded to the reference kernel if used.
 *
 * Falls back to the reference kernel for non-unit strides or vectors not
 * 32-byte aligned, since the QPX path uses aligned 4-wide loads/stores.
 */
void bli_daxpyv_bgq_int
     (
       conj_t           conjx,
       dim_t            n,
       double* restrict alpha,
       double* restrict x, inc_t incx,
       double* restrict y, inc_t incy,
       cntx_t* restrict cntx
     )
{
	if ( bli_zero_dim1( n ) ) return;

	// If there is anything that would interfere with our use of aligned
	// vector loads/stores, call the reference implementation.
	bool_t use_ref = FALSE;

	if ( incx != 1 || incy != 1 ||
	     bli_is_unaligned_to( ( siz_t )x, 32 ) ||
	     bli_is_unaligned_to( ( siz_t )y, 32 ) )
	{
		use_ref = TRUE;
	}

	// Call the reference implementation if needed.
	if ( use_ref == TRUE )
	{
		BLIS_DAXPYV_KERNEL_REF( conjx, n, alpha, x, incx, y, incy, cntx );
		return;
	}

	dim_t n_run  = n / 4;
	dim_t n_left = n % 4;

	// Broadcast alpha into all four QPX lanes once, outside the loop.
	vector4double alphav = vec_lds( 0 * sizeof(double), (double*)alpha );

	// BUG FIX: the vector temporaries xv/yv/zv were previously declared at
	// function scope, which made them SHARED across the OpenMP threads of
	// the parallel-for below — a data race in which threads clobbered each
	// other's loaded values. Declaring them inside the loop body makes them
	// private to each iteration (and thus to each thread).
	#pragma omp parallel for
	for ( dim_t i = 0; i < n_run; i++ )
	{
		vector4double xv, yv, zv;

		// y[4i..4i+3] += alpha * x[4i..4i+3] via fused multiply-add.
		xv = vec_lda( 0 * sizeof(double), &x[i*4] );
		yv = vec_lda( 0 * sizeof(double), &y[i*4] );

		zv = vec_madd( alphav, xv, yv );

		vec_sta( zv, 0 * sizeof(double), &y[i*4] );
	}

	// Scalar cleanup for the 0-3 trailing elements.
	for ( dim_t i = 0; i < n_left; i++ )
	{
		y[4*n_run + i] += *alpha * x[4*n_run + i];
	}
}
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // /// \file /// \brief This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/Decl.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TrailingObjects.h" #include <cassert> #include <cstddef> #include <iterator> #include <utility> namespace clang { class ASTContext; //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// \brief This is a basic class for representing single OpenMP clause. class OMPClause { /// \brief Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// \brief Ending location of the clause. SourceLocation EndLoc; /// \brief Kind of the clause. 
OpenMPClauseKind Kind;

protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
      : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}

public:
  /// \brief Returns the starting location of the clause.
  SourceLocation getLocStart() const { return StartLoc; }

  /// \brief Returns the ending location of the clause.
  SourceLocation getLocEnd() const { return EndLoc; }

  /// \brief Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }

  /// \brief Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// \brief Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }

  /// An implicit clause has no source location: StartLoc is invalid.
  bool isImplicit() const { return StartLoc.isInvalid(); }

  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;
  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();
  // Const overload delegates to the non-const children() of the derived
  // clause via const_cast; the range itself is const-qualified.
  const_child_range children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *) { return true; }
};

/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Pre-initialization statement for the clause.
  Stmt *PreInit = nullptr;

  /// Region that captures the associated stmt.
  OpenMPDirectiveKind CaptureRegion = OMPD_unknown;

protected:
  OMPClauseWithPreInit(const OMPClause *This) {
    assert(get(This) && "get is not tuned for pre-init.");
  }

  /// Set pre-initialization statement for the clause.
  void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = OMPD_unknown) {
    PreInit = S;
    CaptureRegion = ThisRegion;
  }

public:
  /// Get pre-initialization statement for the clause.
const Stmt *getPreInitStmt() const { return PreInit; }

  /// Get pre-initialization statement for the clause.
  Stmt *getPreInitStmt() { return PreInit; }

  /// Get capture region for the stmt in the clause.
  OpenMPDirectiveKind getCaptureRegion() { return CaptureRegion; }

  static OMPClauseWithPreInit *get(OMPClause *C);
  static const OMPClauseWithPreInit *get(const OMPClause *C);
};

/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Post-update expression for the clause.
  Expr *PostUpdate = nullptr;

protected:
  OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
    assert(get(This) && "get is not tuned for post-update.");
  }

  /// Set post-update expression for the clause.
  void setPostUpdateExpr(Expr *S) { PostUpdate = S; }

public:
  /// Get post-update expression for the clause.
  const Expr *getPostUpdateExpr() const { return PostUpdate; }

  /// Get post-update expression for the clause.
  Expr *getPostUpdateExpr() { return PostUpdate; }

  static OMPClauseWithPostUpdate *get(OMPClause *C);
  static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};

/// \brief This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief Number of variables in the list.
  unsigned NumVars;

protected:
  /// \brief Build a clause with \a N variables
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
      : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}

  /// \brief Fetches list of variables associated with this clause.
  /// The variables are stored as trailing objects of the derived clause T
  /// (CRTP: the cast to T* reaches its TrailingObjects storage).
  MutableArrayRef<Expr *> getVarRefs() {
    return MutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars);
  }

  /// \brief Sets the list of variables for this clause.
  void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(VL.begin(), VL.end(),
              static_cast<T *>(this)->template getTrailingObjects<Expr *>());
  }

public:
  using varlist_iterator = MutableArrayRef<Expr *>::iterator;
  using varlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using varlist_range = llvm::iterator_range<varlist_iterator>;
  using varlist_const_range = llvm::iterator_range<varlist_const_iterator>;

  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }

  varlist_iterator varlist_begin() { return getVarRefs().begin(); }
  varlist_iterator varlist_end() { return getVarRefs().end(); }
  varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
  varlist_const_iterator varlist_end() const { return getVarRefs().end(); }

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Fetches list of all variables in the clause.
  ArrayRef<const Expr *> getVarRefs() const {
    return llvm::makeArrayRef(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }
};

/// \brief This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief Condition of the 'if' clause.
  Stmt *Condition = nullptr;

  /// \brief Location of ':' (if any).
  SourceLocation ColonLoc;

  /// \brief Directive name modifier for the clause.
  OpenMPDirectiveKind NameModifier = OMPD_unknown;

  /// \brief Name modifier location.
  SourceLocation NameModifierLoc;

  /// \brief Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// \brief Set directive name modifier for the clause.
  void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }

  /// \brief Set location of directive name modifier for the clause.
  void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }

  /// \brief Set location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// \brief Build 'if' clause with condition \a Cond.
  ///
  /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param NameModifierLoc Location of directive name modifier.
  /// \param ColonLoc [OpenMP 4.1] Location of ':'.
  /// \param EndLoc Ending location of the clause.
OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond,
              OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
              SourceLocation LParenLoc, SourceLocation NameModifierLoc,
              SourceLocation ColonLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc),
        NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// \brief Build an empty clause.
  OMPIfClause()
      : OMPClause(OMPC_if, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// \brief Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  /// \brief Return directive name modifier associated with the clause.
  OpenMPDirectiveKind getNameModifier() const { return NameModifier; }

  /// \brief Return the location of directive name modifier.
  SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_if;
  }
};

/// \brief This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief Condition of the 'final' clause.
  Stmt *Condition = nullptr;

  /// \brief Set condition.
void setCondition(Expr *Cond) { Condition = Cond; } public: /// \brief Build 'final' clause with condition \a Cond. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Cond Condition of the clause. /// \param EndLoc Ending location of the clause. OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond) {} /// \brief Build an empty clause. OMPFinalClause() : OMPClause(OMPC_final, SourceLocation(), SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_final; } }; /// \brief This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Condition of the 'num_threads' clause. Stmt *NumThreads = nullptr; /// \brief Set condition. void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// \brief Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param HelperNumThreads Helper Number of threads for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. 
/// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_num_threads, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumThreads(NumThreads) { setPreInitStmt(HelperNumThreads, CaptureRegion); } /// \brief Build an empty clause. OMPNumThreadsClause() : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } child_range children() { return child_range(&NumThreads, &NumThreads + 1); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_num_threads; } }; /// \brief This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Safe iteration space distance. Stmt *Safelen = nullptr; /// \brief Set safelen. void setSafelen(Expr *Len) { Safelen = Len; } public: /// \brief Build 'safelen' clause. /// /// \param Len Expression associated with this clause. 
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Safelen(Len) {}

  /// \brief Build an empty clause.
  explicit OMPSafelenClause()
      : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  child_range children() { return child_range(&Safelen, &Safelen + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }
};

/// \brief This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief Safe iteration space distance.
  Stmt *Simdlen = nullptr;

  /// \brief Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// \brief Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Simdlen(Len) {}

  /// \brief Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return safe iteration space distance.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simdlen;
  }
};

/// \brief This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// \brief Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// \brief Build 'collapse' clause.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num) {}

  /// \brief Build an empty clause.
  explicit OMPCollapseClause()
      : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_collapse;
  }
};

/// \brief This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief A kind of the 'default' clause.
  OpenMPDefaultClauseKind Kind = OMPC_DEFAULT_unknown;

  /// \brief Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// \brief Set kind of the clauses.
  ///
  /// \param K Argument of clause.
  void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }

  /// \brief Set argument location.
  ///
  /// \param KLoc Argument location.
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// \brief Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// \brief Build an empty clause.
  OMPDefaultClause()
      : OMPClause(OMPC_default, SourceLocation(), SourceLocation()) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Returns kind of the clause.
  OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }

  /// \brief Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_default;
  }
};

/// \brief This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief A kind of the 'proc_bind' clause.
  OpenMPProcBindClauseKind Kind = OMPC_PROC_BIND_unknown;

  /// \brief Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// \brief Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; }

  /// \brief Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// \brief Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
/// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// \brief Build an empty clause. OMPProcBindClause() : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns kind of the clause. OpenMPProcBindClauseKind getProcBindKind() const { return Kind; } /// \brief Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_proc_bind; } }; /// \brief This represents 'schedule' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for schedule(static, 3) /// \endcode /// In this example directive '#pragma omp for' has 'schedule' clause with /// arguments 'static' and '3'. class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'schedule' clause. OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown; /// \brief Modifiers for 'schedule' clause. enum {FIRST, SECOND, NUM_MODIFIERS}; OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS]; /// \brief Locations of modifiers. SourceLocation ModifiersLoc[NUM_MODIFIERS]; /// \brief Start location of the schedule ind in source code. SourceLocation KindLoc; /// \brief Location of ',' (if any). SourceLocation CommaLoc; /// \brief Chunk size. 
Expr *ChunkSize = nullptr; /// \brief Set schedule kind. /// /// \param K Schedule kind. void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; } /// \brief Set the first schedule modifier. /// /// \param M Schedule modifier. void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[FIRST] = M; } /// \brief Set the second schedule modifier. /// /// \param M Schedule modifier. void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[SECOND] = M; } /// \brief Set location of the first schedule modifier. void setFirstScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[FIRST] = Loc; } /// \brief Set location of the second schedule modifier. void setSecondScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[SECOND] = Loc; } /// \brief Set schedule modifier location. /// /// \param M Schedule modifier location. void setScheduleModifer(OpenMPScheduleClauseModifier M) { if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown) Modifiers[FIRST] = M; else { assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown); Modifiers[SECOND] = M; } } /// \brief Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Set schedule kind start location. /// /// \param KLoc Schedule kind location. void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// \brief Set location of ','. /// /// \param Loc Location of ','. void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// \brief Set chunk size. /// /// \param E Chunk size. void setChunkSize(Expr *E) { ChunkSize = E; } public: /// \brief Build 'schedule' clause with schedule kind \a Kind and chunk size /// expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. 
/// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. /// \param M1 The first modifier applied to 'schedule' clause. /// \param M1Loc Location of the first modifier /// \param M2 The second modifier applied to 'schedule' clause. /// \param M2Loc Location of the second modifier OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// \brief Build an empty clause. explicit OMPScheduleClause() : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// \brief Get kind of the clause. OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// \brief Get the first modifier of the clause. OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// \brief Get the second modifier of the clause. OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// \brief Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// \brief Get kind location. SourceLocation getScheduleKindLoc() { return KindLoc; } /// \brief Get the first modifier location. SourceLocation getFirstScheduleModifierLoc() const { return ModifiersLoc[FIRST]; } /// \brief Get the second modifier location. 
SourceLocation getSecondScheduleModifierLoc() const { return ModifiersLoc[SECOND]; } /// \brief Get location of ','. SourceLocation getCommaLoc() { return CommaLoc; } /// \brief Get chunk size. Expr *getChunkSize() { return ChunkSize; } /// \brief Get chunk size. const Expr *getChunkSize() const { return ChunkSize; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_schedule; } }; /// \brief This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered (2) /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause with /// parameter 2. class OMPOrderedClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of for-loops. Stmt *NumForLoops = nullptr; /// \brief Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// \brief Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderedClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// \brief Build an empty clause. explicit OMPOrderedClause() : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return the number of associated for-loops. 
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_ordered;
  }
};

/// \brief This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
  /// \brief Build 'nowait' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nowait, StartLoc, EndLoc) {}

  /// \brief Build an empty clause.
  OMPNowaitClause()
      : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {}

  // No associated expression, so the clause has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nowait;
  }
};

/// \brief This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
  /// \brief Build 'untied' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_untied, StartLoc, EndLoc) {}

  /// \brief Build an empty clause.
  OMPUntiedClause()
      : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_untied;
  }
};

/// \brief This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
  /// \brief Build 'mergeable' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {}

  /// \brief Build an empty clause.
  OMPMergeableClause()
      : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_mergeable;
  }
};

/// \brief This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
  /// \brief Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_read, StartLoc, EndLoc) {}

  /// \brief Build an empty clause.
  OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_read;
  }
};

/// \brief This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// \brief Build 'write' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_write, StartLoc, EndLoc) {}

  /// \brief Build an empty clause.
  OMPWriteClause()
      : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_write;
  }
};

/// \brief This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
class OMPUpdateClause : public OMPClause {
public:
  /// \brief Build 'update' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_update, StartLoc, EndLoc) {}

  /// \brief Build an empty clause.
  OMPUpdateClause()
      : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_update;
  }
};

/// \brief This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// \brief Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_capture, StartLoc, EndLoc) {}

  /// \brief Build an empty clause.
  OMPCaptureClause()
      : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_capture;
  }
};

/// \brief This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
  /// \brief Build 'seq_cst' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {}

  /// \brief Build an empty clause.
  OMPSeqCstClause()
      : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_seq_cst;
  }
};

/// \brief This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc,
                                           EndLoc, N) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           N) {}

  /// \brief Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// \brief Gets the list of references to private copies with initializers for
  /// new private variables.
  // The private copies are stored in trailing storage immediately after the
  // varlist, hence the ranges start at varlist_end().
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);

  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_private;
  }
};

/// \brief This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc,
                                                LParenLoc, EndLoc, N),
        OMPClauseWithPreInit(this) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFirstprivateClause(unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(
            OMPC_firstprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPreInit(this) {}

  /// \brief Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// \brief Gets the list of references to private copies with initializers for
  /// new private variables.
  // Trailing storage layout: [varlist][private copies][inits]; the copies
  // start at varlist_end() and the inits follow them.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Sets the list of references to initializer variables for new
  /// private variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// \brief Gets the list of references to initializer variables for new
  /// private variables.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  /// \param PrivateVL List of references to private copies with initializers.
  /// \param InitVL List of references to auto generated variables used for
  /// initialization of a single array element. Used if firstprivate variable is
  /// of array type.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  static OMPFirstprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
         ArrayRef<Expr *> InitVL, Stmt *PreInit);

  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_firstprivate;
  }
};

/// \brief This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// \brief Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays,
  /// single array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list
  /// represents private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list
  /// represents original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents
  /// assignment operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         Stmt *PreInit, Expr *PostUpdate);

  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  /// \brief Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }

  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_lastprivate;
  }
};

/// \brief This represents clause 'shared' in the '#pragma omp ...' directives.
/// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. class OMPSharedClause final : public OMPVarListClause<OMPSharedClause>, private llvm::TrailingObjects<OMPSharedClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_shared; } }; /// \brief This represents clause 'reduction' in the '#pragma omp ...' /// directives. 
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  // Trailing storage layout: the variable list is followed, in order, by the
  // privates, LHS exprs, RHS exprs and reduction ops arrays -- each getter
  // below indexes off the end() of the previous array.

  /// \brief Location of ':'.
  SourceLocation ColonLoc;

  /// \brief Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// \brief Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
                                             LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
                                             SourceLocation(),
                                             SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// \brief Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// \brief Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// \brief Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// \brief Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// \brief Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// \brief Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// \brief Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for reduction clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for reduction clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// \brief Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// \brief Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// \brief Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reduction;
  }
};

/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc,
                                                 LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            OMPC_task_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for reduction clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for reduction clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_task_reduction;
  }
};

/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(OMPC_in_reduction, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction taskgroup descriptors.
  void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction taskgroup descriptors.
  MutableArrayRef<Expr *> getTaskgroupDescriptors() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getTaskgroupDescriptors() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for reduction clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for reduction clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPInReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps,
         ArrayRef<Expr *> TaskgroupDescriptors, Stmt *PreInit,
         Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  helper_expr_const_range taskgroup_descriptors() const {
    return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                   getTaskgroupDescriptors().end());
  }

  helper_expr_range taskgroup_descriptors() {
    return helper_expr_range(getTaskgroupDescriptors().begin(),
                             getTaskgroupDescriptors().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_in_reduction;
  }
};

/// \brief This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// \brief Modifier of 'linear' clause.
  OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;

  /// \brief Location of linear modifier if any.
  SourceLocation ModifierLoc;

  /// \brief Location of ':'.
  SourceLocation ColonLoc;

  /// \brief Sets the linear step for clause.
  // NOTE: the step lives in the first extra trailing slot directly past the
  // Finals[] array (see the layout comment on getPrivates() below), so
  // dereferencing getFinals().end() is an in-bounds access, not an overrun.
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }

  /// \brief Sets the expression to calculate linear step for clause.
  // CalcStep occupies the second extra trailing slot, right after Step.
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

  /// \brief Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
                  SourceLocation ColonLoc, SourceLocation EndLoc,
                  unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
                                          EndLoc, NumVars),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          NumVars),
        OMPClauseWithPostUpdate(this) {}

  /// \brief Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed by
  /// NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  /// Finals[]; Step; CalcStep; }
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// \brief Sets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }

  /// \brief Sets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// \brief Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);

  /// \brief Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// \brief Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PL List of private copies of original variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLinearClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
         Stmt *PreInit, Expr *PostUpdate);

  /// \brief Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// \brief Set modifier.
  void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }

  /// \brief Return modifier.
  OpenMPLinearClauseKind getModifier() const { return Modifier; }

  /// \brief Set modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// \brief Return modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// \brief Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// \brief Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// \brief Returns linear step.
  Expr *getStep() { return *(getFinals().end()); }

  /// \brief Returns linear step.
  const Expr *getStep() const { return *(getFinals().end()); }

  /// \brief Returns expression to calculate linear step.
  Expr *getCalcStep() { return *(getFinals().end() + 1); }

  /// \brief Returns expression to calculate linear step.
  const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

  /// \brief Sets the list of update expressions for linear variables.
  /// \param UL List of expressions.
  void setUpdates(ArrayRef<Expr *> UL);

  /// \brief Sets the list of final update expressions for linear variables.
  /// \param FL List of expressions.
  void setFinals(ArrayRef<Expr *> FL);

  using privates_iterator = MutableArrayRef<Expr *>::iterator;
  using privates_const_iterator = ArrayRef<const Expr *>::iterator;
  using privates_range = llvm::iterator_range<privates_iterator>;
  using privates_const_range = llvm::iterator_range<privates_const_iterator>;

  privates_range privates() {
    return privates_range(getPrivates().begin(), getPrivates().end());
  }
  privates_const_range privates() const {
    return privates_const_range(getPrivates().begin(), getPrivates().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  using updates_iterator = MutableArrayRef<Expr *>::iterator;
  using updates_const_iterator = ArrayRef<const Expr *>::iterator;
  using updates_range = llvm::iterator_range<updates_iterator>;
  using updates_const_range = llvm::iterator_range<updates_const_iterator>;

  updates_range updates() {
    return updates_range(getUpdates().begin(), getUpdates().end());
  }
  updates_const_range updates() const {
    return updates_const_range(getUpdates().begin(), getUpdates().end());
  }

  using finals_iterator = MutableArrayRef<Expr *>::iterator;
  using finals_const_iterator = ArrayRef<const Expr *>::iterator;
  using finals_range = llvm::iterator_range<finals_iterator>;
  using finals_const_range = llvm::iterator_range<finals_const_iterator>;

  finals_range finals() {
    return finals_range(getFinals().begin(), getFinals().end());
  }
  finals_const_range finals() const {
    return finals_const_range(getFinals().begin(), getFinals().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_linear;
  }
};

/// \brief This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// \brief Location of ':'.
  SourceLocation ColonLoc;

  /// \brief Sets the alignment for clause.
  // The alignment expression occupies the single trailing slot directly after
  // the variable list.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// \brief Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
                                           EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of variables.
explicit OMPAlignedClause(unsigned NumVars)
    : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
                                         SourceLocation(), SourceLocation(),
                                         NumVars) {}

public:
/// \brief Creates clause with a list of variables \a VL and alignment \a A.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param A Alignment.
static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation ColonLoc, SourceLocation EndLoc,
                                ArrayRef<Expr *> VL, Expr *A);

/// \brief Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

/// \brief Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

/// \brief Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }

/// \brief Returns alignment.
Expr *getAlignment() { return *varlist_end(); }

/// \brief Returns alignment.
const Expr *getAlignment() const { return *varlist_end(); }

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_aligned;
}
};

/// \brief This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays:
  // 1. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyin;
  }
};

/// \brief This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
                                               LParenLoc, EndLoc, N) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyprivate;
  }
};

/// \brief This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive.
This clause is introduced to keep the original structure /// of \a OMPExecutableDirective class and its derivatives and to use the /// existing infrastructure of clauses with the list of variables. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has implicit clause 'flush' /// with the variables 'a' and 'b'. class OMPFlushClause final : public OMPVarListClause<OMPFlushClause>, private llvm::TrailingObjects<OMPFlushClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. explicit OMPFlushClause(unsigned N) : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_flush; } }; /// \brief This represents implicit clause 'depend' for the '#pragma omp task' /// directive. /// /// \code /// #pragma omp task depend(in:a,b) /// \endcode /// In this example directive '#pragma omp task' with clause 'depend' with the /// variables 'a' and 'b' with dependency 'in'. class OMPDependClause final : public OMPVarListClause<OMPDependClause>, private llvm::TrailingObjects<OMPDependClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// \brief Dependency type (one of in, out, inout). OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; /// \brief Dependency type location. SourceLocation DepLoc; /// \brief Colon location. SourceLocation ColonLoc; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. explicit OMPDependClause(unsigned N) : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Set dependency kind. void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; } /// \brief Set dependency kind and its location. void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; } /// \brief Set colon location. 
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param DepKind Dependency type. /// \param DepLoc Location of the dependency type. /// \param ColonLoc Colon location. /// \param VL List of references to the variables. static OMPDependClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL); /// \brief Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N); /// \brief Get dependency type. OpenMPDependClauseKind getDependencyKind() const { return DepKind; } /// \brief Get dependency type location. SourceLocation getDependencyLoc() const { return DepLoc; } /// \brief Get colon location. SourceLocation getColonLoc() const { return ColonLoc; } /// Set the loop counter value for the depend clauses with 'sink|source' kind /// of dependency. Required for codegen. void setCounterValue(Expr *V); /// Get the loop counter value. Expr *getCounterValue(); /// Get the loop counter value. const Expr *getCounterValue() const; child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_depend; } }; /// \brief This represents 'device' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp target device(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'device' /// with single expression 'a'. 
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief Device number.
  Stmt *Device = nullptr;

  /// \brief Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

public:
  /// \brief Build 'device' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper expression stored as the pre-init statement.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(Expr *E, Stmt *HelperE, SourceLocation StartLoc,
                  SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Device(E) {
    setPreInitStmt(HelperE);
  }

  /// \brief Build an empty clause.
  OMPDeviceClause()
      : OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }

  /// \brief Return device number.
  Expr *getDevice() const { return cast<Expr>(Device); }

  child_range children() { return child_range(&Device, &Device + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_device;
  }
};

/// \brief This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
  /// \brief Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_threads, StartLoc, EndLoc) {}

  /// \brief Build an empty clause.
  OMPThreadsClause()
      : OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_threads;
  }
};

/// \brief This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
  /// \brief Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simd, StartLoc, EndLoc) {}

  /// \brief Build an empty clause.
  OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simd;
  }
};

/// \brief Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  // \brief Class that represents a component of a mappable expression. E.g.
  // for an expression S.a, the first component is a declaration reference
  // expression associated with 'S' and the second is a member expression
  // associated with the field declaration 'a'. If the expression is an array
  // subscript it may not have any associated declaration. In that case the
  // associated declaration is set to nullptr.
  class MappableComponent {
    // \brief Expression associated with the component.
    Expr *AssociatedExpression = nullptr;

    // \brief Declaration associated with the component. If the component does
    // not have a declaration (e.g. array subscripts or sections), this is set
    // to nullptr.
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() = default;
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration)
        : AssociatedExpression(AssociatedExpression),
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const { return AssociatedExpression; }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // \brief List of components of an expression. This first one is the whole
  // expression and the last one is the base expression.
  using MappableExprComponentList = SmallVector<MappableComponent, 8>;
  using MappableExprComponentListRef = ArrayRef<MappableComponent>;

  // \brief List of all component lists associated to the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
  // their component list but the same base declaration 'S'.
  using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
  using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
  // \brief Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // \brief Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<ValueDecl *> Declarations);
};

/// \brief This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from'
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;

  /// \brief Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;

  /// \brief Number of component lists in this clause.
  unsigned NumComponentLists;

  /// \brief Total number of components in this clause.
  unsigned NumComponents;

protected:
  /// \brief Build a clause for \a NumUniqueDeclarations declarations, \a
  /// NumComponentLists total component lists, and \a NumComponents total
  /// components.
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause - one
  /// list for each expression in the clause.
  /// \param NumComponents Total number of expression components in the clause.
  OMPMappableExprListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc,
                            unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : OMPVarListClause<T>(K, StartLoc, LParenLoc, EndLoc, NumVars),
        NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}

  /// \brief Get the unique declarations that are in the trailing objects of the
  /// class.
  MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
    return MutableArrayRef<ValueDecl *>(
        static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// \brief Get the unique declarations that are in the trailing objects of the
  /// class.
  ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
    return ArrayRef<ValueDecl *>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// \brief Set the unique declarations that are in the trailing objects of the
  /// class.
  void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
    assert(UDs.size() == NumUniqueDeclarations &&
           "Unexpected amount of unique declarations.");
    std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
  }

  /// \brief Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  MutableArrayRef<unsigned> getDeclNumListsRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// \brief Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  ArrayRef<unsigned> getDeclNumListsRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// \brief Set the number of lists per declaration that are in the trailing
  /// objects of the class.
  void setDeclNumLists(ArrayRef<unsigned> DNLs) {
    assert(DNLs.size() == NumUniqueDeclarations &&
           "Unexpected amount of list numbers.");
    std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
  }

  /// \brief Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  MutableArrayRef<unsigned> getComponentListSizesRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// \brief Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  ArrayRef<unsigned> getComponentListSizesRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// \brief Set the cumulative component lists sizes that are in the trailing
  /// objects of the class.
  void setComponentListSizes(ArrayRef<unsigned> CLSs) {
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of component lists.");
    std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
  }

  /// \brief Get the components that are in the trailing objects of the class.
  MutableArrayRef<MappableComponent> getComponentsRef() {
    return MutableArrayRef<MappableComponent>(
        static_cast<T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// \brief Get the components that are in the trailing objects of the class.
  ArrayRef<MappableComponent> getComponentsRef() const {
    return ArrayRef<MappableComponent>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// \brief Set the components that are in the trailing objects of the class.
  /// This requires the list sizes so that it can also fill the original
  /// expressions, which are the first component of each list.
  void setComponents(ArrayRef<MappableComponent> Components,
                     ArrayRef<unsigned> CLSs) {
    assert(Components.size() == NumComponents &&
           "Unexpected amount of component lists.");
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of list sizes.");
    std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
  }

  /// \brief Fill the clause information from the list of declarations and
  /// associated component lists.
  void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
                     MappableExprComponentListsRef ComponentLists) {
    // Perform some checks to make sure the data sizes are consistent with the
    // information available when the clause was created.
    assert(getUniqueDeclarationsTotalNumber(Declarations) ==
               NumUniqueDeclarations &&
           "Unexpected number of mappable expression info entries!");
    assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
           "Unexpected total number of components!");
    assert(Declarations.size() == ComponentLists.size() &&
           "Declaration and component lists size is not consistent!");
    assert(Declarations.size() == NumComponentLists &&
           "Unexpected declaration and component lists size!");

    // Organize the components by declaration and retrieve the original
    // expression. Original expressions are always the first component of the
    // mappable component list.
    llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
        ComponentListMap;
    {
      auto CI = ComponentLists.begin();
      for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
           ++DI, ++CI) {
        assert(!CI->empty() && "Invalid component list!");
        ComponentListMap[*DI].push_back(*CI);
      }
    }

    // Iterators of the target storage.
    auto UniqueDeclarations = getUniqueDeclsRef();
    auto UDI = UniqueDeclarations.begin();

    auto DeclNumLists = getDeclNumListsRef();
    auto DNLI = DeclNumLists.begin();

    auto ComponentListSizes = getComponentListSizesRef();
    auto CLSI = ComponentListSizes.begin();

    auto Components = getComponentsRef();
    auto CI = Components.begin();

    // Variable to compute the accumulation of the number of components.
    unsigned PrevSize = 0u;

    // Scan all the declarations and associated component lists.
    for (auto &M : ComponentListMap) {
      // The declaration.
      auto *D = M.first;
      // The component lists.
      auto CL = M.second;

      // Initialize the entry.
      *UDI = D;
      ++UDI;

      *DNLI = CL.size();
      ++DNLI;

      // Obtain the cumulative sizes and concatenate all the components in the
      // reserved storage.
      for (auto C : CL) {
        // Accumulate with the previous size.
        PrevSize += C.size();

        // Save the size.
        *CLSI = PrevSize;
        ++CLSI;

        // Append components after the current components iterator.
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }

public:
  /// \brief Return the number of unique base declarations in this clause.
  unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }

  /// \brief Return the number of lists derived from the clause expressions.
  unsigned getTotalComponentListNum() const { return NumComponentLists; }

  /// \brief Return the total number of components in all lists derived from the
  /// clause.
  unsigned getTotalComponentsNum() const { return NumComponents; }

  /// \brief Iterator that browse the components by lists. It also allows
  /// browsing components of a single declaration.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;

    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize = 0;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// \brief Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
    }

    /// \brief Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      return std::make_pair(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
    }
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }
      ++ListSizeCur;
      return *this;
    }
  };

  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;

  /// \brief Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef());
  }
  const_component_lists_iterator component_lists_end() const {
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()));
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// \brief Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef());
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;

  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }

  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;

  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }

  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;

  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }

  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;

  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }
};

/// \brief This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// \brief Map type modifier for the 'map' clause.
  OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;

  /// \brief Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;

  /// \brief Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;

  /// \brief Location of the map type.
  SourceLocation MapLoc;

  /// \brief Colon location.
  SourceLocation ColonLoc;

  /// \brief Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapTypeModifier Map type modifier.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPMapClause(OpenMPMapClauseKind MapTypeModifier,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, SourceLocation StartLoc,
                        SourceLocation LParenLoc, SourceLocation EndLoc,
                        unsigned NumVars, unsigned NumUniqueDeclarations,
                        unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_map, StartLoc, LParenLoc, EndLoc,
                                  NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents),
        MapTypeModifier(MapTypeModifier), MapType(MapType),
        MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPMapClause(unsigned NumVars, unsigned NumUniqueDeclarations,
                        unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(
            OMPC_map, SourceLocation(), SourceLocation(), SourceLocation(),
            NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {}

  /// \brief Set type modifier for the clause.
  ///
  /// \param T Type Modifier for the clause.
  void setMapTypeModifier(OpenMPMapClauseKind T) { MapTypeModifier = T; }

  /// \brief Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// \brief Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// \brief Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Vars The original expressions used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param TypeModifier Map type modifier.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *Create(const ASTContext &C, SourceLocation StartLoc,
                              SourceLocation LParenLoc, SourceLocation EndLoc,
                              ArrayRef<Expr *> Vars,
                              ArrayRef<ValueDecl *> Declarations,
                              MappableExprComponentListsRef ComponentLists,
                              OpenMPMapClauseKind TypeModifier,
                              OpenMPMapClauseKind Type, bool TypeIsImplicit,
                              SourceLocation TypeLoc);

  /// \brief Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \a NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
                                   unsigned NumUniqueDeclarations,
                                   unsigned NumComponentLists,
                                   unsigned NumComponents);

  /// \brief Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

  /// \brief Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

  /// \brief Fetches the map type modifier for the clause.
  OpenMPMapClauseKind getMapTypeModifier() const LLVM_READONLY {
    return MapTypeModifier;
  }

  /// \brief Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

  /// \brief Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_map;
  }
};

/// \brief This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief NumTeams number.
  Stmt *NumTeams = nullptr;

  /// \brief Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// \brief Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// \brief Build an empty clause.
  OMPNumTeamsClause()
      : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return NumTeams number. Expr *getNumTeams() { return cast<Expr>(NumTeams); } /// \brief Return NumTeams number. Expr *getNumTeams() const { return cast<Expr>(NumTeams); } child_range children() { return child_range(&NumTeams, &NumTeams + 1); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_num_teams; } }; /// \brief This represents 'thread_limit' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp teams thread_limit(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'thread_limit' /// with single expression 'n'. class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief ThreadLimit number. Stmt *ThreadLimit = nullptr; /// \brief Set the ThreadLimit number. /// /// \param E ThreadLimit number. void setThreadLimit(Expr *E) { ThreadLimit = E; } public: /// \brief Build 'thread_limit' clause. /// /// \param E Expression associated with this clause. /// \param HelperE Helper Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPThreadLimitClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_thread_limit, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) { setPreInitStmt(HelperE, CaptureRegion); } /// \brief Build an empty clause. OMPThreadLimitClause() : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// \brief Sets the location of '('. 
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return ThreadLimit number.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }

  /// \brief Return ThreadLimit number.
  Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }

  child_range children() {
    return child_range(&ThreadLimit, &ThreadLimit + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_thread_limit;
  }
};

/// \brief This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief Priority number.
  Stmt *Priority = nullptr;

  /// \brief Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }

public:
  /// \brief Build 'priority' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_priority, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Priority(E) {}

  /// \brief Build an empty clause.
  OMPPriorityClause()
      : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// \brief Return Priority number.
Expr *getPriority() const { return cast<Expr>(Priority); }

  child_range children() { return child_range(&Priority, &Priority + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_priority;
  }
};

/// \brief This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief Grainsize expression of the clause.
  Stmt *Grainsize = nullptr;

  /// \brief Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// \brief Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                     SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_grainsize, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Grainsize(Size) {}

  /// \brief Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return the grainsize expression (may be null for an empty
  /// clause).
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_grainsize;
  }
};

/// \brief This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// \brief Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}

  /// \brief Build an empty clause.
  OMPNogroupClause()
      : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}

  // The clause carries no expressions, so it has no child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nogroup;
  }
};

/// \brief This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief Number-of-tasks expression of the clause.
  Stmt *NumTasks = nullptr;

  /// \brief Set the num_tasks expression.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// \brief Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_num_tasks, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumTasks(Size) {}

  /// \brief Build an empty clause.
  explicit OMPNumTasksClause()
      : OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return the num_tasks expression (may be null for an empty
  /// clause).
Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_tasks;
  }
};

/// \brief This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// \brief Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// \brief Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// \brief Build an empty clause.
  OMPHintClause() : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Returns the hint expression (may be null for an empty clause).
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }

  child_range children() { return child_range(&Hint, &Hint + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_hint;
  }
};

/// \brief This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
/// /// \code /// #pragma omp distribute dist_schedule(static, 3) /// \endcode /// In this example directive '#pragma omp distribute' has 'dist_schedule' /// clause with arguments 'static' and '3'. class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'schedule' clause. OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown; /// \brief Start location of the schedule kind in source code. SourceLocation KindLoc; /// \brief Location of ',' (if any). SourceLocation CommaLoc; /// \brief Chunk size. Expr *ChunkSize = nullptr; /// \brief Set schedule kind. /// /// \param K Schedule kind. void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; } /// \brief Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Set schedule kind start location. /// /// \param KLoc Schedule kind location. void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// \brief Set location of ','. /// /// \param Loc Location of ','. void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// \brief Set chunk size. /// /// \param E Chunk size. void setChunkSize(Expr *E) { ChunkSize = E; } public: /// \brief Build 'dist_schedule' clause with schedule kind \a Kind and chunk /// size expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind DistSchedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. 
OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }

  /// \brief Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// \brief Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// \brief Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// \brief Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }

  /// \brief Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// \brief Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// \brief Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  // The only child is the chunk-size expression.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dist_schedule;
  }
};

/// \brief This represents 'defaultmap' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of
/// kind 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief Modifiers for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// \brief Locations of modifiers.
  SourceLocation ModifierLoc;

  /// \brief A kind of the 'defaultmap' clause.
OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// \brief Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// \brief Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// \brief Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// \brief Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// \brief Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// \brief Build 'defaultmap' clause with defaultmap kind \a Kind.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier.
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}

  /// \brief Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()) {}

  /// \brief Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// \brief Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// \brief Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }

  /// \brief Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }

  /// \brief Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; }

  // The clause stores no expressions, so it has no child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_defaultmap;
  }
};

/// \brief This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// \brief Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPToClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned NumVars,
                       unsigned NumUniqueDeclarations,
                       unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_to, StartLoc, LParenLoc, EndLoc,
                                  NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
/// \param NumComponentLists Number of component lists in this clause. /// \param NumComponents Total number of expression components in the clause. explicit OMPToClause(unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents) : OMPMappableExprListClause( OMPC_to, SourceLocation(), SourceLocation(), SourceLocation(), NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// \brief Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPToClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// \brief Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of expressions listed in the clause. /// \param NumUniqueDeclarations Number of unique base declarations in this /// clause. /// \param NumComponentLists Number of unique base declarations in this /// clause. /// \param NumComponents Total number of expression components in the clause. 
static OMPToClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
                                  unsigned NumUniqueDeclarations,
                                  unsigned NumComponentLists,
                                  unsigned NumComponents);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_to;
  }
};

/// \brief This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// \brief Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPFromClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation EndLoc, unsigned NumVars,
                         unsigned NumUniqueDeclarations,
                         unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_from, StartLoc, LParenLoc, EndLoc,
                                  NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
/// \param NumComponentLists Number of component lists in this clause. /// \param NumComponents Total number of expression components in the clause. explicit OMPFromClause(unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents) : OMPMappableExprListClause( OMPC_from, SourceLocation(), SourceLocation(), SourceLocation(), NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// \brief Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPFromClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// \brief Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of expressions listed in the clause. /// \param NumUniqueDeclarations Number of unique base declarations in this /// clause. /// \param NumComponentLists Number of unique base declarations in this /// clause. /// \param NumComponents Total number of expression components in the clause. 
static OMPFromClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
                                    unsigned NumUniqueDeclarations,
                                    unsigned NumComponentLists,
                                    unsigned NumComponents);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_from;
  }
};

/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, unsigned NumVars,
                                 unsigned NumUniqueDeclarations,
                                 unsigned NumComponentLists,
                                 unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_use_device_ptr, StartLoc, LParenLoc,
                                  EndLoc, NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
/// \param NumComponentLists Number of component lists in this clause. /// \param NumComponents Total number of expression components in the clause. explicit OMPUseDevicePtrClause(unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents) : OMPMappableExprListClause(OMPC_use_device_ptr, SourceLocation(), SourceLocation(), SourceLocation(), NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return 3 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } /// Sets the list of references to private copies with initializers for new /// private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for new /// private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Sets the list of references to initializer variables for new private /// variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// Gets the list of references to initializer variables for new private /// variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. 
/// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// \param Vars The original expression used in the clause. /// \param PrivateVars Expressions referring to private copies. /// \param Inits Expressions referring to private copy initializers. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPUseDevicePtrClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars, ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of expressions listed in the clause. /// \param NumUniqueDeclarations Number of unique base declarations in this /// clause. /// \param NumComponentLists Number of unique base declarations in this /// clause. /// \param NumComponents Total number of expression components in the clause. 
static OMPUseDevicePtrClause *CreateEmpty(const ASTContext &C,
                                            unsigned NumVars,
                                            unsigned NumUniqueDeclarations,
                                            unsigned NumComponentLists,
                                            unsigned NumComponents);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_use_device_ptr;
  }
};

/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation EndLoc, unsigned NumVars,
                                unsigned NumUniqueDeclarations,
                                unsigned NumComponentLists,
                                unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_is_device_ptr, StartLoc, LParenLoc,
                                  EndLoc, NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(unsigned NumVars,
                                unsigned NumUniqueDeclarations,
                                unsigned NumComponentLists,
                                unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_is_device_ptr, SourceLocation(),
                                  SourceLocation(), SourceLocation(), NumVars,
                                  NumUniqueDeclarations, NumComponentLists,
                                  NumComponents) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPIsDevicePtrClause *
  Create(const ASTContext &C, SourceLocation StartLoc,
         SourceLocation LParenLoc, SourceLocation EndLoc,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  static OMPIsDevicePtrClause *CreateEmpty(const ASTContext &C,
                                           unsigned NumVars,
                                           unsigned NumUniqueDeclarations,
                                           unsigned NumComponentLists,
                                           unsigned NumComponents);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_is_device_ptr;
  }
};

} // namespace clang

#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
GB_assign_zombie3.c
//------------------------------------------------------------------------------
// GB_assign_zombie3: delete entries in C(:,j) for C_replace_phase
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// For GrB_Row_assign or GrB_Col_assign, C(I,j)<#M,repl>=any must delete all
// entries C(i,j) outside of C(I,j), if the mask M(i,0) (or its complement) is
// zero.  This step is not done for GxB_*_subassign, since that method does not
// modify anything outside IxJ.

// GB_assign_zombie3 and GB_assign_zombie4 are transposes of each other.

// Entries are not physically removed here; they are flagged as "zombies" by
// flipping their row index (GB_FLIP), to be pruned in a later phase.

#include "GB_assign.h"

void GB_assign_zombie3
(
    GrB_Matrix Z,           // the matrix C, or a copy
    const GrB_Matrix M,     // mask, one vector M(:,0), never NULL here
    const bool Mask_comp,   // if true, use the complement of the mask
    const int64_t j,        // vector index with entries to delete
    const GrB_Index *I,     // row index list; entries outside I are deleted
    const int64_t nI,       // length of I
    const int Ikind,        // kind of I (list, range, stride, or all)
    const int64_t Icolon [3],   // start/inc/end when I is a colon expression
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // get Z (:,j)
    //--------------------------------------------------------------------------

    const int64_t *restrict Zh = Z->h ;
    const int64_t *restrict Zp = Z->p ;
    int64_t *restrict Zi = Z->i ;
    int64_t pZ_start, pZ_end, pleft = 0, pright = Z->nvec-1 ;
    // find the range [pZ_start,pZ_end) of entries in vector j of Z
    // (handles both hypersparse and standard sparse formats)
    GB_lookup (Z->is_hyper, Zh, Zp, &pleft, pright, j, &pZ_start, &pZ_end) ;
    int64_t nzombies = Z->nzombies ;
    const int64_t zjnz = pZ_end - pZ_start ;    // # of entries in Z(:,j)

    //--------------------------------------------------------------------------
    // get M(:,0)
    //--------------------------------------------------------------------------

    const int64_t *restrict Mp = M->p ;
    const int64_t *restrict Mi = M->i ;
    const GB_void *restrict Mx = M->x ;
    const size_t msize = M->type->size ;
    // typecast any mask entry to bool
    const GB_cast_function cast_M = GB_cast_factory (GB_BOOL_code, M->type->code) ;
    // M has a single vector, so its entries are Mp [0] .. Mp [1]-1
    int64_t pM_start = Mp [0] ;
    int64_t pM_end = Mp [1] ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (zjnz, chunk, nthreads_max) ;
    // oversubscribe tasks relative to threads for dynamic load balance
    int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;

    //--------------------------------------------------------------------------
    // delete entries from Z(:,j) that are outside I, if the mask M allows it
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {
        // each task handles a contiguous slice [p1,p2) of Z(:,j)
        int64_t p1, p2 ;
        GB_PARTITION (p1, p2, zjnz, taskid, ntasks) ;
        for (int64_t pZ = pZ_start + p1 ; pZ < pZ_start + p2 ; pZ++)
        {

            //------------------------------------------------------------------
            // get Z(i,j)
            //------------------------------------------------------------------

            int64_t i = Zi [pZ] ;
            if (!GB_IS_ZOMBIE (i))      // skip entries already deleted
            {

                //--------------------------------------------------------------
                // Z(i,j) is outside Z(I,j) if i is not in the list I
                //--------------------------------------------------------------

                bool i_outside = !GB_ij_is_in_list (I, nI, i, Ikind, Icolon) ;
                if (i_outside)
                {

                    //----------------------------------------------------------
                    // Z(i,j) is a live entry not in the Z(I,J) submatrix
                    //----------------------------------------------------------

                    // Check the mask M to see if it should be deleted.
                    // Binary search for row i in the (sorted) pattern of M(:,0).
                    int64_t pM = pM_start ;
                    int64_t pright = pM_end - 1 ;
                    bool found ;
                    GB_BINARY_SEARCH (i, Mi, pM, pright, found) ;
                    bool mij = false ;
                    if (found)
                    {
                        // found it: typecast M(i,0) to bool
                        cast_M (&mij, Mx +(pM*msize), 0) ;
                    }
                    if (Mask_comp)
                    {
                        // negate the mask if Mask_comp is true
                        mij = !mij ;
                    }
                    if (!mij)
                    {
                        // delete Z(i,j) by marking it as a zombie
                        nzombies++ ;
                        Zi [pZ] = GB_FLIP (i) ;
                    }
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    Z->nzombies = nzombies ;
}
DRB108-atomic-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include "omprace.h" #include <omp.h> /* * Test if atomic can be recognized properly. No data races. * */ int main (void) { omprace_init(); int a=0; #pragma omp parallel { #pragma omp atomic a+=1; } printf ("a=%d\n",a); omprace_fini(); return 0; }
write_output.c
/* This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license. Github repository: https://github.com/OpenNWP/GAME */ /* Here, the output is written to grib and/or netcdf files and integrals are written to text files if configured that way. In addition to that, some postprocessing diagnostics are also calculated here. */ #include <stdio.h> #include <string.h> #include <time.h> #include <netcdf.h> #include <eccodes.h> #include <geos95.h> #include <atmostracers.h> #include "../game_types.h" #include "../game_constants.h" #include "io.h" #include "../thermodynamics/thermodynamics.h" #include "../spatial_operators/spatial_operators.h" #define ERRCODE 3 #define ECCERR(e) {printf("Error: Eccodes failed with error code %d. See http://download.ecmwf.int/test-data/eccodes/html/group__errors.html for meaning of the error codes.\n", e); exit(ERRCODE);} #define NCERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(2);} // the number of pressure levels for the pressure level output const int NO_OF_PRESSURE_LEVELS = 6; int set_basic_props2grib(codes_handle *, long, long, long, long, long, long); double calc_std_dev(double [], int); int global_scalar_integrator(Scalar_field, Grid *, double *); double pseudopotential(State *, Grid *, int); int get_pressure_levels(double []); int write_out(State *state_write_out, double wind_h_lowest_layer_array[], int min_no_of_output_steps, double t_init, double t_write, Diagnostics *diagnostics, Forcings *forcings, Grid *grid, Dualgrid *dualgrid, Config_io *config_io, Config *config, Soil *soil) { printf("Writing output ...\n"); // Diagnostics, forcings and radiation are primarily handed over for checks. // Time stuff. 
time_t t_init_t = (time_t) t_init; // t_init is in UTC struct tm *p_init_time = gmtime(&t_init_t); int init_year = p_init_time -> tm_year; int init_month = p_init_time -> tm_mon; int init_day = p_init_time -> tm_mday; int init_hour = p_init_time -> tm_hour; long data_date = 10000*(init_year + 1900) + 100*(init_month + 1) + init_day; long data_time = 100*init_hour; // Needed for netcdf. int retval; int err = 0; int layer_index, closest_index, second_closest_index; double wind_u_value, wind_v_value, cloudy_box_counter; double vector_to_minimize[NO_OF_LAYERS]; double min_density_cloudy_box = 1e-4; double *grib_output_field = malloc(NO_OF_LATLON_IO_POINTS*sizeof(double)); // diagnosing the temperature temperature_diagnostics(state_write_out, grid, diagnostics); /* Surface output including diagnostics. ------------------------------------- */ if (config_io -> surface_output_switch == 1) { double *mslp = malloc(NO_OF_SCALARS_H*sizeof(double)); double *surface_p = malloc(NO_OF_SCALARS_H*sizeof(double)); double *t2 = malloc(NO_OF_SCALARS_H*sizeof(double)); double *tcdc = malloc(NO_OF_SCALARS_H*sizeof(double)); double *rprate = malloc(NO_OF_SCALARS_H*sizeof(double)); double *sprate = malloc(NO_OF_SCALARS_H*sizeof(double)); double *cape = malloc(NO_OF_SCALARS_H*sizeof(double)); double temp_lowest_layer, pressure_value, mslp_factor, surface_p_factor, temp_mslp, temp_surface, z_height, theta, cape_integrand, delta_z, temp_closest, temp_second_closest, delta_z_temp, temperature_gradient, theta_e; double z_tropopause = 12e3; double standard_vert_lapse_rate = 0.0065; #pragma omp parallel for private(temp_lowest_layer, pressure_value, mslp_factor, surface_p_factor, temp_mslp, temp_surface, z_height, theta, cape_integrand, delta_z, temp_closest, temp_second_closest, delta_z_temp, temperature_gradient, theta_e, layer_index, closest_index, second_closest_index, cloudy_box_counter, vector_to_minimize) for (int i = 0; i < NO_OF_SCALARS_H; ++i) { // Now the aim is to determine the value 
of the MSLP. temp_lowest_layer = diagnostics -> temperature_gas[(NO_OF_LAYERS - 1)*NO_OF_SCALARS_H + i]; pressure_value = density_gas(state_write_out, (NO_OF_LAYERS - 1)*NO_OF_SCALARS_H + i) *gas_constant_diagnostics(state_write_out, (NO_OF_LAYERS - 1)*NO_OF_SCALARS_H + i, config) *temp_lowest_layer; temp_mslp = temp_lowest_layer + standard_vert_lapse_rate*grid -> z_scalar[i + (NO_OF_LAYERS - 1)*NO_OF_SCALARS_H]; mslp_factor = pow(1 - (temp_mslp - temp_lowest_layer)/temp_mslp, grid -> gravity_m[(NO_OF_LAYERS - 1)*NO_OF_VECTORS_PER_LAYER + i]/ (gas_constant_diagnostics(state_write_out, (NO_OF_LAYERS - 1)*NO_OF_SCALARS_H + i, config)*standard_vert_lapse_rate)); mslp[i] = pressure_value/mslp_factor; // Now the aim is to determine the value of the surface pressure. temp_surface = temp_lowest_layer + standard_vert_lapse_rate*(grid -> z_scalar[i + (NO_OF_LAYERS - 1)*NO_OF_SCALARS_H] - grid -> z_vector[NO_OF_VECTORS - NO_OF_SCALARS_H + i]); surface_p_factor = pow(1 - (temp_surface - temp_lowest_layer)/temp_surface, grid -> gravity_m[(NO_OF_LAYERS - 1)*NO_OF_VECTORS_PER_LAYER + i]/ (gas_constant_diagnostics(state_write_out, (NO_OF_LAYERS - 1)*NO_OF_SCALARS_H + i, config)*standard_vert_lapse_rate)); surface_p[i] = pressure_value/surface_p_factor; // Now the aim is to calculate the 2 m temperature. 
for (int j = 0; j < NO_OF_LAYERS; ++j) { vector_to_minimize[j] = fabs(grid -> z_vector[NO_OF_LAYERS*NO_OF_VECTORS_PER_LAYER + i] + 2 - grid -> z_scalar[i + j*NO_OF_SCALARS_H]); } closest_index = find_min_index(vector_to_minimize, NO_OF_LAYERS); temp_closest = diagnostics -> temperature_gas[closest_index*NO_OF_SCALARS_H + i]; delta_z_temp = grid -> z_vector[NO_OF_LAYERS*NO_OF_VECTORS_PER_LAYER + i] + 2 - grid -> z_scalar[i + closest_index*NO_OF_SCALARS_H]; // real radiation if (config -> rad_on == 1) { temperature_gradient = (temp_closest - soil -> temperature[i])/(grid -> z_scalar[i + closest_index*NO_OF_SCALARS_H] - grid -> z_vector[NO_OF_LAYERS*NO_OF_VECTORS_PER_LAYER + i]); } // no real radiation else { second_closest_index = closest_index - 1; if (grid -> z_scalar[i + closest_index*NO_OF_SCALARS_H] > grid -> z_vector[NO_OF_LAYERS*NO_OF_VECTORS_PER_LAYER + i] + 2 && closest_index < NO_OF_LAYERS - 1) { second_closest_index = closest_index + 1; } temp_second_closest = diagnostics -> temperature_gas[second_closest_index*NO_OF_SCALARS_H + i]; // calculating the vertical temperature gradient that will be used for the extrapolation temperature_gradient = (temp_closest - temp_second_closest)/(grid -> z_scalar[i + closest_index*NO_OF_SCALARS_H] - grid -> z_scalar[i + second_closest_index*NO_OF_SCALARS_H]); } // performing the interpolation / extrapolation to two meters above the surface t2[i] = temp_closest + delta_z_temp*temperature_gradient; // diagnozing CAPE // initializing CAPE with zero cape[i] = 0; layer_index = NO_OF_LAYERS - 1; z_height = grid -> z_scalar[layer_index*NO_OF_SCALARS_H + i]; // pseduopotential temperature of the particle in the lowest layer theta_e = pseudopotential(state_write_out, grid, (NO_OF_LAYERS - 1)*NO_OF_SCALARS_H + i); while (z_height < z_tropopause) { // full potential temperature in the grid box theta = grid -> theta_bg[layer_index*NO_OF_SCALARS_H + i] + state_write_out -> theta_pert[layer_index*NO_OF_SCALARS_H + i]; // thickness of 
the gridbox delta_z = grid -> z_vector[layer_index*NO_OF_VECTORS_PER_LAYER + i] - grid -> z_vector[(layer_index + 1)*NO_OF_VECTORS_PER_LAYER + i]; // this is the candidate that we might want to add to the integral cape_integrand = grid -> gravity_m[(NO_OF_LAYERS - 1)*NO_OF_VECTORS_PER_LAYER + i]*(theta_e - theta)/theta; // we do not add negative values to CAPE (see the definition of CAPE) if (cape_integrand > 0) { cape[i] += cape_integrand*delta_z; } --layer_index; z_height = grid -> z_scalar[layer_index*NO_OF_SCALARS_H + i]; } // Now come the hydrometeors. if (NO_OF_CONDENSED_CONSTITUENTS == 4) { cloudy_box_counter = 0; for (int k = 0; k < NO_OF_LAYERS; ++k) { if (state_write_out -> rho[k*NO_OF_SCALARS_H + i] > min_density_cloudy_box || state_write_out -> rho[NO_OF_SCALARS + k*NO_OF_SCALARS_H + i] > min_density_cloudy_box || state_write_out -> rho[2*NO_OF_SCALARS + k*NO_OF_SCALARS_H + i] > min_density_cloudy_box || state_write_out -> rho[3*NO_OF_SCALARS + k*NO_OF_SCALARS_H + i] > min_density_cloudy_box) { cloudy_box_counter += 1; } } tcdc[i] = fmin(100*cloudy_box_counter/(NO_OF_LAYERS/10.0), 100); } else { tcdc[i] = 0; } // solid precipitation rate sprate[i] = 0; if (NO_OF_CONDENSED_CONSTITUENTS == 4) { sprate[i] += config -> precipitation_droplets_velocity*state_write_out -> rho[(NO_OF_LAYERS - 1)*NO_OF_SCALARS_H + i]; sprate[i] += config -> cloud_droplets_velocity*state_write_out -> rho[2*NO_OF_SCALARS + (NO_OF_LAYERS - 1)*NO_OF_SCALARS_H + i]; } // liquid precipitation rate rprate[i] = 0; if (NO_OF_CONDENSED_CONSTITUENTS == 4) { rprate[i] += config -> precipitation_droplets_velocity*state_write_out -> rho[NO_OF_SCALARS + (NO_OF_LAYERS - 1)*NO_OF_SCALARS_H + i]; rprate[i] += config -> cloud_droplets_velocity*state_write_out -> rho[3*NO_OF_SCALARS + (NO_OF_LAYERS - 1)*NO_OF_SCALARS_H + i]; } } /* 10 m wind diagnostics --------------------- */ double wind_tangential; int j; // 10 m wind is measured over grass by WMO definition double *wind_10_m_speed = 
malloc(min_no_of_output_steps*NO_OF_VECTORS_H*sizeof(double)); double *wind_10_m_mean_u = malloc(NO_OF_VECTORS_H*sizeof(double)); double *wind_10_m_mean_v = malloc(NO_OF_VECTORS_H*sizeof(double)); // loop over the horizontal vector points #pragma omp parallel for private(j, wind_tangential) for (int h_index = 0; h_index < NO_OF_VECTORS_H; ++h_index) { // initializing the means with zero wind_10_m_mean_u[h_index] = 0; wind_10_m_mean_v[h_index] = 0; // loop over the time steps for (int time_step_10_m_wind = 0; time_step_10_m_wind < min_no_of_output_steps; ++time_step_10_m_wind) { j = time_step_10_m_wind*NO_OF_VECTORS_H + h_index; wind_tangential = 0; for (int i = 0; i < 10; ++i) { wind_tangential += grid -> trsk_weights[10*h_index + i]*wind_h_lowest_layer_array[time_step_10_m_wind*NO_OF_VECTORS_H + grid -> trsk_indices[10*h_index + i]]; } wind_10_m_speed[j] = sqrt(pow(wind_h_lowest_layer_array[j], 2) + pow(wind_tangential, 2)); wind_10_m_mean_u[h_index] += 1.0/min_no_of_output_steps*wind_h_lowest_layer_array[j]; wind_10_m_mean_v[h_index] += 1.0/min_no_of_output_steps*wind_tangential; } } double roughness_length_extrapolation, actual_roughness_length, z_agl, rescale_factor; #pragma omp parallel for private(wind_u_value, wind_v_value, roughness_length_extrapolation, actual_roughness_length, z_agl, rescale_factor) for (int i = 0; i < NO_OF_VECTORS_H; ++i) { actual_roughness_length = 0.5*(grid -> roughness_length[grid -> from_index[i]] + grid -> roughness_length[grid -> to_index[i]]); roughness_length_extrapolation = ROUGHNESS_LENGTH_GRASS; if (grid -> is_land[grid -> from_index[i]] == 0) { roughness_length_extrapolation = actual_roughness_length; } z_agl = 0.5*(grid -> z_vector[NO_OF_VECTORS - NO_OF_SCALARS_H + grid -> from_index[i]] + grid -> z_vector[NO_OF_VECTORS - NO_OF_SCALARS_H + grid -> to_index[i]]); passive_turn(wind_10_m_mean_u[i], wind_10_m_mean_v[i], -grid -> direction[i], &wind_u_value, &wind_v_value); // rescale factor for computing the wind in a height of 
10 m rescale_factor = log(10.0/roughness_length_extrapolation)/log((grid -> z_vector[NO_OF_VECTORS - NO_OF_VECTORS_PER_LAYER + i] - z_agl)/actual_roughness_length); wind_10_m_mean_u[i] = rescale_factor*wind_u_value; wind_10_m_mean_v[i] = rescale_factor*wind_v_value; } // diagnozing gusts at 10 m above the surface double standard_deviation; double gusts_parameter = 3; double *wind_10_m_gusts_speed = malloc(NO_OF_VECTORS_H*sizeof(double)); double *vector_for_std_deviation = malloc(min_no_of_output_steps*sizeof(double)); double wind_speed_10_m_mean; // loop over all horizontal vectors #pragma omp parallel for private(wind_speed_10_m_mean, standard_deviation) for (int i = 0; i < NO_OF_VECTORS_H; ++i) { // initializing the mean with zero wind_speed_10_m_mean = 0; // loop over all steps that are in the 10 minutes window around the output time for (int j = 0; j < min_no_of_output_steps; ++j) { // collecting all the wind speed values at this data point vector_for_std_deviation[j] = wind_10_m_speed[j*NO_OF_VECTORS_H + i]; // updating the mean wind speed wind_speed_10_m_mean += 1.0/min_no_of_output_steps*wind_10_m_speed[j*NO_OF_VECTORS_H + i]; } // calculating the standard deviation standard_deviation = calc_std_dev(vector_for_std_deviation, min_no_of_output_steps); // this is the case where the gust diagnostics is actually used if (t_write != t_init && min_no_of_output_steps >= 10) { wind_10_m_gusts_speed[i] = wind_speed_10_m_mean + gusts_parameter*standard_deviation; } // This is the case at the first step or if not enough steps in the output window are available. 
else { wind_10_m_gusts_speed[i] = (1 + 0.2)*wind_speed_10_m_mean; } } // freeing memory we do not need anymore free(vector_for_std_deviation); free(wind_10_m_speed); // allocating memory for output diagnostics double *wind_10_m_mean_u_at_cell = malloc(NO_OF_SCALARS_H*sizeof(double)); double *wind_10_m_mean_v_at_cell = malloc(NO_OF_SCALARS_H*sizeof(double)); double *wind_10_m_gusts_speed_at_cell = malloc(NO_OF_SCALARS_H*sizeof(double)); // averaging the wind quantities to cell centers for output edges_to_cells_lowest_layer(wind_10_m_mean_u, wind_10_m_mean_u_at_cell, grid); free(wind_10_m_mean_u); edges_to_cells_lowest_layer(wind_10_m_mean_v, wind_10_m_mean_v_at_cell, grid); free(wind_10_m_mean_v); edges_to_cells_lowest_layer(wind_10_m_gusts_speed, wind_10_m_gusts_speed_at_cell, grid); free(wind_10_m_gusts_speed); // Netcdf output. if (config_io -> netcdf_output_switch == 1) { char OUTPUT_FILE_PRE[300]; sprintf(OUTPUT_FILE_PRE, "%s+%ds_surface.nc", config_io -> run_id, (int) (t_write - t_init)); char OUTPUT_FILE[strlen(OUTPUT_FILE_PRE) + 1]; sprintf(OUTPUT_FILE, "%s+%ds_surface.nc", config_io -> run_id, (int) (t_write - t_init)); int scalar_h_dimid, mslp_id, ncid, retval, surface_p_id, rprate_id, sprate_id, cape_id, tcdc_id, t2_id, u10_id, v10_id, gusts_id; if ((retval = nc_create(OUTPUT_FILE, NC_CLOBBER, &ncid))) NCERR(retval); if ((retval = nc_def_dim(ncid, "scalar_index_h", NO_OF_SCALARS_H, &scalar_h_dimid))) NCERR(retval); // Defining the variables. 
if ((retval = nc_def_var(ncid, "mslp", NC_DOUBLE, 1, &scalar_h_dimid, &mslp_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, mslp_id, "units", strlen("Pa"), "Pa"))) NCERR(retval); if ((retval = nc_def_var(ncid, "surface_p", NC_DOUBLE, 1, &scalar_h_dimid, &surface_p_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, surface_p_id, "units", strlen("Pa"), "Pa"))) NCERR(retval); if ((retval = nc_def_var(ncid, "t2", NC_DOUBLE, 1, &scalar_h_dimid, &t2_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, t2_id, "units", strlen("K"), "K"))) NCERR(retval); if ((retval = nc_def_var(ncid, "tcdc", NC_DOUBLE, 1, &scalar_h_dimid, &tcdc_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, tcdc_id, "units", strlen("%"), "%"))) NCERR(retval); if ((retval = nc_def_var(ncid, "rprate", NC_DOUBLE, 1, &scalar_h_dimid, &rprate_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, rprate_id, "units", strlen("kg/(m^2s)"), "kg/(m^2s)"))) NCERR(retval); if ((retval = nc_def_var(ncid, "sprate", NC_DOUBLE, 1, &scalar_h_dimid, &sprate_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, sprate_id, "units", strlen("kg/(m^2s)"), "kg/(m^2s)"))) NCERR(retval); if ((retval = nc_def_var(ncid, "cape", NC_DOUBLE, 1, &scalar_h_dimid, &cape_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, cape_id, "units", strlen("J/kg"), "J/kg"))) NCERR(retval); if ((retval = nc_def_var(ncid, "10u", NC_DOUBLE, 1, &scalar_h_dimid, &u10_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, cape_id, "units", strlen("m/s"), "m/s"))) NCERR(retval); if ((retval = nc_def_var(ncid, "10v", NC_DOUBLE, 1, &scalar_h_dimid, &v10_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, cape_id, "units", strlen("m/s"), "m/s"))) NCERR(retval); if ((retval = nc_def_var(ncid, "10gusts", NC_DOUBLE, 1, &scalar_h_dimid, &gusts_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, cape_id, "units", strlen("m/s"), "m/s"))) NCERR(retval); if ((retval = nc_enddef(ncid))) NCERR(retval); if ((retval = 
nc_put_var_double(ncid, mslp_id, &mslp[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, surface_p_id, &surface_p[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, t2_id, &t2[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, tcdc_id, &tcdc[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, rprate_id, &rprate[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, sprate_id, &sprate[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, cape_id, &cape[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, u10_id, &wind_10_m_mean_u_at_cell[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, v10_id, &wind_10_m_mean_v_at_cell[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, gusts_id, &wind_10_m_gusts_speed_at_cell[0]))) NCERR(retval); // Closing the netcdf file. if ((retval = nc_close(ncid))) NCERR(retval); } // Grib output. if (config_io -> grib_output_switch == 1) { long unsigned tcc_string_length = 4; long unsigned cape_string_length = 5; char OUTPUT_FILE_PRE[300]; sprintf(OUTPUT_FILE_PRE, "%s+%ds_surface.grb2", config_io -> run_id, (int) (t_write - t_init)); char OUTPUT_FILE[strlen(OUTPUT_FILE_PRE) + 1]; sprintf(OUTPUT_FILE, "%s+%ds_surface.grb2", config_io -> run_id, (int) (t_write - t_init)); char *SAMPLE_FILENAME = "../../src/io/grib_template.grb2"; FILE *SAMPLE_FILE; if (t_init < 0) exit(1); FILE *OUT_GRIB; OUT_GRIB = fopen(OUTPUT_FILE, "w+"); codes_handle *handle_wind_u_10m_mean = NULL; codes_handle *handle_wind_v_10m_mean = NULL; codes_handle *handle_mslp = NULL; codes_handle *handle_surface_p = NULL; codes_handle *handle_t2 = NULL; codes_handle *handle_tcdc = NULL; codes_handle *handle_rprate = NULL; codes_handle *handle_sprate = NULL; codes_handle *handle_wind_10m_gusts = NULL; codes_handle *handle_cape = NULL; SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_surface_p = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); 
set_basic_props2grib(handle_surface_p, data_date, data_time, t_write, t_init, 3, 0); if ((retval = codes_set_long(handle_surface_p, "typeOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_surface_p, "scaledValueOfFirstFixedSurface", 0))) ECCERR(retval); if ((retval = codes_set_long(handle_surface_p, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_surface_p, "level", 0))) ECCERR(retval); interpolate_to_ll(surface_p, grib_output_field, grid); if ((retval = codes_set_double_array(handle_surface_p, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_surface_p, OUTPUT_FILE, "w"))) ECCERR(retval); codes_handle_delete(handle_surface_p); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_mslp = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_mslp, data_date, data_time, t_write, t_init, 3, 1); if ((retval = codes_set_long(handle_mslp, "typeOfFirstFixedSurface", 102))) ECCERR(retval); if ((retval = codes_set_long(handle_mslp, "scaledValueOfFirstFixedSurface", 0))) ECCERR(retval); if ((retval = codes_set_long(handle_mslp, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_mslp, "level", 0))) ECCERR(retval); interpolate_to_ll(mslp, grib_output_field, grid); if ((retval = codes_set_double_array(handle_mslp, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_mslp, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_mslp); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_t2 = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_t2, data_date, data_time, t_write, t_init, 0, 0); if ((retval = codes_set_long(handle_t2, "typeOfFirstFixedSurface", 103))) 
ECCERR(retval); if ((retval = codes_set_long(handle_t2, "scaledValueOfFirstFixedSurface", 2))) ECCERR(retval); if ((retval = codes_set_long(handle_t2, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_t2, "level", 2))) ECCERR(retval); interpolate_to_ll(t2, grib_output_field, grid); if ((retval = codes_set_double_array(handle_t2, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_t2, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_t2); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_rprate = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_rprate, data_date, data_time, t_write, t_init, 1, 65); if ((retval = codes_set_long(handle_rprate, "typeOfFirstFixedSurface", 103))) ECCERR(retval); if ((retval = codes_set_long(handle_rprate, "scaledValueOfFirstFixedSurface", 0))) ECCERR(retval); if ((retval = codes_set_long(handle_rprate, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_rprate, "level", 0))) ECCERR(retval); interpolate_to_ll(rprate, grib_output_field, grid); if ((retval = codes_set_double_array(handle_rprate, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_rprate, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_rprate); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_cape = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_cape, data_date, data_time, t_write, t_init, 7, 6); if ((retval = codes_set_long(handle_cape, "typeOfFirstFixedSurface", 103))) ECCERR(retval); if ((retval = codes_set_long(handle_cape, "scaledValueOfFirstFixedSurface", 0))) ECCERR(retval); if ((retval = codes_set_long(handle_cape, "scaleFactorOfFirstFixedSurface", 1))) 
ECCERR(retval); if ((retval = codes_set_string(handle_cape, "shortName", "cape", &cape_string_length))) ECCERR(retval); if ((retval = codes_set_long(handle_cape, "level", 0))) ECCERR(retval); interpolate_to_ll(cape, grib_output_field, grid); if ((retval = codes_set_double_array(handle_cape, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_cape, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_cape); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_sprate = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_sprate, data_date, data_time, t_write, t_init, 1, 66); if ((retval = codes_set_long(handle_sprate, "typeOfFirstFixedSurface", 103))) ECCERR(retval); if ((retval = codes_set_long(handle_sprate, "scaledValueOfFirstFixedSurface", 0))) ECCERR(retval); if ((retval = codes_set_long(handle_sprate, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_sprate, "level", 0))) ECCERR(retval); interpolate_to_ll(sprate, grib_output_field, grid); if ((retval = codes_set_double_array(handle_sprate, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_sprate, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_sprate); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_tcdc = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_tcdc, data_date, data_time, t_write, t_init, 6, 1); if ((retval = codes_set_long(handle_tcdc, "typeOfFirstFixedSurface", 103))) ECCERR(retval); if ((retval = codes_set_long(handle_tcdc, "scaledValueOfFirstFixedSurface", 0))) ECCERR(retval); if ((retval = codes_set_long(handle_tcdc, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_tcdc, "level", 0))) 
ECCERR(retval); if ((retval = codes_set_string(handle_tcdc, "shortName", "tcc", &tcc_string_length))) ECCERR(retval); interpolate_to_ll(tcdc, grib_output_field, grid); if ((retval = codes_set_double_array(handle_tcdc, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_tcdc, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_tcdc); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_wind_u_10m_mean = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_wind_u_10m_mean, data_date, data_time, t_write, t_init, 2, 2); if ((retval = codes_set_long(handle_wind_u_10m_mean, "typeOfFirstFixedSurface", 103))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_u_10m_mean, "scaledValueOfFirstFixedSurface", 10))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_u_10m_mean, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_u_10m_mean, "level", 10))) ECCERR(retval); interpolate_to_ll(wind_10_m_mean_u_at_cell, grib_output_field, grid); if ((retval = codes_set_double_array(handle_wind_u_10m_mean, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_wind_u_10m_mean, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_wind_u_10m_mean); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_wind_v_10m_mean = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_wind_v_10m_mean, data_date, data_time, t_write, t_init, 2, 3); if ((retval = codes_set_long(handle_wind_v_10m_mean, "typeOfFirstFixedSurface", 103))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_v_10m_mean, "scaledValueOfFirstFixedSurface", 10))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_v_10m_mean, "scaleFactorOfFirstFixedSurface", 
1))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_v_10m_mean, "level", 10))) ECCERR(retval); interpolate_to_ll(wind_10_m_mean_v_at_cell, grib_output_field, grid); if ((retval = codes_set_double_array(handle_wind_v_10m_mean, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_wind_v_10m_mean, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_wind_v_10m_mean); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_wind_10m_gusts = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_wind_10m_gusts, data_date, data_time, t_write, t_init, 2, 22); if ((retval = codes_set_long(handle_wind_10m_gusts, "typeOfFirstFixedSurface", 103))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_10m_gusts, "scaledValueOfFirstFixedSurface", 10))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_10m_gusts, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_10m_gusts, "level", 10))) ECCERR(retval); interpolate_to_ll(wind_10_m_gusts_speed_at_cell, grib_output_field, grid); if ((retval = codes_set_double_array(handle_wind_10m_gusts, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_wind_10m_gusts, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_wind_10m_gusts); fclose(OUT_GRIB); } free(wind_10_m_mean_u_at_cell); free(wind_10_m_mean_v_at_cell); free(wind_10_m_gusts_speed_at_cell); free(t2); free(mslp); free(surface_p); free(rprate); free(sprate); free(tcdc); free(cape); } // Diagnostics of quantities that are not surface-specific. 
Scalar_field *divv_h_all_layers = calloc(1, sizeof(Scalar_field)); divv_h(state_write_out -> wind, *divv_h_all_layers, grid); calc_rel_vort(state_write_out -> wind, diagnostics, grid, dualgrid); Scalar_field *rel_vort = calloc(1, sizeof(Scalar_field)); curl_field_to_cells(diagnostics -> rel_vort, *rel_vort, grid); // Diagnozing the u and v wind components at the vector points. calc_uv_at_edge(state_write_out -> wind, diagnostics -> u_at_edge, diagnostics -> v_at_edge, grid); // Averaging to cell centers for output. edges_to_cells(diagnostics -> u_at_edge, diagnostics -> u_at_cell, grid); edges_to_cells(diagnostics -> v_at_edge, diagnostics -> v_at_cell, grid); Scalar_field *rh = calloc(1, sizeof(Scalar_field)); Scalar_field *epv = calloc(1, sizeof(Scalar_field)); Scalar_field *pressure = calloc(1, sizeof(Scalar_field)); #pragma omp parallel for for (int i = 0; i < NO_OF_SCALARS; ++i) { if (NO_OF_CONSTITUENTS >= 4) { (*rh)[i] = 100*rel_humidity(state_write_out -> rho[(NO_OF_CONDENSED_CONSTITUENTS + 1)*NO_OF_SCALARS + i], diagnostics -> temperature_gas[i]); } (*pressure)[i] = density_gas(state_write_out, i)*gas_constant_diagnostics(state_write_out, i, config)*diagnostics -> temperature_gas[i]; } #pragma omp parallel for for (int i = 0; i < NO_OF_SCALARS; ++i) { diagnostics -> scalar_field_placeholder[i] = density_gas(state_write_out, i); } calc_pot_vort(state_write_out -> wind, diagnostics -> scalar_field_placeholder, diagnostics, grid, dualgrid); epv_diagnostics(diagnostics -> pot_vort, state_write_out, *epv, grid, dualgrid); // Pressure level output. double closest_weight; if (config_io -> pressure_level_output_switch == 1) { double *pressure_levels = malloc(sizeof(double)*NO_OF_PRESSURE_LEVELS); get_pressure_levels(pressure_levels); // Allocating memory for the variables on pressure levels. 
double (*geopotential_height)[NO_OF_PRESSURE_LEVELS] = malloc(sizeof(double[NO_OF_SCALARS_H][NO_OF_PRESSURE_LEVELS])); double (*t_on_pressure_levels)[NO_OF_PRESSURE_LEVELS] = malloc(sizeof(double[NO_OF_SCALARS_H][NO_OF_PRESSURE_LEVELS])); double (*rh_on_pressure_levels)[NO_OF_PRESSURE_LEVELS] = malloc(sizeof(double[NO_OF_SCALARS_H][NO_OF_PRESSURE_LEVELS])); double (*epv_on_pressure_levels)[NO_OF_PRESSURE_LEVELS] = malloc(sizeof(double[NO_OF_SCALARS_H][NO_OF_PRESSURE_LEVELS])); double (*u_on_pressure_levels)[NO_OF_PRESSURE_LEVELS] = malloc(sizeof(double[NO_OF_SCALARS_H][NO_OF_PRESSURE_LEVELS])); double (*v_on_pressure_levels)[NO_OF_PRESSURE_LEVELS] = malloc(sizeof(double[NO_OF_SCALARS_H][NO_OF_PRESSURE_LEVELS])); double (*rel_vort_on_pressure_levels)[NO_OF_PRESSURE_LEVELS] = malloc(sizeof(double[NO_OF_SCALARS_H][NO_OF_PRESSURE_LEVELS])); // Vertical interpolation to the pressure levels. #pragma omp parallel for private(vector_to_minimize, closest_index, second_closest_index, closest_weight) for (int i = 0; i < NO_OF_SCALARS_H; ++i) { for (int j = 0; j < NO_OF_PRESSURE_LEVELS; ++j) { for (int k = 0; k < NO_OF_LAYERS; ++k) { /* It is approx. p = p_0exp(-z/H) => log(p) = log(p_0) - z/H => z/H = log(p_0) - log(p) = log(p_0/p) => z = H*log(p_0/p). This leads to fabs(z_2 - z_1) = fabs(H*log(p_2/p) - H*log(p_1/p)) = H*fabs(log(p_2/p) - log(p_1/p)) = H*fabs(log(p_2/p_1)) propto fabs(log(p_2/p_1)). */ vector_to_minimize[k] = fabs(log(pressure_levels[j]/(*pressure)[k*NO_OF_SCALARS_H + i])); } // Finding the model layer that is the closest to the desired pressure level. 
closest_index = find_min_index(vector_to_minimize, NO_OF_LAYERS); // first guess for the other layer that will be used for the interpolation second_closest_index = closest_index + 1; // in this case, the layer above the closest layer will be used for the interpolation if (pressure_levels[j] < (*pressure)[closest_index*NO_OF_SCALARS_H + i]) { second_closest_index = closest_index - 1; } // in this case, a missing value will be written if ((closest_index == NO_OF_LAYERS - 1 && second_closest_index == NO_OF_LAYERS) || (closest_index < 0 || second_closest_index < 0)) { geopotential_height[i][j] = 9999; t_on_pressure_levels[i][j] = 9999; rh_on_pressure_levels[i][j] = 9999; epv_on_pressure_levels[i][j] = 9999; rel_vort_on_pressure_levels[i][j] = 9999; u_on_pressure_levels[i][j] = 9999; v_on_pressure_levels[i][j] = 9999; } else { /* this is the interpolation weight: closest_weight = 1 - fabs((delta z)_{closest})/(fabs(z_{closest} - z_{other})) */ closest_weight = 1 - vector_to_minimize[closest_index]/ (fabs(log((*pressure)[closest_index*NO_OF_SCALARS_H + i]/(*pressure)[second_closest_index*NO_OF_SCALARS_H + i])) + EPSILON_SECURITY); geopotential_height[i][j] = closest_weight*grid -> gravity_potential[closest_index*NO_OF_SCALARS_H + i] + (1 - closest_weight)*grid -> gravity_potential[second_closest_index*NO_OF_SCALARS_H + i]; geopotential_height[i][j] = geopotential_height[i][j]/GRAVITY_MEAN_SFC_ABS; t_on_pressure_levels[i][j] = closest_weight*diagnostics -> temperature_gas[closest_index*NO_OF_SCALARS_H + i] + (1 - closest_weight)*diagnostics -> temperature_gas[second_closest_index*NO_OF_SCALARS_H + i]; rh_on_pressure_levels[i][j] = closest_weight*(*rh)[closest_index*NO_OF_SCALARS_H + i] + (1 - closest_weight)*(*rh)[second_closest_index*NO_OF_SCALARS_H + i]; epv_on_pressure_levels[i][j] = closest_weight*(*epv)[closest_index*NO_OF_SCALARS_H + i] + (1 - closest_weight)*(*epv)[second_closest_index*NO_OF_SCALARS_H + i]; rel_vort_on_pressure_levels[i][j] = 
closest_weight*(*rel_vort)[closest_index*NO_OF_SCALARS_H + i] + (1 - closest_weight)*(*rel_vort)[second_closest_index*NO_OF_SCALARS_H + i]; u_on_pressure_levels[i][j] = closest_weight*diagnostics-> u_at_cell[closest_index*NO_OF_SCALARS_H + i] + (1 - closest_weight)*diagnostics-> u_at_cell[second_closest_index*NO_OF_SCALARS_H + i]; v_on_pressure_levels[i][j] = closest_weight*diagnostics-> v_at_cell[closest_index*NO_OF_SCALARS_H + i] + (1 - closest_weight)*diagnostics-> v_at_cell[second_closest_index*NO_OF_SCALARS_H + i]; } } } // Netcdf output. if (config_io -> netcdf_output_switch == 1) { int OUTPUT_FILE_PRESSURE_LEVEL_LENGTH = 300; char *OUTPUT_FILE_PRESSURE_LEVEL_PRE = malloc((OUTPUT_FILE_PRESSURE_LEVEL_LENGTH + 1)*sizeof(char)); sprintf(OUTPUT_FILE_PRESSURE_LEVEL_PRE, "%s+%ds_pressure_levels.nc", config_io -> run_id, (int) (t_write - t_init)); OUTPUT_FILE_PRESSURE_LEVEL_LENGTH = strlen(OUTPUT_FILE_PRESSURE_LEVEL_PRE); free(OUTPUT_FILE_PRESSURE_LEVEL_PRE); char *OUTPUT_FILE_PRESSURE_LEVEL = malloc((OUTPUT_FILE_PRESSURE_LEVEL_LENGTH + 1)*sizeof(char)); sprintf(OUTPUT_FILE_PRESSURE_LEVEL, "%s+%ds_pressure_levels.nc", config_io -> run_id, (int) (t_write - t_init)); int ncid_pressure_level, scalar_h_dimid, level_dimid, geopot_height_id, temp_pressure_level_id, rh_pressure_level_id, wind_u_pressure_level_id, wind_v_pressure_level_id, pressure_levels_id, epv_pressure_level_id, rel_vort_pressure_level_id; if ((retval = nc_create(OUTPUT_FILE_PRESSURE_LEVEL, NC_CLOBBER, &ncid_pressure_level))) NCERR(retval); free(OUTPUT_FILE_PRESSURE_LEVEL); if ((retval = nc_def_dim(ncid_pressure_level, "scalar_index_h", NO_OF_SCALARS_H, &scalar_h_dimid))) NCERR(retval); if ((retval = nc_def_dim(ncid_pressure_level, "level_index", NO_OF_PRESSURE_LEVELS, &level_dimid))) NCERR(retval); int dimids_pressure_level_scalar[2]; dimids_pressure_level_scalar[0] = scalar_h_dimid; dimids_pressure_level_scalar[1] = level_dimid; // Defining the variables. 
if ((retval = nc_def_var(ncid_pressure_level, "pressure_levels", NC_DOUBLE, 1, &level_dimid, &pressure_levels_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid_pressure_level, pressure_levels_id, "units", strlen("Pa"), "Pa"))) NCERR(retval); if ((retval = nc_def_var(ncid_pressure_level, "geopotential_height", NC_DOUBLE, 2, dimids_pressure_level_scalar, &geopot_height_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid_pressure_level, geopot_height_id, "units", strlen("gpm"), "gpm"))) NCERR(retval); if ((retval = nc_def_var(ncid_pressure_level, "temperature", NC_DOUBLE, 2, dimids_pressure_level_scalar, &temp_pressure_level_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid_pressure_level, temp_pressure_level_id, "units", strlen("K"), "K"))) NCERR(retval); if ((retval = nc_def_var(ncid_pressure_level, "relative_humidity", NC_DOUBLE, 2, dimids_pressure_level_scalar, &rh_pressure_level_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid_pressure_level, rh_pressure_level_id, "units", strlen("%"), "%"))) NCERR(retval); if ((retval = nc_def_var(ncid_pressure_level, "ertels_potential_vorticity", NC_DOUBLE, 2, dimids_pressure_level_scalar, &epv_pressure_level_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid_pressure_level, epv_pressure_level_id, "units", strlen("Km^2/(kgs)"), "Km^2/(kgs)"))) NCERR(retval); if ((retval = nc_def_var(ncid_pressure_level, "wind_u", NC_DOUBLE, 2, dimids_pressure_level_scalar, &wind_u_pressure_level_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid_pressure_level, wind_u_pressure_level_id, "units", strlen("m/s"), "m/s"))) NCERR(retval); if ((retval = nc_def_var(ncid_pressure_level, "wind_v", NC_DOUBLE, 2, dimids_pressure_level_scalar, &wind_v_pressure_level_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid_pressure_level, wind_v_pressure_level_id, "units", strlen("m/s"), "m/s"))) NCERR(retval); if ((retval = nc_def_var(ncid_pressure_level, "relative_vorticity", NC_DOUBLE, 2, dimids_pressure_level_scalar, 
&rel_vort_pressure_level_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid_pressure_level, rel_vort_pressure_level_id, "units", strlen("1/s"), "1/s"))) NCERR(retval); if ((retval = nc_enddef(ncid_pressure_level))) NCERR(retval); // Writing the arrays. if ((retval = nc_put_var_double(ncid_pressure_level, pressure_levels_id, &pressure_levels[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid_pressure_level, geopot_height_id, &geopotential_height[0][0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid_pressure_level, temp_pressure_level_id, &t_on_pressure_levels[0][0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid_pressure_level, epv_pressure_level_id, &epv_on_pressure_levels[0][0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid_pressure_level, rh_pressure_level_id, &rh_on_pressure_levels[0][0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid_pressure_level, wind_u_pressure_level_id, &u_on_pressure_levels[0][0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid_pressure_level, wind_v_pressure_level_id, &v_on_pressure_levels[0][0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid_pressure_level, rel_vort_pressure_level_id, &rel_vort_on_pressure_levels[0][0]))) NCERR(retval); // Closing the netcdf file. if ((retval = nc_close(ncid_pressure_level))) NCERR(retval); } // Grib output. 
if (config_io -> grib_output_switch == 1) { char *SAMPLE_FILENAME = "../../src/io/grib_template.grb2"; FILE *SAMPLE_FILE; int OUTPUT_FILE_PRESSURE_LEVEL_LENGTH = 300; char *OUTPUT_FILE_PRESSURE_LEVEL_PRE = malloc((OUTPUT_FILE_PRESSURE_LEVEL_LENGTH + 1)*sizeof(char)); sprintf(OUTPUT_FILE_PRESSURE_LEVEL_PRE, "%s+%ds_pressure_levels.grb2", config_io -> run_id, (int) (t_write - t_init)); OUTPUT_FILE_PRESSURE_LEVEL_LENGTH = strlen(OUTPUT_FILE_PRESSURE_LEVEL_PRE); free(OUTPUT_FILE_PRESSURE_LEVEL_PRE); char *OUTPUT_FILE_PRESSURE_LEVEL = malloc((OUTPUT_FILE_PRESSURE_LEVEL_LENGTH + 1)*sizeof(char)); sprintf(OUTPUT_FILE_PRESSURE_LEVEL, "%s+%ds_pressure_levels.grb2", config_io -> run_id, (int) (t_write - t_init)); FILE *OUT_GRIB; OUT_GRIB = fopen(OUTPUT_FILE_PRESSURE_LEVEL, "w+"); double *geopotential_height_pressure_level = malloc(NO_OF_SCALARS_H*sizeof(double)); double *temperature_pressure_level = malloc(NO_OF_SCALARS_H*sizeof(double)); double *rh_pressure_level = malloc(NO_OF_SCALARS_H*sizeof(double)); double *epv_pressure_level = malloc(NO_OF_SCALARS_H*sizeof(double)); double *wind_u_pressure_level = malloc(NO_OF_SCALARS_H*sizeof(double)); double *wind_v_pressure_level = malloc(NO_OF_SCALARS_H*sizeof(double)); double *rel_vort_pressure_level = malloc(NO_OF_SCALARS_H*sizeof(double)); codes_handle *handle_geopotential_height_pressure_level = NULL; codes_handle *handle_temperature_pressure_level = NULL; codes_handle *handle_rh_pressure_level = NULL; codes_handle *handle_epv_pressure_level = NULL; codes_handle *handle_wind_u_pressure_level = NULL; codes_handle *handle_wind_v_pressure_level = NULL; codes_handle *handle_rel_vort_pressure_level = NULL; for (int i = 0; i < NO_OF_PRESSURE_LEVELS; ++i) { #pragma omp parallel for for (int j = 0; j < NO_OF_SCALARS_H; ++j) { geopotential_height_pressure_level[j] = geopotential_height[j][i]; } #pragma omp parallel for for (int j = 0; j < NO_OF_SCALARS_H; ++j) { temperature_pressure_level[j] = t_on_pressure_levels[j][i]; } #pragma omp 
parallel for for (int j = 0; j < NO_OF_SCALARS_H; ++j) { rh_pressure_level[j] = rh_on_pressure_levels[j][i]; } #pragma omp parallel for for (int j = 0; j < NO_OF_SCALARS_H; ++j) { epv_pressure_level[j] = epv_on_pressure_levels[j][i]; } #pragma omp parallel for for (int j = 0; j < NO_OF_SCALARS_H; ++j) { wind_u_pressure_level[j] = u_on_pressure_levels[j][i]; } #pragma omp parallel for for (int j = 0; j < NO_OF_SCALARS_H; ++j) { wind_v_pressure_level[j] = v_on_pressure_levels[j][i]; } #pragma omp parallel for for (int j = 0; j < NO_OF_SCALARS_H; ++j) { rel_vort_pressure_level[j] = rel_vort_on_pressure_levels[j][i]; } SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_geopotential_height_pressure_level = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_geopotential_height_pressure_level, data_date, data_time, t_write, t_init, 3, 5); if ((retval = codes_set_double(handle_geopotential_height_pressure_level, "missingValue", 9999))) ECCERR(retval); if ((retval = codes_set_long(handle_geopotential_height_pressure_level, "bitmapPresent", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_geopotential_height_pressure_level, "typeOfFirstFixedSurface", 100))) ECCERR(retval); if ((retval = codes_set_long(handle_geopotential_height_pressure_level, "scaledValueOfFirstFixedSurface", (int) pressure_levels[i]))) ECCERR(retval); if ((retval = codes_set_long(handle_geopotential_height_pressure_level, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_geopotential_height_pressure_level, "level", 0.01*pressure_levels[i]))) ECCERR(retval); interpolate_to_ll(geopotential_height_pressure_level, grib_output_field, grid); if ((retval = codes_set_double_array(handle_geopotential_height_pressure_level, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if (i == 0) { if ((retval = 
codes_write_message(handle_geopotential_height_pressure_level, OUTPUT_FILE_PRESSURE_LEVEL, "w"))) ECCERR(retval); } else { if ((retval = codes_write_message(handle_geopotential_height_pressure_level, OUTPUT_FILE_PRESSURE_LEVEL, "a"))) ECCERR(retval); } codes_handle_delete(handle_geopotential_height_pressure_level); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_temperature_pressure_level = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_temperature_pressure_level, data_date, data_time, t_write, t_init, 0, 0); if ((retval = codes_set_double(handle_temperature_pressure_level, "missingValue", 9999))) ECCERR(retval); if ((retval = codes_set_long(handle_temperature_pressure_level, "bitmapPresent", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_temperature_pressure_level, "typeOfFirstFixedSurface", 100))) ECCERR(retval); if ((retval = codes_set_long(handle_temperature_pressure_level, "scaledValueOfFirstFixedSurface", (int) pressure_levels[i]))) ECCERR(retval); if ((retval = codes_set_long(handle_temperature_pressure_level, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_temperature_pressure_level, "level", 0.01*pressure_levels[i]))) ECCERR(retval); interpolate_to_ll(temperature_pressure_level, grib_output_field, grid); if ((retval = codes_set_double_array(handle_temperature_pressure_level, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_temperature_pressure_level, OUTPUT_FILE_PRESSURE_LEVEL, "a"))) ECCERR(retval); codes_handle_delete(handle_temperature_pressure_level); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_rh_pressure_level = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_rh_pressure_level, data_date, data_time, t_write, t_init, 1, 1); if ((retval = 
codes_set_double(handle_rh_pressure_level, "missingValue", 9999))) ECCERR(retval); if ((retval = codes_set_long(handle_rh_pressure_level, "bitmapPresent", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_rh_pressure_level, "typeOfFirstFixedSurface", 100))) ECCERR(retval); if ((retval = codes_set_long(handle_rh_pressure_level, "scaledValueOfFirstFixedSurface", (int) pressure_levels[i]))) ECCERR(retval); if ((retval = codes_set_long(handle_rh_pressure_level, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_rh_pressure_level, "level", 0.01*pressure_levels[i]))) ECCERR(retval); interpolate_to_ll(rh_pressure_level, grib_output_field, grid); if ((retval = codes_set_double_array(handle_rh_pressure_level, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_rh_pressure_level, OUTPUT_FILE_PRESSURE_LEVEL, "a"))) ECCERR(retval); codes_handle_delete(handle_rh_pressure_level); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_rel_vort_pressure_level = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_rel_vort_pressure_level, data_date, data_time, t_write, t_init, 2, 12); if ((retval = codes_set_double(handle_rel_vort_pressure_level, "missingValue", 9999))) ECCERR(retval); if ((retval = codes_set_long(handle_rel_vort_pressure_level, "bitmapPresent", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_rel_vort_pressure_level, "typeOfFirstFixedSurface", 100))) ECCERR(retval); if ((retval = codes_set_long(handle_rel_vort_pressure_level, "scaledValueOfFirstFixedSurface", (int) pressure_levels[i]))) ECCERR(retval); if ((retval = codes_set_long(handle_rel_vort_pressure_level, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_rel_vort_pressure_level, "level", 0.01*pressure_levels[i]))) ECCERR(retval); 
interpolate_to_ll(rel_vort_pressure_level, grib_output_field, grid); if ((retval = codes_set_double_array(handle_rel_vort_pressure_level, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_rel_vort_pressure_level, OUTPUT_FILE_PRESSURE_LEVEL, "a"))) ECCERR(retval); codes_handle_delete(handle_rel_vort_pressure_level); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_epv_pressure_level = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_epv_pressure_level, data_date, data_time, t_write, t_init, 2, 14); if ((retval = codes_set_double(handle_epv_pressure_level, "missingValue", 9999))) ECCERR(retval); if ((retval = codes_set_long(handle_epv_pressure_level, "bitmapPresent", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_epv_pressure_level, "typeOfFirstFixedSurface", 100))) ECCERR(retval); if ((retval = codes_set_long(handle_epv_pressure_level, "scaledValueOfFirstFixedSurface", (int) pressure_levels[i]))) ECCERR(retval); if ((retval = codes_set_long(handle_epv_pressure_level, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_epv_pressure_level, "level", 0.01*pressure_levels[i]))) ECCERR(retval); interpolate_to_ll(epv_pressure_level, grib_output_field, grid); if ((retval = codes_set_double_array(handle_epv_pressure_level, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_epv_pressure_level, OUTPUT_FILE_PRESSURE_LEVEL, "a"))) ECCERR(retval); codes_handle_delete(handle_epv_pressure_level); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_wind_u_pressure_level = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_wind_u_pressure_level, data_date, data_time, t_write, t_init, 2, 2); if ((retval = 
codes_set_double(handle_wind_u_pressure_level, "missingValue", 9999))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_u_pressure_level, "bitmapPresent", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_u_pressure_level, "typeOfFirstFixedSurface", 100))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_u_pressure_level, "scaledValueOfFirstFixedSurface", (int) pressure_levels[i]))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_u_pressure_level, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_u_pressure_level, "level", 0.01*pressure_levels[i]))) ECCERR(retval); interpolate_to_ll(wind_u_pressure_level, grib_output_field, grid); if ((retval = codes_set_double_array(handle_wind_u_pressure_level, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_wind_u_pressure_level, OUTPUT_FILE_PRESSURE_LEVEL, "a"))) ECCERR(retval); codes_handle_delete(handle_wind_u_pressure_level); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_wind_v_pressure_level = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_wind_v_pressure_level, data_date, data_time, t_write, t_init, 2, 3); if ((retval = codes_set_double(handle_wind_v_pressure_level, "missingValue", 9999))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_v_pressure_level, "bitmapPresent", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_v_pressure_level, "typeOfFirstFixedSurface", 100))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_v_pressure_level, "scaledValueOfFirstFixedSurface", (int) pressure_levels[i]))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_v_pressure_level, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_v_pressure_level, "level", 0.01*pressure_levels[i]))) ECCERR(retval); 
interpolate_to_ll(wind_v_pressure_level, grib_output_field, grid); if ((retval = codes_set_double_array(handle_wind_v_pressure_level, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_wind_v_pressure_level, OUTPUT_FILE_PRESSURE_LEVEL, "a"))) ECCERR(retval); codes_handle_delete(handle_wind_v_pressure_level); } free(geopotential_height_pressure_level); free(temperature_pressure_level); free(epv_pressure_level); free(rh_pressure_level); free(wind_u_pressure_level); free(wind_v_pressure_level); free(rel_vort_pressure_level); free(OUTPUT_FILE_PRESSURE_LEVEL); fclose(OUT_GRIB); } free(geopotential_height); free(t_on_pressure_levels); free(rh_on_pressure_levels); free(u_on_pressure_levels); free(v_on_pressure_levels); free(epv_on_pressure_levels); free(pressure_levels); } // Grib output. if (config_io -> model_level_output_switch == 1 && config_io -> grib_output_switch == 1) { // Grib requires everything to be on horizontal levels. double *temperature_h = malloc(NO_OF_SCALARS_H*sizeof(double)); double *pressure_h = malloc(NO_OF_SCALARS_H*sizeof(double)); double *rh_h = malloc(NO_OF_SCALARS_H*sizeof(double)); double *wind_u_h = malloc(NO_OF_SCALARS_H*sizeof(double)); double *wind_v_h = malloc(NO_OF_SCALARS_H*sizeof(double)); double *rel_vort_h = malloc(NO_OF_SCALARS_H*sizeof(double)); double *divv_h = malloc(NO_OF_SCALARS_H*sizeof(double)); double *wind_w_h = malloc(NO_OF_SCALARS_H*sizeof(double)); char OUTPUT_FILE_PRE[300]; sprintf(OUTPUT_FILE_PRE, "%s+%ds.grb2", config_io -> run_id, (int) (t_write - t_init)); char OUTPUT_FILE[strlen(OUTPUT_FILE_PRE) + 1]; sprintf(OUTPUT_FILE, "%s+%ds.grb2", config_io -> run_id, (int) (t_write - t_init)); char *SAMPLE_FILENAME = "../../src/io/grib_template.grb2"; FILE *SAMPLE_FILE; if (t_init < 0) exit(1); FILE *OUT_GRIB; OUT_GRIB = fopen(OUTPUT_FILE, "w+"); codes_handle *handle_temperature_h = NULL; codes_handle *handle_pressure_h = NULL; codes_handle *handle_wind_u_h = NULL; 
codes_handle *handle_wind_v_h = NULL; codes_handle *handle_wind_w_h = NULL; codes_handle *handle_rel_vort = NULL; codes_handle *handle_rh = NULL; codes_handle *handle_divv_h = NULL; for (int i = 0; i < NO_OF_LAYERS; ++i) { #pragma omp parallel for for (int j = 0; j < NO_OF_SCALARS_H; ++j) { temperature_h[j] = diagnostics -> temperature_gas[i*NO_OF_SCALARS_H + j]; pressure_h[j] = (*pressure)[i*NO_OF_SCALARS_H + j]; rh_h[j] = (*rh)[i*NO_OF_SCALARS_H + j]; } SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_temperature_h = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_temperature_h, data_date, data_time, t_write, t_init, 0, 0); if ((retval = codes_set_long(handle_temperature_h, "typeOfFirstFixedSurface", 26))) ECCERR(retval); if ((retval = codes_set_long(handle_temperature_h, "scaledValueOfFirstFixedSurface", i))) ECCERR(retval); if ((retval = codes_set_long(handle_temperature_h, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_temperature_h, "level", i))) ECCERR(retval); interpolate_to_ll(temperature_h, grib_output_field, grid); if ((retval = codes_set_double_array(handle_temperature_h, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if (i == 0) { if ((retval = codes_write_message(handle_temperature_h, OUTPUT_FILE, "w"))) ECCERR(retval); } else { if ((retval = codes_write_message(handle_temperature_h, OUTPUT_FILE, "a"))) ECCERR(retval); } codes_handle_delete(handle_temperature_h); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_pressure_h = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_pressure_h, data_date, data_time, t_write, t_init, 0, 0); if ((retval = codes_set_long(handle_pressure_h, "typeOfFirstFixedSurface", 26))) ECCERR(retval); if ((retval = codes_set_long(handle_pressure_h, 
"scaledValueOfFirstFixedSurface", i))) ECCERR(retval); if ((retval = codes_set_long(handle_pressure_h, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_pressure_h, "level", i))) ECCERR(retval); interpolate_to_ll(pressure_h, grib_output_field, grid); if ((retval = codes_set_double_array(handle_pressure_h, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_pressure_h, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_pressure_h); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_rh = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_rh, data_date, data_time, t_write, t_init, 0, 1); if ((retval = codes_set_long(handle_rh, "typeOfFirstFixedSurface", 26))) ECCERR(retval); if ((retval = codes_set_long(handle_rh, "scaledValueOfFirstFixedSurface", i))) ECCERR(retval); if ((retval = codes_set_long(handle_rh, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_rh, "level", i))) ECCERR(retval); interpolate_to_ll(rh_h, grib_output_field, grid); if ((retval = codes_set_double_array(handle_rh, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_rh, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_rh); for (int j = 0; j < NO_OF_SCALARS_H; ++j) { wind_u_h[j] = diagnostics -> u_at_cell[i*NO_OF_SCALARS_H + j]; wind_v_h[j] = diagnostics -> v_at_cell[i*NO_OF_SCALARS_H + j]; rel_vort_h[j] = (*rel_vort)[i*NO_OF_SCALARS_H + j]; } SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_wind_u_h = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_wind_u_h, data_date, data_time, t_write, t_init, 2, 2); if ((retval = codes_set_long(handle_wind_u_h, "typeOfFirstFixedSurface", 26))) 
ECCERR(retval); if ((retval = codes_set_long(handle_wind_u_h, "scaledValueOfFirstFixedSurface", i))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_u_h, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_u_h, "level", i))) ECCERR(retval); interpolate_to_ll(wind_u_h, grib_output_field, grid); if ((retval = codes_set_double_array(handle_wind_u_h, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_wind_u_h, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_wind_u_h); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_wind_v_h = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_wind_v_h, data_date, data_time, t_write, t_init, 2, 3); if ((retval = codes_set_long(handle_wind_v_h, "typeOfFirstFixedSurface", 26))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_v_h, "scaledValueOfFirstFixedSurface", i))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_v_h, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_v_h, "level", i))) ECCERR(retval); interpolate_to_ll(wind_v_h, grib_output_field, grid); if ((retval = codes_set_double_array(handle_wind_v_h, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_wind_v_h, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_wind_v_h); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_rel_vort = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_rel_vort, data_date, data_time, t_write, t_init, 2, 12); if ((retval = codes_set_long(handle_rel_vort, "typeOfFirstFixedSurface", 26))) ECCERR(retval); if ((retval = codes_set_long(handle_rel_vort, "scaledValueOfFirstFixedSurface", i))) ECCERR(retval); if 
((retval = codes_set_long(handle_rel_vort, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_rel_vort, "level", i))) ECCERR(retval); interpolate_to_ll(rel_vort_h, grib_output_field, grid); if ((retval = codes_set_double_array(handle_rel_vort, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_rel_vort, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_rel_vort); for (int j = 0; j < NO_OF_SCALARS_H; ++j) { divv_h[j] = (*divv_h_all_layers)[i*NO_OF_SCALARS_H + j]; } SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_divv_h = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); set_basic_props2grib(handle_divv_h, data_date, data_time, t_write, t_init, 2, 13); if ((retval = codes_set_long(handle_divv_h, "typeOfFirstFixedSurface", 26))) ECCERR(retval); if ((retval = codes_set_long(handle_divv_h, "scaledValueOfFirstFixedSurface", i))) ECCERR(retval); if ((retval = codes_set_long(handle_divv_h, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_divv_h, "level", i))) ECCERR(retval); interpolate_to_ll(divv_h, grib_output_field, grid); if ((retval = codes_set_double_array(handle_divv_h, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_divv_h, OUTPUT_FILE, "a"))) ECCERR(retval); codes_handle_delete(handle_divv_h); } free(wind_u_h); free(wind_v_h); free(rel_vort_h); free(divv_h); free(temperature_h); free(pressure_h); free(rh_h); SAMPLE_FILE = fopen(SAMPLE_FILENAME, "r"); handle_wind_w_h = codes_handle_new_from_file(NULL, SAMPLE_FILE, PRODUCT_GRIB, &err); if (err != 0) ECCERR(err); fclose(SAMPLE_FILE); for (int i = 0; i < NO_OF_LEVELS; ++i) { for (int j = 0; j < NO_OF_SCALARS_H; j++) { wind_w_h[j] = state_write_out -> wind[j + i*NO_OF_VECTORS_PER_LAYER]; } set_basic_props2grib(handle_wind_w_h, data_date, 
data_time, t_write, t_init, 2, 9); if ((retval = codes_set_long(handle_wind_w_h, "typeOfFirstFixedSurface", 26))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_w_h, "scaleFactorOfFirstFixedSurface", 1))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_w_h, "scaledValueOfFirstFixedSurface", i))) ECCERR(retval); if ((retval = codes_set_long(handle_wind_w_h, "level", i))) ECCERR(retval); interpolate_to_ll(wind_w_h, grib_output_field, grid); if ((retval = codes_set_double_array(handle_wind_w_h, "values", grib_output_field, NO_OF_LATLON_IO_POINTS))) ECCERR(retval); if ((retval = codes_write_message(handle_wind_w_h, OUTPUT_FILE, "a"))) ECCERR(retval); } codes_handle_delete(handle_wind_w_h); free(wind_w_h); fclose(OUT_GRIB); } // Netcdf output. if ((config_io -> model_level_output_switch == 1 && config_io -> netcdf_output_switch == 1) || (config -> nwp_mode == 1 && (int) (t_write - t_init) == config -> delta_t_between_analyses)) { // diagnozing the temperatures of all constituents double *temperatures = malloc((NO_OF_CONDENSED_CONSTITUENTS + 1)*NO_OF_SCALARS*sizeof(double)); // loop over all gridpoints #pragma omp parallel for for (int i = 0; i < NO_OF_SCALARS; ++i) { // loop over all condensed constituents for (int j = 0; j < NO_OF_CONDENSED_CONSTITUENTS; ++j) { // the non-LTE case if (config -> assume_lte == 0) { if (state_write_out -> rho[j*NO_OF_SCALARS + i] >= EPSILON_SECURITY) { temperatures[j*NO_OF_SCALARS + i] = state_write_out -> condensed_density_temperatures[j*NO_OF_SCALARS + i]/state_write_out -> rho[j*NO_OF_SCALARS + i]; } else { temperatures[j*NO_OF_SCALARS + i] = diagnostics -> temperature_gas[i]; } } // the LTE case if (config -> assume_lte == 1) { temperatures[j*NO_OF_SCALARS + i] = diagnostics -> temperature_gas[i]; } } temperatures[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i] = diagnostics -> temperature_gas[i]; } char OUTPUT_FILE_PRE[300]; sprintf(OUTPUT_FILE_PRE, "%s+%ds.nc", config_io -> run_id, (int) (t_write - t_init)); char 
OUTPUT_FILE[strlen(OUTPUT_FILE_PRE) + 1]; sprintf(OUTPUT_FILE, "%s+%ds.nc", config_io -> run_id, (int) (t_write - t_init)); int ncid, retval, scalar_dimid, vector_h_dimid, vector_v_dimid, vector_dimid, densities_dimid, temperatures_dimid, curl_field_dimid, single_double_dimid, densities_id, temperatures_id, wind_id, rh_id, divv_h_all_layers_id, rel_vort_id; if ((retval = nc_create(OUTPUT_FILE, NC_CLOBBER, &ncid))) NCERR(retval); if ((retval = nc_def_dim(ncid, "scalar_index", NO_OF_SCALARS, &scalar_dimid))) NCERR(retval); if ((retval = nc_def_dim(ncid, "vector_index", NO_OF_VECTORS, &vector_dimid))) NCERR(retval); if ((retval = nc_def_dim(ncid, "densities_index", NO_OF_CONSTITUENTS*NO_OF_SCALARS, &densities_dimid))) NCERR(retval); if ((retval = nc_def_dim(ncid, "temperatures_index", (NO_OF_CONDENSED_CONSTITUENTS + 1)*NO_OF_SCALARS, &temperatures_dimid))) NCERR(retval); if ((retval = nc_def_dim(ncid, "vector_index_h", NO_OF_H_VECTORS, &vector_h_dimid))) NCERR(retval); if ((retval = nc_def_dim(ncid, "vector_index_v", NO_OF_V_VECTORS, &vector_v_dimid))) NCERR(retval); if ((retval = nc_def_dim(ncid, "curl_point_index", NO_OF_LAYERS*2*NO_OF_VECTORS_H + NO_OF_VECTORS_H, &curl_field_dimid))) NCERR(retval); if ((retval = nc_def_dim(ncid, "single_double_dimid_index", 1, &single_double_dimid))) NCERR(retval); // Defining the variables. 
if ((retval = nc_def_var(ncid, "densities", NC_DOUBLE, 1, &densities_dimid, &densities_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, densities_id, "units", strlen("kg/m^3"), "kg/m^3"))) NCERR(retval); if ((retval = nc_def_var(ncid, "temperatures", NC_DOUBLE, 1, &temperatures_dimid, &temperatures_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, temperatures_id, "units", strlen("K"), "K"))) NCERR(retval); if ((retval = nc_def_var(ncid, "wind", NC_DOUBLE, 1, &vector_dimid, &wind_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, wind_id, "units", strlen("m/s"), "m/s"))) NCERR(retval); if ((retval = nc_def_var(ncid, "rh", NC_DOUBLE, 1, &scalar_dimid, &rh_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, rh_id, "units", strlen("%"), "%"))) NCERR(retval); if ((retval = nc_def_var(ncid, "rel_vort", NC_DOUBLE, 1, &scalar_dimid, &rel_vort_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, rel_vort_id, "units", strlen("1/s"), "1/s"))) NCERR(retval); if ((retval = nc_def_var(ncid, "divv_h_all_layers", NC_DOUBLE, 1, &scalar_dimid, &divv_h_all_layers_id))) NCERR(retval); if ((retval = nc_put_att_text(ncid, divv_h_all_layers_id, "units", strlen("1/s"), "1/s"))) NCERR(retval); if ((retval = nc_enddef(ncid))) NCERR(retval); // setting the variables if ((retval = nc_put_var_double(ncid, densities_id, &state_write_out -> rho[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, temperatures_id, &temperatures[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, wind_id, &state_write_out -> wind[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, rh_id, &(*rh)[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, rel_vort_id, &(*rel_vort)[0]))) NCERR(retval); if ((retval = nc_put_var_double(ncid, divv_h_all_layers_id, &(*divv_h_all_layers)[0]))) NCERR(retval); // Closing the netcdf file. 
if ((retval = nc_close(ncid))) NCERR(retval); free(temperatures); } free(grib_output_field); free(divv_h_all_layers); free(rel_vort); free(rh); free(epv); free(pressure); printf("Output written.\n"); return 0; } int write_out_integral(State *state_write_out, double time_since_init, Grid *grid, Dualgrid *dualgrid, Diagnostics *diagnostics, int integral_id) { /* integral_id: 0: dry mass 1: entropy 2: energy */ double global_integral = 0; FILE *global_integral_file; int INTEGRAL_FILE_LENGTH = 200; char *INTEGRAL_FILE_PRE = malloc((INTEGRAL_FILE_LENGTH + 1)*sizeof(char)); if (integral_id == 0) sprintf(INTEGRAL_FILE_PRE, "%s", "masses"); if (integral_id == 1) sprintf(INTEGRAL_FILE_PRE, "%s", "potential_temperature_density"); if (integral_id == 2) sprintf(INTEGRAL_FILE_PRE, "%s", "energy"); INTEGRAL_FILE_LENGTH = strlen(INTEGRAL_FILE_PRE); char *INTEGRAL_FILE = malloc((INTEGRAL_FILE_LENGTH + 1)*sizeof(char)); sprintf(INTEGRAL_FILE, "%s", INTEGRAL_FILE_PRE); free(INTEGRAL_FILE_PRE); if (integral_id == 0) { // masses global_integral_file = fopen(INTEGRAL_FILE, "a"); fprintf(global_integral_file, "%lf\t", time_since_init); for (int const_id = 0; const_id < NO_OF_CONSTITUENTS; ++const_id) { #pragma omp parallel for for (int i = 0; i < NO_OF_SCALARS; ++i) { diagnostics -> scalar_field_placeholder[i] = state_write_out -> rho[const_id*NO_OF_SCALARS + i]; } global_scalar_integrator(diagnostics -> scalar_field_placeholder, grid, &global_integral); if (const_id == NO_OF_CONSTITUENTS - 1) { fprintf(global_integral_file, "%lf\n", global_integral); } else { fprintf(global_integral_file, "%lf\t", global_integral); } } fclose(global_integral_file); } if (integral_id == 1) { // density times potential temperature global_integral_file = fopen(INTEGRAL_FILE, "a"); global_scalar_integrator(state_write_out -> rhotheta, grid, &global_integral); fprintf(global_integral_file, "%lf\t%lf\n", time_since_init, global_integral); fclose(global_integral_file); } if (integral_id == 2) { double 
kinetic_integral, potential_integral, internal_integral; global_integral_file = fopen(INTEGRAL_FILE, "a"); Scalar_field *e_kin_density = malloc(sizeof(Scalar_field)); inner_product(state_write_out -> wind, state_write_out -> wind, *e_kin_density, grid); #pragma omp parallel for for (int i = 0; i < NO_OF_SCALARS; ++i) { diagnostics -> scalar_field_placeholder[i] = state_write_out -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i]; } scalar_times_scalar(diagnostics -> scalar_field_placeholder, *e_kin_density, *e_kin_density); global_scalar_integrator(*e_kin_density, grid, &kinetic_integral); free(e_kin_density); Scalar_field *pot_energy_density = malloc(sizeof(Scalar_field)); scalar_times_scalar(diagnostics -> scalar_field_placeholder, grid -> gravity_potential, *pot_energy_density); global_scalar_integrator(*pot_energy_density, grid, &potential_integral); free(pot_energy_density); Scalar_field *int_energy_density = malloc(sizeof(Scalar_field)); scalar_times_scalar(diagnostics -> scalar_field_placeholder, diagnostics -> temperature_gas, *int_energy_density); global_scalar_integrator(*int_energy_density, grid, &internal_integral); fprintf(global_integral_file, "%lf\t%lf\t%lf\t%lf\n", time_since_init, 0.5*kinetic_integral, potential_integral, spec_heat_capacities_v_gas(0)*internal_integral); free(int_energy_density); fclose(global_integral_file); } free(INTEGRAL_FILE); return 0; } int set_basic_props2grib(codes_handle *handle, long data_date, long data_time, long t_write, long t_init, long parameter_category, long parameter_number) { /* This function sets the basic properties of a grib message. 
*/ int retval; if ((retval = codes_set_long(handle, "parameterCategory", parameter_category))) ECCERR(retval); if ((retval = codes_set_long(handle, "parameterNumber", parameter_number))) ECCERR(retval); if ((retval = codes_set_long(handle, "dataDate", data_date))) ECCERR(retval); if ((retval = codes_set_long(handle, "dataTime", data_time))) ECCERR(retval); if ((retval = codes_set_long(handle, "forecastTime", t_write - t_init))) ECCERR(retval); if ((retval = codes_set_long(handle, "stepRange", t_write - t_init))) ECCERR(retval); if ((retval = codes_set_long(handle, "typeOfGeneratingProcess", 1))) ECCERR(retval); if ((retval = codes_set_long(handle, "discipline", 0))) ECCERR(retval); if ((retval = codes_set_long(handle, "gridDefinitionTemplateNumber", 0))) ECCERR(retval); if ((retval = codes_set_long(handle, "Ni", NO_OF_LON_IO_POINTS))) ECCERR(retval); if ((retval = codes_set_long(handle, "Nj", NO_OF_LAT_IO_POINTS))) ECCERR(retval); if ((retval = codes_set_long(handle, "iScansNegatively", 0))) ECCERR(retval); if ((retval = codes_set_long(handle, "jScansPositively", 0))) ECCERR(retval); if ((retval = codes_set_double(handle, "latitudeOfFirstGridPointInDegrees", rad2deg(M_PI/2 - 0.5*M_PI/NO_OF_LAT_IO_POINTS)))) ECCERR(retval); if ((retval = codes_set_double(handle, "longitudeOfFirstGridPointInDegrees", 0))) ECCERR(retval); if ((retval = codes_set_double(handle, "latitudeOfLastGridPointInDegrees", -rad2deg(M_PI/2 - 0.5*M_PI/NO_OF_LAT_IO_POINTS)))) ECCERR(retval); if ((retval = codes_set_double(handle, "longitudeOfLastGridPointInDegrees", rad2deg(-2*M_PI/NO_OF_LON_IO_POINTS)))) ECCERR(retval); if ((retval = codes_set_double(handle, "iDirectionIncrementInDegrees", rad2deg(2*M_PI/NO_OF_LON_IO_POINTS)))) ECCERR(retval); if ((retval = codes_set_double(handle, "jDirectionIncrementInDegrees", rad2deg(M_PI/NO_OF_LAT_IO_POINTS)))) ECCERR(retval); if ((retval = codes_set_long(handle, "discipline", 0))) ECCERR(retval); if ((retval = codes_set_long(handle, "centre", 255))) 
ECCERR(retval); if ((retval = codes_set_long(handle, "significanceOfReferenceTime", 1))) ECCERR(retval); if ((retval = codes_set_long(handle, "productionStatusOfProcessedData", 1))) ECCERR(retval); if ((retval = codes_set_long(handle, "typeOfProcessedData", 1))) ECCERR(retval); if ((retval = codes_set_long(handle, "indicatorOfUnitOfTimeRange", 13))) ECCERR(retval); if ((retval = codes_set_long(handle, "stepUnits", 13))) ECCERR(retval); return 0; } double calc_std_dev(double vector_for_std_deviation[], int no_of_values) { double mean = 0; for (int i = 0; i < no_of_values; ++i) { mean += 1.0/no_of_values*vector_for_std_deviation[i]; } double result = 0; for (int i = 0; i < no_of_values; ++i) { result += pow(vector_for_std_deviation[i] - mean, 2); } result = 1/sqrt(no_of_values)*sqrt(result); return result; } int global_scalar_integrator(Scalar_field density_gen, Grid *grid, double *result) { *result = 0; for (int i = 0; i < NO_OF_SCALARS; ++i) { *result += density_gen[i]*grid -> volume[i]; } return 0; } int interpolation_t(State *state_0, State *state_p1, State *state_write, double t_0, double t_p1, double t_write, Grid *grid) { double weight_0, weight_p1; weight_p1 = (t_write - t_0)/(t_p1 - t_0); weight_0 = 1 - weight_p1; linear_combine_two_states(state_0, state_p1, state_write, weight_0, weight_p1, grid); return 0; } double pseudopotential(State *state, Grid *grid, int scalar_index) { /* This function returns the pseudopotential temperature, which is needed for diagnozing CAPE. 
*/ double result; result = 0; // the dry case if (NO_OF_CONSTITUENTS == 1) { result = grid -> theta_bg[scalar_index] + state -> theta_pert[scalar_index]; } // the moist case, based on Bolton (1980) else { double alpha_1, alpha_2, alpha_3, r, temperature, pressure, t_lcl; r = state -> rho[(NO_OF_CONDENSED_CONSTITUENTS + 1)*NO_OF_SCALARS + scalar_index] /state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + scalar_index]; temperature = (grid -> exner_bg[scalar_index] + state -> exner_pert[scalar_index]) *(grid -> theta_bg[scalar_index] + state -> theta_pert[scalar_index]); pressure = P_0*pow(grid -> exner_bg[scalar_index] + state -> exner_pert[scalar_index], spec_heat_capacities_p_gas(0)/specific_gas_constants(0)); alpha_1 = 0.2854*(1 - 0.28e-3*r); // this is just an estimate for now t_lcl = (grid -> exner_bg[scalar_index - NO_OF_SCALARS_H] + state -> exner_pert[scalar_index - NO_OF_SCALARS_H]) *(grid -> theta_bg[scalar_index - NO_OF_SCALARS_H] + state -> theta_pert[scalar_index - NO_OF_SCALARS_H]); alpha_2 = 3.376/t_lcl - 0.00254; alpha_3 = r*(1 + 0.81e-3*r); result = temperature*pow(P_0/pressure, alpha_1)*exp(alpha_2*alpha_3); } return result; } // This function returns the pressure levels for the pressure_level output. int get_pressure_levels(double pressure_levels[]) { pressure_levels[0] = 20000; pressure_levels[1] = 30000; pressure_levels[2] = 50000; pressure_levels[3] = 70000; pressure_levels[4] = 85000; pressure_levels[5] = 92500; return 0; }
runLengthMatrix.c
/* * Copyright (C) 2018 by Benedict Paten (benedictpaten@gmail.com) * * Released under the MIT license, see LICENSE.txt */ #include <getopt.h> #include <stdio.h> #include <ctype.h> #include <memory.h> #include <hashTableC.h> #include <unistd.h> #include <time.h> #include "marginVersion.h" #include "margin.h" #include "htsIntegration.h" #include "helenFeatures.h" /* * Main functions */ void usage() { fprintf(stderr, "usage: runLengthMatrix <ALIGN_BAM> <REFERENCE_FASTA> <PARAMS> [options]\n"); fprintf(stderr, "Version: %s \n\n", MARGIN_POLISH_VERSION_H); fprintf(stderr, "Produces a run length matrix of reads in ALIGN_BAM to REFERENCE_FASTA.\n"); fprintf(stderr, "\nRequired arguments:\n"); fprintf(stderr, " ALIGN_BAM is the alignment of reads to the reference.\n"); fprintf(stderr, " REFERENCE_FASTA is the reference sequence BAM file in fasta format.\n"); // fprintf(stderr, " VARIANT_VCF is the set of variants to use for phasing.\n"); fprintf(stderr, " PARAMS is the file with margin parameters.\n"); // fprintf(stderr, "\nDefault options:\n"); fprintf(stderr, " -h --help : Print this help screen\n"); fprintf(stderr, " -a --logLevel : Set the log level [default = info]\n"); # ifdef _OPENMP fprintf(stderr, " -t --threads : Set number of concurrent threads [default = 1]\n"); #endif fprintf(stderr, " -o --outputBase : Name to use for output files [default = 'output']\n"); fprintf(stderr, " -r --region : If set, will only compute for given chromosomal region\n"); fprintf(stderr, " Format: chr:start_pos-end_pos (chr3:2000-3000)\n"); fprintf(stderr, " -p --depth : Will override the downsampling depth set in PARAMS\n"); fprintf(stderr, " -l --maxRunLength : Maximum run length (default 50)\n"); fprintf(stderr, "\n"); } int64_t charToNuclIdx(char nucl, bool forward) { switch (nucl) { case 'a': case 'A': return forward ? 0 : 3; case 'c': case 'C': return forward ? 1 : 2; case 'g': case 'G': return forward ? 2 : 1; case 't': case 'T': return forward ? 
3 : 0; default: return -1; } } int64_t getRunLengthArrayIndex(int threadIdx, int64_t nuclIdx, uint64_t refRL, uint64_t readRL, int64_t maxRL) { // bad thread assert(threadIdx >= 0); assert(nuclIdx < 4); int64_t threadPos = threadIdx * 4 * maxRL * maxRL; int64_t nuclPos = nuclIdx * maxRL * maxRL; int64_t refRlPos = (refRL < maxRL ? refRL : maxRL - 1) * maxRL; int64_t readRlPos = (readRL < maxRL ? readRL : maxRL - 1); // bad nucl if (nuclPos < 0) return -1; return threadPos + nuclPos + refRlPos + readRlPos; } int64_t testRunLengthConstruction() { int64_t threadCount = 10; int64_t maxRunLenght = 10; int64_t nuclCount = 4; int64_t maxArraySize = threadCount * nuclCount * maxRunLenght * maxRunLenght; uint64_t *myArray = st_calloc(maxArraySize, sizeof(uint64_t)); for (int thread = 0 ; thread < threadCount; thread++) { for (uint64_t refRl = 0; refRl < maxRunLenght; refRl++) { for (uint64_t readRl = 0; readRl < maxRunLenght; readRl++) { for (int64_t nucl = 0; nucl < nuclCount; nucl++) { char nuc = (nucl==0 ? 'A' : (nucl==1 ? 'C' : (nucl==2 ? 
'G' : 'T'))); for (int64_t strand = 0; strand < 2; strand++) { int64_t idx = getRunLengthArrayIndex(thread, charToNuclIdx(nuc, strand == 0), refRl, readRl, maxRunLenght); assert(idx < maxArraySize); myArray[idx] += 1; } } } } } for (int64_t i = 0; i < maxArraySize; i++) { assert(myArray[i] == 2); } free(myArray); } int main(int argc, char *argv[]) { // Parameters / arguments char *logLevelString = stString_copy("critical"); char *bamInFile = NULL; char *referenceFastaFile = NULL; char *paramsFile = NULL; char *outputBase = stString_copy("output"); char *regionStr = NULL; int numThreads = 1; int64_t maxDepth = -1; int64_t maxRunLengthExcl = 51; if (argc < 3) { free(outputBase); free(logLevelString); usage(); return 0; } bamInFile = stString_copy(argv[1]); referenceFastaFile = stString_copy(argv[2]); paramsFile = stString_copy(argv[3]); // Parse the options while (1) { static struct option long_options[] = { { "help", no_argument, 0, 'h' }, { "logLevel", required_argument, 0, 'a' }, # ifdef _OPENMP { "threads", required_argument, 0, 't'}, #endif { "outputBase", required_argument, 0, 'o'}, { "region", required_argument, 0, 'r'}, { "depth", required_argument, 0, 'p'}, { "tempFilesToDisk", no_argument, 0, 'k'}, { "maxRunLength", no_argument, 0, 'l'}, { 0, 0, 0, 0 } }; int option_index = 0; int key = getopt_long(argc-2, &argv[2], "ha:o:p:t:r:l:", long_options, &option_index); if (key == -1) { break; } switch (key) { case 'a': free(logLevelString); logLevelString = stString_copy(optarg); break; case 'h': usage(); return 0; case 'o': free(outputBase); outputBase = getFileBase(optarg, "output"); break; case 'r': regionStr = stString_copy(optarg); break; case 't': numThreads = atoi(optarg); if (numThreads <= 0) { st_errAbort("Invalid thread count: %d", numThreads); } break; case 'l': maxRunLengthExcl = atoi(optarg) +1; if (maxRunLengthExcl < 1) { st_errAbort("Invalid max run length: %s", optarg); } break; default: usage(); free(outputBase); free(logLevelString); 
free(bamInFile); free(referenceFastaFile); free(paramsFile); return 0; } } // sanity check (verify files exist) if (access(bamInFile, R_OK) != 0) { st_errAbort("Could not read from input bam file: %s\n", bamInFile); char *idx = stString_print("%s.bai", bamInFile); if (access(idx, R_OK) != 0) { st_errAbort("BAM does not appear to be indexed: %s\n", bamInFile); } free(idx); } if (access(referenceFastaFile, R_OK) != 0) { st_errAbort("Could not read from reference fastafile: %s\n", referenceFastaFile); } if (access(paramsFile, R_OK) != 0) { st_errAbort("Could not read from params file: %s\n", paramsFile); } // Initialization from arguments time_t startTime = time(NULL); st_setLogLevelFromString(logLevelString); free(logLevelString); if (st_getLogLevel() >= info) { st_setCallocDebug(true); } # ifdef _OPENMP if (numThreads <= 0) { numThreads = 1; } omp_set_num_threads(numThreads); st_logCritical("Running OpenMP with %d threads.\n", omp_get_max_threads()); # endif //testing testRunLengthConstruction(); // Parse parameters st_logCritical("> Parsing model parameters from file: %s\n", paramsFile); Params *params = params_readParams(paramsFile); // parameter updates st_logInfo(" Setting chunkBoundary to 0\n"); params->polishParams->chunkBoundary = 0; // update depth (if set) if (maxDepth >= 0) { st_logCritical("> Changing maxDepth parameter from %"PRId64" to %"PRId64"\n", params->polishParams->maxDepth, maxDepth); params->polishParams->maxDepth = (uint64_t) maxDepth; } // Print a report of the parsed parameters if (st_getLogLevel() == debug) { params_printParameters(params, stderr); } // get chunker for bam. 
if regionStr is NULL, it will be ignored time_t chunkingStart = time(NULL); BamChunker *bamChunker = bamChunker_construct2(bamInFile, regionStr, NULL, params->polishParams, TRUE); st_logCritical( "> Set up bam chunker in %"PRId64"s with chunk size %i and overlap %i (for region=%s), resulting in %i total chunks\n", time(NULL) - chunkingStart, (int) bamChunker->chunkSize, (int) bamChunker->chunkBoundary, regionStr == NULL ? "all" : regionStr, bamChunker->chunkCount); if (bamChunker->chunkCount == 0) { st_errAbort("> Found no valid reads!\n"); } // (may) need to shuffle chunks stList *chunkOrder = stList_construct3(0, (void (*)(void *)) stIntTuple_destruct); for (int64_t i = 0; i < bamChunker->chunkCount; i++) { stList_append(chunkOrder, stIntTuple_construct1(i)); } if (params->polishParams->shuffleChunks) { switch (params->polishParams->shuffleChunksMethod) { case SCM_SIZE_DESC: st_logCritical("> Ordering chunks by estimated depth\n"); stList_sort2(chunkOrder, compareBamChunkDepthByIndexInList, bamChunker->chunks); stList_reverse(chunkOrder); break; case SCM_RANDOM: st_logCritical("> Randomly shuffling chunks\n"); stList_shuffle(chunkOrder); break; } } // this is the run length data we want int64_t totalSize = numThreads * 4 * maxRunLengthExcl * maxRunLengthExcl; uint64_t *runLengthDataForAllThreads = st_calloc(totalSize, sizeof(uint64_t)); // multiproccess the chunks, save to results st_logCritical("> Setup complete, beginning run\n"); int64_t lastReportedPercentage = 0; time_t polishStartTime = time(NULL); # ifdef _OPENMP #pragma omp parallel for schedule(dynamic,1) # endif for (int64_t i = 0; i < bamChunker->chunkCount; i++) { int64_t chunkIdx = stIntTuple_get(stList_get(chunkOrder, i), 0); // Time all chunks time_t chunkStartTime = time(NULL); // Get chunk BamChunk *bamChunk = bamChunker_getChunk(bamChunker, chunkIdx); // logging char *logIdentifier; bool logProgress = FALSE; int64_t currentPercentage = (int64_t) (100 * i / bamChunker->chunkCount); # ifdef 
_OPENMP int64_t threadIdx = omp_get_thread_num(); logIdentifier = stString_print(" T%02d_C%05"PRId64, threadIdx, chunkIdx); if (threadIdx == 0) { if (currentPercentage != lastReportedPercentage) { logProgress = TRUE; lastReportedPercentage = currentPercentage; } } # else int64_t threadIdx = 0; logIdentifier = stString_copy(""); if (currentPercentage != lastReportedPercentage) { logProgress = TRUE; lastReportedPercentage = currentPercentage; } # endif // prints percentage complete and estimated time remaining if (logProgress) { // log progress int64_t timeTaken = (int64_t) (time(NULL) - polishStartTime); int64_t secondsRemaining = (int64_t) floor(1.0 * timeTaken / currentPercentage * (100 - currentPercentage)); char *timeDescriptor = (secondsRemaining == 0 && currentPercentage <= 50 ? stString_print("unknown") : getTimeDescriptorFromSeconds(secondsRemaining)); st_logCritical("> Polishing %2"PRId64"%% complete (%"PRId64"/%"PRId64"). Estimated time remaining: %s\n", currentPercentage, i, bamChunker->chunkCount, timeDescriptor); free(timeDescriptor); } RleString *rleReference = bamChunk_getReferenceSubstring(bamChunk, referenceFastaFile, params); st_logInfo(">%s Going to process a chunk for reference sequence: %s, starting at: %i and ending at: %i\n", logIdentifier, bamChunk->refSeqName, (int) bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd); // Convert bam lines into corresponding reads and alignments stList *reads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct); stList *alignments = stList_construct3(0, (void (*)(void *)) stList_destruct); stList *filteredReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct); stList *filteredAlignments = stList_construct3(0, (void (*)(void *)) stList_destruct); convertToReadsAndAlignments(bamChunk, rleReference, reads, alignments, params->polishParams); // do downsampling if appropriate if (params->polishParams->maxDepth > 0) { // get downsampling structures stList *maintainedReads = 
stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct); stList *maintainedAlignments = stList_construct3(0, (void (*)(void *)) stList_destruct); bool didDownsample = downsampleViaReadLikelihood(params->polishParams->maxDepth, bamChunk, reads, alignments, maintainedReads, maintainedAlignments, filteredReads, filteredAlignments); // we need to destroy the discarded reads and structures if (didDownsample) { st_logInfo(" %s Downsampled from %"PRId64" to %"PRId64" reads\n", logIdentifier, stList_length(reads), stList_length(maintainedReads)); // still has all the old reads, need to not free these stList_setDestructor(reads, NULL); stList_setDestructor(alignments, NULL); stList_destruct(reads); stList_destruct(alignments); // and keep the filtered reads reads = maintainedReads; alignments = maintainedAlignments; } // no downsampling, we just need to free the (empty) objects else { assert(stList_length(maintainedReads) == 0); assert(stList_length(maintainedAlignments) == 0); stList_destruct(maintainedReads); stList_destruct(maintainedAlignments); } } // prep for polishing Poa *poa = NULL; // The poa alignment // Generate partial order alignment (POA) (destroys rleAlignments in the process) poa = poa_realignOnlyAnchorAlignments(reads, alignments, rleReference, params->polishParams); for (int64_t pos = 1; pos < stList_length(poa->nodes); pos++) { PoaNode *node = stList_get(poa->nodes, pos); char refNucl = node->base; uint64_t refRL = node->repeatCount; for (int64_t o = 0; o < stList_length(node->observations); o++) { PoaBaseObservation *obs = stList_get(node->observations, o); BamChunkRead *read = stList_get(reads, obs->readNo); char readNucl = read->rleRead->rleString[obs->offset]; uint64_t readRL = read->rleRead->repeatCounts[obs->offset]; if (readNucl == refNucl) { int64_t idx = getRunLengthArrayIndex(threadIdx, charToNuclIdx(readNucl, read->forwardStrand), refRL, readRL, maxRunLengthExcl); if (idx < 0) { continue; } assert(idx < totalSize); 
runLengthDataForAllThreads[idx] += 1; } } } // report timing if (st_getLogLevel() >= info) { st_logInfo(">%s Chunk with ~%"PRId64" reads processed in %d sec\n", logIdentifier, stList_length(reads), (int) (time(NULL) - chunkStartTime)); } // final post-completion logging cleanup poa_destruct(poa); rleString_destruct(rleReference); stList_destruct(reads); stList_destruct(alignments); stList_destruct(filteredReads); stList_destruct(filteredAlignments); free(logIdentifier); } st_logCritical("> Consolidating all run lengths\n"); // condense all values uint64_t *condensedRunLengthArray = st_calloc(4 * maxRunLengthExcl * maxRunLengthExcl, sizeof(uint64_t)); for (int t = 0; t < numThreads; t++) { for (int64_t nucl = 0; nucl < 4; nucl++) { for (uint64_t refRL = 1; refRL < maxRunLengthExcl; refRL++) { for (uint64_t readRL = 1; readRL < maxRunLengthExcl; readRL++) { int64_t fullDataPos = getRunLengthArrayIndex(t, nucl, refRL, readRL, maxRunLengthExcl); int64_t condensedPos = getRunLengthArrayIndex(0, nucl, refRL, readRL, maxRunLengthExcl); assert(fullDataPos >= 0); assert(condensedPos >= 0); condensedRunLengthArray[condensedPos] += runLengthDataForAllThreads[fullDataPos]; } } } } // printit char *countFilenameA = stString_print("%s.run_lengths.A.tsv", outputBase); FILE *countFileA = fopen(countFilenameA, "w"); char *countFilenameC = stString_print("%s.run_lengths.C.tsv", outputBase); FILE *countFileC = fopen(countFilenameC, "w"); char *countFilenameG = stString_print("%s.run_lengths.G.tsv", outputBase); FILE *countFileG = fopen(countFilenameG, "w"); char *countFilenameT = stString_print("%s.run_lengths.T.tsv", outputBase); FILE *countFileT = fopen(countFilenameT, "w"); if (countFileA == NULL || countFileC == NULL || countFileG == NULL || countFileT == NULL) { st_errAbort("Could not open output files for writing!", countFilenameA); } else { st_logCritical("> Writing counts to %s, %s, %s %s\n", countFilenameA, countFilenameC, countFilenameG, countFilenameT); } for (uint64_t 
refRL = 0; refRL < maxRunLengthExcl; refRL++) { for (uint64_t readRL = 0; readRL < maxRunLengthExcl; readRL++) { if (refRL == 0) { // header if (readRL == 0) { fprintf(countFileA, "#ref_rl"); fprintf(countFileC, "#ref_rl"); fprintf(countFileG, "#ref_rl"); fprintf(countFileT, "#ref_rl"); } else { fprintf(countFileA, "read_%"PRId64"%s", readRL, readRL == maxRunLengthExcl - 1 ? "+" : ""); fprintf(countFileC, "read_%"PRId64"%s", readRL, readRL == maxRunLengthExcl - 1 ? "+" : ""); fprintf(countFileG, "read_%"PRId64"%s", readRL, readRL == maxRunLengthExcl - 1 ? "+" : ""); fprintf(countFileT, "read_%"PRId64"%s", readRL, readRL == maxRunLengthExcl - 1 ? "+" : ""); } } else { if (readRL == 0) { // header (ish) fprintf(countFileA, "%"PRIu64, refRL); fprintf(countFileC, "%"PRIu64, refRL); fprintf(countFileG, "%"PRIu64, refRL); fprintf(countFileT, "%"PRIu64, refRL); } else { // data int64_t condensedPosA = getRunLengthArrayIndex(0, charToNuclIdx('A', TRUE), refRL, readRL, maxRunLengthExcl); int64_t condensedPosC = getRunLengthArrayIndex(0, charToNuclIdx('C', TRUE), refRL, readRL, maxRunLengthExcl); int64_t condensedPosG = getRunLengthArrayIndex(0, charToNuclIdx('G', TRUE), refRL, readRL, maxRunLengthExcl); int64_t condensedPosT = getRunLengthArrayIndex(0, charToNuclIdx('T', TRUE), refRL, readRL, maxRunLengthExcl); uint64_t countA = condensedRunLengthArray[condensedPosA]; uint64_t countC = condensedRunLengthArray[condensedPosC]; uint64_t countG = condensedRunLengthArray[condensedPosG]; uint64_t countT = condensedRunLengthArray[condensedPosT]; fprintf(countFileA, "%"PRIu64, countA); fprintf(countFileC, "%"PRIu64, countC); fprintf(countFileG, "%"PRIu64, countG); fprintf(countFileT, "%"PRIu64, countT); } } // increment if (readRL == maxRunLengthExcl - 1) { fprintf(countFileA, "\n"); fprintf(countFileC, "\n"); fprintf(countFileG, "\n"); fprintf(countFileT, "\n"); } else { fprintf(countFileA, "\t"); fprintf(countFileC, "\t"); fprintf(countFileG, "\t"); fprintf(countFileT, "\t"); } } 
} // close files fclose(countFileA); fclose(countFileC); fclose(countFileG); fclose(countFileT); // cleanup free(countFilenameA); free(countFilenameC); free(countFilenameG); free(countFilenameT); free(condensedRunLengthArray); free(runLengthDataForAllThreads); bamChunker_destruct(bamChunker); params_destruct(params); if (regionStr != NULL) free(regionStr); stList_destruct(chunkOrder); free(outputBase); free(bamInFile); free(referenceFastaFile); free(paramsFile); // log completion char *timeDescriptor = getTimeDescriptorFromSeconds(time(NULL) - startTime); st_logCritical("> Finished generating run length matrix in %s.\n", timeDescriptor); free(timeDescriptor); // while(1); // Use this for testing for memory leaks return 0; }
data.c
#include "data.h"
#include "utils.h"
#include "image.h"
#include "dark_cuda.h"
#include "box.h"
#include "http_stream.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

extern int check_mistakes;

#define NUMCHARS 37

pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

/* Read every line of `filename` into a freshly allocated linked list.
 * Aborts through file_error() when the file cannot be opened; the caller
 * owns the returned list and the strings it holds. */
list *get_paths(char *filename)
{
    FILE *fp = fopen(filename, "r");
    if (!fp) file_error(filename);

    list *lines = make_list();
    char *line;
    while ((line = fgetl(fp))) {
        list_insert(lines, line);
    }
    fclose(fp);
    return lines;
}

/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
    char **random_paths = calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for(i = 0; i < n; ++i){
        int index = random_gen()%m;
        indexes[i] = index;
        random_paths[i] = paths[index];
        if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}
*/

/* Pick n paths that advance sequentially through `paths` (m entries):
 * each of the `mini_batch` time lines starts at a random offset and steps
 * forward by a shared random stride in [1, augment_speed] on every draw.
 * Guarded by the module mutex; the caller frees the returned array
 * (the strings themselves are borrowed from `paths`). */
char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed)
{
    int stride = rand_int(1, augment_speed);
    if (stride < 1) stride = 1;

    char **seq_paths = (char**)xcalloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    unsigned int *start_time_indexes = (unsigned int *)xcalloc(mini_batch, sizeof(unsigned int));
    for (i = 0; i < mini_batch; ++i) {
        start_time_indexes[i] = random_gen() % m;
    }
    for (i = 0; i < n; ++i) {
        do {
            int time_line_index = i % mini_batch;
            unsigned int index = start_time_indexes[time_line_index] % m;
            start_time_indexes[time_line_index] += stride;  // advance this time line
            seq_paths[i] = paths[index];
            if (strlen(seq_paths[i]) <= 4)
                printf(" Very small path to the image: %s \n", seq_paths[i]);
        } while (strlen(seq_paths[i]) == 0);                // never keep an empty path
    }
    free(start_time_indexes);
    pthread_mutex_unlock(&mutex);
    return seq_paths;
}

char
**get_random_paths(char **paths, int n, int m) { char** random_paths = (char**)xcalloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d \n", n); for(i = 0; i < n; ++i){ do { int index = random_gen() % m; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf("grp: %s\n", paths[index]); if (strlen(random_paths[i]) <= 4) printf(" Very small path to the image: %s \n", random_paths[i]); } while (strlen(random_paths[i]) == 0); } pthread_mutex_unlock(&mutex); return random_paths; } char **find_replace_paths(char **paths, int n, char *find, char *replace) { char** replace_paths = (char**)xcalloc(n, sizeof(char*)); int i; for(i = 0; i < n; ++i){ char replaced[4096]; find_replace(paths[i], find, replace, replaced); replace_paths[i] = copy_string(replaced); } return replace_paths; } matrix load_image_paths_gray(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image(paths[i], w, h, 3); image gray = grayscale_image(im); free_image(im); im = gray; X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_paths(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], w, h); X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int dontuse_opencv) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ int size = w > h ? 
w : h; image im; if(dontuse_opencv) im = load_image_stb_resize(paths[i], 0, 0, 3); else im = load_image_color(paths[i], 0, 0); image crop = random_augment_image(im, angle, aspect, min, max, size); int flip = use_flip ? random_gen() % 2 : 0; if (flip) flip_image(crop); random_distort_image(crop, hue, saturation, exposure); image sized = resize_image(crop, w, h); //show_image(im, "orig"); //show_image(sized, "sized"); //show_image(sized, paths[i]); //wait_until_press_key_cv(); //printf("w = %d, h = %d \n", sized.w, sized.h); free_image(im); free_image(crop); X.vals[i] = sized.data; X.cols = sized.h*sized.w*sized.c; } return X; } box_label *read_boxes(char *filename, int *n) { box_label* boxes = (box_label*)xcalloc(1, sizeof(box_label)); FILE *file = fopen(filename, "r"); if (!file) { printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename); //file_error(filename); FILE* fw = fopen("bad.list", "a"); fwrite(filename, sizeof(char), strlen(filename), fw); char *new_line = "\n"; fwrite(new_line, sizeof(char), strlen(new_line), fw); fclose(fw); if (check_mistakes) { printf("\n Error in read_boxes() \n"); getchar(); } *n = 0; return boxes; } float x, y, h, w; int id; int count = 0; while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){ boxes = (box_label*)xrealloc(boxes, (count + 1) * sizeof(box_label)); boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w/2; boxes[count].right = x + w/2; boxes[count].top = y - h/2; boxes[count].bottom = y + h/2; ++count; } fclose(file); *n = count; return boxes; } void randomize_boxes(box_label *b, int n) { int i; for(i = 0; i < n; ++i){ box_label swap = b[i]; int index = random_gen()%n; b[i] = b[index]; b[index] = swap; } } void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip) { int i; for(i = 0; i < n; ++i){ if(boxes[i].x == 0 && boxes[i].y == 0) { boxes[i].x = 999999; 
boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 || (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } boxes[i].left = boxes[i].left * sx - dx; boxes[i].right = boxes[i].right * sx - dx; boxes[i].top = boxes[i].top * sy - dy; boxes[i].bottom = boxes[i].bottom* sy - dy; if(flip){ float swap = boxes[i].left; boxes[i].left = 1. - boxes[i].right; boxes[i].right = 1. - swap; } boxes[i].left = constrain(0, 1, boxes[i].left); boxes[i].right = constrain(0, 1, boxes[i].right); boxes[i].top = constrain(0, 1, boxes[i].top); boxes[i].bottom = constrain(0, 1, boxes[i].bottom); boxes[i].x = (boxes[i].left+boxes[i].right)/2; boxes[i].y = (boxes[i].top+boxes[i].bottom)/2; boxes[i].w = (boxes[i].right - boxes[i].left); boxes[i].h = (boxes[i].bottom - boxes[i].top); boxes[i].w = constrain(0, 1, boxes[i].w); boxes[i].h = constrain(0, 1, boxes[i].h); } } void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count && i < 30; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .0 || h < .0) continue; int index = (4+classes) * i; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; if (id < classes) truth[index+id] = 1; } free(boxes); } void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); 
randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .001 || h < .001) continue; int col = (int)(x*num_boxes); int row = (int)(y*num_boxes); x = x*num_boxes - col; y = y*num_boxes - row; int index = (col+row*num_boxes)*(5+classes); if (truth[index]) continue; truth[index++] = 1; if (id < classes) truth[index+id] = 1; index += classes; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; } free(boxes); } int fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy, int net_w, int net_h) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; int i; box_label *boxes = read_boxes(labelpath, &count); int min_w_h = 0; float lowest_w = 1.F / net_w; float lowest_h = 1.F / net_h; randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); if (count > num_boxes) count = num_boxes; float x, y, w, h; int id; int sub = 0; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; // not detect small objects //if ((w < 0.001F || h < 0.001F)) continue; // if truth (box for object) is smaller than 1x1 pix char buff[256]; if (id >= classes) { printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d], file: %s \n", id, (classes-1), labelpath); sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. 
But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1)); system(buff); if (check_mistakes) getchar(); ++sub; continue; } if ((w < lowest_w || h < lowest_h)) { //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath); //system(buff); ++sub; continue; } if (x == 999999 || y == 999999) { printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1, file: %s \n", labelpath); sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (x <= 0 || x > 1 || y <= 0 || y > 1) { printf("\n Wrong annotation: x = %f, y = %f, file: %s \n", x, y, labelpath); sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (w > 1) { printf("\n Wrong annotation: w = %f, file: %s \n", w, labelpath); sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w); system(buff); w = 1; if (check_mistakes) getchar(); } if (h > 1) { printf("\n Wrong annotation: h = %f, file: %s \n", h, labelpath); sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h); system(buff); h = 1; if (check_mistakes) getchar(); } if (x == 0) x += lowest_w; if (y == 0) y += lowest_h; truth[(i-sub)*5+0] = x; truth[(i-sub)*5+1] = y; truth[(i-sub)*5+2] = w; truth[(i-sub)*5+3] = h; truth[(i-sub)*5+4] = id; if (min_w_h == 0) min_w_h = w*net_w; if (min_w_h > w*net_w) min_w_h = w*net_w; if (min_w_h > h*net_h) min_w_h = h*net_h; } free(boxes); return min_w_h; } void print_letters(float *pred, int n) { int i; for(i = 0; i < n; ++i){ int index = max_index(pred+i*NUMCHARS, NUMCHARS); printf("%c", int_to_alphanum(index)); } printf("\n"); } void fill_truth_captcha(char *path, int n, float *truth) { char *begin = strrchr(path, '/'); ++begin; int i; for(i = 0; i < strlen(begin) && i < n && begin[i] != 
'.'; ++i){ int index = alphanum_to_int(begin[i]); if(index > 35) printf("Bad %c\n", begin[i]); truth[i*NUMCHARS+index] = 1; } for(;i < n; ++i){ truth[i*NUMCHARS + NUMCHARS-1] = 1; } } data load_data_captcha(char **paths, int n, int m, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = make_matrix(n, k*NUMCHARS); int i; for(i = 0; i < n; ++i){ fill_truth_captcha(paths[i], k, d.y.vals[i]); } if(m) free(paths); return d; } data load_data_captcha_encode(char **paths, int n, int m, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.X.cols = 17100; d.y = d.X; if(m) free(paths); return d; } void fill_truth(char *path, char **labels, int k, float *truth) { int i; memset(truth, 0, k*sizeof(float)); int count = 0; for(i = 0; i < k; ++i){ if(strstr(path, labels[i])){ truth[i] = 1; ++count; } } if (count != 1) { printf("Too many or too few labels: %d, %s\n", count, path); count = 0; for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { printf("\t label %d: %s \n", count, labels[i]); count++; } } } } void fill_truth_smooth(char *path, char **labels, int k, float *truth, float label_smooth_eps) { int i; memset(truth, 0, k * sizeof(float)); int count = 0; for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { truth[i] = (1 - label_smooth_eps); ++count; } else { truth[i] = label_smooth_eps / (k - 1); } } if (count != 1) { printf("Too many or too few labels: %d, %s\n", count, path); count = 0; for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { printf("\t label %d: %s \n", count, labels[i]); count++; } } } } void fill_hierarchy(float *truth, int k, tree *hierarchy) { int j; for(j = 0; j < k; ++j){ if(truth[j]){ int parent = hierarchy->parent[j]; while(parent >= 0){ truth[parent] = 1; parent = hierarchy->parent[parent]; } } } int i; int count = 0; for(j = 0; j < hierarchy->groups; ++j){ //printf("%d\n", 
count); int mask = 1; for(i = 0; i < hierarchy->group_size[j]; ++i){ if(truth[count + i]){ mask = 0; break; } } if (mask) { for(i = 0; i < hierarchy->group_size[j]; ++i){ truth[count + i] = SECRET_NUM; } } count += hierarchy->group_size[j]; } } matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy, float label_smooth_eps) { matrix y = make_matrix(n, k); int i; for(i = 0; i < n && labels; ++i){ fill_truth_smooth(paths[i], labels, k, y.vals[i], label_smooth_eps); if(hierarchy){ fill_hierarchy(y.vals[i], k, hierarchy); } } return y; } matrix load_tags_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i; int count = 0; for(i = 0; i < n; ++i){ char label[4096]; find_replace(paths[i], "imgs", "labels", label); find_replace(label, "_iconl.jpeg", ".txt", label); FILE *file = fopen(label, "r"); if(!file){ find_replace(label, "labels", "labels2", label); file = fopen(label, "r"); if(!file) continue; } ++count; int tag; while(fscanf(file, "%d", &tag) == 1){ if(tag < k){ y.vals[i][tag] = 1; } } fclose(file); } printf("%d/%d\n", count, n); return y; } char **get_labels_custom(char *filename, int *size) { list *plist = get_paths(filename); if(size) *size = plist->size; char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } char **get_labels(char *filename) { return get_labels_custom(filename, NULL); } void free_data(data d) { if(!d.shallow){ free_matrix(d.X); free_matrix(d.y); }else{ free(d.X.vals); free(d.y.vals); } } data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = size*size*(5+classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); int oh = orig.h; int ow = orig.w; 
int dw = (ow*jitter); int dh = (oh*jitter); int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; int flip = random_gen()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/ow)/sx; float dy = ((float)ptop /oh)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if(m) paths = get_random_paths(paths, 2*n, m); int i,j; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*6; int k = 2*(classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image im1 = load_image_color(paths[i*2], w, h); image im2 = load_image_color(paths[i*2+1], w, h); d.X.vals[i] = (float*)xcalloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float)); memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i*2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while(fscanf(fp1, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou; } find_replace(paths[i*2+1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while(fscanf(fp2, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou; } for (j = 0; j < classes; ++j){ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] 
< .5){ d.y.vals[i][2*j] = 1; d.y.vals[i][2*j+1] = 0; } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){ d.y.vals[i][2*j] = 0; d.y.vals[i][2*j+1] = 1; } else { d.y.vals[i][2*j] = SECRET_NUM; d.y.vals[i][2*j+1] = SECRET_NUM; } } fclose(fp1); fclose(fp2); free_image(im1); free_image(im2); } if(m) free(paths); return d; } data load_data_swag(char **paths, int n, int classes, float jitter) { int index = random_gen()%n; char *random_path = paths[index]; image orig = load_image_color(random_path, 0, 0); int h = orig.h; int w = orig.w; data d = {0}; d.shallow = 0; d.w = w; d.h = h; d.X.rows = 1; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = (4+classes)*30; d.y = make_matrix(1, k); int dw = w*jitter; int dh = h*jitter; int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = w - pleft - pright; int sheight = h - ptop - pbot; float sx = (float)swidth / w; float sy = (float)sheight / h; int flip = random_gen()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/w)/sx; float dy = ((float)ptop /h)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); d.X.vals[0] = sized.data; fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); return d; } void blend_truth(float *new_truth, int boxes, float *old_truth) { const int t_size = 4 + 1; int count_new_truth = 0; int t; for (t = 0; t < boxes; ++t) { float x = new_truth[t*(4 + 1)]; if (!x) break; count_new_truth++; } for (t = count_new_truth; t < boxes; ++t) { float *new_truth_ptr = new_truth + t*t_size; float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size; float x = old_truth_ptr[0]; if (!x) break; new_truth_ptr[0] = old_truth_ptr[0]; new_truth_ptr[1] = old_truth_ptr[1]; new_truth_ptr[2] = old_truth_ptr[2]; new_truth_ptr[3] = old_truth_ptr[3]; new_truth_ptr[4] = 
old_truth_ptr[4]; } //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t); } void blend_truth_mosaic(float *new_truth, int boxes, float *old_truth, int w, int h, float cut_x, float cut_y, int i_mixup, int left_shift, int right_shift, int top_shift, int bot_shift) { const int t_size = 4 + 1; int count_new_truth = 0; int t; for (t = 0; t < boxes; ++t) { float x = new_truth[t*(4 + 1)]; if (!x) break; count_new_truth++; } int new_t = count_new_truth; for (t = count_new_truth; t < boxes; ++t) { float *new_truth_ptr = new_truth + new_t*t_size; new_truth_ptr[0] = 0; float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size; float x = old_truth_ptr[0]; if (!x) break; float xb = old_truth_ptr[0]; float yb = old_truth_ptr[1]; float wb = old_truth_ptr[2]; float hb = old_truth_ptr[3]; // shift 4 images if (i_mixup == 0) { xb = xb - (float)(w - cut_x - right_shift) / w; yb = yb - (float)(h - cut_y - bot_shift) / h; } if (i_mixup == 1) { xb = xb + (float)(cut_x - left_shift) / w; yb = yb - (float)(h - cut_y - bot_shift) / h; } if (i_mixup == 2) { xb = xb - (float)(w - cut_x - right_shift) / w; yb = yb + (float)(cut_y - top_shift) / h; } if (i_mixup == 3) { xb = xb + (float)(cut_x - left_shift) / w; yb = yb + (float)(cut_y - top_shift) / h; } int left = (xb - wb / 2)*w; int right = (xb + wb / 2)*w; int top = (yb - hb / 2)*h; int bot = (yb + hb / 2)*h; /* { // fix out of Mosaic-bound float left_bound = 0, right_bound = 0, top_bound = 0, bot_bound = 0; if (i_mixup == 0) { left_bound = 0; right_bound = cut_x; top_bound = 0; bot_bound = cut_y; } if (i_mixup == 1) { left_bound = cut_x; right_bound = w; top_bound = 0; bot_bound = cut_y; } if (i_mixup == 2) { left_bound = 0; right_bound = cut_x; top_bound = cut_y; bot_bound = h; } if (i_mixup == 3) { left_bound = cut_x; right_bound = w; top_bound = cut_y; bot_bound = h; } if (left < left_bound) { //printf(" i_mixup = %d, left = %d, left_bound = %f \n", i_mixup, left, left_bound); left = left_bound; } if (right > 
right_bound) { //printf(" i_mixup = %d, right = %d, right_bound = %f \n", i_mixup, right, right_bound); right = right_bound; } if (top < top_bound) top = top_bound; if (bot > bot_bound) bot = bot_bound; xb = ((float)(right + left) / 2) / w; wb = ((float)(right - left)) / w; yb = ((float)(bot + top) / 2) / h; hb = ((float)(bot - top)) / h; } */ { // fix out of bound if (left < 0) { float diff = (float)left / w; xb = xb - diff / 2; wb = wb + diff; } if (right > w) { float diff = (float)(right - w) / w; xb = xb - diff / 2; wb = wb - diff; } if (top < 0) { float diff = (float)top / h; yb = yb - diff / 2; hb = hb + diff; } if (bot > h) { float diff = (float)(bot - h) / h; yb = yb - diff / 2; hb = hb - diff; } left = (xb - wb / 2)*w; right = (xb + wb / 2)*w; top = (yb - hb / 2)*h; bot = (yb + hb / 2)*h; } // leave only within the image if(left >= 0 && right <= w && top >= 0 && bot <= h && wb > 0 && wb < 1 && hb > 0 && hb < 1 && xb > 0 && xb < 1 && yb > 0 && yb < 1) { new_truth_ptr[0] = xb; new_truth_ptr[1] = yb; new_truth_ptr[2] = wb; new_truth_ptr[3] = hb; new_truth_ptr[4] = old_truth_ptr[4]; new_t++; } } //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t); } #ifdef OPENCV #include "http_stream.h" data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_gaussian_noise, int use_blur, int use_mixup, float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs) { const int random_index = random_gen(); c = c ? 
c : 3; if (use_mixup == 2 || use_mixup == 4) { printf("\n cutmix=1 - isn't supported for Detector (use cutmix=1 only for Classifier) \n"); if (check_mistakes) getchar(); if(use_mixup == 2) use_mixup = 0; else use_mixup = 3; } if (use_mixup == 3 && letter_box) { printf("\n Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters \n"); if (check_mistakes) getchar(); exit(0); } if (random_gen() % 2 == 0) use_mixup = 0; int i; int *cut_x = NULL, *cut_y = NULL; if (use_mixup == 3) { cut_x = (int*)calloc(n, sizeof(int)); cut_y = (int*)calloc(n, sizeof(int)); const float min_offset = 0.2; // 20% for (i = 0; i < n; ++i) { cut_x[i] = rand_int(w*min_offset, w*(1 - min_offset)); cut_y[i] = rand_int(h*min_offset, h*(1 - min_offset)); } } data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0; float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0; int augmentation_calculated = 0, gaussian_noise = 0; d.y = make_matrix(n, 5*boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= use_mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; // recalculate augmentation for the 2nd sequence if(track==1) char **random_paths; if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else random_paths = get_random_paths(paths, n, m); for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(5 * boxes, sizeof(float)); const char *filename = random_paths[i]; int flag = (c >= 3); mat_cv *src; src = load_image_mat_cv(filename, flag); if (src == NULL) { printf("\n Error in load_data_detection() - OpenCV \n"); fflush(stdout); if (check_mistakes) { getchar(); } continue; } int oh = get_height_mat(src); int ow = get_width_mat(src); int dw = (ow*jitter); int dh = (oh*jitter); if (!augmentation_calculated || !track) { augmentation_calculated = 1; r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = 
random_float(); r_scale = random_float(); dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); flip = use_flip ? random_gen() % 2 : 0; if (use_blur) { int tmp_blur = rand_int(0, 2); // 0 - disable, 1 - blur background, 2 - blur the whole image if (tmp_blur == 0) blur = 0; else if (tmp_blur == 1) blur = 1; else blur = use_blur; } if (use_gaussian_noise && rand_int(0, 1) == 1) gaussian_noise = use_gaussian_noise; else gaussian_noise = 0; } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh); //float scale = rand_precalc_random(.25, 2, r_scale); // unused currently if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh)/2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow)/2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } } int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; int min_w_h = fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. 
/ sy, w, h); if ((min_w_h / 8) < blur && blur > 1) blur = min_w_h / 8; // disable blur if one of the objects is too small image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, gaussian_noise, blur, boxes, truth); if (use_mixup == 0) { d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float)); } else if (use_mixup == 1) { if (i_mixup == 0) { d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float)); } else if (i_mixup == 1) { image old_img = make_empty_image(w, h, c); old_img.data = d.X.vals[i]; //show_image(ai, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images_cv(ai, 0.5, old_img, 0.5); blend_truth(d.y.vals[i], boxes, truth); free_image(old_img); d.X.vals[i] = ai.data; } } else if (use_mixup == 3) { if (i_mixup == 0) { image tmp_img = make_image(w, h, c); d.X.vals[i] = tmp_img.data; } if (flip) { int tmp = pleft; pleft = pright; pright = tmp; } const int left_shift = min_val_cmp(cut_x[i], max_val_cmp(0, (-pleft*w / ow))); const int top_shift = min_val_cmp(cut_y[i], max_val_cmp(0, (-ptop*h / oh))); const int right_shift = min_val_cmp((w - cut_x[i]), max_val_cmp(0, (-pright*w / ow))); const int bot_shift = min_val_cmp(h - cut_y[i], max_val_cmp(0, (-pbot*h / oh))); int k, x, y; for (k = 0; k < c; ++k) { for (y = 0; y < h; ++y) { int j = y*w + k*w*h; if (i_mixup == 0 && y < cut_y[i]) { int j_src = (w - cut_x[i] - right_shift) + (y + h - cut_y[i] - bot_shift)*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 1 && y < cut_y[i]) { int j_src = left_shift + (y + h - cut_y[i] - bot_shift)*w + k*w*h; memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w-cut_x[i]) * sizeof(float)); } if (i_mixup == 2 && y >= cut_y[i]) { int j_src = (w - cut_x[i] - right_shift) + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 3 && y >= cut_y[i]) { int 
j_src = left_shift + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w - cut_x[i]) * sizeof(float)); } } } blend_truth_mosaic(d.y.vals[i], boxes, truth, w, h, cut_x[i], cut_y[i], i_mixup, left_shift, right_shift, top_shift, bot_shift); free_image(ai); ai.data = d.X.vals[i]; } if (show_imgs && i_mixup == use_mixup) // delete i_mixup { image tmp_ai = copy_image(ai); char buff[1000]; //sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); sprintf(buff, "aug_%d_%d_%d", random_index, i, random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1); if (!b.x) break; int left = (b.x - b.w / 2.)*ai.w; int right = (b.x + b.w / 2.)*ai.w; int top = (b.y - b.h / 2.)*ai.h; int bot = (b.y + b.h / 2.)*ai.h; draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(tmp_ai, buff); if (show_imgs == 1) { //char buff_src[1000]; //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); //show_image_mat(src, buff_src); show_image(tmp_ai, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Click on window and press ESC button \n"); free_image(tmp_ai); } release_mat(&src); free(truth); } if (random_paths) free(random_paths); } return d; } #else // OPENCV void blend_images(image new_img, float alpha, image old_img, float beta) { int data_size = new_img.w * new_img.h * new_img.c; int i; #pragma omp parallel for for (i = 0; i < data_size; ++i) new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta; } data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int gaussian_noise, int use_blur, int use_mixup, float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs) { const int random_index = random_gen(); c = c ? c : 3; char **random_paths; char **mixup_random_paths = NULL; if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else random_paths = get_random_paths(paths, n, m); //assert(use_mixup < 2); if (use_mixup == 2) { printf("\n cutmix=1 - isn't supported for Detector \n"); exit(0); } if (use_mixup == 3 || use_mixup == 4) { printf("\n mosaic=1 - compile Darknet with OpenCV for using mosaic=1 \n"); exit(0); } int mixup = use_mixup ? random_gen() % 2 : 0; //printf("\n mixup = %d \n", mixup); if (mixup) { if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else mixup_random_paths = get_random_paths(paths, n, m); } int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale; float dhue = 0, dsat = 0, dexp = 0, flip = 0; int augmentation_calculated = 0; d.y = make_matrix(n, 5 * boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(5 * boxes, sizeof(float)); char *filename = (i_mixup) ? 
mixup_random_paths[i] : random_paths[i];
            // load the image at its native resolution with c channels
            image orig = load_image(filename, 0, 0, c);

            int oh = orig.h;
            int ow = orig.w;

            // maximum random crop/pad amount in pixels for each border
            int dw = (ow*jitter);
            int dh = (oh*jitter);

            // when tracking sequential frames, compute the random augmentation
            // once and reuse it so consecutive frames are transformed alike
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();

                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);

                flip = use_flip ? random_gen() % 2 : 0;
            }

            // turn the pre-drawn uniforms into concrete per-border crop offsets
            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);
            float scale = rand_precalc_random(.25, 2, r_scale); // unused currently

            // letterbox: widen the crop so the image keeps its aspect ratio
            // inside the w x h network input instead of being stretched
            if (letter_box)
            {
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1)  // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh) / 2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else  // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow) / 2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }

            // crop the jittered window and keep the transform (dx, dy, 1/sx, 1/sy)
            // needed later to remap the ground-truth boxes into the crop
            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;

            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;

            image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;

            // resize to the network input, then photometric augmentation
            image sized = resize_image(cropped, w, h);
            if (flip) flip_image(sized);
            distort_image(sized, dhue, dsat, dexp);
            //random_distort_image(sized, hue, saturation, exposure);

            // load labels remapped through the same crop/flip transform
            fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);

            // mixup pass 2: average this image (and its labels) with the one
            // already stored in slot i by the previous i_mixup iteration
            if (i_mixup)
            {
                image old_img = sized;
                old_img.data = d.X.vals[i];
                //show_image(sized, "new");
                //show_image(old_img, "old");
                //wait_until_press_key_cv();
                blend_images(sized, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, d.y.vals[i]);
                free_image(old_img);
            }

            d.X.vals[i] = sized.data;
            memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));

            // optional debug dump: draw the remapped boxes and save the image
            if (show_imgs)// && i_mixup)
            {
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen());

                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*sized.w;
                    int right = (b.x + b.w / 2.)*sized.w;
                    int top = (b.y - b.h / 2.)*sized.h;
                    int bot = (b.y + b.h / 2.)*sized.h;
                    draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }

                save_image(sized, buff);
                if (show_imgs == 1) {
                    show_image(sized, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Press Enter: \n");
                //getchar();
            }

            free_image(orig);
            free_image(cropped);
            free(truth);
        }
    }
    free(random_paths);
    if (mixup_random_paths) free(mixup_random_paths);

    return d;
}
#endif    // OPENCV

// Worker entry point: take ownership of one heap-allocated load_args request,
// dispatch it to the matching loader by a.type, and free the request.
void *load_thread(void *ptr)
{
    //srand(time(0));
    //printf("Loading data: %d\n", random_gen());
    load_args a = *(struct load_args*)ptr;
    // 0 would disable the corresponding augmentation entirely; treat as "off" (1)
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;

    if (a.type == OLD_CLASSIFICATION_DATA){
        *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
    } else if (a.type == CLASSIFICATION_DATA){
        *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.mixup, a.blur, a.show_imgs, a.label_smooth_eps, a.dontuse_opencv);
    } else if (a.type == SUPER_DATA){
        *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
    } else if (a.type == WRITING_DATA){
        *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
    } else if (a.type == REGION_DATA){
        *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == DETECTION_DATA){
        *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.gaussian_noise, a.blur, a.mixup, a.jitter, a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs);
    } else if (a.type == SWAG_DATA){
        *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
    } else if (a.type == COMPARE_DATA){
        *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
    } else if (a.type == IMAGE_DATA){
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = resize_image(*(a.im), a.w, a.h);
    }else if (a.type == LETTERBOX_DATA) {
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
    } else if (a.type == TAG_DATA){
        *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min,
a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    free(ptr);
    return 0;
}

// Run load_thread() once in a new joinable thread; caller must pthread_join
// the returned handle.  The heap copy of args is freed by load_thread itself.
pthread_t load_data_in_thread(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
    return thread;
}

// Shared state of the lazily-created permanent loader thread pool.
static const int thread_wait_ms = 5;         // polling interval of the busy-wait handshakes
static volatile int flag_exit;               // set to 1 to ask every pool thread to exit
static volatile int * run_load_data = NULL;  // per-thread "work is ready" flags
static load_args * args_swap = NULL;         // per-thread work descriptions, guarded by mtx_load_data
static pthread_t* threads = NULL;            // the pool itself; non-NULL once created

pthread_mutex_t mtx_load_data = PTHREAD_MUTEX_INITIALIZER;

// Body of one permanent pool thread (index *ptr): poll run_load_data[i] until
// work arrives, copy the request out of args_swap under the mutex, run it via
// load_thread(), then clear the flag to signal completion.
void *run_thread_loop(void *ptr)
{
    const int i = *(int *)ptr;

    while (!custom_atomic_load_int(&flag_exit)) {
        while (!custom_atomic_load_int(&run_load_data[i])) {
            if (custom_atomic_load_int(&flag_exit)) {
                free(ptr);
                return 0;
            }
            this_thread_sleep_for(thread_wait_ms);
        }

        pthread_mutex_lock(&mtx_load_data);
        // load_thread() frees this heap copy after dispatching it
        load_args *args_local = (load_args *)xcalloc(1, sizeof(load_args));
        *args_local = args_swap[i];
        pthread_mutex_unlock(&mtx_load_data);

        load_thread(args_local);

        custom_atomic_store_int(&run_load_data[i], 0);
    }
    free(ptr);
    return 0;
}

// Split one request of args.n samples across the permanent pool, wait for all
// workers, and concatenate their partial datasets into *args.d.
void *load_threads(void *ptr)
{
    //srand(time(0));
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data* buffers = (data*)xcalloc(args.threads, sizeof(data));

    // lazily create the pool on first use; it lives until free_load_threads()
    if (!threads) {
        threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t));
        run_load_data = (volatile int *)xcalloc(args.threads, sizeof(int));
        args_swap = (load_args *)xcalloc(args.threads, sizeof(load_args));
        fprintf(stderr, " Create %d permanent cpu-threads \n", args.threads);

        for (i = 0; i < args.threads; ++i) {
            int* ptr = (int*)xcalloc(1, sizeof(int));
            *ptr = i;
            if (pthread_create(&threads[i], 0, run_thread_loop, ptr)) error("Thread creation failed");
        }
    }

    // hand each worker an (almost) equal share of the total samples
    for (i = 0; i < args.threads; ++i) {
        args.d = buffers + i;
        args.n = (i + 1) * total / args.threads - i * total / args.threads;

        pthread_mutex_lock(&mtx_load_data);
        args_swap[i] = args;
        pthread_mutex_unlock(&mtx_load_data);

        custom_atomic_store_int(&run_load_data[i], 1);  // run thread
    }

    // busy-wait until every worker clears its flag (poor man's join)
    for (i = 0; i < args.threads; ++i) {
        while (custom_atomic_load_int(&run_load_data[i])) this_thread_sleep_for(thread_wait_ms);  //   join
    }

    /*
       pthread_t* threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t));
       for(i = 0; i < args.threads; ++i){
           args.d = buffers + i;
           args.n = (i+1) * total/args.threads - i * total/args.threads;
           threads[i] = load_data_in_thread(args);
       }
       for(i = 0; i < args.threads; ++i){
           pthread_join(threads[i], 0);
       }
     */

    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;   // rows are now owned by *out; free only the shells
        free_data(buffers[i]);
    }
    free(buffers);
    //free(threads);
    return 0;
}

// Shut the permanent pool down (join all workers) and release its shared state,
// so a later load can recreate the pool with a different thread count.
void free_load_threads(void *ptr)
{
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    int i;
    if (threads) {
        custom_atomic_store_int(&flag_exit, 1);
        for (i = 0; i < args.threads; ++i) {
            pthread_join(threads[i], 0);
        }
        free((void*)run_load_data);
        free(args_swap);
        free(threads);
        threads = NULL;
        custom_atomic_store_int(&flag_exit, 0);
    }
}

// Asynchronous front-end: run load_threads() in its own thread and return the
// handle so the caller can pthread_join when the data is needed.
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
    return thread;
}

// Segmentation-style pairs: X = input images, y = grayscale "-label.png"
// counterparts derived from each input path.
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths);   // frees only the sampled pointer array, not the strings
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);
    free(replace_paths);
    return d;
}

// Plain classification loader without augmentation: images resized to w x h,
// labels matched against the path names.
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n,
w, h);
    d.y = load_labels_paths(paths, n, labels, k, 0, 0);
    if(m) free(paths);
    return d;
}

/*
   data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
   {
   data d = {0};
   d.indexes = calloc(n, sizeof(int));
   if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
   d.shallow = 0;
   d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure);
   d.y = load_labels_paths(paths, n, labels, k);
   if(m) free(paths);
   return d;
   }
 */

// Super-resolution pairs: y is a random (w*scale x h*scale) crop of the source
// image, X is that same crop downscaled to (w x h); both flipped together.
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;

    int i;
    d.X.rows = n;
    d.X.vals = (float**)xcalloc(n, sizeof(float*));
    d.X.cols = w*h*3;

    d.y.rows = n;
    d.y.vals = (float**)xcalloc(n, sizeof(float*));
    d.y.cols = w*scale * h*scale * 3;

    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = random_gen()%2;
        if (flip) flip_image(crop);
        image resize = resize_image(crop, w, h);
        d.X.vals[i] = resize.data;   // ownership of the pixel buffers moves into d
        d.y.vals[i] = crop.data;
        free_image(im);
    }

    if(m) free(paths);
    return d;
}

// Classification loader with geometric/photometric augmentation plus optional
// image blending: use_mixup 1 = MixUp, 2 = CutMix, 3 = Mosaic,
// 4 = alternate CutMix/Mosaic per sample.  Blending fires on ~50% of calls.
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int use_mixup, int use_blur, int show_imgs, float label_smooth_eps, int dontuse_opencv)
{
    char **paths_stored = paths;
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv);
    d.y = load_labels_paths(paths, n, labels, k, hierarchy, label_smooth_eps);

    if (use_mixup && rand_int(0, 1)) {
        // second independently-sampled, independently-augmented dataset to mix with
        char **paths_mix = get_random_paths(paths_stored, n, m);
        data d2 = { 0 };
        d2.shallow = 0;
        d2.X = load_image_augment_paths(paths_mix, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv);
        d2.y = load_labels_paths(paths_mix, n, labels, k, hierarchy, label_smooth_eps);
        free(paths_mix);

        data d3 = { 0 };
        d3.shallow = 0;
        data d4 = { 0 };
        d4.shallow = 0;
        if (use_mixup >= 3) {
            // Mosaic needs two further source datasets (4 quadrants in total)
            char **paths_mix3 = get_random_paths(paths_stored, n, m);
            d3.X = load_image_augment_paths(paths_mix3, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv);
            d3.y = load_labels_paths(paths_mix3, n, labels, k, hierarchy, label_smooth_eps);
            free(paths_mix3);

            char **paths_mix4 = get_random_paths(paths_stored, n, m);
            d4.X = load_image_augment_paths(paths_mix4, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv);
            d4.y = load_labels_paths(paths_mix4, n, labels, k, hierarchy, label_smooth_eps);
            free(paths_mix4);
        }

        // mix
        int i, j;
        for (i = 0; i < d2.X.rows; ++i) {

            int mixup = use_mixup;
            if (use_mixup == 4) mixup = rand_int(2, 3);   // alternate CutMix and Mosaic

            // MixUp -----------------------------------
            if (mixup == 1) {
                // mix images (fixed 0.5/0.5 blend)
                for (j = 0; j < d2.X.cols; ++j) {
                    d.X.vals[i][j] = (d.X.vals[i][j] + d2.X.vals[i][j]) / 2.0f;
                }

                // mix labels
                for (j = 0; j < d2.y.cols; ++j) {
                    d.y.vals[i][j] = (d.y.vals[i][j] + d2.y.vals[i][j]) / 2.0f;
                }
            }
            // CutMix -----------------------------------
            else if (mixup == 2) {
                const float min = 0.3;   // 0.3*0.3 = 9%
                const float max = 0.8;   // 0.8*0.8 = 64%
                const int cut_w = rand_int(w*min, w*max);
                const int cut_h = rand_int(h*min, h*max);
                const int cut_x = rand_int(0, w - cut_w - 1);
                const int cut_y = rand_int(0, h - cut_h - 1);
                const int left = cut_x;
                const int right = cut_x + cut_w;
                const int top = cut_y;
                const int bot = cut_y + cut_h;

                assert(cut_x >= 0 && cut_x <= w);
                assert(cut_y >= 0 && cut_y <= h);
                assert(cut_w >= 0 && cut_w <= w);
                assert(cut_h >= 0 && cut_h <= h);

                assert(right >= 0 && right <= w);
                assert(bot >= 0 && bot <= h);

                assert(top <= bot);
                assert(left <= right);

                // labels are mixed by the pasted patch's area fraction
                const float alpha = (float)(cut_w*cut_h) / (float)(w*h);
                const float beta = 1 - alpha;

                // paste the d2 patch over d (CHW layout: j = x + y*w + c*w*h)
                int c, x, y;
                for (c = 0; c < 3; ++c) {
                    for (y = top; y < bot; ++y) {
                        for (x = left; x < right; ++x) {
                            int j = x + y*w + c*w*h;
                            d.X.vals[i][j] = d2.X.vals[i][j];
                        }
                    }
                }

                //printf("\n alpha = %f, beta = %f \n", alpha, beta);
                // mix labels
                for (j = 0; j < d.y.cols; ++j) {
                    d.y.vals[i][j] = d.y.vals[i][j] * beta + d2.y.vals[i][j] * alpha;
                }
            }
            // Mosaic -----------------------------------
            else if (mixup == 3) {
                // random split point; each of the 4 sources fills one quadrant
                const float min_offset = 0.2; // 20%
                const int cut_x = rand_int(w*min_offset, w*(1 - min_offset));
                const int cut_y = rand_int(h*min_offset, h*(1 - min_offset));

                // area share of each quadrant, used to weight the labels
                float s1 = (float)(cut_x * cut_y) / (w*h);
                float s2 = (float)((w - cut_x) * cut_y) / (w*h);
                float s3 = (float)(cut_x * (h - cut_y)) / (w*h);
                float s4 = (float)((w - cut_x) * (h - cut_y)) / (w*h);

                int c, x, y;
                for (c = 0; c < 3; ++c) {
                    for (y = 0; y < h; ++y) {
                        for (x = 0; x < w; ++x) {
                            int j = x + y*w + c*w*h;
                            if (x < cut_x && y < cut_y) d.X.vals[i][j] = d.X.vals[i][j];
                            if (x >= cut_x && y < cut_y) d.X.vals[i][j] = d2.X.vals[i][j];
                            if (x < cut_x && y >= cut_y) d.X.vals[i][j] = d3.X.vals[i][j];
                            if (x >= cut_x && y >= cut_y) d.X.vals[i][j] = d4.X.vals[i][j];
                        }
                    }
                }

                for (j = 0; j < d.y.cols; ++j) {
                    const float max_s = 1;// max_val_cmp(s1, max_val_cmp(s2, max_val_cmp(s3, s4)));

                    d.y.vals[i][j] = d.y.vals[i][j] * s1 / max_s + d2.y.vals[i][j] * s2 / max_s + d3.y.vals[i][j] * s3 / max_s + d4.y.vals[i][j] * s4 / max_s;
                }
            }
        }

        free_data(d2);

        if (use_mixup >= 3) {
            free_data(d3);
            free_data(d4);
        }
    }

#ifdef OPENCV
    // optionally blur a random half of the images (kernel = use_blur; the
    // special value 1 means kernel size 17)
    if (use_blur) {
        int i;
        for (i = 0; i < d.X.rows; ++i) {
            if (random_gen() % 2) {
                image im = make_empty_image(w, h, 3);
                im.data = d.X.vals[i];

                int ksize = use_blur;
                if (use_blur == 1) ksize = 17;
                image blurred = blur_image(im, ksize);
                free_image(im);
                d.X.vals[i] = blurred.data;
                //if (i == 0) {
                //    show_image(im, "Not blurred");
                //    show_image(blurred, "blurred");
                //    wait_until_press_key_cv();
                //}
            }
        }
    }
#endif // OPENCV

    // debug: save every augmented image and print its active class scores
    if (show_imgs) {
        int i, j;
        for (i = 0; i < d.X.rows; ++i) {
            image im = make_empty_image(w, h, 3);
            im.data = d.X.vals[i];
            char buff[1000];
            sprintf(buff, "aug_%d_%s_%d", i, basecfg((char*)paths[i]), random_gen());
            save_image(im, buff);

            char buff_string[1000];
            sprintf(buff_string, "\n Classes: ");
            for (j = 0; j < d.y.cols; ++j) {
                if (d.y.vals[i][j] > 0) {
                    char buff_tmp[100];
                    sprintf(buff_tmp, " %d (%f), ", j, d.y.vals[i][j]);
                    strcat(buff_string, buff_tmp);
                }
            }
            printf("%s \n", buff_string);

            if (show_imgs == 1) {
                show_image(im, buff);
                wait_until_press_key_cv();
            }
        }
        printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
    }

    if (m) free(paths);
    return d;
}

// Tagging data: augmented images plus tag label vectors from load_tags_paths.
data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.w = w;
    d.h = h;
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_tags_paths(paths, n, k);
    if(m) free(paths);
    return d;
}

// Stack the rows of m2 below the rows of m1 (shallow: row pointers are shared).
matrix concat_matrix(matrix m1, matrix m2)
{
    int i, count = 0;
    matrix m;
    m.cols = m1.cols;
    m.rows = m1.rows+m2.rows;
    m.vals = (float**)xcalloc(m1.rows + m2.rows, sizeof(float*));
    for(i = 0; i < m1.rows; ++i){
        m.vals[count++] = m1.vals[i];
    }
    for(i = 0; i < m2.rows; ++i){
        m.vals[count++] = m2.vals[i];
    }
    return m;
}

// Shallow concatenation of two datasets; the result borrows the row pointers.
data concat_data(data d1, data d2)
{
    data d = {0};
    d.shallow = 1;
    d.X = concat_matrix(d1.X, d2.X);
    d.y = concat_matrix(d1.y, d2.y);
    return d;
}

// Fold n datasets into one, freeing the intermediate shallow wrappers.
data concat_datas(data *d, int n)
{
    int i;
    data out = {0};
    for(i = 0; i < n; ++i){
        data newdata = concat_data(d[i], out);
        free_data(out);
        out = newdata;
    }
    return out;
}

// CSV classification data: column `target` holds the class id, one-hot encoded
// into y with k classes; the remaining columns become X.
data load_categorical_data_csv(char *filename, int target, int k)
{
    data d = {0};
    d.shallow = 0;
    matrix X = csv_to_matrix(filename);
    float *truth_1d = pop_column(&X, target);
    float **truth =
one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } //translate_data_rows(d, -128); scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = random_gen()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. 
/ d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i+b*10000][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); //translate_data_rows(d, -128); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = random_gen()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; 
for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = (float**)xcalloc(num, sizeof(float*)); r.y.vals = (float**)xcalloc(num, sizeof(float*)); int i; for(i = 0; i < num; ++i){ int index = random_gen()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data* split = (data*)xcalloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train ={0}; data test ={0}; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = (float**)xcalloc(train.X.rows, sizeof(float*)); test.X.vals = (float**)xcalloc(test.X.rows, sizeof(float*)); train.y.vals = (float**)xcalloc(train.y.rows, sizeof(float*)); test.y.vals = (float**)xcalloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = train; split[1] = test; return split; }
remarks_parallel_in_target_state_machine.c
// RUN: %clang_cc1 -verify=host -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc // RUN: %clang_cc1 -verify -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out // RUN: %clang_cc1 -fexperimental-new-pass-manager -verify -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out // host-no-diagnostics void baz(void) __attribute__((assume("omp_no_openmp"))); void bar(void) { #pragma omp parallel // #1 \ // expected-remark@#1 {{Parallel region is used in unknown ways. Will not attempt to rewrite the state machine. [OMP101]}} { } } void foo(void) { #pragma omp target teams // #2 // expected-remark@#2 {{Rewriting generic-mode kernel with a customized state machine. [OMP131]}} { baz(); // expected-remark {{Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function to override. [OMP121]}} #pragma omp parallel { } bar(); #pragma omp parallel { } } } void spmd(void) { // Verify we do not emit the remarks above for "SPMD" regions. #pragma omp target teams #pragma omp parallel { } #pragma omp target teams distribute parallel for for (int i = 0; i < 100; ++i) { } } // expected-remark@* {{OpenMP runtime call __kmpc_global_thread_num deduplicated. [OMP170]}}
GB_unaryop__lnot_int8_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int8_uint16
// op(A') function:  GB_tran__lnot_int8_uint16

// C type:   int8_t
// A type:   uint16_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT of the casted value)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the A type to the C type
#define GB_CASTING(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_int8_uint16
(
    int8_t *Cx,         // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_int8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work using the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__ainv_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__ainv_fc64_fc64)
// op(A') function:  GB (_unop_tran__ainv_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_FC64_ainv (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (complex additive inverse)
#define GB_OP(z, x) \
    z = GB_FC64_ainv (x) ;

// casting (identity: A and C share the same type here)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GxB_FC64_t aij = Ax [pA] ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ;   \
    Cx [pC] = GB_FC64_ainv (z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__ainv_fc64_fc64)
(
    GxB_FC64_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_FC64_ainv (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_FC64_ainv (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__ainv_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work using the macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
PermutohedralSubmanifoldConvolutionRules.h
// Copyright 2016-present, Facebook, Inc. // All rights reserved. // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #ifndef PERMUTOHEDRALSUBMANIFOLDCONVOLUTIONRULES_H #define PERMUTOHEDRALSUBMANIFOLDCONVOLUTIONRULES_H // N=10 // import torch // a=torch.zeros(N,N) // for i in range(N): // for j in range(i): // dp=(a[i,:]*a[j,:]).sum() // a[i,j]=(0.5-dp)/a[j,j] // dp=(a[i,:]*a[i,:]).sum() // a[i,i]=(1-dp)**0.5 // ai=torch.inverse(a) // r=1 // for dim in range(1,N+1): // c=torch.arange((2*r+1)**dim).long()[:,None].expand(-1,dim) // c=c/((2*r+1)**torch.arange(0,dim).long()) // c%=2*r+1 // c-=r // c=c.float() // for x in c: // v=(x[:,None]*a[:dim,:dim]).sum(0) // m=(v*v).sum().item() // if m<=r**2+0.01: // print(v) std::vector<std::vector<std::vector<Int>>> permutohedralOffsets = { {}, {{0}, {-1}, {1}}, {{0, -1}, {1, -1}, {-1, 0}, {0, 0}, {1, 0}, {-1, 1}, {0, 1}}, {{0, 0, 0}, {0, 0, -1}, {1, 0, -1}, {0, 1, -1}, {0, -1, 0}, {1, -1, 0}, {-1, 0, 0}, {1, 0, 0}, {-1, 1, 0}, {0, 1, 0}, {0, -1, 1}, {-1, 0, 1}, {0, 0, 1}}, {{0, 0, 0, 0}, {0, 0, 0, -1}, {1, 0, 0, -1}, {0, 1, 0, -1}, {0, 0, 1, -1}, {0, 0, -1, 0}, {1, 0, -1, 0}, {0, 1, -1, 0}, {0, -1, 0, 0}, {1, -1, 0, 0}, {-1, 0, 0, 0}, {1, 0, 0, 0}, {-1, 1, 0, 0}, {0, 1, 0, 0}, {0, -1, 1, 0}, {-1, 0, 1, 0}, {0, 0, 1, 0}, {0, 0, -1, 1}, {0, -1, 0, 1}, {-1, 0, 0, 1}, {0, 0, 0, 1}}, {{0, 0, 0, 0, 0}, {0, 0, 0, 0, -1}, {1, 0, 0, 0, -1}, {0, 1, 0, 0, -1}, {0, 0, 1, 0, -1}, {0, 0, 0, 1, -1}, {0, 0, 0, -1, 0}, {1, 0, 0, -1, 0}, {0, 1, 0, -1, 0}, {0, 0, 1, -1, 0}, {0, 0, -1, 0, 0}, {1, 0, -1, 0, 0}, {0, 1, -1, 0, 0}, {0, -1, 0, 0, 0}, {1, -1, 0, 0, 0}, {-1, 0, 0, 0, 0}, {1, 0, 0, 0, 0}, {-1, 1, 0, 0, 0}, {0, 1, 0, 0, 0}, {0, -1, 1, 0, 0}, {-1, 0, 1, 0, 0}, {0, 0, 1, 0, 0}, {0, 0, -1, 1, 0}, {0, -1, 0, 1, 0}, {-1, 0, 0, 1, 0}, {0, 0, 0, 1, 0}, {0, 0, 0, -1, 1}, {0, 0, -1, 0, 1}, {0, -1, 0, 0, 1}, {-1, 0, 0, 0, 1}, {0, 0, 0, 0, 1}}, {{0, 0, 0, 0, 0, 0}, 
{0, 0, 0, 0, 0, -1}, {1, 0, 0, 0, 0, -1}, {0, 1, 0, 0, 0, -1}, {0, 0, 1, 0, 0, -1}, {0, 0, 0, 1, 0, -1}, {0, 0, 0, 0, 1, -1}, {0, 0, 0, 0, -1, 0}, {1, 0, 0, 0, -1, 0}, {0, 1, 0, 0, -1, 0}, {0, 0, 1, 0, -1, 0}, {0, 0, 0, 1, -1, 0}, {0, 0, 0, -1, 0, 0}, {1, 0, 0, -1, 0, 0}, {0, 1, 0, -1, 0, 0}, {0, 0, 1, -1, 0, 0}, {0, 0, -1, 0, 0, 0}, {1, 0, -1, 0, 0, 0}, {0, 1, -1, 0, 0, 0}, {0, -1, 0, 0, 0, 0}, {1, -1, 0, 0, 0, 0}, {-1, 0, 0, 0, 0, 0}, {1, 0, 0, 0, 0, 0}, {-1, 1, 0, 0, 0, 0}, {0, 1, 0, 0, 0, 0}, {0, -1, 1, 0, 0, 0}, {-1, 0, 1, 0, 0, 0}, {0, 0, 1, 0, 0, 0}, {0, 0, -1, 1, 0, 0}, {0, -1, 0, 1, 0, 0}, {-1, 0, 0, 1, 0, 0}, {0, 0, 0, 1, 0, 0}, {0, 0, 0, -1, 1, 0}, {0, 0, -1, 0, 1, 0}, {0, -1, 0, 0, 1, 0}, {-1, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, -1, 1}, {0, 0, 0, -1, 0, 1}, {0, 0, -1, 0, 0, 1}, {0, -1, 0, 0, 0, 1}, {-1, 0, 0, 0, 0, 1}, {0, 0, 0, 0, 0, 1}}, {{0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, -1}, {1, 0, 0, 0, 0, 0, -1}, {0, 1, 0, 0, 0, 0, -1}, {0, 0, 1, 0, 0, 0, -1}, {0, 0, 0, 1, 0, 0, -1}, {0, 0, 0, 0, 1, 0, -1}, {0, 0, 0, 0, 0, 1, -1}, {0, 0, 0, 0, 0, -1, 0}, {1, 0, 0, 0, 0, -1, 0}, {0, 1, 0, 0, 0, -1, 0}, {0, 0, 1, 0, 0, -1, 0}, {0, 0, 0, 1, 0, -1, 0}, {0, 0, 0, 0, 1, -1, 0}, {0, 0, 0, 0, -1, 0, 0}, {1, 0, 0, 0, -1, 0, 0}, {0, 1, 0, 0, -1, 0, 0}, {0, 0, 1, 0, -1, 0, 0}, {0, 0, 0, 1, -1, 0, 0}, {0, 0, 0, -1, 0, 0, 0}, {1, 0, 0, -1, 0, 0, 0}, {0, 1, 0, -1, 0, 0, 0}, {0, 0, 1, -1, 0, 0, 0}, {0, 0, -1, 0, 0, 0, 0}, {1, 0, -1, 0, 0, 0, 0}, {0, 1, -1, 0, 0, 0, 0}, {0, -1, 0, 0, 0, 0, 0}, {1, -1, 0, 0, 0, 0, 0}, {-1, 0, 0, 0, 0, 0, 0}, {1, 0, 0, 0, 0, 0, 0}, {-1, 1, 0, 0, 0, 0, 0}, {0, 1, 0, 0, 0, 0, 0}, {0, -1, 1, 0, 0, 0, 0}, {-1, 0, 1, 0, 0, 0, 0}, {0, 0, 1, 0, 0, 0, 0}, {0, 0, -1, 1, 0, 0, 0}, {0, -1, 0, 1, 0, 0, 0}, {-1, 0, 0, 1, 0, 0, 0}, {0, 0, 0, 1, 0, 0, 0}, {0, 0, 0, -1, 1, 0, 0}, {0, 0, -1, 0, 1, 0, 0}, {0, -1, 0, 0, 1, 0, 0}, {-1, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, -1, 1, 0}, {0, 0, 0, -1, 0, 1, 0}, {0, 0, -1, 0, 0, 1, 0}, 
{0, -1, 0, 0, 0, 1, 0}, {-1, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, -1, 1}, {0, 0, 0, 0, -1, 0, 1}, {0, 0, 0, -1, 0, 0, 1}, {0, 0, -1, 0, 0, 0, 1}, {0, -1, 0, 0, 0, 0, 1}, {-1, 0, 0, 0, 0, 0, 1}, {0, 0, 0, 0, 0, 0, 1}}, {{0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, -1}, {1, 0, 0, 0, 0, 0, 0, -1}, {0, 1, 0, 0, 0, 0, 0, -1}, {0, 0, 1, 0, 0, 0, 0, -1}, {0, 0, 0, 1, 0, 0, 0, -1}, {0, 0, 0, 0, 1, 0, 0, -1}, {0, 0, 0, 0, 0, 1, 0, -1}, {0, 0, 0, 0, 0, 0, 1, -1}, {0, 0, 0, 0, 0, 0, -1, 0}, {1, 0, 0, 0, 0, 0, -1, 0}, {0, 1, 0, 0, 0, 0, -1, 0}, {0, 0, 1, 0, 0, 0, -1, 0}, {0, 0, 0, 1, 0, 0, -1, 0}, {0, 0, 0, 0, 1, 0, -1, 0}, {0, 0, 0, 0, 0, 1, -1, 0}, {0, 0, 0, 0, 0, -1, 0, 0}, {1, 0, 0, 0, 0, -1, 0, 0}, {0, 1, 0, 0, 0, -1, 0, 0}, {0, 0, 1, 0, 0, -1, 0, 0}, {0, 0, 0, 1, 0, -1, 0, 0}, {0, 0, 0, 0, 1, -1, 0, 0}, {0, 0, 0, 0, -1, 0, 0, 0}, {1, 0, 0, 0, -1, 0, 0, 0}, {0, 1, 0, 0, -1, 0, 0, 0}, {0, 0, 1, 0, -1, 0, 0, 0}, {0, 0, 0, 1, -1, 0, 0, 0}, {0, 0, 0, -1, 0, 0, 0, 0}, {1, 0, 0, -1, 0, 0, 0, 0}, {0, 1, 0, -1, 0, 0, 0, 0}, {0, 0, 1, -1, 0, 0, 0, 0}, {0, 0, -1, 0, 0, 0, 0, 0}, {1, 0, -1, 0, 0, 0, 0, 0}, {0, 1, -1, 0, 0, 0, 0, 0}, {0, -1, 0, 0, 0, 0, 0, 0}, {1, -1, 0, 0, 0, 0, 0, 0}, {-1, 0, 0, 0, 0, 0, 0, 0}, {1, 0, 0, 0, 0, 0, 0, 0}, {-1, 1, 0, 0, 0, 0, 0, 0}, {0, 1, 0, 0, 0, 0, 0, 0}, {0, -1, 1, 0, 0, 0, 0, 0}, {-1, 0, 1, 0, 0, 0, 0, 0}, {0, 0, 1, 0, 0, 0, 0, 0}, {0, 0, -1, 1, 0, 0, 0, 0}, {0, -1, 0, 1, 0, 0, 0, 0}, {-1, 0, 0, 1, 0, 0, 0, 0}, {0, 0, 0, 1, 0, 0, 0, 0}, {0, 0, 0, -1, 1, 0, 0, 0}, {0, 0, -1, 0, 1, 0, 0, 0}, {0, -1, 0, 0, 1, 0, 0, 0}, {-1, 0, 0, 0, 1, 0, 0, 0}, {0, 0, 0, 0, 1, 0, 0, 0}, {0, 0, 0, 0, -1, 1, 0, 0}, {0, 0, 0, -1, 0, 1, 0, 0}, {0, 0, -1, 0, 0, 1, 0, 0}, {0, -1, 0, 0, 0, 1, 0, 0}, {-1, 0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, -1, 1, 0}, {0, 0, 0, 0, -1, 0, 1, 0}, {0, 0, 0, -1, 0, 0, 1, 0}, {0, 0, -1, 0, 0, 0, 1, 0}, {0, -1, 0, 0, 0, 0, 1, 0}, {-1, 0, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 
0, 0, -1, 1}, {0, 0, 0, 0, 0, -1, 0, 1}, {0, 0, 0, 0, -1, 0, 0, 1}, {0, 0, 0, -1, 0, 0, 0, 1}, {0, 0, -1, 0, 0, 0, 0, 1}, {0, -1, 0, 0, 0, 0, 0, 1}, {-1, 0, 0, 0, 0, 0, 0, 1}, {0, 0, 0, 0, 0, 0, 0, 1}}, {{0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, -1}, {1, 0, 0, 0, 0, 0, 0, 0, -1}, {0, 1, 0, 0, 0, 0, 0, 0, -1}, {0, 0, 1, 0, 0, 0, 0, 0, -1}, {0, 0, 0, 1, 0, 0, 0, 0, -1}, {0, 0, 0, 0, 1, 0, 0, 0, -1}, {0, 0, 0, 0, 0, 1, 0, 0, -1}, {0, 0, 0, 0, 0, 0, 1, 0, -1}, {0, 0, 0, 0, 0, 0, 0, 1, -1}, {0, 0, 0, 0, 0, 0, 0, -1, 0}, {1, 0, 0, 0, 0, 0, 0, -1, 0}, {0, 1, 0, 0, 0, 0, 0, -1, 0}, {0, 0, 1, 0, 0, 0, 0, -1, 0}, {0, 0, 0, 1, 0, 0, 0, -1, 0}, {0, 0, 0, 0, 1, 0, 0, -1, 0}, {0, 0, 0, 0, 0, 1, 0, -1, 0}, {0, 0, 0, 0, 0, 0, 1, -1, 0}, {0, 0, 0, 0, 0, 0, -1, 0, 0}, {1, 0, 0, 0, 0, 0, -1, 0, 0}, {0, 1, 0, 0, 0, 0, -1, 0, 0}, {0, 0, 1, 0, 0, 0, -1, 0, 0}, {0, 0, 0, 1, 0, 0, -1, 0, 0}, {0, 0, 0, 0, 1, 0, -1, 0, 0}, {0, 0, 0, 0, 0, 1, -1, 0, 0}, {0, 0, 0, 0, 0, -1, 0, 0, 0}, {1, 0, 0, 0, 0, -1, 0, 0, 0}, {0, 1, 0, 0, 0, -1, 0, 0, 0}, {0, 0, 1, 0, 0, -1, 0, 0, 0}, {0, 0, 0, 1, 0, -1, 0, 0, 0}, {0, 0, 0, 0, 1, -1, 0, 0, 0}, {0, 0, 0, 0, -1, 0, 0, 0, 0}, {1, 0, 0, 0, -1, 0, 0, 0, 0}, {0, 1, 0, 0, -1, 0, 0, 0, 0}, {0, 0, 1, 0, -1, 0, 0, 0, 0}, {0, 0, 0, 1, -1, 0, 0, 0, 0}, {0, 0, 0, -1, 0, 0, 0, 0, 0}, {1, 0, 0, -1, 0, 0, 0, 0, 0}, {0, 1, 0, -1, 0, 0, 0, 0, 0}, {0, 0, 1, -1, 0, 0, 0, 0, 0}, {0, 0, -1, 0, 0, 0, 0, 0, 0}, {1, 0, -1, 0, 0, 0, 0, 0, 0}, {0, 1, -1, 0, 0, 0, 0, 0, 0}, {0, -1, 0, 0, 0, 0, 0, 0, 0}, {1, -1, 0, 0, 0, 0, 0, 0, 0}, {-1, 0, 0, 0, 0, 0, 0, 0, 0}, {1, 0, 0, 0, 0, 0, 0, 0, 0}, {-1, 1, 0, 0, 0, 0, 0, 0, 0}, {0, 1, 0, 0, 0, 0, 0, 0, 0}, {0, -1, 1, 0, 0, 0, 0, 0, 0}, {-1, 0, 1, 0, 0, 0, 0, 0, 0}, {0, 0, 1, 0, 0, 0, 0, 0, 0}, {0, 0, -1, 1, 0, 0, 0, 0, 0}, {0, -1, 0, 1, 0, 0, 0, 0, 0}, {-1, 0, 0, 1, 0, 0, 0, 0, 0}, {0, 0, 0, 1, 0, 0, 0, 0, 0}, {0, 0, 0, -1, 1, 0, 0, 0, 0}, {0, 0, -1, 0, 1, 0, 0, 0, 0}, {0, -1, 0, 0, 1, 0, 0, 0, 0}, {-1, 0, 0, 0, 1, 0, 0, 0, 0}, 
{0, 0, 0, 0, 1, 0, 0, 0, 0}, {0, 0, 0, 0, -1, 1, 0, 0, 0}, {0, 0, 0, -1, 0, 1, 0, 0, 0}, {0, 0, -1, 0, 0, 1, 0, 0, 0}, {0, -1, 0, 0, 0, 1, 0, 0, 0}, {-1, 0, 0, 0, 0, 1, 0, 0, 0}, {0, 0, 0, 0, 0, 1, 0, 0, 0}, {0, 0, 0, 0, 0, -1, 1, 0, 0}, {0, 0, 0, 0, -1, 0, 1, 0, 0}, {0, 0, 0, -1, 0, 0, 1, 0, 0}, {0, 0, -1, 0, 0, 0, 1, 0, 0}, {0, -1, 0, 0, 0, 0, 1, 0, 0}, {-1, 0, 0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, 0, -1, 1, 0}, {0, 0, 0, 0, 0, -1, 0, 1, 0}, {0, 0, 0, 0, -1, 0, 0, 1, 0}, {0, 0, 0, -1, 0, 0, 0, 1, 0}, {0, 0, -1, 0, 0, 0, 0, 1, 0}, {0, -1, 0, 0, 0, 0, 0, 1, 0}, {-1, 0, 0, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, 0, 0, -1, 1}, {0, 0, 0, 0, 0, 0, -1, 0, 1}, {0, 0, 0, 0, 0, -1, 0, 0, 1}, {0, 0, 0, 0, -1, 0, 0, 0, 1}, {0, 0, 0, -1, 0, 0, 0, 0, 1}, {0, 0, -1, 0, 0, 0, 0, 0, 1}, {0, -1, 0, 0, 0, 0, 0, 0, 1}, {-1, 0, 0, 0, 0, 0, 0, 0, 1}, {0, 0, 0, 0, 0, 0, 0, 0, 1}}, {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, -1}, {1, 0, 0, 0, 0, 0, 0, 0, 0, -1}, {0, 1, 0, 0, 0, 0, 0, 0, 0, -1}, {0, 0, 1, 0, 0, 0, 0, 0, 0, -1}, {0, 0, 0, 1, 0, 0, 0, 0, 0, -1}, {0, 0, 0, 0, 1, 0, 0, 0, 0, -1}, {0, 0, 0, 0, 0, 1, 0, 0, 0, -1}, {0, 0, 0, 0, 0, 0, 1, 0, 0, -1}, {0, 0, 0, 0, 0, 0, 0, 1, 0, -1}, {0, 0, 0, 0, 0, 0, 0, 0, 1, -1}, {0, 0, 0, 0, 0, 0, 0, 0, -1, 0}, {1, 0, 0, 0, 0, 0, 0, 0, -1, 0}, {0, 1, 0, 0, 0, 0, 0, 0, -1, 0}, {0, 0, 1, 0, 0, 0, 0, 0, -1, 0}, {0, 0, 0, 1, 0, 0, 0, 0, -1, 0}, {0, 0, 0, 0, 1, 0, 0, 0, -1, 0}, {0, 0, 0, 0, 0, 1, 0, 0, -1, 0}, {0, 0, 0, 0, 0, 0, 1, 0, -1, 0}, {0, 0, 0, 0, 0, 0, 0, 1, -1, 0}, {0, 0, 0, 0, 0, 0, 0, -1, 0, 0}, {1, 0, 0, 0, 0, 0, 0, -1, 0, 0}, {0, 1, 0, 0, 0, 0, 0, -1, 0, 0}, {0, 0, 1, 0, 0, 0, 0, -1, 0, 0}, {0, 0, 0, 1, 0, 0, 0, -1, 0, 0}, {0, 0, 0, 0, 1, 0, 0, -1, 0, 0}, {0, 0, 0, 0, 0, 1, 0, -1, 0, 0}, {0, 0, 0, 0, 0, 0, 1, -1, 0, 0}, {0, 0, 0, 0, 0, 0, -1, 0, 0, 0}, {1, 0, 0, 0, 0, 0, -1, 0, 0, 0}, {0, 1, 0, 0, 0, 0, -1, 0, 0, 0}, {0, 0, 1, 0, 0, 0, -1, 0, 0, 0}, {0, 0, 0, 1, 0, 
0, -1, 0, 0, 0}, {0, 0, 0, 0, 1, 0, -1, 0, 0, 0}, {0, 0, 0, 0, 0, 1, -1, 0, 0, 0}, {0, 0, 0, 0, 0, -1, 0, 0, 0, 0}, {1, 0, 0, 0, 0, -1, 0, 0, 0, 0}, {0, 1, 0, 0, 0, -1, 0, 0, 0, 0}, {0, 0, 1, 0, 0, -1, 0, 0, 0, 0}, {0, 0, 0, 1, 0, -1, 0, 0, 0, 0}, {0, 0, 0, 0, 1, -1, 0, 0, 0, 0}, {0, 0, 0, 0, -1, 0, 0, 0, 0, 0}, {1, 0, 0, 0, -1, 0, 0, 0, 0, 0}, {0, 1, 0, 0, -1, 0, 0, 0, 0, 0}, {0, 0, 1, 0, -1, 0, 0, 0, 0, 0}, {0, 0, 0, 1, -1, 0, 0, 0, 0, 0}, {0, 0, 0, -1, 0, 0, 0, 0, 0, 0}, {1, 0, 0, -1, 0, 0, 0, 0, 0, 0}, {0, 1, 0, -1, 0, 0, 0, 0, 0, 0}, {0, 0, 1, -1, 0, 0, 0, 0, 0, 0}, {0, 0, -1, 0, 0, 0, 0, 0, 0, 0}, {1, 0, -1, 0, 0, 0, 0, 0, 0, 0}, {0, 1, -1, 0, 0, 0, 0, 0, 0, 0}, {0, -1, 0, 0, 0, 0, 0, 0, 0, 0}, {1, -1, 0, 0, 0, 0, 0, 0, 0, 0}, {-1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {-1, 1, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, {0, -1, 1, 0, 0, 0, 0, 0, 0, 0}, {-1, 0, 1, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 1, 0, 0, 0, 0, 0, 0, 0}, {0, 0, -1, 1, 0, 0, 0, 0, 0, 0}, {0, -1, 0, 1, 0, 0, 0, 0, 0, 0}, {-1, 0, 0, 1, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 1, 0, 0, 0, 0, 0, 0}, {0, 0, 0, -1, 1, 0, 0, 0, 0, 0}, {0, 0, -1, 0, 1, 0, 0, 0, 0, 0}, {0, -1, 0, 0, 1, 0, 0, 0, 0, 0}, {-1, 0, 0, 0, 1, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 1, 0, 0, 0, 0, 0}, {0, 0, 0, 0, -1, 1, 0, 0, 0, 0}, {0, 0, 0, -1, 0, 1, 0, 0, 0, 0}, {0, 0, -1, 0, 0, 1, 0, 0, 0, 0}, {0, -1, 0, 0, 0, 1, 0, 0, 0, 0}, {-1, 0, 0, 0, 0, 1, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 1, 0, 0, 0, 0}, {0, 0, 0, 0, 0, -1, 1, 0, 0, 0}, {0, 0, 0, 0, -1, 0, 1, 0, 0, 0}, {0, 0, 0, -1, 0, 0, 1, 0, 0, 0}, {0, 0, -1, 0, 0, 0, 1, 0, 0, 0}, {0, -1, 0, 0, 0, 0, 1, 0, 0, 0}, {-1, 0, 0, 0, 0, 0, 1, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 1, 0, 0, 0}, {0, 0, 0, 0, 0, 0, -1, 1, 0, 0}, {0, 0, 0, 0, 0, -1, 0, 1, 0, 0}, {0, 0, 0, 0, -1, 0, 0, 1, 0, 0}, {0, 0, 0, -1, 0, 0, 0, 1, 0, 0}, {0, 0, -1, 0, 0, 0, 0, 1, 0, 0}, {0, -1, 0, 0, 0, 0, 0, 1, 0, 0}, {-1, 0, 0, 0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, 0, 0, -1, 1, 0}, {0, 0, 0, 
0, 0, 0, -1, 0, 1, 0}, {0, 0, 0, 0, 0, -1, 0, 0, 1, 0}, {0, 0, 0, 0, -1, 0, 0, 0, 1, 0}, {0, 0, 0, -1, 0, 0, 0, 0, 1, 0}, {0, 0, -1, 0, 0, 0, 0, 0, 1, 0}, {0, -1, 0, 0, 0, 0, 0, 0, 1, 0}, {-1, 0, 0, 0, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, 0, 0, 0, -1, 1}, {0, 0, 0, 0, 0, 0, 0, -1, 0, 1}, {0, 0, 0, 0, 0, 0, -1, 0, 0, 1}, {0, 0, 0, 0, 0, -1, 0, 0, 0, 1}, {0, 0, 0, 0, -1, 0, 0, 0, 0, 1}, {0, 0, 0, -1, 0, 0, 0, 0, 0, 1}, {0, 0, -1, 0, 0, 0, 0, 0, 0, 1}, {0, -1, 0, 0, 0, 0, 0, 0, 0, 1}, {-1, 0, 0, 0, 0, 0, 0, 0, 0, 1}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}}; template <Int dimension> class PermutohedralRegionIterator; template <Int dimension> class PermutohedralRegion { public: Point<dimension> x; PermutohedralRegion(const Point<dimension> &x) : x(x) { assert(dimension <= 10); } PermutohedralRegionIterator<dimension> begin() { return PermutohedralRegionIterator<dimension>(*this, x); } PermutohedralRegionIterator<dimension> end() { // Not really used by the custom operator!= function return PermutohedralRegionIterator<dimension>(*this, x); } }; template <Int dimension> class PermutohedralRegionIterator { private: PermutohedralRegion<dimension> &region; unsigned int offset; public: Point<dimension> point; bool stillLooping; PermutohedralRegionIterator(PermutohedralRegion<dimension> &region, Point<dimension> &point) : region(region), offset(0), point(point), stillLooping(true) {} PermutohedralRegionIterator<dimension> &operator++() { auto offsets = permutohedralOffsets[dimension]; if (++offset == offsets.size()) { stillLooping = false; // Signal to operator!= to end iteration } else { auto delta = offsets[offset]; for (Int i = 0; i < dimension; ++i) point[i] = region.x[i] + delta[i]; } return *this; } Point<dimension> &operator*() { return point; } }; // Only to be used for checking the end point of range based for loops. 
template <Int dimension> inline bool operator!=(const PermutohedralRegionIterator<dimension> &lhs, const PermutohedralRegionIterator<dimension> &rhs) { return lhs.stillLooping; } // Call for each convolutional layer, once for each batch item. // rules is used to carry out the "lowering" whilst carrying out the convolution template <Int dimension> double PermutohedralSubmanifoldConvolution_SgToRules(SparseGrid<dimension> &grid, RuleBook &rules) { double countActiveInputs = 0; for (auto const &outputIter : grid.mp) { auto inRegion = PermutohedralRegion<dimension>(outputIter.first); Int rulesOffset = 0; for (auto inputPoint : inRegion) { auto inputIter = grid.mp.find(inputPoint); if (inputIter != grid.mp.end()) { rules[rulesOffset].push_back(inputIter->second + grid.ctr); rules[rulesOffset].push_back(outputIter.second + grid.ctr); countActiveInputs++; } rulesOffset++; } } return countActiveInputs; } template <Int dimension> Int PermutohedralSubmanifoldConvolution_SgsToRules(SparseGrids<dimension> &SGs, RuleBook &rules) { Int sd = permutohedralOffsets[dimension].size(); Int countActiveInputs = 0; rules.clear(); rules.resize(sd); for (Int i = 0; i < (Int)SGs.size(); i++) countActiveInputs += PermutohedralSubmanifoldConvolution_SgToRules<dimension>(SGs[i], rules); return countActiveInputs; } template <Int dimension> Int PermutohedralSubmanifoldConvolution_SgsToRules_OMP( SparseGrids<dimension> &SGs, RuleBook &rules) { std::vector<RuleBook> rbs(SGs.size()); std::vector<double> countActiveInputs(SGs.size()); rules.clear(); Int sd = permutohedralOffsets[dimension].size(); rules.resize(sd); { Int i; #pragma omp parallel for private(i) for (i = 0; i < (Int)SGs.size(); i++) { rbs[i].resize(sd); countActiveInputs[i] = PermutohedralSubmanifoldConvolution_SgToRules<dimension>(SGs[i], rbs[i]); } } { Int i; #pragma omp parallel for private(i) for (i = 0; i < sd; i++) for (auto const &rb : rbs) rules[i].insert(rules[i].end(), rb[i].begin(), rb[i].end()); } Int countActiveInputs_ = 
0; for (auto &i : countActiveInputs) countActiveInputs_ += i; return countActiveInputs_; } #endif /* PERMUTOHEDRALSUBMANIFOLDCONVOLUTIONRULES_H */
GB_unaryop__abs_uint64_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_uint64_fp64
// op(A') function: GB_tran__abs_uint64_fp64

// C type: uint64_t
// A type: double
// cast: uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop: cij = aij

// A (input) entry type
#define GB_ATYPE \
    double

// C (output) entry type
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity here — the work of this kernel is the
// double -> uint64_t cast performed by GB_CASTING below
#define GB_OP(z, x) \
    z = x ;

// casting: saturating/checked conversion from double to 64-bit unsigned
#define GB_CASTING(z, aij) \
    uint64_t z ; GB_CAST_UNSIGNED(z,aij,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the cast/op to anz entries.  Cx and Ax may alias safely because
// each output Cx [p] depends only on the matching input Ax [p].
GrB_Info GB_unop__abs_uint64_fp64
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, textually included below and
// specialized by the GB_* macros defined above (phase 2 of the transpose).
GrB_Info GB_tran__abs_uint64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
THTensorMath.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/THTensorMath.c" #else #ifndef NAN #define NAN (nan(NULL)) #endif #ifdef _OPENMP #include <omp.h> #endif #define TH_OMP_OVERHEAD_THRESHOLD 100000 #ifdef _OPENMP #ifndef _WIN32 #define PRAGMA(P) _Pragma(#P) #else #define PRAGMA(P) __pragma(P) #endif #define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \ { \ int inOmp = omp_in_parallel(); \ ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR); \ PRAGMA(omp parallel if ((TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOmp))) \ { \ size_t num_threads = omp_get_num_threads(); \ size_t tid = omp_get_thread_num(); \ ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \ ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \ TH_TENSOR_offset + TH_TENSOR_size / num_threads; \ ptrdiff_t TENSOR##_len = TH_TENSOR_end - TH_TENSOR_offset; \ TYPE *TENSOR##_data = THTensor_(data)(TENSOR) + TH_TENSOR_offset; \ CODE \ } \ } #else #define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \ { \ TYPE *TENSOR##_data = THTensor_(data)(TENSOR); \ ptrdiff_t TENSOR##_len = THTensor_(nElement)(TENSOR); \ CODE \ } #endif #ifdef _OPENMP #define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \ { \ int inOmp = omp_in_parallel(); \ ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \ PRAGMA(omp parallel if ((TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOmp))) \ { \ size_t num_threads = omp_get_num_threads(); \ size_t tid = omp_get_thread_num(); \ ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \ ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? 
TH_TENSOR_size : \ TH_TENSOR_offset + TH_TENSOR_size / num_threads; \ ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \ TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \ TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \ CODE \ } \ } #else #define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \ { \ TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \ TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \ ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \ CODE \ } #endif #ifdef _OPENMP #define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \ { \ int inOmp = omp_in_parallel(); \ ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \ PRAGMA(omp parallel if ((TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOmp))) \ { \ size_t num_threads = omp_get_num_threads(); \ size_t tid = omp_get_thread_num(); \ ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \ ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? 
TH_TENSOR_size : \ TH_TENSOR_offset + TH_TENSOR_size / num_threads; \ ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \ TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \ TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \ TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3) + TH_TENSOR_offset; \ CODE \ } \ } #else #define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \ { \ TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \ TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \ TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3); \ ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \ CODE \ } #endif #define TH_CHECK_SAME_SIZE(TENSOR1, TENSOR2) \ { \ if(!THTensor_(isSameSizeAs)(TENSOR1, TENSOR2)) { \ THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \ THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \ THError("inconsistent tensor size, expected %s %s and %s %s to have the same size", \ #TENSOR1, T1buff.str, #TENSOR2, T2buff.str); \ } \ } // Used for `scatter` and `scatterAdd` // Assumes TENSOR1 is real // TENSOR2 is src // TENSOR3 is index // Tests: // 1. index->size[d] <= src->size[d] for all d // 2. 
index->size[d] <= real->size[d] for all d != dim #define TH_TENSOR_DIM_APPLY3_SIZE_SCATTER(TENSOR1, TENSOR2, TENSOR3, DIMENSION) \ { \ int shape_check_flag = 0; \ for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \ { \ int64_t TENSOR3##_dim_size = TENSOR3->size[TH_TENSOR_DIM_APPLY_i]; \ if (TH_TENSOR_DIM_APPLY_i != DIMENSION) { \ if (TENSOR3##_dim_size > TENSOR1->size[TH_TENSOR_DIM_APPLY_i]) { \ shape_check_flag = 1; \ break; \ } \ } \ if (TENSOR3##_dim_size > TENSOR2->size[TH_TENSOR_DIM_APPLY_i]) { \ shape_check_flag = 1; \ break; \ } \ } \ if (shape_check_flag == 1) { \ THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \ THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \ THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->nDimension); \ THError("Expected %s %s to be smaller size than %s %s and to be smaller than %s %s apart from dimension %d", \ #TENSOR3, T3buff.str, #TENSOR2, T2buff.str, #TENSOR1, T1buff.str, DIMENSION); \ } \ } static inline real THTensor_(powOne)(real x, real y) { #if defined(TH_REAL_IS_FLOAT) return powf(x, y); #elif defined(TH_REAL_IS_DOUBLE) return pow(x, y); #else THArgCheck(y >= 0, 1, "Integers to negative integer powers are not allowed"); real result = 1; while (y) { if (y & 1) { result *= x; } y /= 2; x *= x; } return result; #endif } void THTensor_(fill)(THTensor *r_, real value) { if (THTensor_(isContiguous)(r_) || THTensor_(isTransposed)(r_)) { TH_TENSOR_APPLY_CONTIG(real, r_, THVector_(fill)(r__data, value, r__len);); } else { TH_TENSOR_APPLY(real, r_, if (r__stride == 1) { THVector_(fill)(r__data, value, r__size); r__i = r__size; r__data += r__stride * r__size; break; } else { *r__data = value; } ); } } void THTensor_(zero)(THTensor *r_) { THTensor_(fill)(r_, 0); } void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value) { TH_TENSOR_APPLY2(real, tensor, unsigned char, mask, if (*mask_data > 1) { THFree(mask_counter); 
THFree(tensor_counter); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { *tensor_data = value; }); } void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src ) { THTensor *srct = THTensor_(newContiguous)(src); real *src_data = THTensor_(data)(srct); ptrdiff_t cntr = 0; ptrdiff_t nelem = THTensor_(nElement)(srct); if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask)) { THTensor_(free)(srct); THError("Number of elements of destination tensor != Number of elements in mask"); } TH_TENSOR_APPLY2(real, tensor, unsigned char, mask, if (*mask_data > 1) { THTensor_(free)(srct); THFree(mask_counter); THFree(tensor_counter); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { if (cntr == nelem) { THTensor_(free)(srct); THFree(mask_counter); THFree(tensor_counter); THError("Number of elements of src < number of ones in mask"); } *tensor_data = *src_data; src_data++; cntr++; }); THTensor_(free)(srct); } void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask) { ptrdiff_t numel = THByteTensor_sumall(mask); real *tensor_data; #ifdef DEBUG THAssert(numel <= LONG_MAX); #endif THTensor_(resize1d)(tensor,numel); tensor_data = THTensor_(data)(tensor); TH_TENSOR_APPLY2(real, src, unsigned char, mask, if (*mask_data > 1) { THFree(mask_counter); THFree(src_counter); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { *tensor_data = *src_data; tensor_data++; }); } // Finds non-zero elements of a tensor and returns their subscripts void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor) { ptrdiff_t numel = 0; int64_t *subscript_data; int64_t i = 0; int64_t dim; int64_t div = 1; #ifdef TH_REAL_IS_HALF #define IS_NONZERO(val) ((val.x & 0x7fff) != 0) #else #define IS_NONZERO(val) ((val)!=0) #endif /* First Pass to determine size of subscripts */ TH_TENSOR_APPLY(real, tensor, if IS_NONZERO(*tensor_data) { ++numel; }); #ifdef DEBUG 
THAssert(numel <= LONG_MAX); #endif THLongTensor_resize2d(subscript, numel, tensor->nDimension); /* Second pass populates subscripts */ subscript_data = THLongTensor_data(subscript); TH_TENSOR_APPLY(real, tensor, if IS_NONZERO(*tensor_data) { div = 1; for (dim = tensor->nDimension - 1; dim >= 0; dim--) { *(subscript_data + dim) = (i/div) % tensor->size[dim]; div *= tensor->size[dim]; } subscript_data += tensor->nDimension; } ++i;); } void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index) { ptrdiff_t i, numel; THLongStorage *newSize; THTensor *tSlice, *sSlice; int64_t *index_data; real *tensor_data, *src_data; THArgCheck(index->nDimension <= 1, 3, "Index is supposed to be an empty tensor or a vector"); THArgCheck(dim < src->nDimension, 4, "Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); THArgCheck(src->nDimension > 0, 2, "Source tensor is empty"); numel = THLongTensor_nElement(index); newSize = THLongStorage_newWithSize(src->nDimension); THLongStorage_rawCopy(newSize,src->size); #ifdef DEBUG THAssert(numel <= LONG_MAX); #endif newSize->data[dim] = numel; THTensor_(resize)(tensor,newSize,NULL); THLongStorage_free(newSize); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor)) { tensor_data = THTensor_(data)(tensor); src_data = THTensor_(data)(src); ptrdiff_t rowsize = THTensor_(nElement)(src) / src->size[0]; // check that the indices are within range int64_t max = src->size[0] - 1 + TH_INDEX_BASE; for (i=0; i<numel; i++) { if (index_data[i] < TH_INDEX_BASE || index_data[i] > max) { THLongTensor_free(index); THError("index out of range"); } } if (src->nDimension == 1) { #pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<numel; i++) tensor_data[i] = src_data[index_data[i] - TH_INDEX_BASE]; } else { #pragma omp parallel for if(numel*rowsize > TH_OMP_OVERHEAD_THRESHOLD) 
private(i) for (i=0; i<numel; i++) memcpy(tensor_data + i*rowsize, src_data + (index_data[i] - TH_INDEX_BASE)*rowsize, rowsize*sizeof(real)); } } else if (src->nDimension == 1) { for (i=0; i<numel; i++) THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i] - TH_INDEX_BASE)); } else { for (i=0; i<numel; i++) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); THTensor_(select)(tSlice, tensor, dim, i); THTensor_(select)(sSlice, src, dim, index_data[i] - TH_INDEX_BASE); THTensor_(copy)(tSlice, sSlice); THTensor_(free)(tSlice); THTensor_(free)(sSlice); } } THLongTensor_free(index); } void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { ptrdiff_t i, numel; THTensor *tSlice, *sSlice; int64_t *index_data; // Error checking for this function has moved to ATen!! numel = THLongTensor_nElement(index); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (tensor->nDimension > 1 ) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); for (i=0; i<numel; i++) { THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE); THTensor_(select)(sSlice, src, dim, i); THTensor_(copy)(tSlice, sSlice); } THTensor_(free)(tSlice); THTensor_(free)(sSlice); } else { for (i=0; i<numel; i++) { THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, THTensor_(get1d)(src,i)); } } THLongTensor_free(index); } static ptrdiff_t THTensor_(dataOffset)(THTensor* tensor, ptrdiff_t linearIndex) { int64_t *size = tensor->size; int64_t *stride = tensor->stride; int nDim = tensor->nDimension; ptrdiff_t dataOffset = 0; for (int i = nDim - 1; i >= 0; i--) { dataOffset += (linearIndex % size[i]) * stride[i]; linearIndex /= size[i]; } return dataOffset; } static void THTensor_(checkLinearIndex)(int64_t linearIndex, int64_t numel) { THArgCheck(linearIndex < numel && linearIndex >= -numel, 2, "out of range: %d out of %d", (int)linearIndex, (int)numel); } static int64_t THTensor_(wrapLinearIndex)(int64_t linearIndex, int64_t 
/* THTensor_(wrapLinearIndex): fold a negative linear index into [0, numel)
   (Python-style negative indexing). */
numel) { return linearIndex < 0 ? linearIndex + numel : linearIndex; } void THTensor_(take)(THTensor *r_, THTensor *src, THLongTensor *index) { THTensor_(resizeNd)(r_, index->nDimension, index->size, NULL); THTensor* dst = THTensor_(newContiguous)(r_); index = THLongTensor_newContiguous(index); int64_t* index_data = THLongTensor_data(index); ptrdiff_t srcElements = THTensor_(nElement)(src); real* src_data = THTensor_(data)(src); real* dst_data = THTensor_(data)(dst); ptrdiff_t nIndices = THLongTensor_nElement(index); int isContiguous = THTensor_(isContiguous)(src); // Exceptions must not be thrown across OpenMP parallel sections, so we // record the value of the invalid index and throw the exception after the // loop. int64_t invalidIdx = -1; ptrdiff_t i; #pragma omp parallel for if(nIndices > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i = 0; i < nIndices; i++) { int64_t idx = index_data[i]; if (idx < srcElements && idx >= -srcElements) { idx = THTensor_(wrapLinearIndex)(idx, srcElements); if (isContiguous) { dst_data[i] = src_data[idx]; } else { dst_data[i] = src_data[THTensor_(dataOffset)(src, idx)]; } } else { THAtomicCompareAndSwapLong(&invalidIdx, -1, idx); } } if (invalidIdx >= 0) { THTensor_(checkLinearIndex)(invalidIdx, srcElements); } THLongTensor_free(index); THTensor_(freeCopyTo)(dst, r_); } void THTensor_(put)(THTensor *tensor, THLongTensor *index, THTensor *src, int accumulate) { THArgCheck(THLongTensor_nElement(index) == THTensor_(nElement)(src), 3, "src should have the same number of elements as index"); index = THLongTensor_newContiguous(index); src = THTensor_(newContiguous)(src); real* data = THTensor_(data)(tensor); ptrdiff_t numel = THTensor_(nElement)(tensor); int is_contiguous = THTensor_(isContiguous)(tensor); TH_TENSOR_APPLY2(int64_t, index, real, src, THTensor_(checkLinearIndex)(*index_data, numel); int64_t linearIndex = THTensor_(wrapLinearIndex)(*index_data, numel); int64_t dataOffset = is_contiguous ?
/* Above: THTensor_(take) — gather src elements at (wrapped) linear indices into a
   result shaped like index; an out-of-range index is recorded once via atomic CAS
   (exceptions must not escape the OpenMP region) and reported after the loop.
   Below: tail of THTensor_(put) (linear-index scatter, optionally accumulating with
   +=); THTensor_(indexAdd), which adds slice i of src into tensor at index[i] along
   dim; and the head of THTensor_(indexFill). */
linearIndex : THTensor_(dataOffset)(tensor, linearIndex); if (accumulate) { data[dataOffset] += *src_data; } else { data[dataOffset] = *src_data; } ); THTensor_(free)(src); THLongTensor_free(index); } void THTensor_(indexAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { ptrdiff_t i, numel; THTensor *tSlice, *sSlice; int64_t *index_data; numel = THLongTensor_nElement(index); THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)"); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (tensor->nDimension > 1) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); for (i=0; i<numel; i++) { THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE); THTensor_(select)(sSlice, src, dim, i); THTensor_(cadd)(tSlice, tSlice, 1.0, sSlice); } THTensor_(free)(tSlice); THTensor_(free)(sSlice); } else { for (i=0; i<numel; i++) { THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, THTensor_(get1d)(src,i) + THTensor_(get1d)(tensor,index_data[i] - TH_INDEX_BASE)); } } THLongTensor_free(index); } void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val) { ptrdiff_t i, numel; THTensor *tSlice; int64_t *index_data; numel = THLongTensor_nElement(index); THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < tensor->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); for (i=0; i<numel; i++) { if (tensor->nDimension > 1) { tSlice = THTensor_(new)(); THTensor_(select)(tSlice, tensor,dim,index_data[i] - TH_INDEX_BASE); THTensor_(fill)(tSlice, val); THTensor_(free)(tSlice); } else { THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE,
/* Below: tail of THTensor_(indexFill) (fill the selected slice, or a single element
   for 1-D tensors, with val); THTensor_(gather) and THTensor_(scatter), which walk
   matching dimensions with TH_TENSOR_DIM_APPLY3 and validate every index against
   [TH_INDEX_BASE, size + TH_INDEX_BASE) before use, freeing the APPLY counter and
   raising THError on violation. */
val); } } THLongTensor_free(index); } void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index) { int64_t elems_per_row, i, idx; THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(src), 4, "Index tensor must have same dimensions as input tensor"); THArgCheck(dim >= 0 && dim < THTensor_(nDimension)(tensor), 3, "Index dimension is out of bounds"); THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 2, "Input tensor must have same dimensions as output tensor"); elems_per_row = THLongTensor_size(index, dim); TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < TH_INDEX_BASE || idx >= src_size + TH_INDEX_BASE) { THFree(TH_TENSOR_DIM_APPLY_counter); THError("Invalid index in gather"); } *(tensor_data + i*tensor_stride) = src_data[(idx - TH_INDEX_BASE) * src_stride]; }) } void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { int64_t elems_per_row, i, idx; THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, "Index tensor must have same dimensions as output tensor"); THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4, "Input tensor must have same dimensions as output tensor"); elems_per_row = THLongTensor_size(index, dim); TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim, TH_TENSOR_DIM_APPLY3_SIZE_SCATTER, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE) { THFree(TH_TENSOR_DIM_APPLY_counter); THError("Invalid index in scatter"); } tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = *(src_data + i*src_stride); }) } void THTensor_(scatterAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor
/* Below: THTensor_(scatterAdd) — identical traversal to scatter but accumulates with
   += instead of assigning; THTensor_(scatterFill) — scatter a constant val (note its
   error message still says "Invalid index in scatter"); and the head of
   THTensor_(dot). */
*src) { int64_t elems_per_row, i, idx; THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, "Index tensor must have same dimensions as output tensor"); THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4, "Input tensor must have same dimensions as output tensor"); elems_per_row = THLongTensor_size(index, dim); TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim, TH_TENSOR_DIM_APPLY3_SIZE_SCATTER, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE) { THFree(TH_TENSOR_DIM_APPLY_counter); THError("Invalid index in scatterAdd"); } tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] += *(src_data + i*src_stride); }) } void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val) { int64_t elems_per_row, i, idx; THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, "Index tensor must have same dimensions as output tensor"); elems_per_row = THLongTensor_size(index, dim); TH_TENSOR_DIM_APPLY2(real, tensor, int64_t, index, dim, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE) { THFree(TH_TENSOR_DIM_APPLY_counter); THError("Invalid index in scatter"); } tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = val; }) } accreal THTensor_(dot)(THTensor *tensor, THTensor *src) { accreal sum = 0; /* we use a trick here. careful with that. */ TH_TENSOR_APPLY2(real, tensor, real, src, int64_t sz = (tensor_size-tensor_i < src_size-src_i ?
/* Below: tail of THTensor_(dot) — hands each largest contiguous run to THBlas_(dot),
   manually advancing the APPLY2 cursors and using `break` to skip the macro's own
   per-element stepping (the "trick" the original comment warns about); the th_isnan /
   th_isnan_break helpers (no-ops for integer types); minall/maxall, whose
   `!(value >= theMin)` / `!(value <= theMax)` comparisons deliberately also catch NaN
   and then stop scanning; medianall (clone + quickselect of element (numel-1)>>1,
   i.e. the lower median); and the head of sumall (OpenMP reduction unless already
   inside a parallel region). */
tensor_size-tensor_i : src_size-src_i); sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride); tensor_i += sz; src_i += sz; tensor_data += sz*tensor_stride; src_data += sz*src_stride; break;); return sum; } #undef th_isnan #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) #define th_isnan(val) \ (std::isnan(val)) #else #define th_isnan(val) (0) #endif #undef th_isnan_break #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) #define th_isnan_break(val) \ if (std::isnan(val)) break; #else #define th_isnan_break(val) #endif real THTensor_(minall)(THTensor *tensor) { real theMin; real value; THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension"); theMin = THTensor_(data)(tensor)[0]; TH_TENSOR_APPLY(real, tensor, value = *tensor_data; /* This is not the same as value<theMin in the case of NaNs */ if(!(value >= theMin)) { theMin = value; th_isnan_break(value) }); return theMin; } real THTensor_(maxall)(THTensor *tensor) { real theMax; real value; THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension"); theMax = THTensor_(data)(tensor)[0]; TH_TENSOR_APPLY(real, tensor, value = *tensor_data; /* This is not the same as value>theMax in the case of NaNs */ if(!(value <= theMax)) { theMax = value; th_isnan_break(value) }); return theMax; } static void THTensor_(quickselectnoidx)(real *arr, int64_t k, int64_t elements, int64_t stride); real THTensor_(medianall)(THTensor *tensor) { THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension"); real theMedian; ptrdiff_t numel; int64_t k; THTensor *temp_; real *temp__data; numel = THTensor_(nElement)(tensor); k = (numel-1) >> 1; temp_ = THTensor_(newClone)(tensor); temp__data = THTensor_(data)(temp_); THTensor_(quickselectnoidx)(temp__data, k, numel, 1); theMedian = temp__data[k]; THTensor_(free)(temp_); return theMedian; } accreal THTensor_(sumall)(THTensor *tensor) { accreal sum = 0; int serial_path = 0; #ifdef _OPENMP int inOMP = omp_in_parallel();
/* Below: tail of sumall; prodall (same shape, `*:prod` reduction); and the scalar
   element-wise family add/sub/add_scaled/sub_scaled and the head of mul — contiguous
   operands are dispatched to THVector_(adds)/(muls) kernels, otherwise
   TH_TENSOR_APPLY2 runs either OMP-parallel or serially (serial when nested in an
   existing parallel region or when OpenMP is unavailable). */
if(inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY_REDUCTION_OMP(real, tensor, +:sum, sum += *tensor_data;); } #else serial_path = 1; #endif if (serial_path) { TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;); } return sum; } accreal THTensor_(prodall)(THTensor *tensor) { accreal prod = 1; int serial_path = 0; #ifdef _OPENMP int inOMP = omp_in_parallel(); if(inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY_REDUCTION_OMP(real, tensor, *:prod, prod *= *tensor_data;); } #else serial_path = 1; #endif if (serial_path) { TH_TENSOR_APPLY(real, tensor, prod *= *tensor_data;); } return prod; } void THTensor_(add)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(adds)(r__data, t_data, value, r__len);); } else { #ifdef _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data + value;) } #else serial_path = 1; #endif } if (serial_path) { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;); } } void THTensor_(sub)(THTensor *r_, THTensor *t, real value) { THTensor_(add)(r_, t, -value); } void THTensor_(add_scaled)(THTensor *r_, THTensor *t, real value, real alpha) { THTensor_(add)(r_, t, value * alpha); } void THTensor_(sub_scaled)(THTensor *r_, THTensor *t, real value, real alpha) { THTensor_(add)(r_, t, -value * alpha); } void THTensor_(mul)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(muls)(r__data, t_data, value, r__len);); } else { #ifdef _OPENMP int inOMP
/* Below: tail of THTensor_(mul); THTensor_(div) (same three-path dispatch); and the
   head of THTensor_(lshift) — for float/double a left shift is implemented as
   multiplication by 2^value, half tensors are rejected, and integer types shift after
   casting to ureal (byte keeps real) so the shift operates on an unsigned value. */
= omp_in_parallel(); if (inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data * value;) } #else serial_path = 1; #endif } if (serial_path) { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;); } } void THTensor_(div)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(divs)(r__data, t_data, value, r__len);); } else { #ifdef _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data / value;) } #else serial_path = 1; #endif } if (serial_path) { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;); } } void THTensor_(lshift)(THTensor *r_, THTensor *t, real value) { #if defined(TH_REAL_IS_FLOAT) return THTensor_(mul)(r_, t, powf(2, value)); #elif defined(TH_REAL_IS_DOUBLE) return THTensor_(mul)(r_, t, pow(2, value)); #elif defined(TH_REAL_IS_HALF) return THError("lshift is not supported for torch.HalfTensor"); #else THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; i<r_Size; i++) { #if defined(TH_REAL_IS_BYTE) rp[i] = ((real) tp[i]) << value; #else rp[i] = ((ureal) tp[i]) << value; #endif } } else { #ifdef _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { #if defined(TH_REAL_IS_BYTE) TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((real) *t_data) << value););
/* Below: tail of lshift; THTensor_(rshift), the mirror image (division by 2^value for
   float types, >> via ureal for integer types); and the head of THTensor_(fmod). */
#else TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((ureal) *t_data) << value);); #endif } #else serial_path = 1; #endif } if (serial_path) { #if defined(TH_REAL_IS_BYTE) TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) << value);); #else TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((ureal) *t_data) << value);); #endif } #endif } void THTensor_(rshift)(THTensor *r_, THTensor *t, real value) { #if defined(TH_REAL_IS_FLOAT) return THTensor_(div)(r_, t, powf(2, value)); #elif defined(TH_REAL_IS_DOUBLE) return THTensor_(div)(r_, t, pow(2, value)); #elif defined(TH_REAL_IS_HALF) return THError("rshift is not supported for torch.HalfTensor"); #else THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; i<r_Size; i++) { #if defined(TH_REAL_IS_BYTE) rp[i] = ((real) tp[i]) >> value; #else rp[i] = ((ureal) tp[i]) >> value; #endif } } else { #ifdef _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { #if defined(TH_REAL_IS_BYTE) TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((real) *t_data) >> value);); #else TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (((ureal) *t_data) >> value);); #endif } #else serial_path = 1; #endif } if (serial_path) { #if defined(TH_REAL_IS_BYTE) TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) >> value);); #else TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((ureal) *t_data) >> value);); #endif } #endif } void THTensor_(fmod)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int r_Contig = THTensor_(isContiguous)(r_); int
/* Below: tail of THTensor_(fmod) — C-style truncated modulo, fmod() for float types
   and % for integers; has_different_sign; and the head of THTensor_(remainder) —
   floored modulo, where a zero divisor yields NAN for float types. */
tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<r_Size; i++) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) rp[i] = fmod(tp[i], value); #else rp[i] = tp[i] % value; #endif } } else { #ifdef _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = fmod(*t_data, value);); #else TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (*t_data % value);); #endif } #else serial_path = 1; #endif } if (serial_path) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) TH_TENSOR_APPLY2(real, r_, real, t, *r__data = fmod(*t_data, value);); #else TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data % value);); #endif } } static inline bool has_different_sign(real a, real b) { return (a < 0) != (b < 0); } void THTensor_(remainder)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<r_Size; i++) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) rp[i] = (value == 0)?
/* NOTE(review): the integer remainder path below computes `t % value` and then adds
   `value` whenever has_different_sign(result, value) is true. When the C modulo is
   exactly 0 and value < 0, has_different_sign(0, value) is (false != true) == true,
   so a correct result of 0 is turned into `value` (e.g. remainder(6, -3) -> -3
   instead of 0). A fix must also require the intermediate result to be nonzero; it
   has to be applied consistently in all three copies (contig/OMP/serial), which span
   these mangled lines.
   Below: tail of remainder, and the head of THTensor_(bitand) (rejected for
   float/double/half tensor types). */
NAN : tp[i] - value * floor(tp[i] / value); #else // There is no NAN for integers rp[i] = tp[i] % value; if (has_different_sign(rp[i], value)) rp[i] += value; #endif } } else { #ifdef _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (value == 0)? NAN : *t_data - value * floor(*t_data / value);); #else // There is no NAN for integers TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data % value; if (has_different_sign(*r__data, value)) *r__data += value;); #endif } #else serial_path = 1; #endif } if (serial_path) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (value == 0)? NAN : *t_data - value * floor(*t_data / value);); #else // There is no NAN for integers TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data % value; if (has_different_sign(*r__data, value)) *r__data += value;); #endif } } void THTensor_(bitand)(THTensor *r_, THTensor *t, real value) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF) (void)r_; (void)t; (void)value; return THError("bitand is only supported for integer type tensors"); #else THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int r_Contig = THTensor_(isContiguous)(r_); int serial_path = 0; int tContig = THTensor_(isContiguous)(t); if (r_Contig && tContig) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; i<r_Size; i++) { rp[i] = tp[i] & value; } } else { #ifdef _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data & value;); } #else serial_path = 1; #endif } if (serial_path) {
/* Below: tail of THTensor_(bitand)'s serial path; THTensor_(bitor) and
   THTensor_(bitxor), which follow the identical three-path pattern (contig + OMP
   loop / APPLY2_OMP / serial APPLY2) and are rejected for float/double/half tensor
   types; then the head of THTensor_(clamp). */
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data & value;); } #endif } void THTensor_(bitor)(THTensor *r_, THTensor *t, real value) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF) (void)r_; (void)t; (void)value; return THError("bitor is only supported for integer type tensors"); #else THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; i<r_Size; i++) { rp[i] = tp[i] | value; } } else { #ifdef _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data | value;); } #else serial_path = 1; #endif } if (serial_path) { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data | value;); } #endif } void THTensor_(bitxor)(THTensor *r_, THTensor *t, real value) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF) (void)r_; (void)t; (void)value; return THError("bitxor is only supported for integer type tensors"); #else THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; i<r_Size; i++) { rp[i] = tp[i] ^ value; } } else { #ifdef _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = *t_data ^ value;); } #else serial_path = 1; #endif } if (serial_path) {
/* Below: tail of bitxor, then THTensor_(clamp) — element-wise clamp of t into
   [min_value, max_value] written as nested ternaries, with the usual three dispatch
   paths. */
TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data ^ value;); } #endif } void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int serial_path = 0; if (r_Contig && tContig) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); /* real t_val; */ int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<r_Size; i++) rp[i] = (tp[i] < min_value) ? min_value : (tp[i] > max_value ? max_value : tp[i]); } else { #ifdef _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data);); } #else serial_path = 1; #endif } if (serial_path) { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ?
/* Below: tail of clamp; THTensor_(cadd) — r_ = t + value*src, using BLAS axpy when
   r_ == t and all operands are contiguous/equally sized; csub delegates to cadd with
   -value; THTensor_(cmul) head (element-wise product via THVector_(cmul) when
   contiguous).
   NOTE(review): cadd/cmul (and cpow/cdiv/clshift below) guard the OpenMP branch with
   `#if _OPENMP` whereas earlier functions in this file use `#ifdef _OPENMP`;
   equivalent in practice (_OPENMP expands to a nonzero yyyymm value when defined)
   but inconsistent. */
max_value : *t_data);); } } void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int64_t srcSize = THTensor_(nElement)(src); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int srcContig = THTensor_(isContiguous)(src); int serial_path = 0; if (srcSize == r_Size){ if (r_Contig && tContig && srcContig) { if(r_ == t) { THBlas_(axpy)(THTensor_(nElement)(t), value, THTensor_(data)(src), 1, THTensor_(data)(r_), 1); } else { TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cadd)(r__data, t_data, src_data, value, r__len);); } } else { #if _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;); } #else serial_path = 1; #endif } } else { serial_path = 1; } if (serial_path) { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;); } } void THTensor_(csub)(THTensor *r_, THTensor *t, real value, THTensor *src) { THTensor_(cadd)(r_, t, -value, src); } void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int64_t srcSize = THTensor_(nElement)(src); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int srcContig = THTensor_(isContiguous)(src); int serial_path = 0; if (srcSize == r_Size){ if (r_Contig && tContig && srcContig) { TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cmul)(r__data, t_data, src_data, r__len);); } else { #if _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data * *src_data;); } #else serial_path = 1; #endif } } else { serial_path = 1; } if (serial_path) {
/* Below: tail of cmul; THTensor_(pow) — special-cases exponents 1 (copy), 2 (cmul),
   3 (inline cube), and for float/double also 0.5 (sqrt), -0.5 (rsqrt), -1 (cinv),
   -2 (1/x^2), falling back to pow()/THTensor_(powOne); then the head of
   THTensor_(cpow) (element-wise power via powOne). */
TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;); } } void THTensor_(pow)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); if(value == 1){ THTensor_(copy)(r_, t); } else if(value == 2){ THTensor_(cmul)(r_, t, t); } else if(value == 3){ TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * *t_data * *t_data;); } #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) #if defined (TH_REAL_IS_FLOAT) #define TH_MATH_NAME(fn) fn##f #else #define TH_MATH_NAME(fn) fn #endif else if(value == 0.5){ THTensor_(sqrt)(r_, t); } else if(value == -0.5){ THTensor_(rsqrt)(r_, t); } else if(value == -1){ THTensor_(cinv)(r_, t); } else if(value == -2){ TH_TENSOR_APPLY2(real, r_, real, t, *r__data = TH_MATH_NAME(1.0) / (*t_data * *t_data);); } else{ TH_TENSOR_APPLY2(real, r_, real, t, *r__data = TH_MATH_NAME(pow)(*t_data, value);); } #undef TH_MATH_NAME #else else { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = THTensor_(powOne)(*t_data, value);); } #endif } void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int64_t srcSize = THTensor_(nElement)(src); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int srcContig = THTensor_(isContiguous)(src); int serial_path = 0; if (srcSize == r_Size){ if (r_Contig && tContig && srcContig) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<r_Size; i++) rp[i] = THTensor_(powOne)(tp[i], sp[i]); } else { #if _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = THTensor_(powOne)(*t_data, *src_data);); } #else serial_path = 1; #endif } } else { serial_path = 1; } if (serial_path) { TH_TENSOR_APPLY3(real, r_,
real, t, real, src, *r__data = THTensor_(powOne)(*t_data, *src_data);); } } void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int64_t srcSize = THTensor_(nElement)(src); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int srcContig = THTensor_(isContiguous)(src); int serial_path = 0; if (srcSize == r_Size){ if (r_Contig && tContig && srcContig) { TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cdiv)(r__data, t_data, src_data, r__len);); } else { #if _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data / *src_data;); } #else serial_path = 1; #endif } } else { serial_path = 1; } if (serial_path) { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;); } } void THTensor_(clshift)(THTensor *r_, THTensor *t, THTensor *src) { #if defined(TH_REAL_IS_HALF) return THError("clshift is not supported for torch.HalfTensor"); #endif THTensor_(resizeAs)(r_, t); int64_t r_Size = THTensor_(nElement)(r_); int64_t srcSize = THTensor_(nElement)(src); int r_Contig = THTensor_(isContiguous)(r_); int tContig = THTensor_(isContiguous)(t); int srcContig = THTensor_(isContiguous)(src); int serial_path = 0; if (srcSize == r_Size){ if (r_Contig && tContig && srcContig) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); int64_t i; #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<r_Size; i++) { #if defined(TH_REAL_IS_FLOAT) rp[i] = tp[i] * powf(2, sp[i]); #elif defined(TH_REAL_IS_DOUBLE) rp[i] = tp[i] * pow(2, sp[i]); #elif defined(TH_REAL_IS_BYTE) rp[i] = ((real) tp[i]) << sp[i]; #else rp[i] = ((ureal) tp[i]) << sp[i]; #endif } } else { #if _OPENMP int inOMP = omp_in_parallel(); if (inOMP) { serial_path = 1; } else { 
/* (continuation of THTensor_(clshift): OpenMP apply path, then serial path) */
#if defined(TH_REAL_IS_FLOAT)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data * powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data * pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((real)*t_data) << *src_data;);
#else
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((ureal)*t_data) << *src_data;);
#endif
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    /* serial fallback: same per-type shift expressions as above */
#if defined(TH_REAL_IS_FLOAT)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) << *src_data;);
#else
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((ureal)*t_data) << *src_data;);
#endif
  }
}

/* r_ = t >> src, element-wise right shift.
 * Mirror of clshift: float/double divide by 2^src; byte shifts as real;
 * other integer types shift the value cast to ureal. Not supported for
 * HalfTensor. */
void THTensor_(crshift)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_HALF)
  return THError("crshift is not supported for torch.HalfTensor");
#endif
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT)
        rp[i] = tp[i] / powf(2, sp[i]);
#elif defined(TH_REAL_IS_DOUBLE)
        rp[i] = tp[i] / pow(2, sp[i]);
#elif defined(TH_REAL_IS_BYTE)
        rp[i] = ((real) tp[i]) >> sp[i];
#else
        rp[i] = ((ureal) tp[i]) >> sp[i];
#endif
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
#if defined(TH_REAL_IS_FLOAT)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;);
#else
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = ((ureal)*t_data) >> *src_data;);
#endif
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
#if defined(TH_REAL_IS_FLOAT)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data););
#elif defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data););
#elif defined(TH_REAL_IS_BYTE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;);
#else
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((ureal)*t_data) >> *src_data;);
#endif
  }
}

/* r_ = fmod(t, src), element-wise, with C truncation semantics:
 * the result has the sign of the dividend (float/double via fmod(),
 * integer types via the % operator). */
void THTensor_(cfmod)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for
(i=0; i<r_Size; i++) {
/* (continuation of THTensor_(cfmod): contiguous parallel loop body) */
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
        rp[i] = fmod(tp[i], sp[i]);
#else
        rp[i] = tp[i] % sp[i];
#endif
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig,real, r_, real, t, real, src, *r__data = fmod(*t_data, *src_data););
#else
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = (*t_data % *src_data););
#endif
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = fmod(*t_data, *src_data););
#else
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*t_data % *src_data););
#endif
  }
}

/* r_ = remainder(t, src), element-wise, with floored-division semantics:
 * the result has the sign of the divisor. For float/double this is
 * t - src*floor(t/src) (NAN when src == 0); for integer types the C
 * truncating % is corrected by adding src when the signs disagree. */
void THTensor_(cremainder)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
        rp[i] = (sp[i] == 0)? NAN : tp[i] - sp[i] * floor(tp[i] / sp[i]);
#else
        // There is no NAN for integers
        rp[i] = tp[i] % sp[i];
        if (rp[i] * sp[i] < 0)
          rp[i] += sp[i];
#endif
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = (*src_data == 0)? NAN : *t_data - *src_data * floor(*t_data / *src_data););
#else
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data % *src_data; if (*r__data * *src_data < 0) *r__data += *src_data;);
#endif
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*src_data == 0)? NAN : *t_data - *src_data * floor(*t_data / *src_data););
#else
    // There is no NAN for integers
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data % *src_data; if (*r__data * *src_data < 0) *r__data += *src_data;);
#endif
  }
}

/* r_ = t & src, element-wise bitwise AND. Integer tensor types only;
 * float/double/half raise an error. */
void THTensor_(cbitand)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  (void)r_;
  (void)t;
  (void)src;
  return THError("cbitand is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++) {
        rp[i] = tp[i] & sp[i];
      }
    } else {
#if _OPENMP
      int inOMP =
omp_in_parallel();
/* (continuation of THTensor_(cbitand): OpenMP fallback + serial path) */
      if (inOMP) {
        serial_path = 1;
      } else {
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data & *src_data;);
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data & *src_data;);
  }
#endif
}

/* r_ = t | src, element-wise bitwise OR. Integer tensor types only;
 * float/double/half raise an error. */
void THTensor_(cbitor)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  (void)r_;
  (void)t;
  (void)src;
  return THError("cbitor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++) {
        rp[i] = tp[i] | sp[i];
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data | *src_data;);
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data | *src_data;);
  }
#endif
}

/* r_ = t ^ src, element-wise bitwise XOR. Integer tensor types only;
 * float/double/half raise an error. */
void THTensor_(cbitxor)(THTensor *r_, THTensor *t, THTensor *src)
{
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF)
  (void)r_;
  (void)t;
  (void)src;
  return THError("cbitxor is only supported for integer type tensors");
#else
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t srcSize = THTensor_(nElement)(src);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int srcContig = THTensor_(isContiguous)(src);
  int serial_path = 0;
  if (srcSize == r_Size){
    if (r_Contig && tContig && srcContig) {
      real *tp = THTensor_(data)(t);
      real *sp = THTensor_(data)(src);
      real *rp = THTensor_(data)(r_);
      int64_t i;
      #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
      for (i=0; i<r_Size; i++) {
        rp[i] = tp[i] ^ sp[i];
      }
    } else {
#if _OPENMP
      int inOMP = omp_in_parallel();
      if (inOMP) {
        serial_path = 1;
      } else {
        TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, tContig, srcContig, real, r_, real, t, real, src, *r__data = *t_data ^ *src_data;);
      }
#else
      serial_path = 1;
#endif
    }
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data ^ *src_data;);
  }
#endif
}

/* r_ = value ^ t, element-wise: scalar base raised to each tensor element,
 * delegating the per-element power to THTensor_(powOne). */
void THTensor_(tpow)(THTensor *r_, real value, THTensor *t)
{
  THTensor_(resizeAs)(r_, t);
  int64_t r_Size = THTensor_(nElement)(r_);
  int r_Contig = THTensor_(isContiguous)(r_);
  int tContig = THTensor_(isContiguous)(t);
  int serial_path = 0;
  if (r_Contig && tContig) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    int64_t i;
    #pragma omp parallel for if(r_Size > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<r_Size; i++)
      rp[i] = THTensor_(powOne)(value, tp[i]);
  } else {
#if _OPENMP
    int inOMP = omp_in_parallel();
    if (inOMP) {
      serial_path = 1;
    } else {
      TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = THTensor_(powOne)(value, *t_data););
    }
#else
    serial_path = 1;
#endif
  }
  if (serial_path) {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = THTensor_(powOne)(value, *t_data););
  }
}

/* r_ = t + value * src1 * src2, element-wise. When r_ != t, t is first
 * copied into r_; the apply loops then accumulate into r_ in place. */
void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
  if(r_ != t) {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t src1Size = THTensor_(nElement)(src1);
  int64_t src2Size = THTensor_(nElement)(src2);
  int r_Contig = THTensor_(isContiguous)(r_);
  int src1Contig = THTensor_(isContiguous)(src1);
int src2Contig = THTensor_(isContiguous)(src2);
/* (continuation of THTensor_(addcmul)) */
  int serial_path = 0;
  if( (src1Size == src2Size) && (src1Size == r_Size) ){
#if _OPENMP
    int inOMP = omp_in_parallel();
    if (inOMP) {
      serial_path = 1;
    } else {
      TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, src1Contig, src2Contig, real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
    }
#else
    serial_path = 1;
#endif
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
  }
}

/* r_ = t + value * src1 / src2, element-wise. Same structure as addcmul,
 * with division instead of multiplication. */
void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
  if(r_ != t) {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }
  int64_t r_Size = THTensor_(nElement)(r_);
  int64_t src1Size = THTensor_(nElement)(src1);
  int64_t src2Size = THTensor_(nElement)(src2);
  int r_Contig = THTensor_(isContiguous)(r_);
  int src1Contig = THTensor_(isContiguous)(src1);
  int src2Contig = THTensor_(isContiguous)(src2);
  int serial_path = 0;
  if( (src1Size == src2Size) && (src1Size == r_Size) ){
#if _OPENMP
    int inOMP = omp_in_parallel();
    if (inOMP) {
      serial_path = 1;
    } else {
      TH_TENSOR_APPLY3_OMP(r_Size, r_Contig, src1Contig, src2Contig, real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
    }
#else
    serial_path = 1;
#endif
  } else {
    serial_path = 1;
  }
  if (serial_path) {
    TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
  }
}

/* r_ = beta * t + alpha * (mat @ vec): matrix-vector multiply via BLAS gemv.
 * Picks 'n' or 't' depending on which of mat's strides is unit so BLAS can
 * consume the data in place; otherwise makes a contiguous copy of mat. */
void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec)
{
  if( (mat->nDimension != 2) || (vec->nDimension != 1) )
    THError("matrix and vector expected, got %dD, %dD", mat->nDimension, vec->nDimension);

  if( mat->size[1] != vec->size[0] ) {
    THDescBuff bm = THTensor_(sizeDesc)(mat);
    THDescBuff bv = THTensor_(sizeDesc)(vec);
    THError("size mismatch, %s, %s", bm.str, bv.str);
  }

  if(t->nDimension != 1)
    THError("vector expected, got t: %dD", t->nDimension);

  if(t->size[0] != mat->size[0]) {
    THDescBuff bt = THTensor_(sizeDesc)(t);
    THDescBuff bm = THTensor_(sizeDesc)(mat);
    THError("size mismatch, t: %s, mat: %s", bt.str, bm.str);
  }

  if(r_ != t) {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }

  // n == 1 || lda >= max(1, m)
#define LDA_COND(M, N, LDA) ((N) == 1 || (LDA) >= THMax(1, (M)))

  if(mat->stride[0] == 1 && LDA_COND(mat->size[0], mat->size[1], mat->stride[1])) {
    /* column-major layout: use gemv directly, no transpose */
    THBlas_(gemv)('n', mat->size[0], mat->size[1], alpha, THTensor_(data)(mat), mat->stride[1], THTensor_(data)(vec), vec->stride[0], beta, THTensor_(data)(r_), r_->stride[0]);
  }
  else if(mat->stride[1] == 1 && LDA_COND(mat->size[1], mat->size[0], mat->stride[0])) {
    /* row-major layout: ask BLAS to transpose */
    THBlas_(gemv)('t', mat->size[1], mat->size[0], alpha, THTensor_(data)(mat), mat->stride[0], THTensor_(data)(vec), vec->stride[0], beta, THTensor_(data)(r_), r_->stride[0]);
  }
  else {
    /* neither stride is unit: copy mat to contiguous storage first */
    THTensor *cmat = THTensor_(newContiguous)(mat);
    THBlas_(gemv)('t', mat->size[1], mat->size[0], alpha, THTensor_(data)(cmat), cmat->stride[0], THTensor_(data)(vec), vec->stride[0], beta, THTensor_(data)(r_), r_->stride[0]);
    THTensor_(free)(cmat);
  }

#undef LDA_COND
}

/* r_[i][j] = gain * squared L2 distance between row i of m1 and row j of m2,
 * after flattening each input to 2D (N x innerDim). Inputs are copied to
 * contiguous storage; the copies are freed before returning. */
void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain)
{
  int64_t N1 = m1->size[0];
  int64_t N2 = m2->size[0];
  int64_t dim;
  real *m1_p;
  real *m2_p;
  real *r_p;
  int64_t i;

  THTensor_(resize2d)(r_, N1, N2);

  m1 = THTensor_(newContiguous)(m1);
  m2 = THTensor_(newContiguous)(m2);

  THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1);
  THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2);

  dim = m1->size[1];
  THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim");

  m1_p = THTensor_(data)(m1);
  m2_p = THTensor_(data)(m2);
  r_p = THTensor_(data)(r_);

  #pragma omp parallel for private(i)
  for (i=0; i<N1; i++) {
    int64_t j,k;
    for (j=0; j<N2; j++) {
      real sum = 0;
      for (k=0; k<dim; k++) {
        real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ];
        sum += term*term;
      }
      r_p[ i*N2 + j ] = gain * sum;
    }
  }

  THTensor_(free)(m1);
  THTensor_(free)(m2);
}

/* r_ = beta * t + alpha * (m1 @ m2): matrix-matrix multiply via BLAS gemm.
 * (signature continues on the next chunk line) */
void THTensor_(addmm)(THTensor
*r_, real beta, THTensor *t, real alpha, THTensor *m1, THTensor *m2)
{
  char transpose_r, transpose_m1, transpose_m2;
  THTensor *r__, *m1_, *m2_;
  int free_m1 = 0;
  int free_m2 = 0;

  if( (m1->nDimension != 2) || (m2->nDimension != 2))
    THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension);

  if(m1->size[1] != m2->size[0]) {
    THDescBuff bm1 = THTensor_(sizeDesc)(m1);
    THDescBuff bm2 = THTensor_(sizeDesc)(m2);
    THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str);
  }

  if( t->nDimension != 2 )
    THError("matrix expected, got %dD tensor for t", t->nDimension);

  if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) {
    THDescBuff bt = THTensor_(sizeDesc)(t);
    THDescBuff bm1 = THTensor_(sizeDesc)(m1);
    THDescBuff bm2 = THTensor_(sizeDesc)(m2);
    THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str);
  }

  if(t != r_) {
    THTensor_(resizeAs)(r_, t);
    /* with beta == 0 the gemm output overwrites r_ entirely, so skip the copy */
    if (beta != 0.0) {
      THTensor_(copy)(r_, t);
    }
  }

  // n == 1 || ldc >= max(1, m)
#define LDC_COND(M, N, LDC) ((N) == 1 || (LDC) >= THMax(1, M))

  /* r_ : choose an orientation BLAS can write to directly. If r_ is
   * column-major use it as-is; if row-major, compute the transposed
   * product (note m1/m2 are swapped for that case); otherwise clone r_
   * into FORTRAN-contiguous scratch. */
  if(r_->stride[0] == 1 && LDC_COND(r_->size[0], r_->size[1], r_->stride[1]))
  {
    transpose_r = 'n';
    r__ = r_;
  }
  else if(r_->stride[1] == 1 && LDC_COND(r_->size[1], r_->size[0], r_->stride[0]))
  {
    THTensor *swap = m2;
    m2 = m1;
    m1 = swap;
    transpose_r = 't';
    r__ = r_;
  }
  else
  {
    transpose_r = 'n';
    // make r__ FORTRAN contiguous
    THTensor *transp_r_ = THTensor_(newTranspose)(r_, 0, 1);
    r__ = THTensor_(newClone)(transp_r_);
    THTensor_(free)(transp_r_);
    THTensor_(transpose)(r__, NULL, 0, 1);
  }

#undef LDC_COND

  int64_t m = r__->size[(transpose_r == 'n' ? 0 : 1)];
  int64_t n = r__->size[(transpose_r == 'n' ? 1 : 0)];
  int64_t k = m1->size[(transpose_r == 'n' ? 1 : 0)];
  int64_t ldr__ = r__->stride[(transpose_r == 'n' ? 1 : 0)];

  /* m1 */
  /* Need ldm1_ >= max(1, (transpose_m1 == 'n' ? m : k)) */
  if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m1->stride[(transpose_r == 'n' ? 1 : 0)] >= THMax(1, m))
  {
    transpose_m1 = 'n';
    m1_ = m1;
  }
  else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m1->stride[(transpose_r == 'n' ? 0 : 1)] >= THMax(1, k))
  {
    transpose_m1 = 't';
    m1_ = m1;
  }
  else
  {
    transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
    m1_ = THTensor_(newContiguous)(m1);
    free_m1 = 1;
  }

  /* m2 */
  /* Need ldm2_ >= max(1, (transpose_m2 == 'n' ? k : n)) */
  if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m2->stride[(transpose_r == 'n' ? 1 : 0)] >= THMax(1, k))
  {
    transpose_m2 = 'n';
    m2_ = m2;
  }
  else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m2->stride[(transpose_r == 'n' ? 0 : 1)] >= THMax(1, n))
  {
    transpose_m2 = 't';
    m2_ = m2;
  }
  else
  {
    transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
    m2_ = THTensor_(newContiguous)(m2);
    free_m2 = 1;
  }

  int64_t ldm1_ = (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]);
  int64_t ldm2_ = (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]);

#pragma omp critical(blasgemm)
  /* do the operation */
  THBlas_(gemm)(transpose_m1, transpose_m2, m, n, k, alpha, THTensor_(data)(m1_), ldm1_, THTensor_(data)(m2_), ldm2_, beta, THTensor_(data)(r__), ldr__);

  /* free intermediate variables */
  if(free_m1)
    THTensor_(free)(m1_);
  if(free_m2)
    THTensor_(free)(m2_);
  if(r__ != r_)
    THTensor_(freeCopyTo)(r__, r_);
}

/* r_ = beta * t + alpha * (vec1 (outer) vec2): rank-1 update via BLAS ger.
 * beta scaling of r_ is applied up front (zero or mul) because ger only
 * accumulates; the orientation handed to ger depends on which of r_'s
 * strides is unit, falling back to a contiguous clone. */
void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2)
{
  if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
    THError("vector and vector expected, got %dD, %dD tensors", vec1->nDimension, vec2->nDimension);

  if(t->nDimension != 2)
    THError("expected matrix, got %dD tensor for t", t->nDimension);

  if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) {
    THDescBuff bt = THTensor_(sizeDesc)(t);
    THDescBuff bv1 = THTensor_(sizeDesc)(vec1);
    THDescBuff bv2 = THTensor_(sizeDesc)(vec2);
    THError("size mismatch, t: %s, vec1: %s, vec2: %s", bt.str, bv1.str, bv2.str);
  }

  if(r_ != t) {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }

  if(beta == 0) {
    THTensor_(zero)(r_);
  }
  else if(beta != 1)
    THTensor_(mul)(r_, r_, beta);

  // n == 1 || lda >= max(1, m)
#define LDA_COND(M, N, LDA) ((N) == 1 || (LDA) >= THMax(1, (M)))

  if(r_->stride[0] == 1 && LDA_COND(vec1->size[0], vec2->size[0], r_->stride[1])) {
    THBlas_(ger)(vec1->size[0], vec2->size[0], alpha, THTensor_(data)(vec1), vec1->stride[0], THTensor_(data)(vec2), vec2->stride[0], THTensor_(data)(r_), r_->stride[1]);
  }
  else if(r_->stride[1] == 1 && LDA_COND(vec2->size[0], vec1->size[0], r_->stride[0])) {
    /* row-major r_: compute the transposed outer product (vec2, vec1) */
    THBlas_(ger)(vec2->size[0], vec1->size[0], alpha, THTensor_(data)(vec2), vec2->stride[0], THTensor_(data)(vec1), vec1->stride[0], THTensor_(data)(r_), r_->stride[0]);
  }
  else {
    THTensor *cr = THTensor_(newClone)(r_);
    THBlas_(ger)(vec2->size[0], vec1->size[0], alpha, THTensor_(data)(vec2), vec2->stride[0], THTensor_(data)(vec1), vec1->stride[0], THTensor_(data)(cr), cr->stride[0]);
    THTensor_(freeCopyTo)(cr, r_);
  }
#undef LDA_COND
}
/* (above: close of THTensor_(addr)) */

/* result = beta * t + alpha * sum_b (batch1[b] @ batch2[b]):
 * batched matrix products reduced into a single 2D output. Delegates each
 * product to addmm; after the first batch beta is forced to 1 so the
 * remaining products accumulate instead of rescaling. */
void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
  int64_t batch;

  THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor");
  THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor");
  THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2, "equal number of batches expected, got %d, %d", THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
  THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2, "wrong matrix size, batch1: %dx%d, batch2: %dx%d", THTensor_(size)(batch1, 1), THTensor_(size)(batch1,2), THTensor_(size)(batch2, 1), THTensor_(size)(batch2,2));

  int64_t dim1 = THTensor_(size)(batch1, 1);
  int64_t dim2 = THTensor_(size)(batch2, 2);
  THArgCheck(THTensor_(size)(t, 0) == dim1, 1, "output tensor of incorrect size");
  THArgCheck(THTensor_(size)(t, 1) == dim2, 1, "output tensor of incorrect size");

  if (t != result) {
    THTensor_(resizeAs)(result, t);
    if (beta != 0.0) {
      THTensor_(copy)(result, t);
    }
  }

  THTensor *matrix1 = THTensor_(new)();
  THTensor *matrix2 = THTensor_(new)();

  for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
    THTensor_(select)(matrix1, batch1, 0, batch);
    THTensor_(select)(matrix2, batch2, 0, batch);

    THTensor_(addmm)(result, beta, result, alpha, matrix1, matrix2);
    beta = 1; // accumulate output once
  }

  THTensor_(free)(matrix1);
  THTensor_(free)(matrix2);
}

/* result[b] = beta * t[b] + alpha * (batch1[b] @ batch2[b]):
 * batched matrix products with a batched (3D) output. Each batch slice is
 * selected and handed to addmm independently. */
void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
  int64_t batch;

  THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch1));
  THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch2));
  THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2, "equal number of batches expected, got %d, %d", THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
  THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2, "wrong matrix size, batch1: %dx%d, batch2: %dx%d", THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2), THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2));

  int64_t bs = THTensor_(size)(batch1, 0);
  int64_t dim1 = THTensor_(size)(batch1, 1);
  int64_t dim2 = THTensor_(size)(batch2, 2);
  THArgCheck(THTensor_(size)(t, 0) == bs, 1, "output tensor of incorrect size");
  THArgCheck(THTensor_(size)(t, 1) == dim1, 1, "output tensor of incorrect size");
  THArgCheck(THTensor_(size)(t, 2) == dim2, 1, "output tensor of incorrect size");

  if (t != result) {
    THTensor_(resizeAs)(result, t);
    if (beta != 0.0) {
      THTensor_(copy)(result, t);
    }
  }

  THTensor *matrix1 = THTensor_(new)();
  THTensor *matrix2 = THTensor_(new)();
  THTensor *result_matrix = THTensor_(new)();

  for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
    THTensor_(select)(matrix1, batch1, 0, batch);
    THTensor_(select)(matrix2, batch2, 0, batch);
    THTensor_(select)(result_matrix, result, 0, batch);

    THTensor_(addmm)(result_matrix, beta, result_matrix, alpha, matrix1, matrix2);
  }

  THTensor_(free)(matrix1);
  THTensor_(free)(matrix2);
  THTensor_(free)(result_matrix);
}

/* Total number of elements in t (alias of nElement). */
ptrdiff_t THTensor_(numel)(THTensor *t)
{
  return THTensor_(nElement)(t);
}

// Helper function to be used in a reduction operation.
// Due to resize semantics of outputs, if the specified output tensor r_ has
// same size as the output of the reduction operation, then any noncontiguities
// in r_ should be preserved.
// The reduction operation, however, needs to act on r_ with an extra dimension
// (the reduced dimension), so this function "resizes" r_ and preserves its
// noncontiguities if necessary.
/* Re-insert the reduced dimension (as size 1) into r_ when the caller passed
 * a keepdim == 0 output that already has the reduced shape, so the reduction
 * loops can treat r_ uniformly. See the comment block above. */
void THTensor_(preserveReduceDimSemantics)(THTensor *r_, int in_dims, int reduce_dimension, int keepdim)
{
  if (r_ && !keepdim && THTensor_(nDimension)(r_) == in_dims - 1 && THTensor_(nDimension)(r_) != 0) {
    THTensor_(unsqueeze1d)(r_, r_, reduce_dimension);
  }
}

/* Maximum along `dimension`: writes max values into values_ and their
 * (0-based) indices into indices_. Two code paths chosen by data locality:
 * a direct inner-stride-1 scan, and a strided path that seeds the output
 * with slice 0 and updates it slice by slice. NaNs propagate: the
 * !(value <= theMax) comparison deliberately treats NaN as a new max. */
void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE);

  int in_dims = THTensor_(nDimension)(t);
  THTensor_(preserveReduceDimSemantics)(values_, in_dims, dimension, keepdim);
  THLongTensor_preserveReduceDimSemantics(indices_, in_dims, dimension, keepdim);
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    real theMax;
    real value;
    int64_t theIndex;
    int64_t i;
    TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension,
                         TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                         theMax = t_data[0];
                         theIndex = 0;
                         for(i = 0; i < t_size; i++)
                         {
                           value = t_data[i*t_stride];
                           /* This is not the same as value>theMax in the case of NaNs */
                           if(!(value <= theMax))
                           {
                             theIndex = i;
                             theMax = value;
                             th_isnan_break(value)
                           }
                         }
                         *indices__data = theIndex;
                         *values__data = theMax;);
  } else {
    /* strided path: seed values_ with slice 0 / the scalar, indices with 0 */
    if (THTensor_(nDimension)(t) > 1) {
      THTensor *t0 = THTensor_(newSelect)(t, dimension, 0);
      THTensor_(copy)(values_, t0);
      THTensor_(free)(t0);
    } else {
      THTensor_(fill)(values_, THTensor_(get1d)(t, 0));
    }
    THLongTensor_zero(indices_);

    if(t->size[dimension] == 1) {
      /* single slice: seed is already the answer */
      if (!keepdim) {
        THTensor_(squeeze1d)(values_, values_, dimension);
        THLongTensor_squeeze1d(indices_, indices_, dimension);
      }
      return;
    }

    THTensor *tempValues_ = THTensor_(newWithTensor)(values_);
    // tempValues_.expand_as(t)
    tempValues_->size[dimension] = t->size[dimension];
    tempValues_->stride[dimension] = 0;

    THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_);
    // tempIndices_.expand_as(t)
    tempIndices_->size[dimension] = t->size[dimension];
    tempIndices_->stride[dimension] = 0;

    TH_TENSOR_APPLY3_D(real, t, real, tempValues_, int64_t, tempIndices_, dimension,
                       if(!(*t_data <= *tempValues__data) && !th_isnan(*tempValues__data)) {
                         *tempValues__data = *t_data;
                         *tempIndices__data = *tempIndices__dimOffset;
                       });

    THTensor_(free)(tempValues_);
    THLongTensor_free(tempIndices_);
  }
  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}

/* Minimum along `dimension`: mirror of THTensor_(max) with the comparisons
 * reversed (note the variable is still named theMax). */
void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE);

  int in_dims = THTensor_(nDimension)(t);
  THTensor_(preserveReduceDimSemantics)(values_, in_dims, dimension, keepdim);
  THLongTensor_preserveReduceDimSemantics(indices_, in_dims, dimension, keepdim);
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);

  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    real theMax;
    real value;
    int64_t theIndex;
    int64_t i;
    TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension,
                         TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                         theMax = t_data[0];
                         theIndex = 0;
                         for(i = 0; i < t_size; i++)
                         {
                           value = t_data[i*t_stride];
                           /* This is not the same as value>theMax in the case of NaNs */
                           if(!(value >= theMax))
                           {
                             theIndex = i;
                             theMax = value;
                             th_isnan_break(value)
                           }
                         }
                         *indices__data = theIndex;
                         *values__data = theMax;);
  } else {
    if (THTensor_(nDimension)(t) > 1) {
      THTensor *t0 = THTensor_(newSelect)(t, dimension, 0);
      THTensor_(copy)(values_, t0);
      THTensor_(free)(t0);
    } else {
      THTensor_(fill)(values_,
THTensor_(get1d)(t, 0));
    }
/* (continuation of THTensor_(min): strided update path and keepdim squeeze) */
    THLongTensor_zero(indices_);

    if(t->size[dimension] == 1) {
      if (!keepdim) {
        THTensor_(squeeze1d)(values_, values_, dimension);
        THLongTensor_squeeze1d(indices_, indices_, dimension);
      }
      return;
    }

    THTensor *tempValues_ = THTensor_(newWithTensor)(values_);
    // tempValues_.expand_as(t)
    tempValues_->size[dimension] = t->size[dimension];
    tempValues_->stride[dimension] = 0;

    THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_);
    // tempIndices_.expand_as(t)
    tempIndices_->size[dimension] = t->size[dimension];
    tempIndices_->stride[dimension] = 0;

    TH_TENSOR_APPLY3_D(real, t, real, tempValues_, int64_t, tempIndices_, dimension,
                       if(!(*t_data >= *tempValues__data) && !th_isnan(*tempValues__data)) {
                         *tempValues__data = *t_data;
                         *tempIndices__data = *tempIndices__dimOffset;
                       });

    THTensor_(free)(tempValues_);
    THLongTensor_free(tempIndices_);
  }
  if (!keepdim) {
    THTensor_(squeeze1d)(values_, values_, dimension);
    THLongTensor_squeeze1d(indices_, indices_, dimension);
  }
}

/* Sum along `dimension` into r_. An OpenMP path handles contiguous,
 * non-aliasing outputs by mapping each output element back to its input
 * column via stride arithmetic; the serial fallback uses the dim-apply
 * macros, accumulating in accreal for extra precision. */
void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE);

  THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim);
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  int serial_path = 0;
#ifdef _OPENMP
  int inOMP = omp_in_parallel();
  if (inOMP) {
    serial_path = 1;
  } else {
    int r_Contig = THTensor_(isContiguous)(r_);
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    if(r_Contig && (tp != rp)){
      ptrdiff_t iter = 0;
      ptrdiff_t r_Size = THTensor_(nElement)(r_);
      int r_Dim = r_->nDimension;
      #pragma omp parallel for if ( r_Size > TH_OMP_OVERHEAD_THRESHOLD)
      for (iter = 0; iter < r_Size; iter++) {
        int j;
        int64_t quot;
        int64_t rem = iter;
        ptrdiff_t tBasicIndex = 0;

        /* decompose the flat output index into coordinates (skipping the
         * reduced dim) and map them to the matching input offset */
        for(j = 0; j < r_Dim; ++j) {
          if(j != dimension){
            quot = rem/r_->stride[j];
            rem = rem%r_->stride[j];
            tBasicIndex += quot*t->stride[j];
          }
        }
        real *t_data = tp+tBasicIndex;
        real *r__data = rp+iter;
        *r__data = 0;
        for(j=0; j < t->size[dimension]; ++j) {
          *r__data += *(t_data + j*t->stride[dimension]);
        }
      }
    } else {
      serial_path = 1;
    }
  }
#else
  serial_path = 1;
#endif
  if (serial_path) {
    // two implementations optimized for data locality
    if (t->stride[dimension] == 1) {
      TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                           accreal sum = 0;
                           int64_t i;
                           for(i = 0; i < t_size; i++)
                             sum += t_data[i*t_stride];
                           *r__data = (real)sum;);
    } else {
      THTensor_(zero)(r_);
      THTensor *temp_ = THTensor_(newWithTensor)(r_);
      // r_.expand_as(t)
      temp_->size[dimension] = t->size[dimension];
      temp_->stride[dimension] = 0;
      TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data + *t_data;);
      THTensor_(free)(temp_);
    }
  }
  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}

/* Product along `dimension` into r_. Same structure as THTensor_(sum) with
 * multiplication and identity 1 instead of addition and identity 0. */
void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE);

  THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim);
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  int serial_path = 0;
#ifdef _OPENMP
  int inOMP = omp_in_parallel();
  if (inOMP) {
    serial_path = 1;
  } else {
    int r_Contig = THTensor_(isContiguous)(r_);
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    if(r_Contig && (tp != rp)){
      ptrdiff_t iter = 0;
      ptrdiff_t r_Size = THTensor_(nElement)(r_);
      int r_Dim = r_->nDimension;
      #pragma omp parallel for if ( r_Size > TH_OMP_OVERHEAD_THRESHOLD)
      for (iter = 0; iter < r_Size; iter++) {
        int j;
        int64_t quot;
        int64_t rem = iter;
        ptrdiff_t tBasicIndex = 0;

        for(j = 0; j < r_Dim; ++j) {
          if(j != dimension){
            quot = rem/r_->stride[j];
            rem = rem%r_->stride[j];
            tBasicIndex += quot*t->stride[j];
          }
        }
real *t_data = tp+tBasicIndex; real *r__data = rp+iter; *r__data = 1; for(j=0; j < t->size[dimension]; ++j) { *r__data *= *(t_data + j*t->stride[dimension]); } } } else { serial_path = 1; } } #else serial_path = 1; #endif if(serial_path) { // two implementations optimized for data locality if (t->stride[dimension] == 1) { TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal prod = 1; int64_t i; for(i = 0; i < t_size; i++) prod *= t_data[i*t_stride]; *r__data = (real)prod;); } else { THTensor_(fill)(r_, 1); THTensor *temp_ = THTensor_(newWithTensor)(r_); // r_.expand_as(t) temp_->size[dimension] = t->size[dimension]; temp_->stride[dimension] = 0; TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data * *t_data;); THTensor_(free)(temp_); } } if (!keepdim) { THTensor_(squeeze1d)(r_, r_, dimension); } } void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE); THTensor_(resizeAs)(r_, t); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal cumsum = 0; int64_t i; for(i = 0; i < t_size; i++) { cumsum += t_data[i*t_stride]; r__data[i*r__stride] = (real)cumsum; }); } void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE); THTensor_(resizeAs)(r_, t); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal cumprod = 1; int64_t i; for(i = 0; i < t_size; i++) { cumprod *= t_data[i*t_stride]; r__data[i*r__stride] = (real)cumprod; }); } void THTensor_(sign)(THTensor *r_, THTensor *t) { THTensor_(resizeAs)(r_, t); #if defined (TH_REAL_IS_BYTE) TH_TENSOR_APPLY2(real, r_, real, t, if (*t_data > 0) *r__data = 1; else *r__data = 0;); #else TH_TENSOR_APPLY2(real, r_, real, t, if (*t_data > 0) *r__data = 1; else if (*t_data < 0) *r__data = -1; else *r__data = 0;); #endif } accreal 
/* trace: sum of the main-diagonal elements of the 2D tensor `t`.
   (The `accreal` return type is emitted on the previous source line.)
   Walks the diagonal via the combined stride (stride0 + stride1). */
THTensor_(trace)(THTensor *t)
{
  real *t_data = THTensor_(data)(t);
  accreal sum = 0;
  int64_t i = 0;
  int64_t t_stride_0, t_stride_1, t_diag_size;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  t_stride_0 = THTensor_(stride)(t, 0);
  t_stride_1 = THTensor_(stride)(t, 1);
  /* For a non-square matrix the diagonal length is min(rows, cols). */
  t_diag_size = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1));
  while(i < t_diag_size)
  {
    /* element (i, i) lives at byte-offset-free index i*stride0 + i*stride1 */
    sum += t_data[i*(t_stride_0+t_stride_1)];
    i++;
  }

  return sum;
}

/* cross: 3-element cross product of `a` and `b` along `dimension`, written to
   `r_`. If `dimension` < 0, the first dimension of size 3 is auto-detected.
   `a` and `b` must have identical shapes and size 3 along `dimension`. */
void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension)
{
  int i;

  if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b))
    THError("inconsistent tensor dimension %dD, %dD", THTensor_(nDimension)(a), THTensor_(nDimension)(b));

  for(i = 0; i < THTensor_(nDimension)(a); i++)
  {
    if(THTensor_(size)(a, i) != THTensor_(size)(b, i))
    {
      THDescBuff ba = THTensor_(sizeDesc)(a);
      THDescBuff bb = THTensor_(sizeDesc)(b);
      THError("inconsistent tensor sizes %s, %s", ba.str, bb.str);
    }
  }

  if(dimension < 0)
  {
    /* Auto-detect: pick the first dimension whose size is 3. */
    for(i = 0; i < THTensor_(nDimension)(a); i++)
    {
      if(THTensor_(size)(a, i) == 3)
      {
        dimension = i;
        break;
      }
    }
    if(dimension < 0)
    {
      THDescBuff ba = THTensor_(sizeDesc)(a);
      THError("no dimension of size 3 in a: %s", ba.str);
    }
  }

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension %d out of range", dimension + TH_INDEX_BASE);
  THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension %d does not have size 3", dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, a);

  /* Standard right-handed cross product applied slice-by-slice along `dimension`. */
  TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension,
                       TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                       r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride];
                       r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride];
                       r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];);
}

/* cmax: elementwise maximum of `t` and `src`, written to `r`.
   (The TH_TENSOR_APPLY3 argument list continues on the next source line.) */
void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   /* continuation of THTensor_(cmax): per-element max expression */
                   *r_data = *t_data > *src_data ? *t_data : *src_data;);
}

/* cmin: elementwise minimum of `t` and `src`, written to `r`. */
void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   *r_data = *t_data < *src_data ? *t_data : *src_data;);
}

/* cmaxValue: elementwise maximum of `t` and the scalar `value`. */
void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value)
{
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   *r_data = *t_data > value ? *t_data : value;);
}

/* cminValue: elementwise minimum of `t` and the scalar `value`. */
void THTensor_(cminValue)(THTensor *r, THTensor *t, real value)
{
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   *r_data = *t_data < value ? *t_data : value;);
}

/* zeros: resize `r_` to `size` and fill it with 0. */
void THTensor_(zeros)(THTensor *r_, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(zero)(r_);
}

/* zerosLike: resize `r_` to the shape of `input` and fill it with 0. */
void THTensor_(zerosLike)(THTensor *r_, THTensor *input)
{
  THTensor_(resizeAs)(r_, input);
  THTensor_(zero)(r_);
}

/* onesLike: resize `r_` to the shape of `input` and fill it with 1. */
void THTensor_(onesLike)(THTensor *r_, THTensor *input)
{
  THTensor_(resizeAs)(r_, input);
  THTensor_(fill)(r_, 1);
}

/* ones: resize `r_` to `size` and fill it with 1. */
void THTensor_(ones)(THTensor *r_, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(fill)(r_, 1);
}

/* diag: 1D branch — embed the length-n vector `t` as the k-th diagonal of an
   (n+|k|) x (n+|k|) zero matrix. k > 0 selects a super-diagonal, k < 0 a
   sub-diagonal. The 2D (diagonal-extraction) branch follows on later lines. */
void THTensor_(diag)(THTensor *r_, THTensor *t, int k)
{
  THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected");

  if(THTensor_(nDimension)(t) == 1)
  {
    real *t_data = THTensor_(data)(t);
    int64_t t_stride_0 = THTensor_(stride)(t, 0);
    int64_t t_size = THTensor_(size)(t, 0);
    int64_t sz = t_size + (k >= 0 ? k : -k);
    real *r__data;
    int64_t r__stride_0;
    int64_t r__stride_1;
    int64_t i;

    THTensor_(resize2d)(r_, sz, sz);
    THTensor_(zero)(r_);
    r__data = THTensor_(data)(r_);
    r__stride_0 = THTensor_(stride)(r_, 0);
    r__stride_1 = THTensor_(stride)(r_, 1);
    /* Advance to the first element of the k-th diagonal
       (the conditional expression continues on the next source line). */
    r__data += (k >= 0 ?
k*r__stride_1 : -k*r__stride_0); for(i = 0; i < t_size; i++) r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0]; } else { real *t_data = THTensor_(data)(t); int64_t t_stride_0 = THTensor_(stride)(t, 0); int64_t t_stride_1 = THTensor_(stride)(t, 1); int64_t sz; real *r__data; int64_t r__stride_0; int64_t i; if(k >= 0) sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k); else sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1)); THTensor_(resize1d)(r_, sz); r__data = THTensor_(data)(r_); r__stride_0 = THTensor_(stride)(r_, 0); t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0); for(i = 0; i < sz; i++) r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)]; } } void THTensor_(eye)(THTensor *r_, int64_t n, int64_t m) { real *r__data; int64_t i, sz; THArgCheck(n > 0, 1, "invalid argument"); if(m <= 0) m = n; THTensor_(resize2d)(r_, n, m); THTensor_(zero)(r_); i = 0; r__data = THTensor_(data)(r_); sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1)); for(i = 0; i < sz; i++) r__data[i*(r_->stride[0]+r_->stride[1])] = 1; } void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step) { ptrdiff_t size; real i = 0; THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound inconsistent with step sign"); size = (ptrdiff_t) (((xmax - xmin) / step) + 1); if (THTensor_(nElement)(r_) != size) { THTensor_(resize1d)(r_, size); } TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;); } void THTensor_(arange)(THTensor *r_, accreal xmin, accreal xmax, accreal step) { ptrdiff_t size; real i = 0; THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound inconsistent with step sign"); size = (ptrdiff_t) ceil((double)(xmax - xmin) / step); if (THTensor_(nElement)(r_) != size) { THTensor_(resize1d)(r_, size); } 
  /* continuation of THTensor_(arange): fill r_ with xmin, xmin+step, ... */
  TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;);
}

/* randperm: fill `r_` with a random permutation of 0..n-1, shuffled with the
   Fisher-Yates algorithm driven by `_generator`.
   NOTE(review): `% (n-i)` has a tiny modulo bias for very large n. */
void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, int64_t n)
{
  real *r__data;
  int64_t r__stride_0;
  int64_t i;

  THArgCheck(n > 0, 1, "must be strictly positive");

  THTensor_(resize1d)(r_, n);
  r__data = THTensor_(data)(r_);
  r__stride_0 = THTensor_(stride)(r_,0);

  /* identity permutation ... */
  for(i = 0; i < n; i++)
    r__data[i*r__stride_0] = (real)(i);

  /* ... then Fisher-Yates: swap slot i with a random slot in [i, n). */
  for(i = 0; i < n-1; i++)
  {
    int64_t z = THRandom_random(_generator) % (n-i);
    real sav = r__data[i*r__stride_0];
    r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0];
    r__data[(z+i)*r__stride_0] = sav;
  }
}

/* reshape: resize `r_` to `size` and copy `t`'s elements into it
   (resize + copy; no view/aliasing semantics). */
void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(copy)(r_, t);
}

/* I cut and pasted (slightly adapted) the quicksort code
   from Sedgewick's 1978 "Implementing Quicksort Programs" article
   http://www.csie.ntu.edu.tw/~b93076/p847-sedgewick.pdf

   It is the state of the art existing implementation. The macros
   are here to make as close a match as possible to the pseudocode
   of Program 2 p.851

   Note that other partition schemes exist, and are typically presented
   in textbook, but those are less efficient.

   See e.g.
http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto Julien, November 12th 2013 */ #define MAX_LEVELS 300 #define M_SMALL 10 /* Limit for small subfiles */ #define ARR(III) arr[(III)*stride] #define IDX(III) idx[(III)*stride] #define LONG_SWAP(AAA, BBB) swap = AAA; AAA = BBB; BBB = swap #define REAL_SWAP(AAA, BBB) rswap = AAA; AAA = BBB; BBB = rswap #define ARR_SWAP(III, JJJ) \ REAL_SWAP(ARR(III), ARR(JJJ)); #define BOTH_SWAP(III, JJJ) \ REAL_SWAP(ARR(III), ARR(JJJ)); \ LONG_SWAP(IDX(III), IDX(JJJ)) static void THTensor_(quicksortascend)(real *arr, int64_t *idx, int64_t elements, int64_t stride) { int64_t beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left; real rswap, piv; unsigned char done = 0; /* beg[0]=0; end[0]=elements; */ stack = 0; L = 0; R = elements-1; done = elements-1 <= M_SMALL; while(!done) { /* Use median of three for pivot choice */ P=(L+R)>>1; BOTH_SWAP(P, L+1); if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); } if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); } if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); } i = L+1; j = R; piv = ARR(L); pid = IDX(L); do { do { i = i+1; } while(ARR(i) < piv); do { j = j-1; } while(ARR(j) > piv); if (j < i) break; BOTH_SWAP(i, j); } while(1); BOTH_SWAP(L, j); /* Left subfile is (L, j-1) */ /* Right subfile is (i, R) */ sz_left = j-L; sz_right = R-i+1; if (sz_left <= M_SMALL && sz_right <= M_SMALL) { /* both subfiles are small */ /* if stack empty */ if (stack == 0) { done = 1; } else { stack--; L = beg[stack]; R = end[stack]; } } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) { /* exactly one of the subfiles is small */ /* (L,R) = large subfile */ if (sz_left > sz_right) { /* Implicit: L = L; */ R = j-1; } else { L = i; /* Implicit: R = R; */ } } else { /* none of the subfiles is small */ /* push large subfile */ /* (L,R) = small subfile */ if (sz_left > sz_right) { beg[stack] = L; end[stack] = j-1; stack++; L = i; /* Implicit: R = R */ } else { beg[stack] = 
i; end[stack] = R; stack++; /* Implicit: L = L; */ R = j-1; } } } /* while not done */ /* Now insertion sort on the concatenation of subfiles */ for(i=elements-2; i>=0; i--) { if (ARR(i) > ARR(i+1)) { piv = ARR(i); pid = IDX(i); j = i+1; do { ARR(j-1) = ARR(j); IDX(j-1) = IDX(j); j = j+1; } while(j < elements && ARR(j) < piv); ARR(j-1) = piv; IDX(j-1) = pid; } } } static void THTensor_(quicksortdescend)(real *arr, int64_t *idx, int64_t elements, int64_t stride) { int64_t beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left; real rswap, piv; unsigned char done = 0; /* beg[0]=0; end[0]=elements; */ stack = 0; L = 0; R = elements-1; done = elements-1 <= M_SMALL; while(!done) { /* Use median of three for pivot choice */ P=(L+R)>>1; BOTH_SWAP(P, L+1); if (ARR(L+1) < ARR(R)) { BOTH_SWAP(L+1, R); } if (ARR(L) < ARR(R)) { BOTH_SWAP(L, R); } if (ARR(L+1) < ARR(L)) { BOTH_SWAP(L+1, L); } i = L+1; j = R; piv = ARR(L); pid = IDX(L); do { do { i = i+1; } while(ARR(i) > piv); do { j = j-1; } while(ARR(j) < piv); if (j < i) break; BOTH_SWAP(i, j); } while(1); BOTH_SWAP(L, j); /* Left subfile is (L, j-1) */ /* Right subfile is (i, R) */ sz_left = j-L; sz_right = R-i+1; if (sz_left <= M_SMALL && sz_right <= M_SMALL) { /* both subfiles are small */ /* if stack empty */ if (stack == 0) { done = 1; } else { stack--; L = beg[stack]; R = end[stack]; } } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) { /* exactly one of the subfiles is small */ /* (L,R) = large subfile */ if (sz_left > sz_right) { /* Implicit: L = L; */ R = j-1; } else { L = i; /* Implicit: R = R; */ } } else { /* none of the subfiles is small */ /* push large subfile */ /* (L,R) = small subfile */ if (sz_left > sz_right) { beg[stack] = L; end[stack] = j-1; stack++; L = i; /* Implicit: R = R */ } else { beg[stack] = i; end[stack] = R; stack++; /* Implicit: L = L; */ R = j-1; } } } /* while not done */ /* Now insertion sort on the concatenation of subfiles */ for(i=elements-2; 
i>=0; i--) { if (ARR(i) < ARR(i+1)) { piv = ARR(i); pid = IDX(i); j = i+1; do { ARR(j-1) = ARR(j); IDX(j-1) = IDX(j); j = j+1; } while(j < elements && ARR(j) > piv); ARR(j-1) = piv; IDX(j-1) = pid; } } } #undef MAX_LEVELS #undef M_SMALL void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d", dimension + TH_INDEX_BASE); THTensor_(resizeAs)(rt_, t); THTensor_(copy)(rt_, t); { THLongStorage *size = THTensor_(newSizeOf)(t); THLongTensor_resize(ri_, size, NULL); THLongStorage_free(size); } if(descendingOrder) { TH_TENSOR_DIM_APPLY2(real, rt_, int64_t, ri_, dimension, int64_t i; for(i = 0; i < ri__size; i++) ri__data[i*ri__stride] = i; THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);) } else { TH_TENSOR_DIM_APPLY2(real, rt_, int64_t, ri_, dimension, int64_t i; for(i = 0; i < ri__size; i++) ri__data[i*ri__stride] = i; THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);) } } /* Implementation of the Quickselect algorithm, based on Nicolas Devillard's public domain implementation at http://ndevilla.free.fr/median/median/ Adapted similarly to the above Quicksort algorithm. This version does not produce indices along with values. 
*/
/* quickselectnoidx: in-place Quickselect over `elements` values accessed with
   stride `stride` (via the ARR() macro). On return arr[k] holds the element
   that would land at position k in a full ascending sort, with the remaining
   elements partitioned around it. No companion index array is maintained. */
static void THTensor_(quickselectnoidx)(real *arr, int64_t k, int64_t elements, int64_t stride)
{
  int64_t P, L, R, i, j;
  real rswap, piv;
  L = 0; R = elements-1;

  do {
    if (R <= L) /* One element only */
      return;

    if (R == L+1) {  /* Two elements only */
      if (ARR(L) > ARR(R)) {
        ARR_SWAP(L, R);
      }
      return;
    }

    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    ARR_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { ARR_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { ARR_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { ARR_SWAP(L+1, L); }

    i = L+1;
    j = R;
    piv = ARR(L);
    do {
      /* Hoare-style scan/partition around `piv` */
      do i++; while(ARR(i) < piv);
      do j--; while(ARR(j) > piv);
      if (j < i)
        break;
      ARR_SWAP(i, j);
    } while(1);
    ARR_SWAP(L, j);

    /* Re-set active partition */
    if (j <= k) L=i;
    if (j >= k) R=j-1;
  } while(1);
}

/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
   public domain implementation at http://ndevilla.free.fr/median/median/
   Adapted similarly to the above Quicksort algorithm. */
/* quickselect: same as quickselectnoidx, but the companion index array `idx`
   is permuted in lockstep (BOTH_SWAP), so idx[k] tracks arr[k]'s origin. */
static void THTensor_(quickselect)(real *arr, int64_t *idx, int64_t k, int64_t elements, int64_t stride)
{
  int64_t P, L, R, i, j, swap;
  real rswap, piv;
  L = 0; R = elements-1;

  do {
    if (R <= L) /* One element only */
      return;

    if (R == L+1) {  /* Two elements only */
      if (ARR(L) > ARR(R)) {
        BOTH_SWAP(L, R);
      }
      return;
    }

    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }

    i = L+1;
    j = R;
    piv = ARR(L);
    do {
      do i++; while(ARR(i) < piv);
      do j--; while(ARR(j) > piv);
      if (j < i)
        break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);

    /* Re-set active partition */
    if (j <= k) L=i;
    if (j >= k) R=j-1;
  } while(1);
}

#undef ARR
#undef IDX
#undef LONG_SWAP
#undef REAL_SWAP
#undef BOTH_SWAP

/* mode: reduction along `dimension` producing, per slice, a value/index pair
   (presumably the most frequent value, per the function name — the computing
   loop is on the following source lines). This line only sets up the
   reduce-dim output shapes; the declaration list continues on the next line. */
void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;
  THTensor *temp_;
  THLongTensor *tempi_;
  real *temp__data;
  int64_t *tempi__data;
  int64_t
t_size_dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); int in_dims = THTensor_(nDimension)(t); THTensor_(preserveReduceDimSemantics)(values_, in_dims, dimension, keepdim); THLongTensor_preserveReduceDimSemantics(indices_, in_dims, dimension, keepdim); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); t_size_dim = THTensor_(size)(t, dimension); temp_ = THTensor_(new)(); THTensor_(resize1d)(temp_, t_size_dim); temp__data = THTensor_(data)(temp_); tempi_ = THLongTensor_new(); THLongTensor_resize1d(tempi_, t_size_dim); tempi__data = THLongTensor_data(tempi_); TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, int64_t i; real mode = 0; int64_t modei = 0; int64_t temp_freq = 0; int64_t max_freq = 0; for(i = 0; i < t_size_dim; i++) temp__data[i] = t_data[i*t_stride]; for(i = 0; i < t_size_dim; i++) tempi__data[i] = i; THTensor_(quicksortascend)(temp__data, tempi__data, t_size_dim, 1); for(i = 0; i < t_size_dim; i++) { temp_freq++; if ((i == t_size_dim - 1) || (temp__data[i] != temp__data[i+1])) { if (temp_freq > max_freq) { mode = temp__data[i]; modei = tempi__data[i]; max_freq = temp_freq; } temp_freq = 0; } } *values__data = mode; *indices__data = modei;); THTensor_(free)(temp_); THLongTensor_free(tempi_); if (!keepdim) { THTensor_(squeeze1d)(values_, values_, dimension); THLongTensor_squeeze1d(indices_, indices_, dimension); } } void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, int64_t k, int dimension, int keepdim) { THLongStorage *dim; THTensor *temp_; THLongTensor *tempi_; real *temp__data; int64_t *tempi__data; int64_t t_size_dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); THArgCheck(k > 0 && k <= t->size[dimension], 2, "selected index out of 
range"); int in_dims = THTensor_(nDimension)(t); THTensor_(preserveReduceDimSemantics)(values_, in_dims, dimension, keepdim); THLongTensor_preserveReduceDimSemantics(indices_, in_dims, dimension, keepdim); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); t_size_dim = THTensor_(size)(t, dimension); temp_ = THTensor_(new)(); THTensor_(resize1d)(temp_, t_size_dim); temp__data = THTensor_(data)(temp_); tempi_ = THLongTensor_new(); THLongTensor_resize1d(tempi_, t_size_dim); tempi__data = THLongTensor_data(tempi_); TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, int64_t i; for(i = 0; i < t_size_dim; i++) temp__data[i] = t_data[i*t_stride]; for(i = 0; i < t_size_dim; i++) tempi__data[i] = i; THTensor_(quickselect)(temp__data, tempi__data, k - 1, t_size_dim, 1); *values__data = temp__data[k-1]; *indices__data = tempi__data[k-1];); THTensor_(free)(temp_); THLongTensor_free(tempi_); if (!keepdim) { THTensor_(squeeze1d)(values_, values_, dimension); THLongTensor_squeeze1d(indices_, indices_, dimension); } } void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim) { int64_t t_size_dim, k; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); t_size_dim = THTensor_(size)(t, dimension); k = (t_size_dim-1) >> 1; /* take middle or one-before-middle element */ THTensor_(kthvalue)(values_, indices_, t, k+1, dimension, keepdim); } void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int64_t k, int dim, int dir, int sorted) { int numDims = THTensor_(nDimension)(t); THArgCheck(dim >= 0 && dim < numDims, 3, "dim not in range"); int64_t sliceSize = THTensor_(size)(t, dim); THArgCheck(k > 0 && k <= sliceSize, 2, "k not in range for dimension"); THTensor *tmpResults = THTensor_(new)(); 
THTensor_(resize1d)(tmpResults, sliceSize); real *tmp__data = THTensor_(data)(tmpResults); THLongTensor *tmpIndices = THLongTensor_new(); THLongTensor_resize1d(tmpIndices, sliceSize); int64_t *tmpi__data = THLongTensor_data(tmpIndices); THLongStorage *topKSize = THTensor_(newSizeOf)(t); THLongStorage_set(topKSize, dim, k); THTensor_(resize)(rt_, topKSize, NULL); THLongTensor_resize(ri_, topKSize, NULL); THLongStorage_free(topKSize); if (dir) { /* k largest elements, descending order (optional: see sorted) */ int64_t K = sliceSize - k; TH_TENSOR_DIM_APPLY3(real, t, real, rt_, int64_t, ri_, dim, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, int64_t i; for(i = 0; i < sliceSize; i++) { tmp__data[i] = t_data[i*t_stride]; tmpi__data[i] = i; } if (K > 0) THTensor_(quickselect)(tmp__data, tmpi__data, K - 1, sliceSize, 1); if (sorted) THTensor_(quicksortdescend)(tmp__data + K, tmpi__data + K, k, 1); for(i = 0; i < k; i++) { rt__data[i*rt__stride] = tmp__data[i + K]; ri__data[i*ri__stride] = tmpi__data[i + K]; }) } else { /* k smallest elements, ascending order (optional: see sorted) */ TH_TENSOR_DIM_APPLY3(real, t, real, rt_, int64_t, ri_, dim, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, int64_t i; for(i = 0; i < sliceSize; i++) { tmp__data[i] = t_data[i*t_stride]; tmpi__data[i] = i; } THTensor_(quickselect)(tmp__data, tmpi__data, k - 1, sliceSize, 1); if (sorted) THTensor_(quicksortascend)(tmp__data, tmpi__data, k - 1, 1); for(i = 0; i < k; i++) { rt__data[i*rt__stride] = tmp__data[i]; ri__data[i*ri__stride] = tmpi__data[i]; }) } THTensor_(free)(tmpResults); THLongTensor_free(tmpIndices); } void THTensor_(tril)(THTensor *r_, THTensor *t, int64_t k) { int64_t t_size_0, t_size_1; int64_t t_stride_0, t_stride_1; int64_t r__stride_0, r__stride_1; real *t_data, *r__data; int64_t r, c; THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); THTensor_(resizeAs)(r_, t); t_size_0 = THTensor_(size)(t, 0); t_size_1 = THTensor_(size)(t, 1); t_stride_0 = THTensor_(stride)(t, 0); 
  /* continuation of THTensor_(tril): copy the lower triangle (columns <= r+k)
     and zero the strict upper triangle, row by row. */
  t_stride_1 = THTensor_(stride)(t, 1);
  r__stride_0 = THTensor_(stride)(r_, 0);
  r__stride_1 = THTensor_(stride)(r_, 1);
  r__data = THTensor_(data)(r_);
  t_data = THTensor_(data)(t);
  for(r = 0; r < t_size_0; r++)
  {
    int64_t sz = THMin(r+k+1, t_size_1);
    /* zero everything strictly right of the k-th diagonal ... */
    for(c = THMax(0, r+k+1); c < t_size_1; c++)
      r__data[r*r__stride_0+c*r__stride_1] = 0;
    /* ... and copy everything at or left of it */
    for(c = 0; c < sz; c++)
      r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
  }
}

/* triu: upper-triangular part of the 2D tensor `t` with diagonal offset `k`,
   written to `r_`; entries strictly below the k-th diagonal are zeroed. */
void THTensor_(triu)(THTensor *r_, THTensor *t, int64_t k)
{
  int64_t t_size_0, t_size_1;
  int64_t t_stride_0, t_stride_1;
  int64_t r__stride_0, r__stride_1;
  real *t_data, *r__data;
  int64_t r, c;

  THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix");

  THTensor_(resizeAs)(r_, t);

  t_size_0 = THTensor_(size)(t, 0);
  t_size_1 = THTensor_(size)(t, 1);
  t_stride_0 = THTensor_(stride)(t, 0);
  t_stride_1 = THTensor_(stride)(t, 1);
  r__stride_0 = THTensor_(stride)(r_, 0);
  r__stride_1 = THTensor_(stride)(r_, 1);
  r__data = THTensor_(data)(r_);
  t_data = THTensor_(data)(t);
  for(r = 0; r < t_size_0; r++)
  {
    int64_t sz = THMin(r+k, t_size_1);
    /* copy everything at or right of the k-th diagonal ... */
    for(c = THMax(0, r+k); c < t_size_1; c++)
      r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
    /* ... and zero everything left of it */
    for(c = 0; c < sz; c++)
      r__data[r*r__stride_0+c*r__stride_1] = 0;
  }
}

/* cat: concatenate `ta` and `tb` along `dimension`
   (two-tensor convenience wrapper around catArray). */
void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension)
{
  THTensor* inputs[2];
  inputs[0] = ta;
  inputs[1] = tb;
  THTensor_(catArray)(r_, inputs, 2, dimension);
}

/* check_shape_except_dim: error out unless `first` and `second` have the same
   number of dimensions and identical sizes in every dimension except
   `dimension`. The non-inline declaration before the inline definition
   ensures an external definition is emitted (C99 inline semantics).
   The loop body continues on the next source line. */
void THTensor_(check_shape_except_dim)(THTensor *first, THTensor *second, int dimension);
inline void THTensor_(check_shape_except_dim)(THTensor *first, THTensor *second, int dimension)
{
  int first_dims = first->nDimension;
  int second_dims = second->nDimension;
  THArgCheck(first_dims == second_dims, 0, "Tensors must have same number of dimensions: got %d and %d", first_dims, second_dims);
  for (int dim = 0; dim < first_dims; dim++)
  {
    if (dim == dimension)
    {
      continue;
    }
    int64_t first_dim_size = first->size[dim];
    int64_t second_dim_size =
second->size[dim]; THArgCheck(first_dim_size == second_dim_size, 0, "Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d", dimension, (long long)first_dim_size, (long long)second_dim_size, dim); } } void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension) { // Find a non-empty tensor to record nDims int allEmpty = 1; int nDims = 0; THTensor *notEmptyTensor; for (int i = 0; i < numInputs; i++) { int input_dims = inputs[i]->nDimension; if (input_dims == 0) { continue; } // We've found a non-empty tensor allEmpty = 0; notEmptyTensor = inputs[i]; nDims = input_dims; break; } if (allEmpty) { return; } // Compute cat_dimension based on the non-empty tensor THArgCheck(dimension >= -1 && dimension < nDims, 4, "invalid dimension %d", dimension); // When the user input dimension is -1 (i.e. -2 in C) // Then we pick the last dimension across non-empty tensors. int cat_dimension = dimension; if (dimension + TH_INDEX_BASE == -1) { cat_dimension = nDims ? 
nDims - 1 : 0; } THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs); // Compute size of the result in the cat dimension int64_t cat_dim_size = 0; for (int i = 0; i < numInputs; i++) { THTensor *tensor = inputs[i]; if (tensor->nDimension == 0) { continue; } THTensor_(check_shape_except_dim)(notEmptyTensor, tensor, cat_dimension); cat_dim_size += tensor->size[cat_dimension]; } // Compute the size of the result THLongStorage *size = THLongStorage_newWithSize(nDims); for (int dim = 0; dim < nDims; dim++) { int64_t result_dim_size = notEmptyTensor->size[dim]; if (dim == cat_dimension) { result_dim_size = cat_dim_size; } size->data[dim] = result_dim_size; } THTensor_(resize)(result, size, NULL); // Check contiguity of all inputs and result int allContiguous = 1; for (int i = 0; i < numInputs; i++) { if(inputs[i]->nDimension) { allContiguous = allContiguous && THTensor_(isContiguous)(inputs[i]); } } allContiguous = allContiguous && THTensor_(isContiguous)(result); // First path is for contiguous inputs along dim 0 // Second path for non-contiguous int64_t offset; if (cat_dimension == 0 && allContiguous) { real* result_data = result->storage->data + result->storageOffset; offset = 0; for (int j = 0; j < numInputs; j++) { if (inputs[j]->nDimension) { THTensor* input0 = inputs[j]; real* input0_data = input0->storage->data + input0->storageOffset; int64_t input0_size = THTensor_(nElement)(input0); memcpy(result_data + offset, input0_data, input0_size*sizeof(real)); offset += input0_size; } } } else { offset = 0; for (int j = 0; j < numInputs; j++) { if (inputs[j]->nDimension) { int64_t dimSize = cat_dimension < inputs[j]->nDimension ? 
inputs[j]->size[cat_dimension] : 1; THTensor *nt = THTensor_(newWithTensor)(result); THTensor_(narrow)(nt, NULL, cat_dimension, offset, dimSize); THTensor_(copy)(nt, inputs[j]); THTensor_(free)(nt); offset += dimSize; } } } THLongStorage_free(size); } int THTensor_(equal)(THTensor *ta, THTensor* tb) { int equal = 1; if(!THTensor_(isSameSizeAs)(ta, tb)) return 0; if (THTensor_(isContiguous)(ta) && THTensor_(isContiguous)(tb)) { real *tap = THTensor_(data)(ta); real *tbp = THTensor_(data)(tb); ptrdiff_t sz = THTensor_(nElement)(ta); ptrdiff_t i; for (i=0; i<sz; ++i){ if(tap[i] != tbp[i]) return 0; } } else { // Short-circuit the apply function on inequality TH_TENSOR_APPLY2(real, ta, real, tb, if (equal && *ta_data != *tb_data) { equal = 0; TH_TENSOR_APPLY_hasFinished = 1; break; }) } return equal; } #define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \ void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \ { \ THByteTensor_resizeNd(r_, t->nDimension, t->size, NULL); \ TH_TENSOR_APPLY2(unsigned char, r_, real, t, \ *r__data = (*t_data OP value) ? 1 : 0;); \ } \ void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \ { \ THTensor_(resizeNd)(r_, t->nDimension, t->size, NULL); \ TH_TENSOR_APPLY2(real, r_, real, t, \ *r__data = (*t_data OP value) ? 1 : 0;); \ } \ void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \ { \ THByteTensor_resizeNd(r_, ta->nDimension, ta->size, NULL); \ TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \ *r__data = (*ta_data OP *tb_data) ? 1 : 0;); \ } \ void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \ { \ THTensor_(resizeNd)(r_, ta->nDimension, ta->size, NULL); \ TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \ *r__data = (*ta_data OP *tb_data) ? 
1 : 0;); \ } \ TENSOR_IMPLEMENT_LOGICAL(lt,<) TENSOR_IMPLEMENT_LOGICAL(gt,>) TENSOR_IMPLEMENT_LOGICAL(le,<=) TENSOR_IMPLEMENT_LOGICAL(ge,>=) TENSOR_IMPLEMENT_LOGICAL(eq,==) TENSOR_IMPLEMENT_LOGICAL(ne,!=) #ifdef _OPENMP #define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \ void THTensor_(NAME)(THTensor *r_, THTensor *t) \ { \ THTensor_(resizeAs)(r_, t); \ ptrdiff_t r_Size = THTensor_(nElement)(r_); \ int r_Contig = THTensor_(isContiguous)(r_); \ int tContig = THTensor_(isContiguous)(t); \ int inOMP = omp_in_parallel(); \ if( (r_Size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOMP) ){ \ TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = CFUNC(*t_data);); \ } \ else { \ TH_TENSOR_APPLY2(real, r_, real, t, *r__data = CFUNC(*t_data);); \ } \ } #define LAB_IMPLEMENT_VECTORIZED_FUNCTION(NAME, CFUNC) \ void THTensor_(NAME)(THTensor *r_, THTensor *t) \ { \ THTensor_(resizeAs)(r_, t); \ ptrdiff_t r_Size = THTensor_(nElement)(r_); \ int r_Contig = THTensor_(isContiguous)(r_); \ int tContig = THTensor_(isContiguous)(t); \ if (r_Contig && tContig) { \ TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(NAME)(r__data, t_data, r__len);); \ } else { \ int inOMP = omp_in_parallel(); \ if( (r_Size > TH_OMP_OVERHEAD_THRESHOLD) && (!inOMP) ){ \ TH_TENSOR_APPLY2_OMP(r_Size, r_Contig, tContig, real, r_, real, t, *r__data = CFUNC(*t_data);); \ } \ else { \ TH_TENSOR_APPLY2(real, r_, real, t, *r__data = CFUNC(*t_data);); \ } \ } \ } #else #define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \ void THTensor_(NAME)(THTensor *r_, THTensor *t) \ { \ THTensor_(resizeAs)(r_, t); \ TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \ } \ #define LAB_IMPLEMENT_VECTORIZED_FUNCTION(NAME, CFUNC) \ void THTensor_(NAME)(THTensor *r_, THTensor *t) \ { \ THTensor_(resizeAs)(r_, t); \ int r_Contig = THTensor_(isContiguous)(r_); \ int tContig = THTensor_(isContiguous)(t); \ if (r_Contig && tContig) { \ TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(NAME)(r__data, 
t_data, r__len);); \ } else { \ TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \ } \ } \ #endif LAB_IMPLEMENT_BASIC_FUNCTION(neg,-) #if defined(TH_REAL_IS_LONG) LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs) #endif /* int64_t only part */ #if defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT) LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs) #endif /* int only part */ #if defined(TH_REAL_IS_BYTE) #define TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE) \ int THTensor_(NAME)(THTensor *tensor) \ { \ int sum = INIT_VALUE; \ TH_TENSOR_APPLY(real, tensor, sum = sum OP *tensor_data;); \ return sum; \ } TENSOR_IMPLEMENT_LOGICAL_SUM(logicalall, &&, 1) TENSOR_IMPLEMENT_LOGICAL_SUM(logicalany, ||, 0) #endif /* Byte only part */ /* floating point only now */ #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) #if defined (TH_REAL_IS_FLOAT) #define TH_MATH_NAME(fn) fn##f #else #define TH_MATH_NAME(fn) fn #endif LAB_IMPLEMENT_BASIC_FUNCTION(log,TH_MATH_NAME(log)) LAB_IMPLEMENT_BASIC_FUNCTION(lgamma,TH_MATH_NAME(lgamma)) LAB_IMPLEMENT_BASIC_FUNCTION(digamma,TH_MATH_NAME(TH_digamma)) LAB_IMPLEMENT_BASIC_FUNCTION(trigamma,TH_MATH_NAME(TH_trigamma)) LAB_IMPLEMENT_BASIC_FUNCTION(log1p,TH_MATH_NAME(log1p)) LAB_IMPLEMENT_BASIC_FUNCTION(exp,TH_MATH_NAME(exp)) LAB_IMPLEMENT_BASIC_FUNCTION(expm1,TH_MATH_NAME(expm1)) LAB_IMPLEMENT_BASIC_FUNCTION(cos,TH_MATH_NAME(cos)) LAB_IMPLEMENT_BASIC_FUNCTION(acos,TH_MATH_NAME(acos)) LAB_IMPLEMENT_BASIC_FUNCTION(cosh,TH_MATH_NAME(cosh)) LAB_IMPLEMENT_BASIC_FUNCTION(sin,TH_MATH_NAME(sin)) LAB_IMPLEMENT_BASIC_FUNCTION(asin,TH_MATH_NAME(asin)) LAB_IMPLEMENT_BASIC_FUNCTION(sinh,TH_MATH_NAME(sinh)) LAB_IMPLEMENT_BASIC_FUNCTION(tan,TH_MATH_NAME(tan)) LAB_IMPLEMENT_BASIC_FUNCTION(atan,TH_MATH_NAME(atan)) LAB_IMPLEMENT_BASIC_FUNCTION(tanh,TH_MATH_NAME(tanh)) LAB_IMPLEMENT_BASIC_FUNCTION(erf,TH_MATH_NAME(erf)) LAB_IMPLEMENT_BASIC_FUNCTION(erfinv,TH_erfinv) LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,TH_MATH_NAME(sqrt)) 
LAB_IMPLEMENT_BASIC_FUNCTION(rsqrt,TH_MATH_NAME(TH_rsqrt)) LAB_IMPLEMENT_BASIC_FUNCTION(ceil,TH_MATH_NAME(ceil)) LAB_IMPLEMENT_BASIC_FUNCTION(floor,TH_MATH_NAME(floor)) LAB_IMPLEMENT_BASIC_FUNCTION(round,TH_MATH_NAME(round)) LAB_IMPLEMENT_BASIC_FUNCTION(abs,TH_MATH_NAME(fabs)) LAB_IMPLEMENT_BASIC_FUNCTION(trunc,TH_MATH_NAME(trunc)) LAB_IMPLEMENT_BASIC_FUNCTION(frac,TH_MATH_NAME(TH_frac)) LAB_IMPLEMENT_BASIC_FUNCTION(cinv, TH_MATH_NAME(1.0) / ) LAB_IMPLEMENT_VECTORIZED_FUNCTION(sigmoid,TH_MATH_NAME(TH_sigmoid)) void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty) { THTensor_(resizeAs)(r_, tx); TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = TH_MATH_NAME(atan2)(*tx_data,*ty_data);); } void THTensor_(polygamma)(THTensor *r_, int64_t n, THTensor *t) { switch (n) { case 0: THTensor_(digamma)(r_, t); return; case 1: THTensor_(trigamma)(r_, t); return; default: THError("polygamma(n,x) is not implemented for n>=2"); } } void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight) { THArgCheck(THTensor_(nElement)(a) == THTensor_(nElement)(b), 2, "sizes do not match"); THTensor_(resizeAs)(r_, a); TH_TENSOR_APPLY3(real, r_, real, a, real, b, *r__data = TH_MATH_NAME(TH_lerp)(*a_data, *b_data, weight);); } void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension, int keepdim) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d", dimension + TH_INDEX_BASE); THTensor_(sum)(r_, t, dimension, keepdim); THTensor_(div)(r_, r_, t->size[dimension]); } void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", dimension + TH_INDEX_BASE); THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); 
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, // Uses Welford's algorithm for numeric stability accreal mean = 0; accreal M2 = 0; int64_t i; for (i = 0; i < t_size; i++) { real z = t_data[i*t_stride]; real delta = z - mean; mean += delta / (i + 1); real delta2 = z - mean; M2 += delta * delta2; } if (biased && t_size >= 2) { *r__data = TH_MATH_NAME(sqrt)(M2 / t_size); } else if (!biased && t_size >= 2) { *r__data = TH_MATH_NAME(sqrt)(M2 / (t_size - 1)); } else if (biased && t_size == 1) { *r__data = 0; } else { *r__data = NAN; }); if (!keepdim) { THTensor_(squeeze1d)(r_, r_, dimension); } } void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", dimension + TH_INDEX_BASE); THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, // Uses Welford's algorithm for numeric stability accreal mean = 0; accreal M2 = 0; int64_t i; for (i = 0; i < t_size; i++) { real z = t_data[i*t_stride]; real delta = z - mean; mean += delta / (i + 1); real delta2 = z - mean; M2 += delta * delta2; } if (biased && t_size >= 2) { *r__data = M2 / t_size; } else if (!biased && t_size >= 2) { *r__data = M2 / (t_size - 1); } else if (biased && t_size == 1) { *r__data = 0; } else { *r__data = NAN; }); if (!keepdim) { THTensor_(squeeze1d)(r_, r_, dimension); } } void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension, int keepdim) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", dimension + TH_INDEX_BASE); THTensor_(preserveReduceDimSemantics)(r_, THTensor_(nDimension)(t), dimension, keepdim); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 
1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); #define DIM_REDUCE(reduce, transform) \ TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, \ accreal sum = 0; \ int64_t i; \ for(i = 0; i < t_size; i++) { \ (reduce); \ } \ (transform);) \ if(value == 0) { DIM_REDUCE(sum += t_data[i*t_stride] != 0.0, *r__data = sum); } else if (value == 1) { DIM_REDUCE(sum += TH_MATH_NAME(fabs)(t_data[i*t_stride]), *r__data = sum); } else if (value == 2) { DIM_REDUCE(sum += t_data[i*t_stride] * t_data[i*t_stride], *r__data = TH_MATH_NAME(sqrt)(sum)); } else if (value == 3) { DIM_REDUCE(sum += TH_MATH_NAME(fabs)(t_data[i*t_stride] * t_data[i*t_stride] * t_data[i*t_stride]), *r__data = TH_MATH_NAME(pow)(sum, 1.0/3)); } else { DIM_REDUCE(sum += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(t_data[i*t_stride]), value), *r__data = TH_MATH_NAME(pow)(sum, 1.0/value)); } if (!keepdim) { THTensor_(squeeze1d)(r_, r_, dimension); } #undef DIM_REDUCE } accreal THTensor_(normall)(THTensor *tensor, real value) { accreal sum = 0; if(value == 0) { TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;); return sum; } else if(value == 1) { TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(fabs)(*tensor_data);); return sum; } else if(value == 2) { TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;); return sqrt(sum); } else if(value == 3) { TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += std::abs(z*z*z);); return TH_MATH_NAME(pow)(sum, 1.0/3); } else { TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*tensor_data), value);); return TH_MATH_NAME(pow)(sum, 1.0/value); } } void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm) { int i; THTensor *rowR, *rowS; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(src), 3, "invalid dimension %d", dimension + TH_INDEX_BASE); THArgCheck(value > 0, 2, "non-positive-norm not supported"); THArgCheck(THTensor_(nDimension)(src) > 1, 1, "need at least 2 
dimensions, got %d dimensions", THTensor_(nDimension)(src)); rowR = THTensor_(new)(); rowS = THTensor_(new)(); THTensor_(resizeAs)(res, src); for (i=0; i<src->size[dimension]; i++) { real norm = 0; real new_norm; THTensor_(select)(rowS, src, dimension, i); THTensor_(select)(rowR, res, dimension, i); if (value == 1) { TH_TENSOR_APPLY(real, rowS, norm += fabs(*rowS_data);); } else if (value == 2) { TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;); } else { TH_TENSOR_APPLY(real, rowS, norm += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*rowS_data), value);); } norm = pow(norm, 1/value); if (norm > maxnorm) { new_norm = maxnorm / (norm + 1e-7); TH_TENSOR_APPLY2( real, rowR, real, rowS, *rowR_data = (*rowS_data) * new_norm; ) } else THTensor_(copy)(rowR, rowS); } THTensor_(free)(rowR); THTensor_(free)(rowS); } accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value) { real sum = 0; TH_TENSOR_APPLY2(real, tensor, real, src, sum += TH_MATH_NAME(pow)( TH_MATH_NAME(fabs)(*tensor_data - *src_data), value);); return TH_MATH_NAME(pow)(sum, 1.0/value); } accreal THTensor_(meanall)(THTensor *tensor) { THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); return THTensor_(sumall)(tensor)/THTensor_(nElement)(tensor); } accreal THTensor_(varall)(THTensor *tensor, int biased) { accreal mean = THTensor_(meanall)(tensor); accreal sum = 0; TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean);); sum /= THTensor_(nElement)(tensor) - (biased ? 
0 : 1); return sum; } accreal THTensor_(stdall)(THTensor *tensor, int biased) { return sqrt(THTensor_(varall)(tensor, biased)); } void THTensor_(linspace)(THTensor *r_, real a, real b, int64_t n) { real i = 0; THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); if (THTensor_(nElement)(r_) != n) { THTensor_(resize1d)(r_, n); } if(n == 1) { THTensor_(set1d)(r_, 0, a); } else { TH_TENSOR_APPLY(real, r_, *r__data = a + (b-a)/((real)(n-1))*i; i++; ); } } void THTensor_(logspace)(THTensor *r_, real a, real b, int64_t n) { real i = 0; THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); if (THTensor_(nElement)(r_) != n) { THTensor_(resize1d)(r_, n); } if(n == 1) { THTensor_(set1d)(r_, 0, TH_MATH_NAME(pow)(10.0, a)); } else { TH_TENSOR_APPLY(real, r_, *r__data = TH_MATH_NAME(pow)(10.0, a + i*(b-a)/((real)(n-1))); i++; ); } } void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(uniform)(r_, _generator, 0, 1); } void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(normal)(r_, _generator, 0, 1); } void THTensor_(histc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue) { real minval; real maxval; real *h_data; THTensor_(resize1d)(hist, nbins); THTensor_(zero)(hist); minval = minvalue; maxval = maxvalue; if (minval == maxval) { minval = THTensor_(minall)(tensor); maxval = THTensor_(maxall)(tensor); } if (minval == maxval) { minval = minval - 1; maxval = maxval + 1; } h_data = THTensor_(data)(hist); TH_TENSOR_APPLY(real, tensor, if (*tensor_data >= minval && *tensor_data <= maxval) { const int bin = (int)((*tensor_data-minval) / (maxval-minval) * nbins); h_data[THMin(bin, nbins-1)] += 1; } ); } void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue) { THArgCheck(THTensor_(nDimension)(tensor) < 3, 2, "invalid 
dimension %d, the input must be a 2d tensor", THTensor_(nDimension)(tensor)); int dimension = 1; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(tensor), 2, "invalid dimension %d", dimension + TH_INDEX_BASE); real minval; real maxval; THTensor_(resize2d)(hist, tensor->size[0], nbins); THTensor_(zero)(hist); minval = minvalue; maxval = maxvalue; if (minval == maxval) { minval = THTensor_(minall)(tensor); maxval = THTensor_(maxall)(tensor); } if (minval == maxval) { minval = minval - 1; maxval = maxval + 1; } TH_TENSOR_DIM_APPLY2(real, tensor, real, hist, dimension, int64_t i; for(i = 0; i < tensor_size; i++) { if(tensor_data[i*tensor_stride] >= minval && tensor_data[i*tensor_stride] <= maxval) { const int bin = (int)((tensor_data[i*tensor_stride]-minval) / (maxval-minval) * nbins); hist_data[THMin(bin, nbins-1)] += 1; } } ); } // Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha. // Assumes x is close to zero and uses a Taylor expansion. static inline real THTensor_(beta_grad_alpha_small)(real x, real alpha, real beta) { const real factor = TH_digamma(alpha) - TH_digamma(alpha + beta) - TH_MATH_NAME(log)(x); real numer = 1; real series = numer / alpha * (factor + 1 / alpha); for (int i = 1; i <= 10; ++i) { numer *= (i - beta) * x / i; const real denom = alpha + i; series += numer / denom * (factor + 1 / denom); } const real result = x * TH_MATH_NAME(pow)(1 - x, -beta) * series; return th_isnan(result) ? 0.0 : result; } // Approximate reparameterized gradient of Beta(x,alpha,beta) wrt beta. // Assumes x is close to zero and uses a Taylor expansion. 
/* Taylor-series approximation of d/dbeta of the reparameterized Beta sample,
 * valid for x near 0; returns 0 if the series produced NaN. */
static inline real THTensor_(beta_grad_beta_small)(real x, real alpha, real beta) {
  const real factor = TH_digamma(alpha+beta) - TH_digamma(beta);
  real numer = 1;
  real betas = 1;
  real dbetas = 0;
  real series = factor / alpha;
  for (int i = 1; i <= 8; ++i) {
    numer *= -x / i;
    /* betas tracks prod(beta - k); dbetas is its derivative wrt beta. */
    dbetas = dbetas * (beta - i) + betas;
    betas = betas * (beta - i);
    series += numer / (alpha + i) * (dbetas + factor * betas);
  }
  const real result = -TH_MATH_NAME(pow)(1 - x, 1 - beta) * series;
  return th_isnan(result) ? 0.0 : result;
}

// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha.
// Assumes alpha and beta are both large and uses a Rice saddle point expansion.
// To ensure numerical stability, this computation is performed at higher precision.
static inline real THTensor_(beta_grad_alpha_mid)(double x, double alpha, double beta) {
  const double total = alpha + beta;
  const double mean = alpha / total;
  const double std = sqrt(alpha * beta / (total + 1)) / total;
  if (mean - 0.1 * std <= x && x <= mean + 0.1 * std) {
    // Avoid the singularity at x = mean.
    /* Polynomial (in alpha, beta, x) replacement near the singular point. */
    const double poly = 47 * x * (beta*beta)*(beta*beta) + alpha * (
                      (43 + 20 * (16 + 27 * beta) * x) * (beta*beta)*beta + alpha * (
                      3 * (59 + 180 * beta - 90 * x) * (beta*beta) + alpha * (
                      (453 + 1620 * beta * (1 - x) - 455 * x) * beta + alpha * (
                      8 * (1 - x) * (135 * beta - 11)))));
    const double prefactor_num = (1 + 12 * alpha) * (1 + 12 * beta) / (total * total);
    const double prefactor_den = 12960 * alpha * alpha * alpha * beta * beta * (1 + 12 * total);
    return prefactor_num / (1 - x) * poly / prefactor_den;
  }
  /* General saddle-point branch: Stirling-corrected prefactor times a
   * four-term expansion; term4's sign flips across the mean. */
  const double prefactor = -x / sqrt(2 * alpha * beta / total);
  const double stirling = (1 + 1 / (12 * alpha) + 1 / (288 * alpha*alpha))
                        * (1 + 1 / (12 * beta) + 1 / (288 * beta*beta))
                        / (1 + 1 / (12 * total) + 1 / (288 * total*total));
  const double term1_num = 2 * (alpha*alpha) * (x - 1) + alpha * beta * (x - 1) - x * (beta*beta);
  const double axbx = alpha * (x-1) + beta * x;
  const double term1_den = sqrt(2 * alpha / beta) * pow(total, 1.5f) * axbx*axbx;
  const double term1 = term1_num / term1_den;
  const double term2 = 0.5f * log(alpha / (total * x));
  const double term3_num = sqrt(8 * alpha * beta / total);
  const double term3_den = beta * x + alpha * (x - 1);
  const double term3 = term3_num / term3_den;
  const double term4_base = beta * log(beta / (total * (1 - x)))
                          + alpha * log(alpha / (total * x));
  const double term4 = pow(term4_base, -1.5f);
  const double term1234 = term1 + term2 * (term3 + (x < mean ? term4 : -term4));
  return stirling * prefactor * term1234;
}

// Computes a scaled reparameterized gradient
//   -(d/dalpha cdf(x;alpha,beta)) / pdf(x;alpha,beta) / (1-x)
// for random number x drawn from a Beta distribution Beta(alpha,beta).
// This function inputs total=alpha+beta to make it easy to implement
// Dirichlet reparameterized gradients in terms of Betas.
/* Per-element Dirichlet reparameterized gradient.  Dispatches to one of the
 * asymptotic approximations above, or falls back to a rational correction
 * (ratio of two cubic-in-b polynomials with coefficients c) applied to an
 * analytic approximation. */
static inline real THTensor_(dirichlet_grad_one)(real x, real alpha, real total) {
  const real beta = total - alpha;
  const real boundary = total * x * (1 - x);
  // Use an asymptotic approximation for x close to 0.
  if (x <= 0.5f && boundary < 2.5f) {
    return THTensor_(beta_grad_alpha_small)(x, alpha, beta);
  }
  // Use an asymptotic approximation for x close to 1.
  if (x >= 0.5f && boundary < 0.75f) {
    return -THTensor_(beta_grad_beta_small)(1 - x, beta, alpha);
  }
  // Use an asymptotic approximation when alpha and (total - alpha) are both large.
  if (alpha > 6 && beta > 6) {
    return THTensor_(beta_grad_alpha_mid)(x, alpha, beta);
  }
  // Use a rational correction to an analytic approximation.
  // c[0] holds the numerator coefficients, c[1] the denominator's, indexed
  // by powers of u = log(x), a = log(alpha) - u, b = log(total) - a.
  static const real c[2][3][3][4] = {
    {{{1.003668233, -0.01061107488, -0.0657888334, 0.01201642863},
      {0.6336835991, -0.3557432599, 0.05486251648, -0.001465281033},
      {-0.03276231906, 0.004474107445, 0.002429354597, -0.0001557569013}},
     {{0.221950385, -0.3187676331, 0.01799915743, 0.01074823814},
      {-0.2951249643, 0.06219954479, 0.01535556598, 0.001550077057},
      {0.02155310298, 0.004170831599, 0.001292462449, 6.976601077e-05}},
     {{-0.05980841433, 0.008441916499, 0.01085618172, 0.002319392565},
      {0.02911413504, 0.01400243777, -0.002721828457, 0.000751041181},
      {0.005900514878, -0.001936558688, -9.495446725e-06, 5.385558597e-05}}},
    {{{1, -0.02924021934, -0.04438342661, 0.007285809825},
      {0.6357567472, -0.3473456711, 0.05454656494, -0.002407477521},
      {-0.03301322327, 0.004845219414, 0.00231480583, -0.0002307248149}},
     {{0.5925320577, -0.1757678135, 0.01505928619, 0.000564515273},
      {0.1014815858, -0.06589186703, 0.01272886114, -0.0007316646956},
      {-0.007258481865, 0.001096195486, 0.0003934994223, -4.12701925e-05}},
     {{0.06469649321, -0.0236701437, 0.002902096474, -5.896963079e-05},
      {0.001925008108, -0.002869809258, 0.0008000589141, -6.063713228e-05},
      {-0.0003477407336, 6.959756487e-05, 1.097287507e-05, -1.650964693e-06}}},
  };
  const real u = TH_MATH_NAME(log)(x);
  const real a = TH_MATH_NAME(log)(alpha) - u;
  const real b = TH_MATH_NAME(log)(total) - a;
  const real pow_u[3] = {1, u, u * u};
  const real pow_a[3] = {1, a, a * a};
  real p = 0.0;
  real q = 0.0;
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 3; ++j) {
      const real ua = pow_u[i] * pow_a[j];
      /* Horner evaluation in b for both numerator p and denominator q. */
      p += ua * (c[0][i][j][0] + b * (c[0][i][j][1] + b * (c[0][i][j][2] + b * c[0][i][j][3])));
      q += ua * (c[1][i][j][0] + b * (c[1][i][j][1] + b * (c[1][i][j][2] + b * c[1][i][j][3])));
    }
  }
  const real approx = x * (TH_digamma(total) - TH_digamma(alpha)) / beta;
  return p / q * approx;
}

/* self[i] = dirichlet_grad_one(x[i], alpha[i], total[i]) for all elements.
 * All inputs are made contiguous and must share the same size.
 * NOTE(review): the newContiguous copies of x/alpha/total appear never to be
 * freed here — possible reference leak; confirm against TH ownership rules. */
void THTensor_(dirichlet_grad)(THTensor *self, THTensor *x, THTensor *alpha, THTensor *total)
{
  x = THTensor_(newContiguous)(x);
  alpha = THTensor_(newContiguous)(alpha);
  total = THTensor_(newContiguous)(total);
  TH_CHECK_SAME_SIZE(alpha, x);
  TH_CHECK_SAME_SIZE(total, x);
  THTensor_(resizeAs)(self, x);
  THTensor* grad = THTensor_(newContiguous)(self);

  real*const grad_data = THTensor_(data)(grad);
  real*const x_data = THTensor_(data)(x);
  real*const alpha_data = THTensor_(data)(alpha);
  real*const total_data = THTensor_(data)(total);
  const int64_t numel = THTensor_(nElement)(x);
  int64_t i;
  #pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i)
  for(i = 0; i < numel; ++i) {
    grad_data[i] = THTensor_(dirichlet_grad_one)(x_data[i], alpha_data[i], total_data[i]);
  }

  THTensor_(freeCopyTo)(grad, self);
}

#undef TH_MATH_NAME
#endif /* floating point only part */
#undef IS_NONZERO
#endif
move_template.h
/**
 * This is a pseudo template function with three named parameters that
 * must be #defined before including this file.
 *
 * FUNCTION_NAME - obvious
 *
 * BEGIN_ITER_CODE - code to be executed at the start of each
 * particle's move step.
 *
 * END_ITER_CODE - code to be executed at the end of each particle's
 * move step.
 *
 * Moves every particle in the XArray `self` one (unit) timestep through
 * the lattice `lat`: advection by the interpolated fluid velocity, plus
 * a sedimentation term from the applied force, plus Gaussian diffusion.
 * Positions are wrapped periodically onto [0.5, size + 0.5].
 */
void FUNCTION_NAME(PyObject *self, Lattice *lat) {
  PyObject *tmpObject;
  int numFallers;  /* NOTE(review): unused in this template body */
  int i;
  /* XArray access context and member sub-arrays */
  XArrayCtx* ctx;
  XArrayMember aAr, rAr, sAr, vAr, fAr;
  double hydroRadius, eta, noiseStDev;
  int size[DQ_d], tracks;

  /* get the Lattice size */
  size[0] = lat->nx;
  size[1] = lat->ny;
  size[2] = lat->nz;

  /* dynamic viscosity from the LB relaxation time */
  eta = lat->tau_s / 3.;

  /* Use tmpObject to grab various attributes before conversion */
  tmpObject = PyObject_GetAttrString(self, "hydroRadius");
  hydroRadius = PyFloat_AS_DOUBLE(tmpObject);
  Py_DECREF(tmpObject); /* discard the new ref gained above */

  tmpObject = PyObject_GetAttrString(self, "noiseStDev");
  noiseStDev = PyFloat_AS_DOUBLE(tmpObject);
  Py_DECREF(tmpObject); /* discard the new ref gained above */

  /* tracks: whether unwrapped positions `s` are maintained alongside `r` */
  tmpObject = PyObject_GetAttrString(self, "tracks");
  tracks = (tmpObject == Py_True);
  Py_DECREF(tmpObject); /* discard the new ref gained above */

  ctx = XArray_initCtx(self);
  XArray_getMember(ctx, 'r', &rAr);
  if (tracks)
    XArray_getMember(ctx, 's', &sAr);
  XArray_getMember(ctx, 'v', &vAr);
  XArray_getMember(ctx, 'F', &fAr);
  XArray_getMember(ctx, 'a', &aAr);

  /* NOTE(review): pgasdev_get draws from the shared lat->noise generator
   * inside this parallel loop — looks like a potential data race unless the
   * generator is thread-safe; confirm. */
#pragma omp parallel for schedule(guided)
  for (i=0; i<ctx->nRows; ++i) {
    /* radius, position, velocity & force on a particular particle */
    double a, *r, *s, *v, *F;
    double vinterp[DQ_d];
    int j;

    /* set up pointers to the correct parts of the array */
    r = XArray_getItem(&rAr, i);
    if (tracks)
      s = XArray_getItem(&sAr, i);
    v = XArray_getItem(&vAr, i);
    F = XArray_getItem(&fAr, i);
    a = *XArray_getItem(&aAr, i);

    BEGIN_ITER_CODE;

    /* interpolate the velocity to particle location */
    utils_interp_single(lat, r, vinterp);

    /* calculate velocity vector */
    for (j=0; j<DQ_d; ++j) {
      v[j] = vinterp[j] +                                        /* advection */
        F[j] * (1./a - 1./hydroRadius) / (6. * M_PI * eta) +     /* sedimentation*/
        noiseStDev*pgasdev_get(&lat->noise->gds);                /* diffusion */

      /* Calculate the new position, assuming unit timestep */
      r[j] += v[j];
      if (tracks)
        s[j] += v[j];  /* unwrapped trajectory: no periodic wrapping */

      /* Now check that it's not moved off the edge of the lattice */
      /* and wrap it round if it has. */
      if (r[j] < 0.5) {
        r[j] += size[j];
      } else if (r[j] > size[j] + 0.5) {
        r[j] -= size[j];
      }
    }

    END_ITER_CODE;
  } /* i */

  /* Delete the array access ctx */
  XArray_delCtx(ctx);
}
GB_binop__ne_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): machine-generated specialization of every binop kernel for
// z = (x != y) over uint8_t inputs with bool output.  The GB_* macros below
// are consumed by the #included template .c files; only comments have been
// added here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__ne_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__ne_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__ne_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__ne_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__ne_uint8)
// A*D function (colscale):         GB (_AxD__ne_uint8)
// D*A function (rowscale):         GB (_DxB__ne_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__ne_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__ne_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__ne_uint8)
// C=scalar+B                       GB (_bind1st__ne_uint8)
// C=scalar+B'                      GB (_bind1st_tran__ne_uint8)
// C=A+scalar                       GB (_bind2nd__ne_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__ne_uint8)

// C type:   bool
// A type:   uint8_t
// A pattern? 0
// B type:   uint8_t
// B pattern? 0

// BinaryOp: cij = (aij != bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x != y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_NE || GxB_NO_UINT8 || GxB_NO_NE_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (NE is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // NE cannot be used as an accumulator, so the body is compiled out.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__ne_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // NE cannot be used as an accumulator, so the body is compiled out.
    #if 0
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__ne_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only used by eWiseUnion for entries missing from A or B
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__ne_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP

        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }

    #else

        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"

    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ne_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__ne_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__ne_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x != aij) ; \
}

GrB_Info GB (_bind1st_tran__ne_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template inclusion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij != y) ; \
}

GrB_Info GB (_bind2nd_tran__ne_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__identity_bool_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): auto-generated kernel instance.  The macros below configure
// the shared template GB_unaryop_transpose.c for one (op, ctype, atype)
// triple; the loop bodies are expanded from GB_CAST_OP.  Do not hand-edit
// logic here — change the Generator/ sources instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_bool_uint32
// op(A') function:  GB_tran__identity_bool_uint32

// C type:   bool
// A type:   uint32_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    uint32_t

// type of the C matrix entries
#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is just x)
#define GB_OP(z, x) \
    z = x ;

// casting (uint32_t -> bool; any nonzero value becomes true)
#define GB_CASTING(z, aij) \
    bool z = (bool) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time opt-out flags from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over a dense value array: Cx [p] = (bool) Ax [p] for
// all p in [0, anz).  Cx and Ax may be aliased (pure element-wise, safe).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop__identity_bool_uint32
(
    bool *Cx,               // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,            // number of entries to convert
    int nthreads            // OpenMP thread count chosen by the caller
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// GB_unaryop_transpose.c, specialized here by the macros above.
GrB_Info GB_tran__identity_bool_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB067-restrictpointer1-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* restrict pointers: no aliasing Array initialization using assignments. C99 is needed to compile this code e.g. gcc -std=c99 -c Stress-1.c */ #include <stdlib.h> typedef double real8; void foo(real8 * restrict newSxx, real8 * restrict newSyy, int length) { int i; #pragma omp parallel for private (i) for (i = 0; i <= length - 1; i += 1) { newSxx[i] = 0.0; newSyy[i] = 0.0; } } void print(real8 * restrict newSxx, real8 * restrict newSyy, int length) { int i; for (i = 0; i <= length - 1; i += 1) { printf("%lf %lf\n", newSxx[i], newSyy[i]); } } int main() { int length=1000; real8* newSxx = malloc (length* sizeof (real8)); real8* newSyy = malloc (length* sizeof (real8)); foo(newSxx, newSyy, length); print(newSxx, newSyy, length); free (newSxx); free (newSyy); return 0; }
symm_c_coo_u_hi_row_conj.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"

#define CACHELINE 64

// Dense-times-sparse symmetric multiply for a COO matrix, row-major dense
// operands: y = alpha * op(A) * x + beta * y, for `columns` right-hand sides.
// Per the kernel naming convention this instance is: complex (_c), unit
// diagonal (_u), upper triangle stored (_hi), row-major x/y (_row), with the
// conjugate applied to stored values (_conj) — assumption from ONAME
// conventions, TODO confirm against the dispatch table.
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT m = mat->rows;
    ALPHA_INT n = columns;
    ALPHA_INT num_threads = alpha_get_thread_num();

    // Phase 1: y = beta*y + alpha*x.  The alpha*x term is the contribution of
    // the implicit unit diagonal (presumably — the "_u" variant; verify).
    // Parallel over rows; each (r,c) element is touched by exactly one thread.
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT r = 0; r < m; ++r)
        for (ALPHA_INT c = 0; c < n; c++)
        {
            ALPHA_Number tmp1, tmp2;
            alpha_mul(tmp1, y[index2(r, c, ldy)], beta);
            alpha_mul(tmp2, x[index2(r, c, ldx)], alpha);
            alpha_add(y[index2(r, c, ldy)], tmp1, tmp2);
        }

    // Phase 2: scatter the strictly-upper entries symmetrically.  To avoid a
    // data race on y (each nonzero updates BOTH row r and row ac), the dense
    // columns are partitioned into cache-line-sized blocks and each thread
    // owns a disjoint column range [bcl, bch): every thread scans all
    // nonzeros but writes only its own columns.
    ALPHA_INT block_size = CACHELINE / sizeof(ALPHA_Number);
    ALPHA_INT block_num = (columns + block_size - 1) / block_size;
    if (num_threads > block_num) num_threads = block_num;
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT bcl = cross_block_low(tid, num_threads, block_num) * block_size;
        ALPHA_INT bch = cross_block_high(tid, num_threads, block_num) * block_size;
        if (bch > columns) bch = columns;
        for (ALPHA_INT ai = 0; ai < mat->nnz; ai++)
        {
            ALPHA_INT ac = mat->col_indx[ai];
            ALPHA_INT r = mat->row_indx[ai];
            // Only strictly-upper entries contribute; the diagonal is the
            // implicit identity handled in phase 1, and lower-triangle
            // entries (if present) are ignored.
            if (ac > r)
            {
                ALPHA_Number val;
                // alpha * value, with conjugation applied by alpha_mul_3c
                // (assumption from the _3c suffix — confirm in util.h).
                alpha_mul_3c(val, alpha, mat->values[ai]);
                // A(r,ac) contribution ...
                for (ALPHA_INT c = bcl; c < bch; ++c)
                    alpha_madde(y[index2(r, c, ldy)], val, x[index2(ac, c, ldx)]);
                // ... and its mirrored A(ac,r) contribution.
                for (ALPHA_INT c = bcl; c < bch; ++c)
                    alpha_madde(y[index2(ac, c, ldy)], val, x[index2(r, c, ldx)]);
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
GB_binop__islt_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated kernel instance for the ISLT ("is less than",
// z = (x < y), result stored as int8_t) operator on int8_t inputs.  The
// GB(...) macro mangles each name (defined in GB.h, not visible here), and
// every function body is expanded from a shared template via #include,
// specialized by the macros below.  Fix logic in the Generator/ sources,
// not here.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__islt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_08__islt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__islt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_04__islt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__islt_int8)
// A*D function (colscale):         GB (_AxD__islt_int8)
// D*A function (rowscale):         GB (_DxB__islt_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__islt_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__islt_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__islt_int8)
// C=scalar+B                       GB (_bind1st__islt_int8)
// C=scalar+B'                      GB (_bind1st_tran__islt_int8)
// C=A+scalar                       GB (_bind2nd__islt_int8)
// C=A'+scalar                      GB (_bind2nd_tran__islt_int8)

// C type:     int8_t
// A type:     int8_t
// A pattern?  0
// B type:     int8_t
// B pattern?  0

// BinaryOp:   cij = (aij < bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA] (GBX handles the iso-valued case via A_iso)
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLT || GxB_NO_INT8 || GxB_NO_ISLT_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISLT is not one of them, so this variant is compiled out (#if 0).

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__islt_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block returns first; harmless
    // artifact of the generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__islt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read for eWiseUnion (scalars substituted for
    // entries missing from A or B, respectively)
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__islt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is 0 for ISLT: the flip is z=isgt(x,y), generated
    // as a separate kernel.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__islt_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__islt_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL (GBB treats NULL as all-present)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__islt_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x < aij) ; \
}

GrB_Info GB (_bind1st_tran__islt_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of the file (same type here, since
    // A and B are both int8_t for this operator)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij < y) ; \
}

GrB_Info GB (_bind2nd_tran__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
symm_x_dia_n_hi_col_conj.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

// Dense-times-sparse symmetric multiply for a DIA (diagonal-storage) matrix,
// column-major dense operands: y = alpha * op(A) * x + beta * y for `columns`
// right-hand sides.  Per the kernel naming convention this instance is:
// non-unit diagonal (_n), upper triangle stored (_hi), column-major x/y
// (_col), with conjugation of stored values (_conj) — assumption from ONAME
// conventions, TODO confirm against the dispatch table.
// Only compiled for complex types (conjugation is meaningless otherwise);
// returns INVALID_VALUE when COMPLEX is not defined.
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
#ifdef COMPLEX
    ALPHA_INT num_threads = alpha_get_thread_num();

    // Parallel over dense right-hand-side columns: each thread owns whole
    // columns of x and y (X, Y point at column cc), so there are no races.
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT cc = 0; cc < columns; ++cc)
    {
        ALPHA_Number* Y = &y[index2(cc,0,ldy)];
        // Y = beta * Y
        for (ALPHA_INT i = 0; i < mat->rows; i++)
            alpha_mul(Y[i],Y[i],beta);
        const ALPHA_Number* X = &x[index2(cc,0,ldx)];
        // Walk the stored diagonals; only the upper triangle (d > 0) and the
        // main diagonal (d == 0) contribute, matching the "_hi" variant.
        for(ALPHA_INT di = 0; di < mat->ndiag;++di){
            ALPHA_INT d = mat->distance[di];
            if(d > 0){
                // Diagonal d starts at row ars, column acs and has an
                // entries.  (For d > 0, ars is always 0; the alpha_max form
                // is the generator's generic expression.)
                ALPHA_INT ars = alpha_max(0,-d);
                ALPHA_INT acs = alpha_max(0,d);
                ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs);
                for(ALPHA_INT i = 0; i < an; ++i){
                    ALPHA_INT ar = ars + i;
                    ALPHA_INT ac = acs + i;
                    ALPHA_Number val;
                    // val = alpha * stored value, conjugated by alpha_mul_2c
                    // (assumption from the _2c suffix — confirm in util.h).
                    alpha_mul_2c(val,mat->values[index2(di,ar,mat->lval)],alpha);
                    // Apply A(ar,ac) and its symmetric mirror A(ac,ar).
                    alpha_madde(Y[ar],val,X[ac]);
                    alpha_madde(Y[ac],val,X[ar]);
                }
            }
            if(d == 0){
                // Main diagonal: applied once per row (non-unit "_n" variant,
                // so the stored diagonal values are used).
                for(ALPHA_INT r = 0; r < mat->rows; ++r){
                    ALPHA_Number val;
                    alpha_mul_2c(val,mat->values[index2(di,r,mat->lval)],alpha);
                    alpha_madde(Y[r],val,X[r]);
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
#else
    return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#endif
}
GB_unop__identity_int8_uint8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated kernel instance for the IDENTITY unary op
// with a uint8_t -> int8_t typecast.  Fix logic in the Generator/ sources.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int8_uint8)
// op(A') function:  GB (_unop_tran__identity_int8_uint8)

// C type:   int8_t
// A type:   uint8_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity)
#define GB_OP(z, x) \
    z = x ;

// casting (uint8_t -> int8_t; values > 127 wrap, implementation-defined
// pre-C++20/C23 but two's complement on all supported platforms)
#define GB_CAST(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = (int8_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply: Cx [p] = (int8_t) Ax [p].  Two code paths: dense
// (Ab == NULL, every p present) and bitmap (skip entries with Ab [p] == 0).
GrB_Info GB (_unop_apply__identity_int8_uint8)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            int8_t z = (int8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            int8_t z = (int8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop lives in the shared template GB_unop_transpose.c,
// specialized by the macros above.
GrB_Info GB (_unop_tran__identity_int8_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
lu2_mpi.c
//#define DO_RCOMM_LAST //#define CONCURRENT_UCOMM // lu2_mpi.c // // test program for blocked LU decomposition // // Time-stamp: <11/06/25 12:58:48 makino> //#define NOBLAS //#define TIMETEST #include <sys/types.h> #include <sys/stat.h> #include <sys/mman.h> #include <fcntl.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <getopt.h> #include "lu2_mp.h" #include <emmintrin.h> typedef double v2df __attribute__((vector_size(16))); typedef union {v2df v; double s[2];}v2u; #include <lu2tlib.h> #include <lu2tlib.h> #include <lu2lib.h> void timer_init(); double cpusec(); double wsec(); omp_lock_t my_lock; void printmat_MP(int nnrow, int nncol, double a[nnrow][nncol], PCONTROLS controls, PPARMS parms); #define RDIM (n+16) void copymats( int n, double a[n][RDIM], double a2[n][RDIM]) { int i, j; for(i=0;i<n;i++){ for(j=0;j<n+2;j++) a2[i][j] = a[i][j]; } } void dumpsubmat(char * s,int n1, double mat[][n1], int nrow, int ncolumn, PPARMS parms, PCONTROLS controls) { int ip, i, j; return; fprintf(stderr, "npr, npc = %d %d\n", parms->nprow, parms->npcol); sleep(MP_myprocid()*2+1); fprintf(stderr,"\n\n\n\n\n"); fprintf(stderr,"%s\n", s); for(i=0;i<nrow;i++){ fprintf(stderr,"%3d ", i); for(j=0;j<ncolumn;j++) fprintf(stderr," %6.2f", mat[i][j]); fprintf(stderr,"\n"); } fprintf(stderr,"\n\n\n\n\n"); fflush(stderr); } void copysubmat0(int n1, double src[][n1], int n2, double dest[][n2], int nrow, int ncolumn ) { int i, j; for(i=0;i<nrow;i++){ for(j=0;j<ncolumn;j++) dest[i][j] = src[i][j]; } } void copysubmat8(int n1, double src[][n1], int n2, double dest[][n2], int nrow, int ncolumn ) { int i; if (nrow * ncolumn > 10000){ #pragma omp parallel for private(i) schedule(static,64) for(i=0;i<nrow;i++){ v2df * s = (v2df*) (src[i]); v2df * d = (v2df*) (dest[i]); __builtin_prefetch(src[i+8],0,0); d[0]=s[0]; d[1]=s[1]; d[2]=s[2]; d[3]=s[3]; } }else{ for(i=0;i<nrow;i++){ v2df * s = (v2df*) (src[i]); v2df * d = (v2df*) (dest[i]); 
__builtin_prefetch(src[i+8],0,0); d[0]=s[0]; d[1]=s[1]; d[2]=s[2]; d[3]=s[3]; } } } void copysubmat16(int n1, double src[][n1], int n2, double dest[][n2], int nrow, int ncolumn ) { int i; if (nrow * ncolumn > 10000){ #pragma omp parallel for private(i) schedule(static,64) for(i=0;i<nrow;i++){ v2df * s = (v2df*) (src[i]); v2df * d = (v2df*) (dest[i]); __builtin_prefetch(src[i+8],0,0); d[0]=s[0]; d[1]=s[1]; d[2]=s[2]; d[3]=s[3]; d[4]=s[4]; d[5]=s[5]; d[6]=s[6]; d[7]=s[7]; } }else{ for(i=0;i<nrow;i++){ v2df * s = (v2df*) (src[i]); v2df * d = (v2df*) (dest[i]); __builtin_prefetch(src[i+8],0,0); d[0]=s[0]; d[1]=s[1]; d[2]=s[2]; d[3]=s[3]; d[4]=s[4]; d[5]=s[5]; d[6]=s[6]; d[7]=s[7]; } } } void copysubmat(int n1, double src[][n1], int n2, double dest[][n2], int nrow, int ncolumn ) { // assume that ncolum is multiple of 8 and // address is 16-byte aligined int i; int j; if (ncolumn < 8){ copysubmat0( n1, src, n2, dest, nrow, ncolumn); return; } #if 1 if (ncolumn == 8){ copysubmat8( n1, src, n2, dest, nrow, ncolumn); return; } if (ncolumn == 16){ copysubmat16( n1, src, n2, dest, nrow, ncolumn); return; } #endif BEGIN_TIMER(t); if (nrow * ncolumn > 30000){ #pragma omp parallel for private(i) schedule(static,64) for(i=0;i<nrow;i++){ int j; // for(j=0;j<ncolumn;j++) dest[i][j] = src[i][j]; v2df * s = (v2df*) (src[i]); v2df * d = (v2df*) (dest[i]); for(j=0;j<ncolumn/2;j+=8){ // __builtin_prefetch(s+j+96,0,0); #if 0 d[j]=s[j]; d[j+1]=s[j+1]; d[j+2]=s[j+2]; d[j+3]=s[j+3]; d[j+4]=s[j+4]; d[j+5]=s[j+5]; d[j+6]=s[j+6]; d[j+7]=s[j+7]; #else __builtin_ia32_movntpd((double*)&d[j], s[j]); __builtin_ia32_movntpd((double*)&d[j+1], s[j+1]); __builtin_ia32_movntpd((double*)&d[j+2], s[j+2]); __builtin_ia32_movntpd((double*)&d[j+3], s[j+3]); __builtin_ia32_movntpd((double*)&d[j+4], s[j+4]); __builtin_ia32_movntpd((double*)&d[j+5], s[j+5]); __builtin_ia32_movntpd((double*)&d[j+6], s[j+6]); __builtin_ia32_movntpd((double*)&d[j+7], s[j+7]); #endif } } }else{ for(i=0;i<nrow;i++){ int j; // 
for(j=0;j<ncolumn;j++) dest[i][j] = src[i][j]; v2df * s = (v2df*) (src[i]); v2df * d = (v2df*) (dest[i]); for(j=0;j<ncolumn/2;j+=8){ #if 0 d[j]=s[j]; d[j+1]=s[j+1]; d[j+2]=s[j+2]; d[j+3]=s[j+3]; d[j+4]=s[j+4]; d[j+5]=s[j+5]; d[j+6]=s[j+6]; d[j+7]=s[j+7]; #else __builtin_ia32_movntpd((double*)&d[j], s[j]); __builtin_ia32_movntpd((double*)&d[j+1], s[j+1]); __builtin_ia32_movntpd((double*)&d[j+2], s[j+2]); __builtin_ia32_movntpd((double*)&d[j+3], s[j+3]); __builtin_ia32_movntpd((double*)&d[j+4], s[j+4]); __builtin_ia32_movntpd((double*)&d[j+5], s[j+5]); __builtin_ia32_movntpd((double*)&d[j+6], s[j+6]); __builtin_ia32_movntpd((double*)&d[j+7], s[j+7]); #endif } } } END_TIMER(t,43,nrow*ncolumn*1.0); #if 0 for(i=0; i< nrow; i++){ for(j=0; j<ncolumn; j++){ printf(" %6.2f", src[i][j]); } printf("\n"); } printf("\n\n"); for(i=0; i< nrow; i++){ for(j=0; j<ncolumn; j++){ printf(" %6.2f", dest[i][j]); } printf("\n"); } printf("\n"); #endif } void copybvect( int n, double a[][RDIM], double b[]) { int i; for(i=0;i<n;i++)b[i] = a[i][n]; } void showresult(int n, double a[n][RDIM], double x[]) { int i, j; double emax = 0; for(i=0;i<n;i++){ int k; double b2=0; // printf("%3d: ", i); // for(j=0;j<n;j++) printf(" %10.3e", a[i][j]); for(j=0;j<n;j++) b2 += a[i][j] * x[j]; double err = b2-a[i][n]; emax = (fabs(err) > emax) ? 
fabs(err):emax; // printf(" %10.3e %10.3e %10.3e %10.3e \n", x[i], a[i][n], b2, err); } printf("Emax= %10.3e\n", emax); } void readmat( int n, double a[n][RDIM]) { int i, j; for(i=0;i<n;i++){ for(j=0;j<n+1;j++) scanf("%le", &(a[i][j])); } } void randomsetmat( int n, int seed, double a[n][RDIM]) { long int i, j; srand48((long) seed); for(i=0;i<n;i++){ // printf("i=%d\n", i); for(j=0;j<n;j++) { double * ap = a[i]; ap[j]=drand48()-0.5; } // printf("n, i=%d\n", i); a[i][n]=drand48()-0.5;; } } void MP_randomsetmat(int nncol, int nnrow,double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int set_b, double b[]) { long int i, j; int seed; MP_message("enter MP_randomsetmat"); srandom(parms->seed*(MP_myprocid()+1)); for(i=0; i< MP_myprocid()+100; i++) seed = random(); srand48((long) seed); dprintf(9,"seed, nncol,nnrow, ncols, nrows = %d %d %d %d %d\n", seed,nncol, nnrow, controls->ncol, controls->nrow); for(i=0;i<controls->nrow;i++){ // printf("i=%d\n", i); for(j=0;j<controls->ncol;j++) { double * ap = a[i]; ap[j]=drand48()-0.5; // dprintf(9,"ap %d %d = %g\n", i,j, ap[j]); } // printf("n, i=%d\n", i); } for(i=0;i<controls->nrow;i++){ // b[i]=1; b[i]=drand48()-0.5; } MPI_Bcast(b, controls->nrow, MPI_DOUBLE, 0, controls->row_comm); if (set_b){ for(i=0;i<controls->nrow;i++){ a[i][controls->ncol]=b[i]; } } MP_message("end MP_randomsetmat"); } void calclocalnorm(int nncol, int nnrow,double a[nnrow][nncol], int nrow, int ncol, double ao[], double a1[]) { int i, j; // fprintf(stderr, "nncol=%d nnrow=%d ncol=%d nrow=%d\n", nncol, nnrow, ncol, nrow); for(j=0;j<nrow;j++) ao[j]=0; for(i=0;i<ncol;i++) a1[i]=0; for(j=0;j<nrow; j++){ for(i=0;i<ncol;i++) { double aa = fabs(a[j][i]); ao[j] += aa; a1[i] += aa; } } } double vnormi(double a[], int n) { double x = 0; int i; for(i=0;i<n;i++){ double y = fabs(a[i]); if(y > x) x = y; } return x; } void MP_calcnorm(int nncol, int nnrow,double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, double *norm1, double *norminf) { int i, j; int 
nrow = controls->nrow;
  int ncol = controls->ncol;
  MP_message("enter MP_calcnorm");
  double ao[nnrow];
  double aosum[nnrow];
  double a1[nncol];
  double a1sum[nncol];
  print_current_time("call calculocalnorm");
  calclocalnorm(nncol, nnrow, a, nrow, ncol, ao, a1);
  print_current_time("end calculocalnorm");
  // Sum partial row sums across a process row, partial column sums across
  // a process column, then take the global max of each to get the norms.
  MPI_Allreduce(ao,aosum, nrow, MPI_DOUBLE, MPI_SUM, controls->row_comm);
  MPI_Allreduce(a1,a1sum, ncol, MPI_DOUBLE, MPI_SUM, controls->col_comm);
  double aolocal = vnormi(aosum, nrow);
  double a1local = vnormi(a1sum, ncol);
  double aoglobal, a1global;
  MPI_Allreduce(&aolocal, &aoglobal, 1, MPI_DOUBLE, MPI_MAX, controls->col_comm);
  MPI_Allreduce(&a1local, &a1global, 1, MPI_DOUBLE, MPI_MAX, controls->col_comm);
  *norm1 = a1global;
  *norminf = aoglobal;
  MP_message("end MP_calcnorm");
}

// Global 1-norm and infinity norm of the distributed vector b
// (local length controls->nrow), reduced over the column communicator.
void MP_calcvnorm(double b[], PPARMS parms, PCONTROLS controls, double *norm1, double *norminf)
{
  int i, j;
  int nrow = controls->nrow;
  MP_message("enter MP_calcvnorm");
  double bo=0;
  double b1=0;
  for(i=0;i<nrow; i++)b1 += fabs(b[i]);
  bo = vnormi(b, nrow);
  double b1sum, boglobal;
  MPI_Allreduce(&b1,&b1sum, 1, MPI_DOUBLE, MPI_SUM, controls->col_comm);
  MPI_Allreduce(&bo, &boglobal, 1, MPI_DOUBLE, MPI_MAX, controls->col_comm);
  *norm1 = b1sum;
  *norminf = boglobal;
  MP_message("end MP_calcvnorm");
}

// Print the n x (n+1) bordered matrix (system + RHS column) to stdout.
void printmat( int n, double a[n][RDIM])
{
  int i, j;
  for(i=0;i<n;i++){
    printf("%3d: ", i);
    for(j=0;j<n+1;j++) printf(" %10.3e", a[i][j]);
    printf("\n");
  }
  printf("\n");
}

// Print a plain square n x n matrix to stdout.
void printsqmat( int n, double a[n][n])
{
  int i, j;
  for(i=0;i<n;i++){
    printf("%3d: ", i);
    for(j=0;j<n;j++) printf(" %10.3e", a[i][j]);
    printf("\n");
  }
  printf("\n");
}

// Back substitution on the upper-triangular factor left in a by lu():
// starts from the transformed RHS in column n and solves for b in place.
void backward_sub(int n,double a[n][RDIM], double b[])
{
  int i,j,k;
  for (i=0;i<n;i++)b[i] = a[i][n];
  for(j=n-2;j>=0;j--)
    for(k=j+1;k<n;k++)
      b[j] -= b[k]*a[j][k];
}

// Serial Gaussian elimination with partial pivoting on the bordered
// matrix a (RHS in column n), followed by back substitution into b.
// NOTE(review): calls printmat() unconditionally before the final solve —
// looks like leftover debug output; confirm before removing.
void lu( int n, double a[n][RDIM], double b[])
{
  int i, j, k;
  for(i=0;i<n-1;i++){
    // select pivot
    double amax = fabs(a[i][i]);
    int p=i;
    for(j=i+1;j<n;j++){
      if (fabs(a[j][i]) > amax){
        amax = fabs(a[j][i]);
        p = j;
      }
    }
    // exchange rows
    if (p != i){
      for(j=i;j<n+1;j++){
        double tmp = a[p][j];
        a[p][j] = a[i][j];
        a[i][j]=tmp;
      }
    }
    // normalize row i
    double ainv = 1.0/a[i][i];
    // fprintf(stderr,"%d %e\n", i, ainv);
    for(k=i+1;k<n+1;k++) a[i][k]*= ainv;
    // subtract row i from all lower rows
    for(j=i+1;j<n;j++){
      // fprintf(stderr,"j=%d \n",j);
      for(k=i+1;k<n+1;k++) a[j][k] -= a[j][i] * a[i][k];
    }
  }
  printmat(n,a);
  a[n-1][n] /= a[n-1][n-1];
  backward_sub(n,a,b);
}

// Find the global row index of the pivot (largest |a[i][col]|) for global
// column `current`.  Only ranks holding the current column scan their local
// rows and reduce with MP_max_and_maxloc; the winner's index is then
// broadcast along the row communicator
// (continued on the following source line).
int MP_find_pivot(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current)
{
  int index;
  // fprintf(stderr, "%d: hav_current_col = %d\n", MP_myprocid(),
  //         have_current_col(current, parms, controls));
  if (have_current_col(current, parms, controls)){
    int first=first_row(current, parms, controls);
    double fmax =0;
    int imax = -1;
    int i;
    int ii = local_colid(current, parms, controls);
    for(i=first; i< controls->nrow;i++){
      if (fabs(a[i][ii]) > fmax){
        fmax = fabs(a[i][ii]);
        imax = i;
      }
    }
    index = global_rowid(imax, parms, controls);// <- wrong!!
MP_max_and_maxloc(&fmax, &index, controls->col_comm);
  }
  // Broadcast the winning global row index along the process row; root is
  // the process column that owns `current`.
  MPI_Bcast(&index,sizeof(int),MPI_BYTE, pcolid(current,parms,controls), controls->row_comm);
  MPI_Barrier(MPI_COMM_WORLD);
  // fprintf(stderr,"MP find pivot returns %d\n", index);
  return index;
}

// Pivot search for the column-major (transposed) storage layout: matrix is
// a[nncol][nnrow], so local column ii is the contiguous row a[ii].
// Uses cblas_idamax on the contiguous column for speed.
// NOTE(review): unlike MP_find_pivot there is no row-communicator Bcast
// here, and `index` is returned uninitialized on ranks that do not hold the
// current column — presumably only participating ranks use the result;
// confirm against callers.
int MP_find_pivot_transposed(int nnrow, int nncol, double a[nncol][nnrow], PPARMS parms, PCONTROLS controls, int current)
{
  int index;
  // fprintf(stderr, "%d: hav_current_col = %d\n", MP_myprocid(),
  //         have_current_col(current, parms, controls));
  if (have_current_col(current, parms, controls)){
    int first=first_row(current, parms, controls);
    double fmax =0;
    int imax = -1;
    int i;
    int ii = local_colid(current, parms, controls)%nncol;
#if 0
    for(i=first; i< controls->nrow;i++){
      if (fabs(a[ii][i]) > fmax){
        fmax = fabs(a[ii][i]);
        imax = i;
      }
    }
#else
    if (controls->nrow-first > 0){
      // idamax returns the index of the max |value| relative to a[ii]+first.
      imax = cblas_idamax(controls->nrow-first, a[ii]+first, 1)+first;
      fmax = fabs(a[ii][imax]);
    }else{
      imax = 0;
      fmax = -1;
    }
#endif
    index = global_rowid(imax, parms, controls);// <- wrong!!
    // fprintf(stderr,"find_pivot local max, index =%10.3e, %d\n", fmax, index);
    MP_max_and_maxloc(&fmax, &index, controls->col_comm);
    // fprintf(stderr,"find_pivot g max, index =%10.3e, %d\n", fmax, index);
  }
  return index;
}

// Swap two full local rows of the row-major local block.
void local_row_exchange(int nnrow,int nncol,double a[nnrow][nncol], int row1,int row2)
{
  int i;
  double *ap1 = a[row1];
  double *ap2 = a[row2];
  for(i=0;i<nncol;i++){
    double tmp=ap1[i];
    ap1[i]=ap2[i];
    ap2[i]=tmp;
  }
}

// Swap columns [c1,c2) of two local rows (row-major layout).
void local_row_exchange_blocked(int nnrow,int nncol,double a[nnrow][nncol], int row1,int row2, int c1, int c2)
{
  int i;
  double *ap1 = a[row1];
  double *ap2 = a[row2];
  for(i=c1;i<c2;i++){
    double tmp=ap1[i];
    ap1[i]=ap2[i];
    ap2[i]=tmp;
  }
}

// Transposed-layout variant: a is a[nncol][nnrow], so swapping "rows"
// row1/row2 touches element [i][row1]/[i][row2] of each stored column i
// in [c1,c2).  (ap1/ap2 are set but unused in this variant.)
void local_row_exchange_blocked_transposed(int nnrow,int nncol, double a[nncol][nnrow], int row1,int row2, int c1, int c2)
{
  int i;
  double *ap1 = a[row1];
  double *ap2 = a[row2];
  for(i=c1;i<c2;i++){
    double tmp=a[i][row1];
    a[i][row1]=a[i][row2];
    a[i][row2]=tmp;
  }
}

// Exchange local row `myrow` with the matching row held by process
// `procswap` in the same process column, via a single MPI_Sendrecv.
void MP_swap_row_ptop(int nnrow, int nncol, double a[nnrow][nncol], int myrow, int procswap, PCONTROLS controls)
{
  double atmp[nncol];
  int i;
  MPI_Status mpstatus;
  for(i=0;i<nncol;i++)atmp[i]=a[myrow][i];
  MPI_Sendrecv(atmp, nncol, MPI_DOUBLE, procswap, MPSWAPTAG, a[myrow],nncol, MPI_DOUBLE, procswap, MPSWAPTAG, controls->col_comm, &mpstatus);
}

// Blocked variant: exchange only columns [c1,c2) of local row `myrow`
// with process `procswap`.  No-op when the column range is empty.
void MP_swap_row_ptop_blocked(int nnrow, int nncol, double a[nnrow][nncol], int myrow, int procswap, PCONTROLS controls, int c1, int c2)
{
  double atmp[nncol];
  int i;
  MPI_Status mpstatus;
  if (c2 > c1){
    int ndata = c2-c1;
    for(i=0;i<ndata;i++)atmp[i]=a[myrow][i+c1];
    MPI_Sendrecv(atmp, ndata, MPI_DOUBLE, procswap, MPSWAPTAG, a[myrow]+c1,ndata, MPI_DOUBLE, procswap, MPSWAPTAG, controls->col_comm, &mpstatus);
  }
}

// Transposed-layout variant of the blocked point-to-point row swap:
// gathers the strided row into atmp, exchanges, then scatters the received
// values back (continued on the following source line).
void MP_swap_row_ptop_blocked_transposed(int nnrow, int nncol, double a[nncol][nnrow], int myrow, int procswap, PCONTROLS controls, int c1, int c2)
{
  double atmp[nncol];
  double atmp2[nncol];
  int i;
  MPI_Status mpstatus;
  if (c2 > c1){
    int ndata = c2-c1;
    for(i=0;i<ndata;i++)atmp[i]=a[i+c1][myrow];
MPI_Sendrecv(atmp, ndata, MPI_DOUBLE, procswap, MPSWAPTAG, atmp2,ndata, MPI_DOUBLE, procswap, MPSWAPTAG, controls->col_comm, &mpstatus);
    for(i=0;i<ndata;i++)a[i+c1][myrow]=atmp2[i];
  }
}

// Swap global rows `current` and `pivot` of the distributed matrix.
// Three cases: both rows on the same process (local swap), or this process
// owns one of the two rows (point-to-point exchange with the other owner).
// Processes owning neither row do nothing.
int MP_swap_rows(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int pivot)
{
  int current_proc;
  int current_lloc;
  int pivot_proc;
  int pivot_lloc;
  convert_global_index_to_local_rows(current, &current_proc, &current_lloc, parms, controls);
  convert_global_index_to_local_rows(pivot, &pivot_proc, &pivot_lloc, parms, controls);
  if (current_proc == pivot_proc){
    if (current_proc == controls->rank_in_col)
      local_row_exchange(nnrow,nncol,a,current_lloc,pivot_lloc);
  }else if (current_proc == controls->rank_in_col){
    MP_swap_row_ptop(nnrow,nncol,a,current_lloc,pivot_proc, controls);
  }else if (pivot_proc == controls->rank_in_col){
    MP_swap_row_ptop(nnrow,nncol,a,pivot_lloc,current_proc, controls);
  }
}

// Same as MP_swap_rows but restricted to local columns [c1,c2).
int MP_swap_rows_blocked(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int pivot, int c1, int c2)
{
  int current_proc;
  int current_lloc;
  int pivot_proc;
  int pivot_lloc;
  convert_global_index_to_local_rows(current, &current_proc, &current_lloc, parms, controls);
  convert_global_index_to_local_rows(pivot, &pivot_proc, &pivot_lloc, parms, controls);
  if (current_proc == pivot_proc){
    if (current_proc == controls->rank_in_col)
      local_row_exchange_blocked(nnrow,nncol,a,current_lloc, pivot_lloc,c1,c2);
  }else if (current_proc == controls->rank_in_col){
    MP_swap_row_ptop_blocked(nnrow,nncol,a,current_lloc,pivot_proc, controls,c1,c2);
  }else if (pivot_proc == controls->rank_in_col){
    MP_swap_row_ptop_blocked(nnrow,nncol,a,pivot_lloc,current_proc, controls,c1,c2);
  }
}

// Blocked row swap for the transposed (column-major) local layout.
int MP_swap_rows_blocked_transposed(int nnrow, int nncol, double a[nncol][nnrow], PPARMS parms, PCONTROLS controls, int current, int pivot, int c1, int c2)
{
  int current_proc;
  int current_lloc;
  int pivot_proc;
  int pivot_lloc;
  convert_global_index_to_local_rows(current, &current_proc, &current_lloc, parms, controls);
  convert_global_index_to_local_rows(pivot, &pivot_proc, &pivot_lloc, parms, controls);
  // dprintf(9,"swap_rows_transposed cp,pp,rank_in_col=%d %d %d\n",
  //         current_proc, pivot_proc,controls->rank_in_col);
  if (current_proc == pivot_proc){
    if (current_proc == controls->rank_in_col)
      local_row_exchange_blocked_transposed(nnrow,nncol,a,current_lloc, pivot_lloc,c1,c2);
  }else if (current_proc == controls->rank_in_col){
    MP_swap_row_ptop_blocked_transposed(nnrow,nncol,a, current_lloc,pivot_proc, controls,c1,c2);
  }else if (pivot_proc == controls->rank_in_col){
    MP_swap_row_ptop_blocked_transposed(nnrow,nncol,a,pivot_lloc, current_proc, controls,c1,c2);
  }
  // dprintf(9,"swap_rows_transposed end\n");
}

// Scale the pivot row: the row's owner broadcasts the pivot element along
// its process row, then multiplies its whole local row by 1/pivot.
// NOTE(review): the Bcast executes only on the owning process row, so the
// matching call on other ranks must come from elsewhere — confirm.
int MP_scale_row(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current)
{
  int current_prow;
  int current_lrow;
  int current_pcol;
  int current_lcol;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  if (current_prow == controls->rank_in_col){
    double scale = a[current_lrow][current_lcol];
    int i;
    MPI_Bcast(&scale,sizeof(double),MPI_BYTE, current_pcol, controls->row_comm);
    scale = 1.0/scale;
    for(i=0;i<nncol;i++)a[current_lrow][i] *= scale;
  }
}

// Blocked pivot-row scaling over local columns [c1,c2); the pivot element
// itself (on the owning process column) is left unscaled
// (continued on the following source line).
int MP_scale_row_blocked(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int c1, int c2, int singlecol)
{
  int current_prow;
  int current_lrow;
  int current_pcol;
  int current_lcol;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  if (current_prow == controls->rank_in_col){
    double scale = a[current_lrow][current_lcol];
    int i;
    if (!
singlecol){
      // With multiple process columns, fetch the pivot value from its owner.
      MPI_Bcast(&scale,sizeof(double),MPI_BYTE, current_pcol, controls->row_comm);
    }
    scale = 1.0/scale;
    if (controls->rank_in_row != current_pcol){
      for(i=c1;i<c2;i++)a[current_lrow][i] *= scale;
    }else{
      // Owner of the pivot column: skip the pivot element itself.
      for(i=c1;i<c2;i++)
        if (i != current_lcol) a[current_lrow][i] *= scale;
    }
  }
}

// Like MP_scale_row_blocked but with a precomputed reciprocal `scaleval`;
// no communication, scales every element of [c1,c2) including the pivot.
int MP_scale_row_blocked_using_scale(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int c1, int c2, int singlecol,double scaleval)
{
  int current_prow;
  int current_lrow;
  int current_pcol;
  int current_lcol;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  if (current_prow == controls->rank_in_col){
    int i;
    for(i=c1;i<c2;i++)a[current_lrow][i] *= scaleval;
  }
}

// Reciprocal of the pivot element for global index `current`.
// NOTE(review): reads a[current_lrow][current_lcol] unconditionally —
// presumably only called on the owning rank; confirm against callers.
double scaleval(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current)
{
  int current_prow;
  int current_lrow;
  int current_pcol;
  int current_lcol;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  double scale = a[current_lrow][current_lcol];
  return 1.0/scale;
}

// Fill scale[0..nb) with the reciprocals of the nb diagonal elements
// starting at global index `current` (owning process row only).
int MP_construct_scalevector(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nb, double scale[])
{
  int current_prow;
  int current_lrow;
  int current_pcol;
  int current_lcol;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  if (current_prow == controls->rank_in_col){
    int i;
    for(i=0;i<nb;i++){
      scale[i]= 1.0/ a[current_lrow+i][current_lcol+i];
    }
  }
}

// Pivot-row scaling for the transposed (column-major) layout over stored
// columns [c1,c2); pivot element left unscaled on its owning column.
int MP_scale_row_blocked_transposed(int nnrow, int nncol, double a[nncol][nnrow], PPARMS parms, PCONTROLS controls, int current, int c1, int c2)
{
  int current_prow;
  int current_lrow;
  int current_pcol;
  int
current_lcol;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  current_lcol %= nncol;
  if (current_prow == controls->rank_in_col){
    double scale = a[current_lcol][current_lrow];
    int i;
    scale = 1.0/scale;
    if (controls->rank_in_row != current_pcol){
      for(i=c1;i<c2;i++)a[i][current_lrow] *= scale;
    }else{
      for(i=c1;i<c2;i++)
        if (i != current_lcol) a[i][current_lrow] *= scale;
    }
  }
}

// Rank-1 trailing-submatrix update for elimination step `current`:
// the pivot row is broadcast down the process column, the pivot column
// across the process row, then a[j][i] -= acol[j]*arow[i] on the local
// trailing block.  startrow/startcol locate the first local element of the
// trailing submatrix given the nb-cyclic distribution.
int MP_update_single(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current)
{
  int current_prow;
  int current_lrow;
  int current_pcol;
  int current_lcol;
  int i,j;
  double arow[nncol];
  double acol[nnrow];
  int ncol = controls->ncol+1;
  int nrow = controls->nrow;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  int startcol;
  int startrow;
  int nb = parms->nb;
  if (current_prow > controls->rank_in_col){
    startrow = (current_lrow/nb +1)*nb;
  }else if (current_prow == controls->rank_in_col){
    startrow =current_lrow+1;
  }else{
    startrow = (current_lrow/nb)*nb;
  }
  if (current_pcol > controls->rank_in_row){
    startcol = (current_lcol/nb +1)*nb;
  }else if (current_pcol == controls->rank_in_row){
    startcol =current_lcol+1;
  }else{
    startcol = (current_lcol/nb)*nb;
  }
  if (current_pcol == controls->rank_in_row){
    for (i=0;i<nrow-startrow;i++)acol[i]=a[i+startrow][current_lcol];
  }
  if (current_prow == controls->rank_in_col){
    for (i=0;i<ncol-startcol;i++)arow[i]=a[current_lrow][i+startcol];
  }
  MPI_Bcast(arow,sizeof(double)*(ncol-startcol),MPI_BYTE, current_prow, controls->col_comm);
  MPI_Bcast(acol,sizeof(double)*(nrow-startrow),MPI_BYTE, current_pcol, controls->row_comm);
  for (i=startcol;i<ncol;i++){
    for (j=startrow;j<nrow;j++){
      a[j][i] -= acol[j-startrow]*arow[i-startcol];
    }
  }
}

// Blocked variant of the rank-1 update, restricted to local columns
// [c1,c2) (continued on the following source line).
int MP_update_single_blocked(int nnrow, int nncol,
double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int c1, int c2)
{
  int current_prow;
  int current_lrow;
  int current_pcol;
  int current_lcol;
  int i,j;
  double arow[nncol];
  double acol[nnrow];
  int ncol = controls->ncol+1;
  int nrow = controls->nrow;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  int startrow;
  int nb = parms->nb;
  if (current_prow > controls->rank_in_col){
    startrow = (current_lrow/nb +1)*nb;
  }else if (current_prow == controls->rank_in_col){
    startrow =current_lrow+1;
  }else{
    startrow = (current_lrow/nb)*nb;
  }
  if (current_pcol == controls->rank_in_row){
    for (i=0;i<nrow-startrow;i++)acol[i]=a[i+startrow][current_lcol];
  }
  if (current_prow == controls->rank_in_col){
    for (i=0;i<c2-c1;i++)arow[i]=a[current_lrow][i+c1];
  }
  MPI_Bcast(arow,sizeof(double)*(c2-c1),MPI_BYTE, current_prow, controls->col_comm);
  MPI_Bcast(acol,sizeof(double)*(nrow-startrow),MPI_BYTE, current_pcol, controls->row_comm);
  for (i=c1;i<c2;i++){
    for (j=startrow;j<nrow;j++){
      a[j][i] -= acol[j-startrow]*arow[i-c1];
    }
  }
}

// Scalar AXPY-like kernel: al[i] -= ar[i]*s for i in [r0,r1).
static void vsmulandsub0(int r0, int r1, double al[], double ar[], double s)
{
  int i;
  for (i=r0;i<r1;i++)al[i] -= ar[i]*s;
}

// SSE-vectorized version of vsmulandsub0: peels to 8-element alignment at
// both ends, then processes 4 v2df (8 doubles) per iteration with
// prefetching.  Falls back to the scalar kernel for short ranges.
// NOTE(review): the peel loops align r0/r1 to multiples of 8 elements, not
// to 16-byte addresses — assumes al/ar are suitably aligned; confirm.
static void vsmulandsub(int r0, int r1, double al[], double ar[], double s)
{
  int j;
  if (r1 - r0 < 16){
    vsmulandsub0(r0, r1, al, ar, s);
    return;
  }
  while (r0 & 7){
    al[r0] -= ar[r0]*s;
    r0++;
  }
  while (r1 & 7){
    al[r1-1] -= ar[r1-1]*s;
    r1--;
  }
  v2df * arv = (v2df*) (ar+r0);
  v2df * alv = (v2df*) (al+r0);
  v2df ss = (v2df){s,s};
  // for(j=r0;j<r1;j++)
  //   al[j] -= ar[j]*s;
  for(j=0;j<(r1-r0)/2;j+=4){
    alv[j] -= arv[j]*ss;
    alv[j+1] -= arv[j+1]*ss;
    alv[j+2] -= arv[j+2]*ss;
    alv[j+3] -= arv[j+3]*ss;
    __builtin_prefetch(alv+j+32,1,3);
    __builtin_prefetch(arv+j+32,0,0);
  }
}

// Rank-1 trailing update for the transposed (column-major) local layout.
// Caller supplies arow/acol scratch buffers; the pivot row is broadcast
// down the process column, then each stored column k in [c1,c2) gets
// a[k][...] -= acol[...]*arow[k-c1] via the vectorized kernel
// (continued on the following source line).
int MP_update_single_blocked_transposed(int nnrow, int nncol, double a[nncol][nnrow], PPARMS parms, PCONTROLS controls, int current, int c1, int c2,
 double * arow, double * acol)
{
  int current_prow;
  int current_lrow;
  int current_pcol;
  int current_lcol;
  int i,j;
  int ncol = controls->ncol+1;
  int nrow = controls->nrow;
  // dprintf(9,"enter MP_update_single_blocked_transposed\n");
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  current_lcol %= nncol;
  int startrow;
  int nb = parms->nb;
  if (current_prow > controls->rank_in_col){
    startrow = (current_lrow/nb +1)*nb;
  }else if (current_prow == controls->rank_in_col){
    startrow =current_lrow+1;
  }else{
    startrow = (current_lrow/nb)*nb;
  }
  if (current_pcol == controls->rank_in_row){
    for (i=0;i<nrow-startrow;i++)acol[i+startrow]=a[current_lcol][i+startrow];
  }
  // if (current_prow == controls->rank_in_col){
  for (i=0;i<c2-c1;i++)arow[i]=a[i+c1][current_lrow];
  // }
  // Since the value of current_prow is broadcasted, there
  // is no need for other processors to execute the above
  // loop. However, OpenMPI complains....
  // dprintf(9," MP_update_single_blocked_transposed bcast %d\n",c2-c1);
  MPI_Bcast(arow,sizeof(double)*(c2-c1),MPI_BYTE, current_prow, controls->col_comm);
  // dprintf(9," MP_update_single_blocked_transposed end bcast %d\n",c2-c1);
  // the following way does not look optimal yet...
int ii;
  // Split the row range [startrow,nrow) into 4 chunks, one per OpenMP
  // thread; the last chunk absorbs the remainder.
#pragma omp parallel for private(ii) schedule(static)
  for(ii=0;ii<4;ii++){
    int k;
    int nr = (nrow-startrow)/4;
    int i1 = nr*ii+startrow;
    int i2 = i1+nr;
    if (ii==3) i2 =nrow;
    for (k=c1;k<c2;k++){
      vsmulandsub(i1, i2, a[k],acol,arow[k-c1]);
    }
  }
  // dprintf(9,"end MP_update_single_blocked_transposed\n");
}

// Rank-1 trailing update where the start row is derived from a separate
// global index `global_startrow` rather than from `current` itself.
int MP_update_single_blocked_global(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int c1, int c2, int global_startrow)
{
  int current_prow;
  int current_lrow;
  int gs_prow;
  int gs_lrow;
  int current_pcol;
  int current_lcol;
  int i,j;
  double arow[nncol];
  double acol[nnrow];
  int ncol = controls->ncol+1;
  int nrow = controls->nrow;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  int startrow;
  int nb = parms->nb;
  if (gs_prow > controls->rank_in_col){
    startrow = (gs_lrow/nb+1)*nb;
  }else if (gs_prow == controls->rank_in_col){
    startrow =gs_lrow;
  }else{
    startrow = (gs_lrow/nb)*nb;
  }
  if (current_pcol == controls->rank_in_row){
    for (i=0;i<nrow-startrow;i++)acol[i]=a[i+startrow][current_lcol];
  }
  if (current_prow == controls->rank_in_col){
    for (i=0;i<c2-c1;i++)arow[i]=a[current_lrow][i+c1];
  }
  MPI_Bcast(arow,sizeof(double)*(c2-c1),MPI_BYTE, current_prow, controls->col_comm);
  MPI_Bcast(acol,sizeof(double)*(nrow-startrow),MPI_BYTE, current_pcol, controls->row_comm);
  for (i=c1;i<c2;i++){
    for (j=startrow;j<nrow;j++){
      a[j][i] -= acol[j-startrow]*arow[i-c1];
    }
  }
}

// Extract the nbin-wide L panel below `global_startrow` from the owning
// process column (via copysubmat) and broadcast it along the process row.
// Returns the number of local rows in the panel.
int MP_process_lmat(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, int global_startrow, double acol[][nbin])
{
  int current_prow;
  int current_lrow;
  int gs_prow;
  int gs_lrow;
  int current_pcol;
  int current_lcol;
  int i,j,k;
  int ncol = controls->ncol+1;
  int nrow = controls->nrow;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  int startrow;
  int nb = parms->nb;
  if (gs_prow > controls->rank_in_col){
    startrow = (gs_lrow/nb+1)*nb;
  }else if (gs_prow == controls->rank_in_col){
    startrow =gs_lrow;
  }else{
    startrow = (gs_lrow/nb)*nb;
  }
  if (current_pcol == controls->rank_in_row){
    copysubmat(nncol, (double(*)[])(a[startrow]+current_lcol), nbin, acol, nrow-startrow, nbin);
  }
  MPI_Bcast(acol,sizeof(double)*(nrow-startrow)*nbin,MPI_BYTE, current_pcol, controls->row_comm);
  return nrow-startrow;
}

// Like MP_process_lmat but only copies the local L panel into acol
// (no broadcast).  Returns the number of local rows in the panel.
int MP_prepare_lmat(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, int global_startrow, double acol[][nbin])
{
  int current_prow;
  int current_lrow;
  int gs_prow;
  int gs_lrow;
  int current_pcol;
  int current_lcol;
  int i,j,k;
  int ncol = controls->ncol+1;
  int nrow = controls->nrow;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  int startrow;
  int nb = parms->nb;
  if (gs_prow > controls->rank_in_col){
    startrow = (gs_lrow/nb+1)*nb;
  }else if (gs_prow == controls->rank_in_col){
    startrow =gs_lrow;
  }else{
    startrow = (gs_lrow/nb)*nb;
  }
  if (current_pcol == controls->rank_in_row){
    for (i=0;i<nrow-startrow;i++)
      for(j=0;j<nbin;j++)
        acol[i][j]=a[i+startrow][current_lcol+j];
  }
  return nrow-startrow;
}

// Rank-nbin (blocked) trailing update using a pre-broadcast L panel in
// acol: broadcast the U panel rows, then a[startrow:,c1:c2) -= acol*arow
// via mydgemm (continued on the following source line).
int MP_update_multiple_blocked_global_using_lmat(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, int c1, int c2, int global_startrow, double acol[][nbin])
{
  int current_prow;
  int current_lrow;
  int gs_prow;
  int gs_lrow;
  int current_pcol;
int current_lcol;
  int i,j,k;
  double arow[nbin][c2-c1];
  int ncol = controls->ncol+1;
  int nrow = controls->nrow;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  int startrow;
  int nb = parms->nb;
  if (gs_prow > controls->rank_in_col){
    startrow = (gs_lrow/nb+1)*nb;
  }else if (gs_prow == controls->rank_in_col){
    startrow =gs_lrow;
  }else{
    startrow = (gs_lrow/nb)*nb;
  }
  if (current_prow == controls->rank_in_col){
    for (i=0;i<c2-c1;i++)
      for(j=0;j<nbin;j++)
        arow[j][i]=a[current_lrow+j][i+c1];
  }
  MPI_Bcast(arow,sizeof(double)*(c2-c1)*nbin,MPI_BYTE, current_prow, controls->col_comm);
  mydgemm(nrow-startrow, c2-c1, nbin, -1.0, &(acol[0][0]), nbin, &(arow[0][0]), c2-c1, 1.0, &(a[startrow][c1]), nncol );
}

// Copy the nbin x (c2-c1) U panel out of the owning process row and
// broadcast it down the process column into arow.
int MP_bcast_umat(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, int c1, int c2, int global_startrow, double arow[nbin][c2-c1])
{
  int current_prow;
  int current_lrow;
  int gs_prow;
  int gs_lrow;
  int current_pcol;
  int current_lcol;
  int i,j,k;
  int ncol = controls->ncol+1;
  int nrow = controls->nrow;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  int startrow;
  int nb = parms->nb;
  if (gs_prow > controls->rank_in_col){
    startrow = (gs_lrow/nb+1)*nb;
  }else if (gs_prow == controls->rank_in_col){
    startrow =gs_lrow;
  }else{
    startrow = (gs_lrow/nb)*nb;
  }
  if (current_prow == controls->rank_in_col){
    copysubmat(nncol, (double(*)[])(a[current_lrow]+c1), c2-c1, arow, nbin, c2-c1);
  }
  MP_mybcast(arow,sizeof(double)*(c2-c1)*nbin, current_prow, controls->col_comm);
}

// Compute the first local row index of the trailing submatrix for this
// process, given the global start row and the nb-cyclic row distribution.
int startrow_for_update(PPARMS parms, PCONTROLS controls, int current, int global_startrow)
{
  int current_prow;
  int current_lrow;
  int gs_prow;
  int gs_lrow;
  int current_pcol;
  int current_lcol;
  int i,j,k;
  int ncol = controls->ncol+1;
  int nrow = controls->nrow;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  int startrow;
  int nb = parms->nb;
  if (gs_prow > controls->rank_in_col){
    startrow = (gs_lrow/nb+1)*nb;
  }else if (gs_prow == controls->rank_in_col){
    startrow =gs_lrow;
  }else{
    startrow = (gs_lrow/nb)*nb;
  }
  return startrow;
}

// Purely local blocked update (no communication): assumes acol (L panel)
// and arow (U panel) are already in place, performs the timed DGEMM
// a[startrow:,c1:c2) -= acol*arow.
int MP_update_using_lu(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, int c1, int c2, int global_startrow, double acol[][nbin], double arow[nbin][c2-c1])
{
  int current_prow;
  int current_lrow;
  int gs_prow;
  int gs_lrow;
  int current_pcol;
  int current_lcol;
  int i,j,k;
  int ncol = controls->ncol+1;
  int nrow = controls->nrow;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  int startrow;
  int nb = parms->nb;
  if (gs_prow > controls->rank_in_col){
    startrow = (gs_lrow/nb+1)*nb;
  }else if (gs_prow == controls->rank_in_col){
    startrow =gs_lrow;
  }else{
    startrow = (gs_lrow/nb)*nb;
  }
  BEGIN_TIMER(timer);
  mydgemm(nrow-startrow, c2-c1, nbin, -1.0, &(acol[0][0]), nbin, &(arow[0][0]), c2-c1, 1.0, &(a[startrow][c1]), nncol );
  END_TIMER(timer,19,((double)(nrow-startrow))*(c2-c1)*nbin*2);
}

// Full blocked trailing update: gathers and broadcasts both the L panel
// (along the row, skipped when singlecol) and the U panel (down the
// column), then one DGEMM (continued on the following source line).
int MP_update_multiple_blocked_global(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, int c1, int c2, int global_startrow, int singlecol)
{
  int current_prow;
  int
current_lrow;
  int gs_prow;
  int gs_lrow;
  int current_pcol;
  int current_lcol;
  int i,j,k;
  double arow[nbin][c2-c1];
  double acol[nnrow][nbin];
  int ncol = controls->ncol+1;
  int nrow = controls->nrow;
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms, controls);
  int startrow;
  int nb = parms->nb;
  if (gs_prow > controls->rank_in_col){
    startrow = (gs_lrow/nb+1)*nb;
  }else if (gs_prow == controls->rank_in_col){
    startrow =gs_lrow;
  }else{
    startrow = (gs_lrow/nb)*nb;
  }
  if (current_pcol == controls->rank_in_row){
    for (i=0;i<nrow-startrow;i++)
      for(j=0;j<nbin;j++)
        acol[i][j]=a[i+startrow][current_lcol+j];
  }
  if (! singlecol){
    MPI_Bcast(acol,sizeof(double)*(nrow-startrow)*nbin,MPI_BYTE, current_pcol, controls->row_comm);
  }
  if (current_prow == controls->rank_in_col){
    for (i=0;i<c2-c1;i++)
      for(j=0;j<nbin;j++)
        arow[j][i]=a[current_lrow+j][i+c1];
  }
  MPI_Bcast(arow,sizeof(double)*(c2-c1)*nbin,MPI_BYTE, current_prow, controls->col_comm);
  mydgemm(nrow-startrow, c2-c1, nbin, -1.0, &(acol[0][0]), nbin, &(arow[0][0]), c2-c1, 1.0, &(a[startrow][c1]), nncol );
}

// Variant taking a caller-provided acol buffer; uses copysubmat for the
// panel copies, times the communication phase and the DGEMM separately
// (timer slot depends on nbin).
int MP_update_multiple_blocked_global_withacol(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int current, int nbin, double acol[nnrow][nbin], int c1, int c2, int global_startrow, int singlecol)
{
  int current_prow;
  int current_lrow;
  int gs_prow;
  int gs_lrow;
  int current_pcol;
  int current_lcol;
  int i,j,k;
  double arow[nbin][c2-c1] __attribute__((aligned(128)));
  int ncol = controls->ncol+1;
  int nrow = controls->nrow;
  BEGIN_TIMER(timer0);
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  convert_global_index_to_local_rows(global_startrow, &gs_prow, &gs_lrow, parms, controls);
  convert_global_index_to_local_cols(current, &current_pcol, &current_lcol, parms,
 controls);
  int startrow;
  int nb = parms->nb;
  if (gs_prow > controls->rank_in_col){
    startrow = (gs_lrow/nb+1)*nb;
  }else if (gs_prow == controls->rank_in_col){
    startrow =gs_lrow;
  }else{
    startrow = (gs_lrow/nb)*nb;
  }
  if (current_pcol == controls->rank_in_row){
    copysubmat(nncol, (double(*)[])(a[startrow]+current_lcol), nbin, acol, nrow-startrow, nbin);
  }
  if (! singlecol){
    MPI_Bcast(acol,sizeof(double)*(nrow-startrow)*nbin,MPI_BYTE, current_pcol, controls->row_comm);
  }
  if (current_prow == controls->rank_in_col){
    copysubmat(nncol, (double(*)[])(a[current_lrow]+c1), c2-c1, arow, nbin,c2-c1);
  }
  MPI_Bcast(arow,sizeof(double)*(c2-c1)*nbin,MPI_BYTE, current_prow, controls->col_comm);
  END_TIMER(timer0,27,((double)(nrow-startrow))*(c2-c1));
  BEGIN_TIMER(timer);
  mydgemm(nrow-startrow, c2-c1, nbin, -1.0, &(acol[0][0]), nbin, &(arow[0][0]), c2-c1, 1.0, &(a[startrow][c1]), nncol );
  // Attribute the DGEMM time to a per-block-size timer slot.
  if (nbin == 8){
    END_TIMER(timer,13,((double)(nrow-startrow))*(c2-c1)*nbin*2);
  }else if (nbin == 16){
    END_TIMER(timer,20,((double)(nrow-startrow))*(c2-c1)*nbin*2);
  }else if (nbin == 32){
    END_TIMER(timer,21,((double)(nrow-startrow))*(c2-c1)*nbin*2);
  }else if (nbin == 64){
    END_TIMER(timer,22,((double)(nrow-startrow))*(c2-c1)*nbin*2);
  }else if (nbin == 128){
    END_TIMER(timer,23,((double)(nrow-startrow))*(c2-c1)*nbin*2);
  }
  END_TIMER(timer,26,((double)(nrow-startrow))*(c2-c1)*nbin*2);
}

// Forward substitution applying the unit-lower-triangular factor stored in
// a to the m x m block b, SSE-vectorized two columns at a time; the odd
// trailing element is handled separately.
static void MP_solve_triangle_for_unit_mat_internal(int nncol, double a[][nncol], int nb, double b[][nb], int m)
{
  int i,ii,j,k;
  for(ii=0;ii<m;ii++){
    for(j=ii+1;j<m;j++){
      v2df acopy = (v2df){-a[j][ii],-a[j][ii]};
      v2df* src = (v2df*)b[ii];
      v2df* dest = (v2df*)b[j];
      for(k=0;k<j/2;k++)dest[k] += acopy*src[k];
      if(j&1) b[j][j-1] -= a[j][ii]*b[ii][j-1];
    }
  }
}

// OpenMP-parallel version of the kernel above; rows j of one elimination
// step are independent, so the inner loop is parallelized
// (continued on the following source line).
static void MP_solve_triangle_for_unit_mat_internal_omp(int nncol, double a[][nncol], int nb, double b[][nb], int m)
{
  int i,ii,j;
  for(ii=0;ii<m;ii++){
#pragma omp parallel for private(j)
    for(j=ii+1;j<m;j++){
      int k;
      v2df acopy = (v2df){-a[j][ii],-a[j][ii]};
      v2df* src =
(v2df*)b[ii];
      v2df* dest = (v2df*)b[j];
      for(k=0;k<j/2;k++)dest[k] += acopy*src[k];
      if(j&1) b[j][j-1] -= a[j][ii]*b[ii][j-1];
    }
  }
}

void MP_solve_triangle_for_unit_mat(int nncol, double a[][nncol], int nb, double b[][nb], int m);

// Recursive blocked inversion of the unit-lower-triangular m x m block of
// a into b: solve the top-left half, update the bottom-left with DGEMM,
// invert the bottom-right half into bwork, recombine.  Falls back to the
// direct kernel below m=128.
static void MP_solve_triangle_for_unit_mat_recursive(int nncol, double a[][nncol], int nb, double b[][nb], int m)
{
  int i,ii,j,k;
  if (m < 128){
    MP_solve_triangle_for_unit_mat_internal(nncol, a, nb, b,m);
    return;
  }
  const int mhalf = m/2;
  MP_solve_triangle_for_unit_mat_recursive(nncol, a, nb, b,mhalf);
  mydgemm( mhalf, mhalf, mhalf, -1.0, &(a[mhalf][0]), nncol, &(b[0][0]), nb, 1.0, &(b[mhalf][0]),nb );
  double bwork[mhalf][mhalf] __attribute__((aligned(128)));
  double bwork2[mhalf][mhalf] __attribute__((aligned(128)));
  for (j=0;j<mhalf;j++)
    for (k=0;k<mhalf;k++)bwork[j][k]=0.0;
  for (j=0;j<mhalf;j++)bwork[j][j]=1.0;
  MP_solve_triangle_for_unit_mat_recursive(nncol, (double(*)[])(&a[mhalf][mhalf]), mhalf, bwork,mhalf);
  for(i=0;i<mhalf;i++)
    for(j=0;j<mhalf;j++)
      bwork2[i][j]=b[i+mhalf][j];
  mydgemm(mhalf, mhalf, mhalf, 1.0, (double*)bwork,mhalf, (double*)bwork2, mhalf, 0.0, &(b[mhalf][0]),nb );
  for (j=0;j<mhalf;j++)
    for (k=0;k<j+1;k++)b[mhalf+j][mhalf+k]=bwork[j][k];
}

// Public entry: initialize b to the identity, then invert the
// unit-lower-triangular block of a into b (timed).
void MP_solve_triangle_for_unit_mat(int nncol, double a[][nncol], int nb, double b[nb][nb], int m)
{
  int ii,j,k;
  BEGIN_TIMER(timer);
  for (j=0;j<nb;j++)
    for (k=0;k<nb;k++)b[j][k]=0.0;
  for (j=0;j<nb;j++)b[j][j]=1.0;
  MP_solve_triangle_for_unit_mat_recursive(nncol, (double(*)[]) (&a[0][0]), nb, b, m);
  END_TIMER(timer,35,((double)(nb))*nb*nb);
}

// Solve L * X = A for the ncols-wide panel in place: invert the
// unit-triangular factor acol into b, then a = b * a via DGEMM.
int MP_solve_triangle(int nncol, double a[][nncol], int ncols, int m, double acol[m][m])
{
  int i,ii,j,k;
  // current =ii
  // c0=i+m
  // c1=iend
  // r0=ii+1
  // r1 = i+m
  double b[m][m] __attribute__((aligned(128)));
  double awork[m][ncols] __attribute__((aligned(128)));
  MP_solve_triangle_for_unit_mat(m,acol,m,b,m);
  for(j=0;j<m;j++){
    for (k=0;k<ncols;k++){
      awork[j][k]=a[j][k];
      a[j][k]=0;
    }
  }
#if 1
  mydgemm(m, ncols, m, 1.0, &(b[0][0]), m, &(awork[0][0]), ncols, 0.0, &(a[0][0]), nncol );
#else
  for (k=0;k<ncols;k++){
    for(j=0;j<m;j++)
      for(ii=0;ii<m;ii++)
        a[j][k]+= b[j][ii]*awork[ii][k];
  }
#endif
}

// Same panel solve, but with the triangular inverse b already computed.
// For wide panels the copy-out uses copysubmat; zerov is unused.
int MP_solve_triangle_using_inverse(int nncol, double a[][nncol], int ncols, int m, double b[m][m])
{
  int i,ii,j,k;
  double awork[m][ncols] __attribute__((aligned(128)));
  v2df zerov=(v2df){0.0,0.0};
  if (ncols<32){
    for(j=0;j<m;j++){
      for (k=0;k<ncols;k++){
        awork[j][k]=a[j][k];
        a[j][k]=0;
      }
    }
  }else{
    copysubmat(nncol,a,ncols,awork,m,ncols);
  }
  mydgemm(m, ncols, m, 1.0, &(b[0][0]), m, &(awork[0][0]), ncols, 0.0, &(a[0][0]), nncol );
}

// Old, unsplit version of the L*D computation: broadcast the diagonal
// block b down the process column, then a = a_old * b via DGEMM.
int MP_calculate_ld_old(int m, double a[][m], int ncols, double b[m][m], int current, PCONTROLS controls, PPARMS parms)
{
  int i,ii,j,k;
  int current_prow, current_lrow;
  double awork[ncols][m];
  for(j=0;j<ncols;j++){
    for (k=0;k<m;k++){
      awork[j][k]=a[j][k];
      a[j][k]=0;
    }
  }
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  MPI_Bcast(b,sizeof(double)*m*m,MPI_BYTE, current_prow, controls->col_comm);
#if 1
  mydgemm(ncols, m, m, 1.0, &(awork[0][0]), m, &(b[0][0]), m, 0.0, &(a[0][0]), m );
#else
  for (k=0;k<m;k++){
    for(j=0;j<ncols;j++)
      for(ii=0;ii<m;ii++)
        a[j][k]+= awork[j][ii]*b[ii][k];
  }
#endif
}

int MP_calculate_ld_phase1(int m, double a[][m], int ncols, double b[m][m], int current, PCONTROLS controls, PPARMS parms)
// broadcast diagnal panel (already triangle)
// so that it can then be multiplied with L panel
{
  int i,ii,j,k;
  int current_prow, current_lrow;
  BEGIN_TIMER(timer);
  convert_global_index_to_local_rows(current, &current_prow, &current_lrow, parms, controls);
  MPI_Bcast(b,sizeof(double)*m*m,MPI_BYTE, current_prow, controls->col_comm);
  END_TIMER(timer,38,(m+0.0)*m);
}

// Phase 2 of the L*D computation: copy a into awork (zeroing a), then
// a = awork * b via DGEMM (timed)
// (continued on the following source line).
int MP_calculate_ld_phase2(int m, double a[][m], int ncols, double awork[][m], double b[m][m], int current, PCONTROLS controls, PPARMS parms)
{
  int i,ii,j,k;
  int current_prow, current_lrow;
  BEGIN_TIMER(timer);
  for(j=0;j<ncols;j++){
    for (k=0;k<m;k++){
      awork[j][k]=a[j][k];
      a[j][k]=0;
    }
  }
  mydgemm(ncols,
m, m, 1.0, &(awork[0][0]), m, &(b[0][0]), m, 0.0, &(a[0][0]), m ); END_TIMER(timer,6,((double)(ncols))*m*m*2); } int MP_calculate_ld(int m, double a[][m], int ncols, double awork[][m], double b[m][m], int current, PCONTROLS controls, PPARMS parms) { MP_calculate_ld_phase1(m, a, ncols, b, current, controls, parms); MP_calculate_ld_phase2(m, a, ncols, awork, b, current, controls, parms); } int MP_update_multiple_using_diagonal(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int firstrow, int c1, int c2,int nrows, double acolinv[nrows][nrows]) { int current_prow; int current_lrow; int current_pcol; int current_lcol; int i,j,ii; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(firstrow, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(firstrow, &current_pcol, &current_lcol, parms, controls); if (current_prow == controls->rank_in_col){ MP_solve_triangle_using_inverse(nncol, (double(*)[])(&a[current_lrow][c1]), c2-c1, nrows,acolinv); } } int MP_store_diagonal_inverse(int nnrow, int nb, double dinv[nnrow][nb], PPARMS parms, PCONTROLS controls, int firstrow, double acolinv[nb][nb]) { int current_prow; int current_lrow; int current_pcol; int current_lcol; int i,j,ii; int ncol = controls->ncol+1; int nrow = controls->nrow; convert_global_index_to_local_rows(firstrow, &current_prow, &current_lrow, parms, controls); convert_global_index_to_local_cols(firstrow, &current_pcol, &current_lcol, parms, controls); if (current_prow == controls->rank_in_col){ for (i=0;i<nb;i++){ for(j=0;j<nb;j++){ dinv[current_lrow+i][j]=acolinv[i][j]; } } } } int MP_process_diagonal(int nnrow, int nncol, double a[nnrow][nncol], PPARMS parms, PCONTROLS controls, int firstrow, int nrows, double acolinv[nrows][nrows], int singlecol) { int current_prow; int current_lrow; int current_pcol; int current_lcol; int i,j,ii; double acol[nrows][nrows]; int ncol = controls->ncol+1; int nrow = controls->nrow; 
    /* (continuation of MP_process_diagonal) */
    convert_global_index_to_local_rows(firstrow, &current_prow, &current_lrow,
                                       parms, controls);
    convert_global_index_to_local_cols(firstrow, &current_pcol, &current_lcol,
                                       parms, controls);
    if (current_prow == controls->rank_in_col){
        if (current_pcol == controls->rank_in_row){
            /* owner of the diagonal block: copy it out two doubles at a
               time via the v2df SIMD type */
            int endrow;
            int nrhalf = nrows/2;
            endrow = current_lrow+nrows;
            for (i=0;i<endrow-current_lrow;i++){
                v2df * src = (v2df*)&(a[i+current_lrow][current_lcol]);
                v2df *dest = (v2df*)&(acol[i][0]);
                for (ii=0;ii<nrhalf;ii++){
                    // acol[i][ii]=a[i+current_lrow][current_lcol+ii];
                    dest[ii]=src[ii];   // need to be fixed
                }
            }
        }
        if (current_pcol == controls->rank_in_row){
#if 0
            /* debug dump of the copied diagonal block */
            dprintf("update_local firstrow=%d\n", firstrow);
            for(i=0;i<nrows;i++){
                fprintf(stderr, "i= %2d:", i);
                for(j=0;j<nrows;j++){
                    fprintf(stderr, " %10.3e",acol[i][j]);
                }
                fprintf(stderr,"\n");
            }
#endif
            MP_solve_triangle_for_unit_mat(nrows,acol,nrows,acolinv,nrows);
        }
        if (! singlecol){
            MPI_Bcast(acolinv,sizeof(double)*nrows*nrows,MPI_BYTE,
                      current_pcol, controls->row_comm);
        }
    }
}

/* Phase 1 of MP_process_diagonal: the owning process copies out and
   inverts the diagonal block; no communication happens here. */
int MP_process_diagonal_phase1(int nnrow, int nncol, double a[nnrow][nncol],
                               PPARMS parms, PCONTROLS controls,
                               int firstrow, int nrows,
                               double acolinv[nrows][nrows], int singlecol)
{
    int current_prow;
    int current_lrow;
    int current_pcol;
    int current_lcol;
    int i,j,ii;
    double acol[nrows][nrows] __attribute__((aligned(128)));
    int ncol = controls->ncol+1;
    int nrow = controls->nrow;
    convert_global_index_to_local_rows(firstrow, &current_prow, &current_lrow,
                                       parms, controls);
    convert_global_index_to_local_cols(firstrow, &current_pcol, &current_lcol,
                                       parms, controls);
    if (current_prow == controls->rank_in_col){
        if (current_pcol == controls->rank_in_row){
            copysubmat(nncol, (double(*)[])(a[current_lrow]+current_lcol),
                       nrows, acol, nrows, nrows);
            MP_solve_triangle_for_unit_mat(nrows,acol,nrows,acolinv,nrows);
        }
    }
}

/* Phase 2: broadcast the inverse along the process row (skipped when
   singlecol is set). */
int MP_process_diagonal_phase2(int nnrow, int nncol, double a[nnrow][nncol],
                               PPARMS parms, PCONTROLS controls,
                               int firstrow, int nrows,
                               double acolinv[nrows][nrows], int singlecol)
{
    int current_prow;
    int current_lrow;
    int current_pcol;
    int current_lcol;
    int i,j,ii;
    double acol[nrows][nrows];
    int ncol = controls->ncol+1;
    int nrow = controls->nrow;
    convert_global_index_to_local_rows(firstrow, &current_prow, &current_lrow,
                                       parms, controls);
    convert_global_index_to_local_cols(firstrow, &current_pcol, &current_lcol,
                                       parms, controls);
    if (current_prow == controls->rank_in_col){
        if (! singlecol){
            MPI_Bcast(acolinv,sizeof(double)*nrows*nrows,MPI_BYTE,
                      current_pcol, controls->row_comm);
        }
    }
}

/* Unblocked backward substitution on the distributed upper-triangular
   system.  The RHS lives in column controls->ncol of a; b is the local
   working copy that is passed from process column to process column.
   (Body continues on the next chunk line.) */
void backward_sub_mpi(int nnrow, int nncol, double a[nnrow][nncol],double b[],
                      PCONTROLS controls, PPARMS parms)
{
    int i,j,k;
    int previous_pcol = -1;
    int n = controls->nrow*parms->nprow;
    int current_prow;
    int current_lrow;
    int current_pcol;
    int current_lcol;
    MPI_Status status;
    for (i=0;i<controls->nrow;i++)b[i] = a[i][controls->ncol];
    for(i=n-1;i>=1;i--){
        convert_global_index_to_local_rows(i, &current_prow, &current_lrow,
                                           parms, controls);
        convert_global_index_to_local_cols(i, &current_pcol, &current_lcol,
                                           parms, controls);
        if (previous_pcol == -1) previous_pcol = current_pcol;
        if (current_pcol != previous_pcol){
            // process column changed, current column should
            // receive b from previous column
            if(controls->rank_in_row == previous_pcol){
                MPI_Send(b, controls->nrow, MPI_DOUBLE, current_pcol,
                         MPBSTAG,controls->row_comm);
            }else if (controls->rank_in_row == current_pcol){
                MPI_Recv(b, controls->nrow, MPI_DOUBLE, previous_pcol,
                         MPBSTAG,controls->row_comm, &status);
            }
        }
        if(controls->rank_in_row == current_pcol){
            // have the current column of a
            double btmp = b[current_lrow];
            MPI_Bcast(&btmp,1,MPI_DOUBLE,current_prow, controls->col_comm);
            int jmax;
            int nb = parms->nb;
            /* rows above the pivot on this process get updated; the split
               depends on whether we are above/below the owning row */
            if (controls->rank_in_col == current_prow){
                jmax = current_lrow;
            }else if (controls->rank_in_col < current_prow){
                jmax = ((current_lrow/nb)+1)*nb;
            } else{
                jmax = (current_lrow/nb)*nb;
            }
            for(j=0;j<jmax;j++){
                b[j] -= btmp*a[j][current_lcol];
            }
        }
        previous_pcol=current_pcol;
    }
    /* (continuation of backward_sub_mpi) share the finished solution along
       the row and store it back into the RHS column of a */
    MPI_Bcast(b,controls->nrow,MPI_DOUBLE, current_pcol, controls->row_comm);
    for (i=0;i<controls->nrow;i++)a[i][controls->ncol] = b[i];
}

/* Blocked backward substitution: processes nb pivots per iteration using
   the stored diagonal inverses dinv; the RHS lives in column
   controls->ncol of a and b is the local working copy. */
void backward_sub_blocked_mpi(int nnrow, int nncol, double a[nnrow][nncol],
                              int nb, double dinv[nnrow][nb], double b[],
                              PCONTROLS controls, PPARMS parms)
{
    int i,ii,j,k;
    int previous_pcol = -1;
    int n = controls->nrow*parms->nprow;
    int current_prow;
    int current_lrow;
    int current_pcol;
    int current_lcol;
    MPI_Status status;
    // dprintf(9,"enter backward_sub_blocked_mpi\n");
    for (i=0;i<controls->nrow;i++)b[i] = a[i][controls->ncol];
    for(ii=n-1;ii>=1;ii-=nb){
        // dprintf(9,"ii=%d\n", ii);
        convert_global_index_to_local_cols(ii, &current_pcol, &current_lcol,
                                           parms, controls);
        convert_global_index_to_local_rows(ii, &current_prow, &current_lrow,
                                           parms, controls);
        if (previous_pcol == -1) previous_pcol = current_pcol;
        if (parms->npcol > 1){
            /* hand the running RHS over when the owning column changes */
            if (ii != n-1){
                if(controls->rank_in_row == previous_pcol){
                    MPI_Send(b, controls->nrow, MPI_DOUBLE, current_pcol,
                             MPBSTAG,controls->row_comm);
                }else if (controls->rank_in_row == current_pcol){
                    MPI_Recv(b, controls->nrow, MPI_DOUBLE, previous_pcol,
                             MPBSTAG,controls->row_comm, &status);
                }
            }
        }
        // dprintf(9,"send/receive end ii=%d\n", ii);
        if(controls->rank_in_row == current_pcol){
            double bwork[nb];
            int jend;
            if (controls->rank_in_col == current_prow){
                /* owner of the pivot block: solve the nb x nb triangle */
                int jmin = current_lrow - nb+1;
#if 0
                for(k=0;k<nb;k++){
                    int jmax;
                    jmax = current_lrow-k;
                    bwork[k]=b[jmax];
                    double btmp=bwork[k];
                    for(j=jmin;j<jmax;j++){
                        b[j] -= btmp*a[j][current_lcol-k];
                    }
                }
#endif
#if 0
                for(j=current_lrow-1;j>=current_lrow-nb+1;j--){
                    int r0 = current_lrow-j+1;
                    int c0 = current_lcol-j+1;
                    for(k=0;k<current_lrow-j;k++){
                        b[j] -= b[current_lrow-k]*a[j][current_lcol-k];
                    }
                }
#endif
#if 1
                /* active variant: row-oriented dot products */
                for(j=current_lrow-1;j>=current_lrow-nb+1;j--){
                    int r0 = j+1;
                    int c0 = current_lcol-current_lrow+r0;
                    int kmax = current_lrow-j;
                    double btmp = b[j];
                    double * bp = b+r0;
                    double * ap = &(a[j][c0]);
                    // double * apn = &(a[j-1][c0]);
                    // for (k=0;k<kmax; k+= 16)__builtin_prefetch(apn+k,0,0);
                    for(k=0;k<kmax;k++){
                        btmp -= bp[k]*ap[k];
                    }
                    b[j]=btmp;
                }
#endif
                for(k=0;k<nb;k++){
                    int jmax;
                    jmax = current_lrow-k;
                    bwork[k]=b[jmax];
                }
                // for(j=jmin;j<=current_lrow;j++){
                //     dprintf(9,"ad[%d] = %10.4g %10.4g\n", j,a[j][current_lcol-1],
                //             a[j][current_lcol]);
                // }
                jend = jmin;
                if (parms->nprow > 1)
                    MPI_Bcast(bwork,nb,MPI_DOUBLE,current_prow,controls->col_comm);
                // dprintf(9,"source bcast end ii=%d\n", ii);
            }else{
                if (parms->nprow > 1)
                    MPI_Bcast(bwork,nb,MPI_DOUBLE,current_prow,controls->col_comm);
                // dprintf(9,"dest bcast end ii=%d\n", ii);
                if (controls->rank_in_col < current_prow){
                    jend = ((current_lrow/nb)+1)*nb;
                } else{
                    jend = (current_lrow/nb)*nb;
                }
            }
            double bwork2[jend];
            double bwork3[jend];
#if 0
            for(j=0;j<jend;j++){
                bwork2[j]=0;
                for(k=0;k<nb;k++){
                    bwork2[j] += bwork[k]*a[j][current_lcol-k];
                }
            }
#endif
            /* rank-nb update of the remaining RHS entries */
            double bwork4[nb];
            for(k=0;k<nb;k++) bwork4[k]=bwork[nb-1-k];
#pragma omp parallel for private(j,k)
            for(j=0;j<jend;j++){
                double btmp=0;
                double *ap=&(a[j][current_lcol-nb+1]);
                for(k=0;k<nb;k++){
                    btmp += bwork4[k]*ap[k];
                }
                bwork2[j]=btmp;
            }
#define DUPOSTMUL
#ifndef DUPOSTMUL
            for(j=0;j<jend;j++) b[j] -= bwork2[j];
#else
            /* post-multiply by the stored diagonal inverses */
#pragma omp parallel for private(j,k) schedule(static)
            for(j=0;j<jend;j++){
                bwork3[j]=0;
                int jmin = (j /nb)*nb;
                for(k=0;k<nb;k++){
                    bwork3[j] += bwork2[k+jmin]*dinv[j][k];
                }
            }
            for(j=0;j<jend;j++) b[j] -= bwork3[j];
#endif
            // dprintf(9,"calc end ii=%d\n", ii);
        }
        previous_pcol=current_pcol;
    }
    if (parms->npcol > 1)
        MPI_Bcast(b,controls->nrow,MPI_DOUBLE, current_pcol, controls->row_comm);
    for (i=0;i<controls->nrow;i++)a[i][controls->ncol] = b[i];
    // dprintf(9,"backward_sub all t end \n");
}

/* Recompute A*x and store it into the RHS column so the caller can
   compare against the original right-hand side.
   (Body continues on the next chunk line.) */
void check_solution_mpi(int nnrow, int nncol, double a[nnrow][nncol],
                        double b[], double bcopy[],
                        PCONTROLS controls, PPARMS parms)
{
    int i,j,k;
    int nrow = controls->nrow;
    int ncol=controls->ncol;
    /* gather the full solution vector down the process column */
    MPI_Allgather(b,nrow,MPI_DOUBLE,bcopy,nrow,MPI_DOUBLE, controls->col_comm);
    double bcopy2[ncol];
    for(i=0;i<ncol;i++){
        int
/* (continuation of check_solution_mpi: permute the gathered solution into
   local column order, multiply, and reduce across the row) */
            iglobal=global_colid(i, parms,controls);
        int rowpid, rowlid;
        convert_global_index_to_local_rows(iglobal, &rowpid, &rowlid,
                                           parms, controls);
        int index= nrow*rowpid+rowlid;
        bcopy2[i]=bcopy[index];
        // dprintf(9,"b[%d]=%g %g\n", i, bcopy2[i], bcopy[i]);
    }
    for(i=0;i<nrow;i++){
        double btmp=0;
        for(j=0;j<ncol;j++) {
            btmp+= a[i][j]*bcopy2[j];
            // dprintf(9,"a[%d][%d]=%g, bcopy2=%g %g\n", i, j,
            //         a[i][j], bcopy2[j],btmp);
        }
        bcopy[i]=btmp;
    }
    MPI_Allreduce(bcopy,b,nrow,MPI_DOUBLE, MPI_SUM, controls->row_comm);
    for (i=0;i<controls->nrow;i++)a[i][controls->ncol] = b[i];
}

/* Reduce the elementwise error b - b0 over the process column and print
   the 2-norm / max-norm summary on rank 0; returns the global max error. */
double print_errors(double b[], double b0[], PCONTROLS controls, PPARMS parms)
{
    int i,j,k;
    int nrow=controls->nrow;
    double esum = 0;
    double emax = 0;
    for (i=0;i<nrow;i++){
        double err = b[i]-b0[i];
        esum+= err*err;
        if (emax < fabs(err)) emax = fabs(err);
    }
    dprintf(9," esum = %e\n", esum);
    double totalerror=0;
    double totalemax=0;
    MPI_Reduce(&esum,&totalerror, 1, MPI_DOUBLE, MPI_SUM, 0, controls->col_comm);
    MPI_Reduce(&emax,&totalemax, 1, MPI_DOUBLE, MPI_MAX, 0, controls->col_comm);
    if (controls->rank_in_col == 0){
        dprintf(0,"Errors = %e %e %e %e\n\n",
                sqrt(totalerror), totalemax, esum, emax);
        printf("\n Error = %e %e\n", sqrt(totalerror), totalemax);
    }
    /* NOTE(review): only rank 0 holds the reduced value; other ranks
       return their local totalemax (initialized to 0). */
    return totalemax;
}

/* Return the larger of a and b. */
double Mmax(double a, double b)
{
    if (a>b){
        return a;
    }else{
        return b;
    }
}

/* Print an HPL-style result banner with the achieved Gflops rate,
   computed from the standard LU flop count (2/3 N^3 + 3/2 N^2). */
void HPLlogprint(FILE * f, int N, int NB, int nprow, int npcol, double elapsed)
{
    fprintf(f, "%s%s\n",
            "======================================",
            "======================================" );
    fprintf(f,"%s%s\n",
            "T/V N NB P Q",
            " Time Gflops" );
    fprintf(f, "%s%s\n",
            "--------------------------------------",
            "--------------------------------------" );
    double Gflops = ( ( (double)(N) / 1.0e+9 ) * ( (double)(N) / elapsed ) ) *
        ( ( 2.0 / 3.0 ) * (double)(N) + ( 3.0 / 2.0 ) );
    fprintf(f,"W%c%1d%c%c%1d%c%1d%12d %5d %5d %5d %18.2f %18.3e\n",
            'R',0,'1', 'R', 2, 'C', 32,
            N, NB, nprow, npcol, elapsed, Gflops );
}

/* Print the HPL-style residual check report.  emax = ||Ax-b||_oo,
   a1/ao = 1-/inf-norm of A, b1/bo = 1-/inf-norm of x, n = problem size.
   (Body continues on the next chunk line.) */
void HPLerrorprint(FILE * f, double emax, double a1, double ao, double
                   b1, double bo, double n)
{
    double epsil =2e-16;   /* machine epsilon for double */
    double thrsh = 16;     /* HPL pass/fail threshold */
    double resid1 = emax / (epsil * a1 * n );
    double resid2 = emax / (epsil * a1 * b1 );
    double resid3 = emax / (epsil * ao * bo * n);
    // fprintf(f,"err= %e Anorms= %e %e Xnorms= %e %e\n",resid0, a1, ao, b1, bo);
    int kpass =0;
    int kfail= 0;
    if( ( Mmax( resid1, resid2 ) < thrsh ) && ( resid3 < thrsh ) ) (kpass)++;
    else (kfail)++;
    fprintf(f,"%s%s\n",
            "--------------------------------------",
            "--------------------------------------" );
    fprintf(f,"%s%16.7f%s%s\n",
            "||Ax-b||_oo / ( eps * ||A||_1 * N ) = ", resid1,
            " ...... ", ( resid1 < thrsh ? "PASSED" : "FAILED" ) );
    fprintf(f,"%s%16.7f%s%s\n",
            "||Ax-b||_oo / ( eps * ||A||_1 * ||x||_1 ) = ", resid2,
            " ...... ", ( resid2 < thrsh ? "PASSED" : "FAILED" ) );
    fprintf(f,"%s%16.7f%s%s\n",
            "||Ax-b||_oo / ( eps * ||A||_oo * ||x||_oo ) = ", resid3,
            " ...... ", ( resid3 < thrsh ? "PASSED" : "FAILED" ) );
    if( ( resid1 >= thrsh ) || ( resid2 >= thrsh ) || ( resid3 >= thrsh ) ) {
        fprintf(f,"%s%18.6f\n",
                "||Ax-b||_oo . . . . . . . . . . . . . . . . . = ", emax );
        fprintf(f,"%s%18.6f\n",
                "||A||_oo . . . . . . . . . . . . . . . . . . . = ", ao );
        fprintf(f, "%s%18.6f\n",
                "||A||_1 . . . . . . . . . . . . . . . . . . . = ", a1 );
        fprintf(f,"%s%18.6f\n",
                "||x||_oo . . . . . . . . . . . . . . . . . . . = ", bo );
        fprintf(f,"%s%18.6f\n",
                "||x||_1 . . . . . . . . . . . . . . . . . . . = ", b1 );
    }
    fprintf(f, "%s%s\n",
            "======================================",
            "======================================" );
    fprintf(f, "\n%s %6d %s\n",
            "Finished", 1, "tests with the following results:" );
    fprintf(f," %6d %s\n", kpass,
            "tests completed and passed residual checks," );
    fprintf(f," %6d %s\n", kfail,
            "tests completed and failed residual checks," );
    fprintf(f," %6d %s\n", 0,
            "tests skipped because of illegal input values."
);
    /* (continuation of HPLerrorprint: closing banner) */
    fprintf(f,"%s%s\n",
            "--------------------------------------",
            "--------------------------------------" );
    fprintf(f,"\nEnd of Tests.\n" );
    fprintf(f,"%s%s\n",
            "======================================",
            "======================================" );
}

/* Debug helper: each process in turn prints its local block of a
   (including the RHS column), serialized with barriers. */
void printmat_MP(int nnrow, int nncol, double a[nnrow][nncol],
                 PCONTROLS controls, PPARMS parms)
{
    int procid =MP_proccount();
    int ii, i, j;
    MPI_Barrier(MPI_COMM_WORLD);
    for (ii=0;ii<procid; ii++){
        // fprintf(stderr,"printmatmp, ids = %d %d\n", ii, MP_myprocid());
        if (MP_myprocid() == ii){
            fprintf(stderr,"Printmat_MP: Proc %d, loc = %d %d\n",MP_myprocid(),
                    controls->rank_in_row, controls->rank_in_col);
            for(i=0;i<controls->nrow;i++){
                fprintf(stderr,"%3d: ", i);
                for(j=0;j<controls->ncol+1;j++)
                    fprintf(stderr," %10.3e", a[i][j]);
                fprintf(stderr,"\n");
            }
            fprintf(stderr,"\n");
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }
}

/* One unblocked elimination step on global row/column i:
   pivot search, row swap, pivot-row scaling, rank-1 update. */
void lu_forward_onestep_mpi(int i,int nnrow, int nncol,
                            double a[nnrow][nncol], double b[],
                            PCONTROLS controls, PPARMS parms)
{
    // dprintf(9,"lu_mpi enter i=%d\n", i);
    int imax= MP_find_pivot(nnrow, nncol, a, parms, controls, i);
    MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,imax,0,nncol);
    // printmat_MP(nnrow, nncol, a, controls, parms);
    // dprintf(9,"lu_mpi after swap_rows i=%d\n", i);
    MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,0,nncol,0);
    // printmat_MP(nnrow, nncol, a, controls, parms);
    // dprintf(9,"lu_mpi after scale_rows i=%d\n", i);
    MP_update_single(nnrow, nncol, a, parms, controls,i);
    // printmat_MP(nnrow, nncol, a, controls, parms);
    // dprintf(9,"lu_mpi after update_single i=%d\n", i);
}

/* Panel factorization: unblocked LU on columns [ifirst, ifirst+nb),
   recording pivot rows into pv. */
void column_decomposition_mpi(int ifirst,int nb,int nnrow, int nncol,
                              double a[nnrow][nncol], double b[], int pv[],
                              PCONTROLS controls, PPARMS parms)
{
    int i,ii;
    int c1, c2;
    // dprintf(9,"column_decomposition_mpi ifirst=%d nb=%d\n",ifirst,nb);
    convert_global_col_range_to_local_range(ifirst, ifirst+nb-1,&c1, &c2,
                                            parms, controls);
    c2 ++;   /* make the range exclusive */
    for (ii=0;ii<nb;ii++){
        i = ii+ifirst;
        int imax= MP_find_pivot(nnrow, nncol, a, parms, controls, i);
        MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,imax,c1,c2);
        pv[ii]=imax;
        MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,1);
        int cu1, cu2;
        convert_global_col_range_to_local_range(i+1, ifirst+nb-1,&cu1, &cu2,
                                                parms, controls);
        cu2 ++;
        MP_update_single_blocked(nnrow, nncol, a, parms, controls,i,cu1,cu2);
    }
}

/* Debug helper: print an nnrow x nncol matrix with a label. */
void print2dmat_MP(int nnrow, int nncol, double a[nnrow][nncol], char* s)
{
    int ii, i, j;
    fprintf(stderr,"Print2dmat_MP: Proc %d, name=%s \n",MP_myprocid(), s);
    for(i=0;i<nnrow;i++){
        fprintf(stderr,"%3d: ", i);
        for(j=0;j<nncol;j++)
            fprintf(stderr," %10.3e", a[i][j]);
        fprintf(stderr,"\n");
    }
    fprintf(stderr,"\n");
}

/* Panel factorization on a transposed (column-major) copy of the panel;
   only the process column holding the panel participates.
   (Body continues on the next chunk line.) */
void column_decomposition_mpi_transposed(int ifirst,int nb,int nnrow,
                                         int nncol, double a[nncol][nnrow],
                                         double b[], int pv[],
                                         PCONTROLS controls, PPARMS parms)
{
    int i,ii;
    int c1, c2;
    double arow[nncol];
    double acol[nnrow];
    // dprintf(9,"column_decomposition_mpi ifirst=%d nb=%d\n",ifirst,nb);
    convert_global_col_range_to_local_range(ifirst, ifirst+nb-1,&c1, &c2,
                                            parms, controls);
    c1 %= nb;   /* column indices are relative to the nb-wide panel */
    c2 %= nb;
    c2 ++;
    int havec=have_current_col(ifirst, parms, controls);
    if (havec){
        // print2dmat_MP(nb, nnrow, a, "transposed a");
        for (ii=0;ii<nb;ii++){
            i = ii+ifirst;
            // dprintf(9,"coldec trans i=%d\n", i);
            int imax= MP_find_pivot_transposed(nnrow, nb, a, parms, controls, i);
            // dprintf(9,"coldec trans i=%d imax=%d find pivot end\n", i,imax);
            MP_swap_rows_blocked_transposed(nnrow, nb, a, parms, controls,
                                            i,imax,c1,c2);
            // dprintf(9,"coldec trans i=%d swap rows end\n", i);
            pv[ii]=imax;
            // print2dmat_MP(nb, nnrow, a, "after swap");
            MP_scale_row_blocked_transposed(nnrow, nb, a, parms, controls,
                                            i,c1,c2);
            // print2dmat_MP(nb, nnrow, a, "after scale");
            // dprintf(9,"coldec trans i=%d scale rows end\n", i);
            int cu1, cu2;
            convert_global_col_range_to_local_range(i+1, ifirst+nb-1,&cu1, &cu2,
                                                    parms, controls);
            cu1 %= nb;
            cu2 %= nb;
            cu2 ++;
            if(ii<nb-1){
                // dprintf(9,"coldec trans i=%d call update\n", i);
MP_update_single_blocked_transposed(nnrow, nb, a, parms, controls,
                                    i,cu1,cu2,arow,acol);
            }
            // dprintf(9,"coldec trans i=%d update end\n", i);
        }
    }
}

/* Transpose 8-column row-major strips of a into at, starting at row
   istart, using 8x8 SIMD (v2df) tiles with non-temporal prefetch.
   Assumes (nnrow - istart) is a multiple of 8. */
void transpose_rowtocol8(int nnrow, int nncol, double a[nnrow][nncol],
                         double at[][nnrow], int istart)
{
    int i,j,k;
    const int m=8;
    int mend;   /* NOTE(review): unused */
#pragma omp parallel for private(i,j,k) schedule(static)
    for(i=istart;i<nnrow;i+=m){
        double atmp[m][m] __attribute__((aligned(128)));
        // BEGIN_TSC;
        for(k=0;k<m;k++){
            v2df * ak = (v2df*) a[i+k];
            v2df * akk = (v2df*) atmp[k];
            /* prefetch two tiles ahead, bypassing the cache */
            asm("prefetchnta %0"::"m"(a[i+k+m*2][0]):"memory");
            // asm("prefetchnta %0"::"m"(a[i+k+m*2][8]):"memory");
            // __builtin_prefetch(a[i+k+m*2],0,0);
            // __builtin_prefetch(a[i+k+m*2]+8,0,0);
            akk[0] =ak[0];
            akk[1] =ak[1];
            akk[2] =ak[2];
            akk[3] =ak[3];
        }
        // END_TSC(t,17);
        // { BEGIN_TSC;
        for(j=0;j<m;j++){
            v2df * atk= (v2df*)(at[j]+i);
            atk[0]=(v2df){atmp[0][j],atmp[1][j]};
            atk[1]=(v2df){atmp[2][j],atmp[3][j]};
            atk[2]=(v2df){atmp[4][j],atmp[5][j]};
            atk[3]=(v2df){atmp[6][j],atmp[7][j]};
        }
        // END_TSC(t2,18);
        // }   int istart)
    }
}

/* Scalar reference version of transpose_rowtocol8 (kept for comparison). */
void transpose_rowtocol8_0(int nnrow, int nncol, double a[nnrow][nncol],
                           double at[][nnrow], int istart)
{
    int i,j,k;
    const int m=8;
    double atmp[m][m];
    for(i=istart;i<nnrow;i+=m){
        for(k=0;k<m;k++){
            double *ak = a[i+k];
            atmp[0][k] =ak[0];
            atmp[1][k] =ak[1];
            atmp[2][k] =ak[2];
            atmp[3][k] =ak[3];
            atmp[4][k] =ak[4];
            atmp[5][k] =ak[5];
            atmp[6][k] =ak[6];
            atmp[7][k] =ak[7];
        }
        for(j=0;j<m;j++){
            v2df * atp = (v2df*) atmp[j];
            v2df * ap = (v2df*) (at[j]+i);
            *(ap)=*(atp);
            *(ap+1)=*(atp+1);
            *(ap+2)=*(atp+2);
            *(ap+3)=*(atp+3);
        }
    }
}

/* Dispatch: fast path for m == 8, generic m x m tiling otherwise. */
void MP_transpose_rowtocol(int nnrow, int nncol, double a[nnrow][nncol],int m,
                           double at[][nnrow], int istart)
{
    if (m == 8){
        transpose_rowtocol8(nnrow, nncol, a, at, istart);
        return;
    }
    int i,j,k;
    double atmp[m][m];
    for(i=istart;i<nnrow;i+=m){
        for(k=0;k<m;k++){
            for(j=0;j<m;j++){
                atmp[j][k] =a[i+k][j];
            }
        }
        for(j=0;j<m;j++){
            for(k=0;k<m;k++){
                at[j][i+k]=atmp[j][k];
            }
        }
    }
}

/* Inverse of transpose_rowtocol8: copy the column-major panel at back
   into 8-column row strips of a, starting at row istart. */
void transpose_coltorow8(int nnrow,int nncol, double a[nnrow][nncol],
                         double at[][nnrow], int istart)
{
    int i,j,k;
    const int m=8;
    double atmp[m][m];
#pragma omp parallel for private(i,j,k,atmp) schedule(static)
    for(i=istart;i<nnrow;i+=m){
        for(j=0;j<m;j++){
            double * atj = at[j]+i;
            // __builtin_prefetch(at[j]+i+m+m,0,0);
            // inserting prefetch here causes speed down...
            atmp[0][j] =atj[0];
            atmp[1][j] =atj[1];
            atmp[2][j] =atj[2];
            atmp[3][j] =atj[3];
            atmp[4][j] =atj[4];
            atmp[5][j] =atj[5];
            atmp[6][j] =atj[6];
            atmp[7][j] =atj[7];
        }
        for(k=0;k<m;k++){
            v2df * atp = (v2df*) atmp[k];
            v2df * ap = (v2df*) a[i+k];
            *(ap)=*(atp);
            *(ap+1)=*(atp+1);
            *(ap+2)=*(atp+2);
            *(ap+3)=*(atp+3);
        }
    }
}

/* Dispatch: fast path for m == 8, generic tiling otherwise. */
void MP_transpose_coltorow(int nnrow,int nncol, double a[nnrow][nncol],int m,
                           double at[][nnrow], int istart)
{
    if (m == 8){
        transpose_coltorow8(nnrow, nncol, a, at, istart);
        return;
    }
    int i,j,k;
    double atmp[m][m];
    for(i=istart;i<nnrow;i+=m){
        for(j=0;j<m;j++){
            double * atj = at[j]+i;
            for(k=0;k<m;k+=2){
                atmp[k][j] =atj[k];
                atmp[k+1][j] =atj[k+1];
                // atmp[k+2][j] =atj[k+2];
                // atmp[k+3][j] =atj[k+3];
            }
        }
        for(k=0;k<m;k++){
            double * aik = a[i+k];
            for(j=0;j<m;j+=2){
                aik[j] = atmp[k][j];
                aik[j+1] = atmp[k][j+1];
                // aik[j+2] = atmp[k][j+2];
                // aik[j+3] = atmp[k][j+3];
            }
        }
    }
}

/* Panel factorization with transpose: copy the nb-wide panel into a
   transposed work array, factor it there (better locality), and copy it
   back.  Timer slots 10/11/12 record the three stages.
   (Body continues on the next chunk line.) */
void column_decomposition_mpi_with_transpose(int ifirst,int nb,
                                             int nnrow, int nncol,
                                             double a[nnrow][nncol],
                                             double b[], int pv[],
                                             PCONTROLS controls, PPARMS parms)
{
    double awork[nb][nnrow] __attribute__((aligned(128)));
    int c1, c2;
    // dprintf(9,"column_decomposition_mpi_wt ifirst=%d nb=%d\n",ifirst,nb);
    convert_global_col_range_to_local_range(ifirst, ifirst+nb-1,&c1, &c2,
                                            parms, controls);
    c2 ++;
    int localfirst=first_row(ifirst, parms, controls);
    // dprintf(9,"column_decomposition_mpi_wt c1=%d c2=%d lfirst=%d\n",
    //         c1,c2,localfirst);
    BEGIN_TIMER(timer0);
    MP_transpose_rowtocol(nnrow,nncol, (double(*)[]) (&a[0][c1]), nb,
                          awork,localfirst);
    END_TIMER(timer0,10,((double)nb)*(nnrow-localfirst));
    BEGIN_TIMER(timer1);
    column_decomposition_mpi_transposed(ifirst,nb,nnrow,nncol, awork, b,
pv,controls,parms);
    /* (continuation of column_decomposition_mpi_with_transpose) */
    END_TIMER(timer1,11,((double)nb)*nb*(nnrow-localfirst));
    BEGIN_TIMER(timer2);
    MP_transpose_coltorow(nnrow,nncol, (double(*)[]) (&a[0][c1]), nb,
                          awork,localfirst);
    END_TIMER(timer2,12,((double)nb)*(nnrow-localfirst));
}

/* Apply a finished panel factorization (pivots pv, block at ifirst) to
   the trailing columns: invert the diagonal, replay swaps/scaling, then
   the triangular solve and the rank-nb trailing update. */
void process_right_part_mpi(int ifirst,int nb,int ncols,
                            int nnrow, int nncol, double a[nnrow][nncol],
                            double b[], int pv[],
                            PCONTROLS controls, PPARMS parms, int singlecol)
{
    int i,ii;
    int c1, c2;
    // dprintf(9,"process_rp ifirst=%d nb=%d\n", ifirst,nb);
    double acolinv[nb][nb];
    MP_process_diagonal(nnrow, nncol, a, parms, controls,
                        ifirst,nb,acolinv,singlecol);
    convert_global_col_range_to_local_range(ifirst+nb, ifirst+nb+ncols-1,
                                            &c1, &c2, parms, controls);
    c2++;
    if (ncols <= 0) c2 = nncol;   /* ncols<=0 means "to the end" */
    for (ii=0;ii<nb;ii++){
        i = ii+ifirst;
        MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2);
        MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,singlecol);
    }
    // dprintf(9,"process_rp, ifirst=%d\n", ifirst);
    // printmat_MP(nnrow, nncol, a, controls, parms);
    // MP_update_multiple_blocked_local(nnrow, nncol, a, parms, controls,
    //                                  ifirst,c1,c2,nb);
    MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls,
                                      ifirst,c1,c2,nb,acolinv);
    MP_update_multiple_blocked_global(nnrow, nncol, a, parms, controls,
                                      ifirst,nb,c1,c2,ifirst+nb,1);
}

/* Same as process_right_part_mpi, but the L panel is supplied in acol and
   each stage is timed (slots 24/36/25).
   (Body continues on the next chunk line.) */
void process_right_part_mpi_withacol(int ifirst,int nb,int ncols,
                                     int nnrow, int nncol,
                                     double a[nnrow][nncol],
                                     double acol[nnrow][nb],
                                     double b[], int pv[],
                                     PCONTROLS controls, PPARMS parms,
                                     int singlecol)
{
    int i,ii;
    int c1, c2;
    // dprintf(9,"process_rp ifirst=%d nb=%d\n", ifirst,nb);
    double acolinv[nb][nb];
    BEGIN_TIMER(timer2);
    MP_process_diagonal(nnrow, nncol, a, parms, controls,
                        ifirst,nb,acolinv,singlecol);
    END_TIMER(timer2,24,((double)nb)*nb*nb/6.0);
    BEGIN_TIMER(timer3);
    convert_global_col_range_to_local_range(ifirst+nb, ifirst+nb+ncols-1,
                                            &c1, &c2, parms, controls);
    c2++;
    if (ncols <= 0) c2 = nncol;
    for (ii=0;ii<nb;ii++){
        i = ii+ifirst;
        MP_swap_rows_blocked(nnrow, nncol, a,
                             parms, controls,i,pv[ii], c1,c2);
        MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,singlecol);
    }
    // dprintf(9,"process_rp, ifirst=%d\n", ifirst);
    // printmat_MP(nnrow, nncol, a, controls, parms);
    // MP_update_multiple_blocked_local(nnrow, nncol, a, parms, controls,
    //                                  ifirst,c1,c2,nb);
    END_TIMER(timer3,36,((double)nb)*(c2-c1)*2);
    BEGIN_TIMER(timer0);
    MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls,
                                      ifirst,c1,c2,nb,acolinv);
    END_TIMER(timer0,24,((double)nb)*nb*(c2-c1)*2);
    BEGIN_TIMER(timer1);
    MP_update_multiple_blocked_global_withacol(nnrow, nncol, a, parms, controls,
                                               ifirst,nb,acol,c1,c2,ifirst+nb,1);
    END_TIMER(timer1,25,((double)nb)*nb*(c2-c1)*2);
}

/* Older D/L variant: diagonal inverse and L panel are precomputed by the
   caller; replays swaps/scaling then applies both updates. */
void process_right_part_mpi_using_dl_old(int ifirst,int nb,int ncols,
                                         int nnrow, int nncol,
                                         double a[nnrow][nncol],
                                         double acolinv[nb][nb],
                                         double acol[nnrow][nb], int pv[],
                                         PCONTROLS controls, PPARMS parms)
{
    int i,ii;
    int c1, c2;
    // dprintf(9,"process_rp ifirst=%d nb=%d\n", ifirst,nb);
    convert_global_col_range_to_local_range(ifirst+nb, ifirst+nb+ncols-1,
                                            &c1, &c2, parms, controls);
    c2++;
    if (ncols <= 0) c2 = nncol;
    for (ii=0;ii<nb;ii++){
        i = ii+ifirst;
        MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2);
        MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,0);
    }
    // dprintf(9,"process_rp, ifirst=%d\n", ifirst);
    // printmat_MP(nnrow, nncol, a, controls, parms);
    // MP_update_multiple_blocked_local(nnrow, nncol, a, parms, controls,
    //                                  ifirst,c1,c2,nb);
    MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls,
                                      ifirst,c1,c2,nb,acolinv);
    MP_update_multiple_blocked_global_using_lmat(nnrow, nncol, a, parms,
                                                 controls,ifirst,nb,c1,c2,
                                                 ifirst+nb,acol);
    // MP_update_multiple_blocked_global(nnrow, nncol, a, parms, controls,
    //                                   ifirst,nb,c1,c2,ifirst+nb,0);
}

/* Current D/L variant: local column range [c1,c2) is supplied directly.
   (Body continues on the next chunk line.) */
void process_right_part_mpi_using_dl(int ifirst,int nb,int c1, int c2,
                                     int nnrow, int nncol,
                                     double a[nnrow][nncol],
                                     double acolinv[nb][nb],
                                     double acol[nnrow][nb], int pv[],
                                     PCONTROLS controls, PPARMS
parms)
{
    int i,ii;
    // dprintf(9,"process_rp_using_dl, ifirst=%d c1, c2=%d %d\n", ifirst,c1,c2);
    for (ii=0;ii<nb;ii++){
        i = ii+ifirst;
        // dprintf(9,"process_rp, i=%d\n", i);
        MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2);
        // dprintf(9,"process_rp swap rows end, i=%d\n", i);
        MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,0);
    }
    // dprintf(9,"process_rp, ifirst=%d\n", ifirst);
    // printmat_MP(nnrow, nncol, a, controls, parms);
    // dprintf(9,"process_rp, enter DTRSM part c1, c2=%d %d\n",c1, c2);
    // MP_update_multiple_blocked_local(nnrow, nncol, a, parms, controls,
    //                                  ifirst,c1,c2,nb);
    MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls,
                                      ifirst,c1,c2,nb,acolinv);
    // dprintf(9,"process_rp, enter update part c1, c2=%d %d\n",c1, c2);
    MP_update_multiple_blocked_global_using_lmat(nnrow, nncol, a, parms,
                                                 controls,ifirst,nb,c1,c2,
                                                 ifirst+nb,acol);
    // dprintf(9,"process_rp, update part end\n");
    // MP_update_multiple_blocked_global(nnrow, nncol, a, parms, controls,
    //                                   ifirst,nb,c1,c2,ifirst+nb,0);
    // dprintf(9,"process_rp_using_dl end, ifirst=%d c1, c2=%d %d\n", ifirst,c1,c2);
}

/* D/L variant with caller-supplied pivot-row scale factors. */
void process_right_part_mpi_using_dls(int ifirst,int nb,int c1, int c2,
                                      int nnrow, int nncol,
                                      double a[nnrow][nncol],
                                      double acolinv[nb][nb],
                                      double acol[nnrow][nb], int pv[],
                                      double scale[],
                                      PCONTROLS controls, PPARMS parms)
{
    int i,ii;
    // dprintf(9,"process_rp_using_dls, ifirst=%d c1, c2=%d %d\n", ifirst,c1,c2);
    for (ii=0;ii<nb;ii++){
        i = ii+ifirst;
        MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2);
        MP_scale_row_blocked_using_scale(nnrow, nncol, a, parms, controls,
                                         i,c1,c2,0,scale[ii]);
    }
    MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls,
                                      ifirst,c1,c2,nb,acolinv);
    MP_update_multiple_blocked_global_using_lmat(nnrow, nncol, a, parms,
                                                 controls,ifirst,nb,c1,c2,
                                                 ifirst+nb,acol);
}

/* Debug helper: print an int vector (currently disabled by the early
   return). */
void dump_vector(char * s, int * x, int n)
{
    return;   /* debugging output disabled */
    fprintf(stderr,"%s:", s);
    int i;
    for(i=0;i<n;i++) fprintf(stderr," %2d", x[i]);
    fprintf(stderr,"\n");
}

int generate_src_and_dst_lists(int ifirst, int pv[], int src[], int dst[],
                               int * length, PCONTROLS controls, PPARMS parms)
// creates src + dest list from pivot list
{
    int n =parms->n;
    int nb=parms->nb;
    int work[n];
    int i;
    /* apply the pivot permutation to the identity, then emit every index
       that moved (plus all rows of the pivot block itself) */
    for(i=ifirst; i<n;i++)work[i]=i;
    for(i=ifirst; i<ifirst+nb;i++){
        int p = pv[i-ifirst];
        int tmp = work[i];
        work[i]=work[p];
        work[p]=tmp;
    }
    // dump_vector("work", work, n-ifirst);
    int ii=0;
    for(i=ifirst; i<n;i++){
        if ((work[i] != i) || (i<ifirst+nb)){
            dst[ii]=i;
            src[ii]=work[i];
            ii++;
        }
    }
    *length=ii;
    return ii;
}

void copysubvect(double *src, double * dst, int length)
// assume that the length is multiple of 8 and
// first location is 16-byte-alligned
{
    int j;
    v2df * s = (v2df*) src;
    v2df * d = (v2df*) dst;
    for(j=0;j<length/2;j+=4){
        d[j]=s[j];
        d[j+1]=s[j+1];
        d[j+2]=s[j+2];
        d[j+3]=s[j+3];
    }
}

void scalesubvect(double scale, double *src, double * dst, int length)
// assume that the length is multiple of 8 and
// first location is 16-byte-alligned
{
    int j;
    for(j=0;j<length;j++){
        // fprintf(stderr,"j=%d src=%e dest=%e\n", j, src[j], dst[j]);
        dst[j]=src[j]*scale;
    }
}

/* SIMD variant of scalesubvect (superseded by the scalar version above). */
void scalesubvect_old(double scale, double *src, double * dst, int length)
// assume that the length is multiple of 8 and
// first location is 16-byte-alligned
{
    int j;
    v2df * s = (v2df*) src;
    v2df * d = (v2df*) dst;
    v2df ss = {scale,scale};
    for(j=0;j<length/2;j+=4){
        d[j]=ss*s[j];
        d[j+1]=ss*s[j+1];
        d[j+2]=ss*s[j+2];
        d[j+3]=ss*s[j+3];
    }
}

/* Apply the src/dst swap lists within one process (single process row/
   column case): rows entering the pivot block are scaled copies, rows
   leaving it are restored from the saved pivot-block rows in arow.
   (Body continues on the next chunk line.) */
void local_swap_using_src_and_dest(int ifirst,int nb,int c1, int c2,
                                   int nnrow, int nncol,
                                   double a[nnrow][nncol],
                                   double arow[nb][c2-c1], double scale[],
                                   int src[], int dst[], int length,
                                   PCONTROLS controls, PPARMS parms)
{
    int current_proc;
    int current_lloc;
    int pivot_proc;
    int pivot_lloc;
    int i;
    int j;
    convert_global_index_to_local_rows(ifirst, &current_proc, &current_lloc,
                                       parms, controls);
    if(current_proc == controls->rank_in_col){
        printf("current_proc=%d, rank=%d\n",current_proc,
               controls->rank_in_col);
        copysubmat(nncol,
(double(*)[])(a[current_lloc]+c1), c2-c1, arow, nb, c2-c1);
    }
    // the following is only for p= (q=?) 1
#pragma omp parallel for private(i)
    for(i=0;i<length;i++){
        if(dst[i]<ifirst+nb){
            /* row moving into the pivot block: scaled copy */
            double * s = a[src[i]]+c1;
            double sc = scale[dst[i]-ifirst];
            if (src[i]<ifirst+nb)s=arow[src[i]-ifirst];
            scalesubvect(sc,s,&(a[dst[i]][c1]),c2-c1);
        }
    }
#pragma omp parallel for private(i)
    for(i=0;i<length;i++){
        if(dst[i]>=ifirst+nb){
            /* row leaving the pivot block: restore saved copy */
            copysubvect(arow[src[i]-ifirst],&(a[dst[i]][c1]),c2-c1);
        }
    }
}

/* Distributed version of the src/dst swap: gathers the scaled U rows into
   ar2 via broadcasts, point-to-point sends the swapped-out rows in arow,
   and stores both back.  (Body continues past the end of this chunk.) */
void global_swap_using_src_and_dest(int ifirst,int nb,int c1, int c2,
                                    int nnrow, int nncol,
                                    double a[nnrow][nncol],
                                    double arow[][c2-c1],
                                    double ar2[][c2-c1], double scale[],
                                    int src[], int dst[], int length,
                                    PCONTROLS controls, PPARMS parms)
{
    int current_proc;
    int current_lloc;
    int pivot_proc;
    int pivot_lloc;
    int i;
    int j;
    int myp = controls->rank_in_col;
    // fprintf(stderr,"global_swap ifirst=%d nb=%d c1=%d c2=%d\n",ifirst,nb,c1,c2);
    convert_global_index_to_local_rows(ifirst, &current_proc, &current_lloc,
                                       parms, controls);
    // Bcast scale in vertical direction
    // need not be done each time...
    MPI_Bcast(scale, nb, MPI_DOUBLE, current_proc, controls->col_comm);
    // fprintf(stderr,"global_swap first Bcast end\n");
    //
    // create the data for bcast
    //
    // first, create list of lines to send on each processors in a column
    //
    // dprintf(0,"myp, ncol, nrow, npcol, nprow = %d %d %d %d %d\n",
    //         myp, nncol, nnrow, parms->npcol, parms->nprow);
    int nlines[parms->nprow];
    int iloc[parms->nprow];
    int startloc[parms->nprow];
    int bdst[length];
    dump_vector("src", src, length);
    dump_vector("dst", dst, length);
    /* count, per source process row, the rows that move into the pivot
       block */
    for(i=0;i<parms->nprow;i++)nlines[i]=0;
    for(i=0;i<length;i++){
        if(dst[i]<ifirst+nb){
            int cp, cl;
            convert_global_index_to_local_rows(src[i], &cp, &cl,
                                               parms, controls);
            nlines[cp]++;
        }
    }
    startloc[0]=0;
    for(i=0;i<parms->nprow-1;i++)startloc[i+1] = startloc[i]+nlines[i];
    // dump_vector("nlines", nlines, parms->nprow);
    // dump_vector("startloc", startloc, parms->nprow);
    //
    // now each processor knows where to store its data
    //
    int ii =0;
    for(i=0;i<parms->nprow; i++)iloc[i]=0;
    for(i=0;i<length;i++){
        // fprintf(stderr,"convert_index i=%d length=%d\n", i, length);
        if(dst[i]<ifirst+nb){
            int cp, cl;
            convert_global_index_to_local_rows(src[i], &cp, &cl,
                                               parms, controls);
            // fprintf(stderr, "i=%d,ii=%d, dst=%d, src=%d, cp=%d, cl=%d\n",
            //         i, ii, src[i], dst[i], cp, cl);
            bdst[startloc[cp]+iloc[cp]]=dst[i]-ifirst;
            iloc[cp]++;
            if (cp == myp){
                // I have this data
                double * s = a[cl]+c1;
                double sc = scale[dst[i]-ifirst];
                // fprintf(stderr, "places to copy: %d %d scale=%e c1=%d,c2=%d\n",
                //         startloc[myp]+ii, dst[i]-ifirst, sc, c1,c2);
                // fprintf(stderr,"s[0]=%e\n", s[0]);
                // fprintf(stderr,"dst[0]=%e\n", *ar2[startloc[myp]+ii]);
                scalesubvect(sc,s,ar2[startloc[myp]+ii],c2-c1);
                // fprintf(stderr, "end scalesubvect\n");
                ii++;
            }
        }
    }
    // fprintf(stderr,"loop end\n");
    // dump_vector("bdst", bdst, nb);
    //
    // now ar2 have (local share of) scaled umat
    // So do the Bcast
    dumpsubmat("ar2 before bcast", c2-c1, ar2, nb, c2-c1, parms, controls);
    for(i=0;i<parms->nprow;i++){
        // fprintf(stderr,"bcast proc %d lines %d\n", i, nlines[i]);
        if (nlines[i]){
            MPI_Bcast(ar2[startloc[i]],(c2-c1)*nlines[i],MPI_DOUBLE,
                      i, controls->col_comm);
        }
    }
    dumpsubmat("ar2 after bcast", c2-c1, ar2, nb, c2-c1, parms, controls);
    // dprintf(0,"global_swap, bcast part end\n");
    /* second pass: rows moving OUT of the pivot block */
    int nlinesd[parms->nprow];
    int startlocd[parms->nprow];
    int sdst[length];
    for(i=0;i<parms->nprow;i++)nlinesd[i]=0;
    for(i=0;i<parms->nprow;i++)iloc[i]=0;
    ii=0;
    for(i=0;i<length;i++){
        if(dst[i]>=ifirst+nb){
            int cpd, cld;
            convert_global_index_to_local_rows(dst[i], &cpd, &cld,
                                               parms, controls);
            if(cpd == myp){
                sdst[ii]= cld;
                ii++;
            }
            nlinesd[cpd]++;
        }
    }
    // dump_vector("nlinesd", nlinesd, parms->nprow);
    // dump_vector("sdst", sdst, nlinesd[myp]);
    startlocd[0]=0;
    for(i=0;i<parms->nprow-1;i++)startlocd[i+1] = startlocd[i]+nlinesd[i];
    //
    // now each processor knows where to store its data
    //
    if(current_proc == myp){
        //
        // this is swap data. All data comes from current_proc
        for(i=0;i<length;i++){
            if(dst[i]>=ifirst+nb){
                int cpd, cld;
                int cps, cls;
                convert_global_index_to_local_rows(dst[i], &cpd, &cld,
                                                   parms, controls);
                convert_global_index_to_local_rows(src[i], &cps, &cls,
                                                   parms, controls);
                copysubvect(&(a[cls][c1]),arow[startlocd[cpd]+iloc[cpd]],c2-c1);
                iloc[cpd]++;
            }
        }
        //
        // now arow (in current_proc) has all data to swap out
        // So send them out
        for(i=0;i<parms->nprow;i++){
            if ((i != current_proc) && (nlinesd[i])){
                MPI_Send(arow[startlocd[i]], nlinesd[i]*(c2-c1), MPI_DOUBLE,
                         i, MPSENDBASETAG+i,controls->col_comm);
            }
        }
    }else{
        //
        // other processors: receive data
        //
        MPI_Status status;
        if (nlinesd[myp]){
            MPI_Recv(arow[startlocd[myp]], nlinesd[myp]*(c2-c1), MPI_DOUBLE,
                     current_proc, MPSENDBASETAG+myp,controls->col_comm,
                     &status);
        }
    }
    //
    // now all data to be stored are in arow and ar2
    // first store arow (swapdata) to real locations
    // dprintf(0,"p2p swap end\n");
    for(i=0;i<nlinesd[myp]; i++){
        copysubvect(arow[startlocd[myp]+i],&(a[sdst[i]][c1]), c2-c1);
    }
    // dprintf(0,"p2p local swap end\n");
    // then store arow2 (UMAT) to arow
for(i=0;i<nb; i++){ // dprintf(0,"bdst[%d]=%d\n", i, bdst[i]); copysubvect(ar2[i],arow[bdst[i]], c2-c1); } // dprintf(0,"umat local copy end\n"); // and copy arow back to a in the case of current_proc if(current_proc == myp){ copysubmat(c2-c1, arow, nncol, (double(*)[])(a[current_lloc]+c1), nb, c2-c1); } // dprintf(0,"umat local copyback on current row end\n"); } void process_right_part_mpi_using_dls_phase1(int ifirst,int nb,int c1, int c2, int nnrow, int nncol, double a[nnrow][nncol], double acolinv[nb][nb], double acol[nnrow][nb], double arow[nb][c2-c1], int pv[], int src[], int dst[], int length, double scale[], PCONTROLS controls, PPARMS parms) { dump_vector("pv", pv, nb); // fprintf(stderr,"Enter dls_phase1\n"); if (parms->vcommscheme == 0){ // fprintf(stderr,"Enter global_swap_using...\n"); global_swap_using_src_and_dest(ifirst, nb, c1, c2, nnrow, nncol, a, arow, (double(*)[])(arow[nb]), scale, src, dst, length, controls,parms); return; } int i,ii, j; // arow can be used as work area before bcast_umat // // dprintf(9,"process_rp_using_dls_p1, ifirst=%d c1, c2=%d %d\n", ifirst,c1,c2); #if 1 for (ii=0;ii<nb;ii++){ i = ii+ifirst; // fprintf(stderr,"dls_phase 1 ii=%d\n",ii); // dprintf(0,"pv[%2d]=%2d\n", ii, pv[ii]); MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2); MP_scale_row_blocked_using_scale(nnrow, nncol, a, parms, controls,i,c1,c2,0,scale[ii]); } #else local_swap_using_src_and_dest(ifirst, nb, c1, c2, nnrow, nncol, a, arow, scale, src, dst, length, controls, parms); #endif // print_current_time("end swap and scale"); MP_bcast_umat(nnrow, nncol, a, parms, controls,ifirst,nb,c1,c2, ifirst+nb,arow); // print_current_time("end bcast_umat"); } void process_right_part_mpi_using_dls_phase1_old(int ifirst,int nb,int c1, int c2, int nnrow, int nncol, double a[nnrow][nncol], double acolinv[nb][nb], double acol[nnrow][nb], double arow[nb][c2-c1], int pv[], double scale[], PCONTROLS controls, PPARMS parms) { int i,ii; // arow can be used as work 
area before bcast_umat // // dprintf(9,"process_rp_using_dls_p1, ifirst=%d c1, c2=%d %d\n", ifirst,c1,c2); for (ii=0;ii<nb;ii++){ i = ii+ifirst; // dprintf(9,"process_rp, i=%d\n", i); MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2); // dprintf(9,"process_rp swap rows end, i=%d\n", i); MP_scale_row_blocked_using_scale(nnrow, nncol, a, parms, controls,i,c1,c2,0,scale[ii]); } // print_current_time("end swap and scale"); MP_bcast_umat(nnrow, nncol, a, parms, controls,ifirst,nb,c1,c2, ifirst+nb,arow); // dprintf(9,"end bcast_umat\n"); } void process_right_part_mpi_using_dls_phase2(int ifirst,int nb,int c1, int c2, int nnrow, int nncol, double a[nnrow][nncol], double acolinv[nb][nb], double acol[nnrow][nb], double arow[nb][c2-c1], int pv[], double scale[], PCONTROLS controls, PPARMS parms) { int i,ii; // dprintf(9,"process_rp_using_dls_p2, ifirst=%d c1, c2=%d %d\n", ifirst,c1,c2); // MP_update_multiple_blocked_global_using_lmat(nnrow, nncol, a, parms, // controls,ifirst,nb,c1,c2, // ifirst+nb,acol); #if 1 // MP_bcast_umat(nnrow, nncol, a, parms, controls,ifirst,nb,c1,c2, // ifirst+nb,acol,arow); gdrsetforceswapab(); MP_update_using_lu(nnrow, nncol, a, parms, controls,ifirst,nb,c1,c2, ifirst+nb,acol,arow); gdrresetforceswapab(); #endif } void column_decomposition_recursive_mpi_old(int ifirst,int nb,int nnrow, int nncol, double a[nnrow][nncol], double b[], int pv[], PCONTROLS controls, PPARMS parms) { int i,ii; dprintf(9,"column recursive %d %d\n", ifirst, nb); int havec=have_current_col(ifirst, parms, controls); if (!havec) return; if (nb <= 8){ dprintf(9,"column recursive calling transpose %d %d\n", ifirst, nb); column_decomposition_mpi_with_transpose(ifirst, nb, nnrow, nncol, a, b, pv,controls,parms); dprintf(9,"column recursive return from transpose %d %d\n", ifirst, nb); }else{ dprintf(9,"column recursive left part %d %d\n", ifirst, nb/2); column_decomposition_recursive_mpi_old(ifirst, nb/2, nnrow, nncol, a, b, pv,controls,parms); 
dprintf(9,"column process right part %d %d\n", ifirst, nb/2); process_right_part_mpi(ifirst, nb/2, nb/2, nnrow, nncol, a, b, pv,controls, parms,1); dprintf(9,"column recursive right part %d %d\n", ifirst+nb/2, nb/2); column_decomposition_recursive_mpi_old(ifirst+nb/2, nb/2, nnrow, nncol, a, b, pv+nb/2,controls,parms); // process the swap of rows for the left half int c1, c2; convert_global_col_range_to_local_range(ifirst, ifirst+nb/2, &c1, &c2, parms, controls); dprintf(9,"column recursive left part swap %d %d\n", ifirst, nb/2); for (ii=nb/2;ii<nb;ii++){ i = ii+ifirst; MP_swap_rows_blocked(nnrow, nncol, a, parms, controls,i,pv[ii], c1,c2); MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,1); } dprintf(9,"column recursive left part swap end %d %d\n", ifirst, nb/2); } } void column_decomposition_recursive_mpi(int ifirst,int nb,int nnrow, int nncol, double a[nnrow][nncol], double (*acol)[], double b[], int pv[], PCONTROLS controls, PPARMS parms) { int i,ii; int havec=have_current_col(ifirst, parms, controls); if (!havec) return; // print_current_time("enter column_deconp"); // dprintf(9,"enter column_decomp, i, nb=%d %d\n", ifirst,nb); if (nb <= 8){ BEGIN_TIMER(timer); column_decomposition_mpi_with_transpose(ifirst, nb, nnrow, nncol, a, b, pv,controls,parms); END_TIMER(timer,7,((double)(nb))*(nnrow-ifirst)); }else{ column_decomposition_recursive_mpi(ifirst, nb/2, nnrow, nncol, a, acol, b, pv,controls,parms); BEGIN_TIMER(timer0); process_right_part_mpi_withacol(ifirst, nb/2, nb/2, nnrow, nncol, a, acol, b, pv,controls, parms,1); END_TIMER(timer0,8,((double)(nb/2))*(nb/2)*(nnrow-ifirst)); column_decomposition_recursive_mpi(ifirst+nb/2, nb/2, nnrow, nncol, a, acol, b, pv+nb/2,controls,parms); // process the swap of rows for the left half int c1, c2; convert_global_col_range_to_local_range(ifirst, ifirst+nb/2, &c1, &c2, parms, controls); BEGIN_TIMER(timer1); for (ii=nb/2;ii<nb;ii++){ i = ii+ifirst; MP_swap_rows_blocked(nnrow, nncol, a, parms, 
controls,i,pv[ii], c1,c2); MP_scale_row_blocked(nnrow, nncol, a, parms, controls,i,c1,c2,1); } END_TIMER(timer1,9,((double)(nb/2))*(nb/2)); } // print_current_time("end column_deconp"); // dprintf(9,"end column_decomp, i, nb=%d %d\n", ifirst,nb); } void lu_mpi(int nnrow, int nncol, double a[nnrow][nncol], double b[], PCONTROLS controls, PPARMS parms) { int i, j, k, n; n = controls->nrow*parms->nprow; for(i=0;i<n;i++){ lu_forward_onestep_mpi(i, nnrow, nncol, a, b, controls, parms); } backward_sub_mpi(nnrow, nncol, a, b,controls, parms); // printmat_MP(nnrow, nncol, a, controls, parms); } void lu_mpi_blocked(int nnrow, int nncol, double a[nnrow][nncol], double b[], PCONTROLS controls, PPARMS parms) { int i, j, k, n, nb; n = controls->nrow*parms->nprow; nb = parms->nb; int pv[nb]; double acolinv[nb][nb]; double acol[nnrow][nb]; for(i=0;i<n;i+=nb){ // dprintf(9,"Enter lu_mpi_blocked, i = %d\n",i); column_decomposition_recursive_mpi_old(i, nb, nnrow, nncol, a, b, pv,controls, parms); MP_process_diagonal(nnrow, nncol, a, parms, controls, i,nb,acolinv,0); MP_process_lmat(nnrow, nncol, a, parms, controls,i,nb,i+nb,acol); int c1, c2; convert_global_col_range_to_local_range(i+nb, i+nb-1,&c1, &c2, parms, controls); process_right_part_mpi_using_dl(i, nb, c1, nncol, nnrow, nncol, a, acolinv, acol, pv,controls, parms); // process_right_part_mpi_using_dl_old(i, nb, 0, nnrow, nncol, a, acolinv, // acol, pv,controls, parms); } backward_sub_mpi(nnrow, nncol, a, b,controls, parms); // printmat_MP(nnrow, nncol, a, controls, parms); } int local_check_and_continue_rcomm_transfer(); void process_right_part_mpi_using_dls_concurrent(int i,int nb,int cfirst, int nnrow, int nncol, double a[nnrow][nncol], double aip[nb][nb], double acp[nnrow][nb], double * arow, double * arow2, int * pvp, double * scalep, PCONTROLS controls, PPARMS parms) { int cs,ce,cenext; double (*arp)[]; double (*arp2)[]; arp= (double(*)[]) arow; arp2= (double(*)[]) arow2; int ninc; // ninc= nb*4; // if (ninc > 4096) ninc 
= nb*2; ninc= nb; // should not make bigger than 8 (arow memory limit might be exceeded) // ninc= nb/2; #ifndef CONCURRENT_UCOMM // non-overlapping comm // ninc=nncol; #endif BEGIN_TIMER(timerx); int n=parms->n; int src[n]; int dst[n]; int length; dumpsubmat("a before phase1", nncol, a, controls->nrow, controls->ncol, parms, controls); generate_src_and_dst_lists(i, pvp, src, dst, &length,controls, parms); for(cs=cfirst; cs<nncol;cs+= ninc){ ce = cs + ninc; if (ce >= controls->ncol)ce = nncol; if (ce >nncol) ce = nncol; if (ce != nncol){ cenext = ce+ninc; if (cenext >= controls->ncol)cenext = nncol; } if (cs == cfirst){ // dprintf(9,"using_dls call first phase1\n"); BEGIN_TIMER(timer); print_current_time("enter first dls_phase_1"); process_right_part_mpi_using_dls_phase1(i, nb, cs, ce, nnrow, nncol, a, aip, acp,arp, pvp,src, dst, length, scalep, controls, parms); print_current_time("end first dls_phase_1"); dumpsubmat("a after phase1", nncol, a, controls->nrow, controls->ncol, parms, controls); dumpsubmat("arow", ce-cs, arp, nb, ce-cs, parms, controls); END_TIMER(timer,3,((double)(ce-cs))*nb); // print_current_time("end first dls_phase_1"); // dprintf(9,"using_dls end first phase1\n"); } #ifndef DO_RCOMM_LAST if (controls->check_rcomm_transfer){ // dprintf(9,"using_dls call local_check_and_continue_rcomm\n"); int rcomm_state = local_check_and_continue_rcomm_transfer(); // dprintf(9,"using_dls, cs=%d, rcomm=%d\n", cs, rcomm_state); } #endif #ifdef CONCURRENT_UCOMM omp_set_nested(1); #pragma omp parallel #pragma omp sections #endif { #ifdef CONCURRENT_UCOMM #pragma omp section #endif { print_current_time("enter dls_phase_2"); BEGIN_TIMER(timer); process_right_part_mpi_using_dls_phase2(i, nb, cs, ce, nnrow, nncol, a, aip, acp,arp, pvp,scalep, controls, parms); gdrsetskipsendjmat(); double x=parms->n - i; END_TIMER(timer,2,x*nb*(ce-cs)); print_current_time("end dls_phase_2"); } #ifdef CONCURRENT_UCOMM #pragma omp section #endif if (ce < nncol){ #ifdef CONCURRENT_UCOMM 
usleep(10000); #endif print_current_time("enter dls_phase_1"); BEGIN_TIMER(timer); process_right_part_mpi_using_dls_phase1(i, nb, ce, cenext, nnrow, nncol, a, aip, acp,arp2, pvp, src, dst, length, scalep, controls, parms); END_TIMER(timer,3,((double)(ce-cs)*nb)); print_current_time("end dls_phase_1"); } } swapvoidptr((void**)&arp2, (void**)&arp); if (ce == nncol) cs = nncol; } gdrresetskipsendjmat(); double x=parms->n - i; END_TIMER(timerx,1,x*(x-nb)*nb*2); } void MP_process_row_comm_using_Bcast(int nnrow, int nncol, double a[nnrow][nncol], PCONTROLS controls, PPARMS parms, int i, int nb, int n, int * pvp2, double * scalep2, double (*aip2)[], double (*acp2)[]) { // print_current_time("enter process_row_comm"); if (i+nb < n){ int current_prow; int current_lrow; int ii = i + nb; convert_global_index_to_local_rows(ii, &current_prow, &current_lrow, parms, controls); int pcol = pcolid(ii,parms,controls); if (current_prow == controls->rank_in_col){ MPI_Bcast(aip2,sizeof(double)*nb*nb,MPI_BYTE, pcol, controls->row_comm); } int nrows=MP_prepare_lmat(nnrow, nncol, a, parms, controls, ii,nb,ii+nb,acp2); MPI_Bcast(pvp2,sizeof(int)*nb,MPI_BYTE, pcol, controls->row_comm); MPI_Bcast(scalep2,sizeof(double)*nb,MPI_BYTE, pcol, controls->row_comm); MPI_Bcast(acp2,sizeof(double)*nrows*nb,MPI_BYTE, pcol, controls->row_comm); } print_current_time("end process_row_comm"); } void MP_process_row_comm(int nnrow, int nncol, double a[nnrow][nncol], PCONTROLS controls, PPARMS parms, int i, int nb, int n, int * pvp2, double * scalep2, double (*aip2)[], double (*acp2)[]) { print_current_time("enter process_row_comm"); if (i+nb < n){ MPI_Status status; int current_prow; int current_lrow; int ii = i + nb; BEGIN_TIMER(timer); convert_global_index_to_local_rows(ii, &current_prow, &current_lrow, parms, controls); int nextp= (controls->rank_in_row+1)%parms->npcol; int prevp= (controls->rank_in_row-1+parms->npcol)%parms->npcol; int pcol = pcolid(ii,parms,controls); if (current_prow == 
controls->rank_in_col){ #if 0 if (controls->rank_in_row != pcol){ MPI_Recv(aip2, sizeof(double)*nb*nb,MPI_BYTE, prevp,MPRCOMMTAG, controls->row_comm, &status); } if (nextp != pcol){ MPI_Send(aip2, sizeof(double)*nb*nb,MPI_BYTE, nextp,MPRCOMMTAG, controls->row_comm); } #endif // dprintf(9,"call bcast aip2\n"); MPI_Bcast(aip2,sizeof(double)*nb*nb,MPI_BYTE, pcol, controls->row_comm); } // dprintf(9,"call prepare lmat\n"); int nrows=MP_prepare_lmat(nnrow, nncol, a, parms, controls, ii,nb,ii+nb,acp2); // dprintf(9,"call bcast pvp2\n"); MPI_Bcast(pvp2,sizeof(int)*nb,MPI_BYTE, pcol, controls->row_comm); // dprintf(9,"call bcast scalp2\n"); MPI_Bcast(scalep2,sizeof(double)*nb,MPI_BYTE, pcol, controls->row_comm); // dprintf(9,"call bcast acp2\n"); // MPI_Bcast(acp2,sizeof(double)*nrows*nb,MPI_BYTE, // pcol, controls->row_comm); MP_mybcast(acp2,sizeof(double)*nrows*nb, pcol, controls->row_comm); END_TIMER(timer,0,(double)((n-i-nb)*nb)); } print_current_time("end process_row_comm"); } static RCOMMT rcomm; #define MAXPENDINGMESSAGE 100 void register_singlemessage_to_rcomm(PRCOMMT rcomm, void * p, int length) { int i = rcomm->nmessages; (rcomm->message+i)->mptr=p; (rcomm->message+i)->length=length; (rcomm->message+i)->message_state = INITIALIZED; if (rcomm->first){ (rcomm->message+i)->message_state = RECEIVED; if (rcomm->last){ (rcomm->message+i)->message_state = SENT; } } rcomm->nmessages++; } int check_and_continue_rcomm_transfer(PRCOMMT rcomm, int blockmode) { int i; int nend = 0; MPI_Status status; int received_count = 0; int sent_count = 0; for(i=0;i<rcomm->nmessages; i++){ PMESSAGE mp = rcomm->message+i; if ((mp->message_state == INITIALIZED) && (i<MAXPENDINGMESSAGE+received_count)){ // fprintf(stderr,"new Irecv at %d\n",i); int retval=MPI_Irecv(mp->mptr, mp->length, MPI_BYTE,rcomm->prevp, MPNRMESSAGETAG+i,rcomm->comm, &(mp->request)); if(retval != MPI_SUCCESS)MP_error("MPI_Irecv error in start_rcomm"); mp->message_state = RECEIVING; } if (mp->message_state == RECEIVING){ 
int flag; if (blockmode){ MPI_Wait(&(mp->request), &status); flag = 1; }else{ MPI_Test(&(mp->request),&flag, &status); } if (flag)mp->message_state = RECEIVED; } if (mp->message_state == RECEIVED){ if(received_count < i)received_count = i; if (rcomm->last){ mp->message_state = SENT; }else{ if (i < sent_count + MAXPENDINGMESSAGE){ // fprintf(stderr,"new Isend at %d\n",i); int retval=MPI_Isend(mp->mptr, mp->length, MPI_BYTE,rcomm->nextp, MPNRMESSAGETAG+i,rcomm->comm, &(mp->request)); if(retval != MPI_SUCCESS)MP_error("MPI_Isend error in check_rcomm"); mp->message_state = SENDING; } } } if (mp->message_state == SENDING){ int flag; if (blockmode){ MPI_Wait(&(mp->request), &status); flag = 1; }else{ MPI_Test(&(mp->request),&flag, &status); } if (flag)mp->message_state = SENT; } if (mp->message_state == SENDING) nend ++; if (mp->message_state == SENT){ if (sent_count < i)sent_count = i; } } return rcomm->nmessages - nend; } int local_check_and_continue_rcomm_transfer() { return check_and_continue_rcomm_transfer(&rcomm, 0); } void start_rcomm_transfer(PRCOMMT rcomm) { int i; // dprintf(9,"Enter start_rcomm\n"); for(i=0;(i<rcomm->nmessages) &&(i<MAXPENDINGMESSAGE); i++){ PMESSAGE mp = rcomm->message+i; if (mp->message_state == INITIALIZED){ // dprintf(9,"Irecv register i=%d length=%d\n",i, mp->length); int retval=MPI_Irecv(mp->mptr, mp->length, MPI_BYTE,rcomm->prevp, MPNRMESSAGETAG+i,rcomm->comm, &(mp->request)); if(retval != MPI_SUCCESS)MP_error("MPI_Irecv error in start_rcomm"); mp->message_state = RECEIVING; } // dprintf(9,"start_rcom, i=%d status=%d\n", i, mp->message_state); } check_and_continue_rcomm_transfer(rcomm,0); } void register_to_rcomm(PRCOMMT rcomm, void * p, int length) { int i; for (i=0;i<length; i+= MAXRCOMMMESSAGE){ int len = MAXRCOMMMESSAGE; if (i+len > length) len = length-i; register_singlemessage_to_rcomm(rcomm, ((char*)p)+i, len); } } int MP_process_row_comm_init(int nnrow, int nncol, double a[nnrow][nncol], PCONTROLS controls, PPARMS parms, int i, 
int nb, int n, int * pvp2, double * scalep2, double (*aip2)[], double (*acp2)[]) { print_current_time("enter process_row_comm_init"); controls->check_rcomm_transfer = 1; int nrows=0; if (i+nb < n){ int current_prow; int current_lrow; int ii = i + nb; BEGIN_TIMER(timer); convert_global_index_to_local_rows(ii, &current_prow, &current_lrow, parms, controls); rcomm.nextp= (controls->rank_in_row+1)%parms->npcol; rcomm.prevp= (controls->rank_in_row-1+parms->npcol)%parms->npcol; rcomm.nmessages=0; rcomm.first = (controls->rank_in_row == pcolid(ii,parms,controls)); rcomm.last = (rcomm.nextp == pcolid(ii,parms,controls)); rcomm.comm = controls->row_comm; rcomm.preceive=0; rcomm.psend=0; if (current_prow == controls->rank_in_col){ register_to_rcomm(&rcomm, aip2, nb*nb*sizeof(double)); } nrows=MP_prepare_lmat(nnrow, nncol, a, parms, controls, ii,nb,ii+nb,acp2); register_to_rcomm(&rcomm, pvp2, sizeof(int)*nb); register_to_rcomm(&rcomm, scalep2,sizeof(double)*nb); register_to_rcomm(&rcomm, acp2,sizeof(double)*nrows*nb); start_rcomm_transfer(&rcomm); } print_current_time("end process_row_comm_init"); return nrows; } void MP_process_row_comm_test(int nnrow, int nncol, double a[nnrow][nncol], PCONTROLS controls, PPARMS parms, int i, int nb, int n, int * pvp2, double * scalep2, double (*aip2)[], double (*acp2)[]) { MP_process_row_comm_init(nnrow, nncol, a, controls, parms, i, nb, n, pvp2, scalep2, aip2, acp2); check_and_continue_rcomm_transfer(&rcomm,1); // MPI_Barrier(controls->row_comm); } void lu_mpi_blocked_lookahead(int nnrow, int nncol, double a[nnrow][nncol], double (*acol)[],double (*acol2)[], double (*dinv)[], double arow[],double arow2[], double b[], PCONTROLS controls, PPARMS parms) { int i, j, k, n, nb; n = controls->nrow*parms->nprow; nb = parms->nb; int pv[nb]; int pv2[nb]; double scale[nb]; double scale2[nb]; double acolinv[nb][nb] __attribute__((aligned(128))); double acolinv2[nb][nb] __attribute__((aligned(128))); double (*aip)[]; double (*aip2)[]; double (*acp)[]; 
double (*acp2)[]; double (*arp)[]; double (*arp2)[]; double (*aptmp)[]; int * pvp; int * pvp2; int * pvptmp; double * scalep; double * scalep2; double * scaleptmp; i=0; print_current_time("enter first rfact"); BEGIN_TIMER(timertotal); BEGIN_TIMER(timerx); BEGIN_TIMER(timercd); // void gdrsetusemultithread(); column_decomposition_recursive_mpi(i, nb, nnrow, nncol, a, acol, b, pv,controls, parms); END_TIMER(timercd,30,(double)((n-i-nb)*nb)); BEGIN_TIMER(timer00); print_current_time("end first rfact"); // dprintf(9,"call bcast pv\n"); MPI_Bcast(pv,sizeof(int)*nb,MPI_BYTE, pcolid(i,parms,controls), controls->row_comm); MP_construct_scalevector(nnrow, nncol, a, parms, controls, i, nb, scale); MPI_Bcast(scale,sizeof(double)*nb,MPI_BYTE, pcolid(i,parms,controls), controls->row_comm); print_current_time("enter process_diagonal"); MP_process_diagonal(nnrow, nncol, a, parms, controls, i,nb,acolinv,0); print_current_time("end MP_process_diagonal"); int nrows=MP_process_lmat(nnrow, nncol, a, parms, controls,i,nb,i+nb,acol); print_current_time("end MP_process_lmat"); MP_calculate_ld(nb, acol, nrows, acol2, acolinv,i,controls, parms); END_TIMER(timerx,5,(double)((n-i-nb)*nb)); END_TIMER(timer00,37,0.0); END_TIMER(timer00,39,0.0); print_current_time("end first lookahead"); aip=acolinv; acp = acol; aip2=acolinv2; acp2 = acol2; pvp=pv; pvp2=pv2; scalep=scale; scalep2=scale2; for(i=0;i<n;i+=nb){ // void gdrsetusemultithread(); // fprintf(stderr,"lu2_mpi i=%d\n",i); int c1, c2, cfirst; int havec = have_current_col(i+nb, parms, controls); convert_global_col_range_to_local_range(i+nb, i+nb*2-1,&c1, &c2, parms, controls); arp= (double(*)[]) arow; c2 ++; BEGIN_TIMER(timerx); print_current_time("enter rfact"); int src[n]; int dst[n]; int length; dump_vector("pvp", pvp, nb); generate_src_and_dst_lists(i, pvp, src, dst, &length,controls, parms); if ((i+nb < n) && havec){ // if (0){ int ii = i + nb; BEGIN_TIMER(timer00); // dprintf(9,"rfact call dls_phase1, ii, nb=%d %d\n", ii, nb); 
dumpsubmat("a before first phase1", nncol, a, controls->nrow, controls->ncol, parms, controls); process_right_part_mpi_using_dls_phase1(i, nb, c1, c2, nnrow, nncol, a, aip, acp,arp, pvp,src, dst, length, scalep, controls, parms); dumpsubmat("a after first phase1", nncol, a, controls->nrow, controls->ncol, parms, controls); // dprintf(9,"rfact call dls_phase2, ii, nb=%d %d\n", ii, nb); process_right_part_mpi_using_dls_phase2(i, nb, c1, c2, nnrow, nncol, a, aip, acp,arp, pvp,scalep, controls, parms); // dprintf(9,"rfact call column_dec ii, nb=%d %d\n", ii, nb); END_TIMER(timer00,37,0.0); END_TIMER(timer00,40,0.0); BEGIN_TIMER(timercd); column_decomposition_recursive_mpi(ii, nb, nnrow, nncol, a, acp2, b, pvp2,controls, parms); END_TIMER(timercd,30,(double)((n-i-nb)*nb)); // dprintf(9,"rfact call MP_construct_scale ii, nb=%d %d\n", ii, nb); MP_construct_scalevector(nnrow, nncol, a, parms, controls, ii, nb, scalep2); // dprintf(9,"rfact end, ii, nb=%d %d\n", ii, nb); cfirst=c2; }else{ cfirst=c1; } BEGIN_TIMER(timer01); if (i+nb < n){ int ii = i + nb; // dprintf(9,"call phase1 %d\n",i); MP_process_diagonal_phase1(nnrow, nncol, a, parms, controls, ii,nb,aip2,0); // dprintf(9,"call phase1 end\n"); } END_TIMER(timer01,37,0.0); END_TIMER(timer01,41,0.0); END_TIMER(timerx,5,(double)((n-i-nb)*nb)); print_current_time("end rfact"); // dprintf(9,"call process_row_comm\n"); BEGIN_TIMER(timer03); #ifndef DO_RCOMM_LAST nrows=MP_process_row_comm_init(nnrow, nncol, a, controls, parms, i, nb, n, pvp2, scalep2,aip2, acp2); #endif // dprintf(9,"call process_right_part_... %d\n",i); // check_and_continue_rcomm_transfer(&rcomm,1); // test to finish rcomm here -- 2011/6/19 process_right_part_mpi_using_dls_concurrent(i, nb, cfirst, nnrow, nncol, a, aip, acp, arow, arow2, pvp, scalep, controls, parms); BEGIN_TIMER(timery); BEGIN_TIMER(timer02); // dprintf(9,"end process_right_part_... 
%d\n",i); #ifdef DO_RCOMM_LAST nrows=MP_process_row_comm_init(nnrow, nncol, a, controls, parms, i, nb, n, pvp2, scalep2,aip2, acp2); #endif check_and_continue_rcomm_transfer(&rcomm,1); // dprintf(9,"end check_and_continue... %d\n",i); print_current_time("end lmat/dls"); #ifndef DUPOSTMUL MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls, i,c1,nncol,nb,aip); #else MP_update_multiple_using_diagonal(nnrow, nncol, a, parms, controls, i,controls->ncol,controls->ncol+1,nb,aip); #endif MP_store_diagonal_inverse(nnrow, nb, dinv, parms,controls, i, aip); print_current_time("end mult_diag"); if (i+nb < n){ int ii = i + nb; MP_calculate_ld_phase1(nb, acp2, nrows, aip2, ii, controls, parms); print_current_time("end ld_phase1"); MP_calculate_ld_phase2(nb, acp2, nrows, acp, aip2, ii,controls, parms); print_current_time("end ld_phase2"); } // fprintf(stderr, "before swap\n"); dump_vector("pvp2", pvp2, nb); dump_vector("pvp ", pvp, nb); swapvoidptr((void**)&acp2, (void**)&acp); swapvoidptr((void**)&aip2, (void**)&aip); swapvoidptr((void**)&pvp2, (void**)&pvp); swapvoidptr((void**)&scalep2, (void**)&scalep); // fprintf(stderr, "after swap\n"); dump_vector("pvp2", pvp2, nb); dump_vector("pvp ", pvp, nb); if (MP_myprocid()==0) fprintf(stderr,"lu_mpi look i=%d end\n", i); END_TIMER(timer02,37,0.0); END_TIMER(timer02,42,0.0); END_TIMER(timery,5,0.0); } END_TIMER(timertotal,4,((double)(n))*((double)n)*(n*2.0/3.0)); print_current_time("enter backward_sub"); BEGIN_TIMER(timerback); backward_sub_blocked_mpi(nnrow, nncol, a,nb, dinv, b,controls, parms); END_TIMER(timerback,28,(double)(n)*n); print_current_time("end backward_sub"); // printmat_MP(nnrow, nncol, a, controls, parms); } void usage() { fprintf(stderr,"lu2_mpi options:\n"); fprintf(stderr," -h: This help\n"); fprintf(stderr," -s: seed (default=1)\n"); fprintf(stderr," -n: size of matrix (default=8192)\n"); fprintf(stderr," -r: allocate processors in row-major order \n"); fprintf(stderr," -b: block size 
(default=2048)\n"); fprintf(stderr," -p: processors in row (default=1)\n"); fprintf(stderr," -q: processors in column (default=1)\n"); fprintf(stderr," -g: usehugetlbfs (default=no)\n"); fprintf(stderr," -w: two process per node mode (use two cards)\n"); fprintf(stderr," -B: first card ID (default=0)\n"); fprintf(stderr," -N: Number of cards per MPI process(default=1)\n"); fprintf(stderr," -T: Max process ID for timing info(default=0: all)\n"); fprintf(stderr," -v: scheme for vertical communication (default=0: advanced, else: simple)\n"); fprintf(stderr," -S: stress factor for thermal test(default=0: no additional stress)\n"); } extern char *optarg; extern int optind; void print_parms(FILE* stream, PPARMS parms) { fprintf(stream,"N=%d Seed=%d NB=%d\n", parms->n,parms->seed,parms->nb); fprintf(stream,"P=%d Q=%d Procs row major=%d usehuge=%d\n", parms->nprow,parms->npcol, parms->procs_row_major, parms->usehugepage); fprintf(stream,"usetwocards=%d ncards=%d cardid=%d maxpt=%d vcommscheme=%d stress=%d\n", parms->twocards, parms->ncards, parms->firstcard, parms->maxpidfortiming, parms->vcommscheme, parms->stress_factor); } void read_parms(int argc, char * argv[], PPARMS parms) { int ch; static struct option longopts[] = { { "help", no_argument, 0, 'h' }, { "block_size", optional_argument, NULL, 'b' }, { "seed", optional_argument, NULL, 's' }, { "ndim_matrix", required_argument, NULL, 'n' }, { "processors_in_row", optional_argument, NULL, 'p' }, { "processors_in_column", optional_argument, NULL, 'q' }, { "processors_row_major", no_argument, 0, 'r' }, { "usehugepage", no_argument, 0, 'g' }, { "usetwocards", no_argument, 0, 'w' }, { "ncards", optional_argument, NULL, 'N' }, { "max_pid_for_timing", optional_argument, NULL, 'T' }, { "vcomm_scheme", optional_argument, NULL, 'v' }, { "stress_factor", optional_argument, NULL, 'S' }, { NULL, 0, NULL, 0 } }; MP_message("enter read_parms"); parms->seed=1; parms->n=8192; parms->nb = 2048; parms->nprow=1; parms->npcol=1; 
parms->procs_row_major = 0;
  parms->usehugepage = 0;
  parms->twocards = 0;
  parms->ncards = 1;
  parms->firstcard = 0;
  parms->maxpidfortiming = 0;
  parms->vcommscheme = 0;
  parms->stress_factor=0;
  /* Recount argc from argv: necessary to fix up the argc value
     messed up by MPICH...  JM 2009/9/21 */
  for(argc=0;argv[argc]!=NULL;argc++);
  while((ch=getopt_long(argc,argv,"B:N:S:T:b:ghn:p:q:rs:v:w",longopts, NULL))!= -1){
    fprintf(stderr,"optchar = %c optarg=%s\n", ch,optarg);
    switch (ch) {
    case 'B': parms->firstcard = atoi(optarg); break;       /* first accelerator board id */
    case 'N': parms->ncards = atoi(optarg); break;          /* boards per MPI process */
    case 'S': parms->stress_factor = atoi(optarg); break;   /* thermal stress test factor */
    case 'T': parms->maxpidfortiming = atoi(optarg); break; /* limit timing output to low pids */
    case 'b': parms->nb = atoi(optarg); break;              /* block size */
    case 'g': parms->usehugepage = 1; break;
    case 's': parms->seed = atoi(optarg); break;
    case 'n': parms->n = atoi(optarg); break;               /* matrix dimension */
    /* NOTE(review): -p stores into npcol and -q into nprow, while usage()
       describes -p as "processors in row" -- confirm the intended P/Q
       orientation is consistent with usage() and print_parms(). */
    case 'p': parms->npcol = atoi(optarg); break;
    case 'q': parms->nprow = atoi(optarg); break;
    case 'v': parms->vcommscheme = atoi(optarg); break;
    case 'r': parms->procs_row_major = 1; break;
    case 'w': parms->twocards = 1; break;
    case 'h': usage(); exit(1);
    case '?':usage(); exit(1); break;
    default:break;
    }
  }
  argc -= optind;
  argv += optind;
  /* echo the accepted parameters on both streams */
  print_parms(stderr, parms);
  print_parms(stdout, parms);
  fprintf(stderr,"P, Q, MP_proccount = %d %d %d\n",
          parms->npcol, parms->nprow,MP_proccount());
  /* the process grid must exactly cover the MPI world */
  if (parms->nprow*parms->npcol != MP_proccount()){
    MP_error("P*Q != np");
  }
}

/* Broadcast the PARMS structure (filled in by rank 0) to every rank,
   then echo the received values. */
void MP_broadcast_parms(PPARMS parms)
{
  /* NOTE(review): sizeof(...) is size_t but the format is %d -- should be
     %zu (or an explicit cast) on LP64 targets. */
  fprintf(stderr,"broadcast_parms size= %d\n", sizeof(PARMS));
  MP_bcast((void*)parms, sizeof(PARMS));
  MP_message("broadcast_parms normal end");
  print_parms(stderr, parms);
}

/* Parse the command line on rank 0 only, then broadcast the resulting
   parameters so that all ranks agree. */
void set_parms(int argc, char * argv[], PPARMS parms)
{
  MP_message("Enter set_parms");
  if (MP_myprocid()==0)read_parms(argc, argv, parms);
  MP_message("read_parms end");
  MP_broadcast_parms(parms);
  MP_message("broadcast end");
}

/* Derive per-process local matrix dimensions from the global parameters.
   Assumes n is divisible by nprow and npcol -- TODO confirm callers
   guarantee this (integer division truncates otherwise). */
void setup_controls(PPARMS parms, PCONTROLS controls)
{
  controls->nrow = parms->n/parms->nprow;   /* local row count */
  controls->ncol = parms->n/parms->npcol;   /* local column count */
  controls->nnrow = controls->nrow;         /* allocated (padded) dims */
  controls->nncol = controls->ncol+NNCOLINC;
  controls->check_rcomm_transfer = 0;
}

/* Driver: initialize MPI, read/broadcast parameters, allocate the
   distributed matrix and panel work areas, run the blocked look-ahead
   LU solver, and report timing and error norms (continued in the next
   chunk of main). */
int main(int argc, char * argv[])
{
  int ch;
  PARMS parms;
  CONTROLS controls;
  int i;
  MP_initialize(&argc,&argv);
  MP_message("Return from MP_initialize");
  set_parms(argc, argv, &parms);
  setup_controls(&parms, &controls);
  MP_message("Return from setup_controls");
  int nb, n, seed;
  nb = parms.nb;
  n = parms.n;
  seed = parms.seed;
  double (*a)[];        /* local matrix storage (GNU incomplete-array ptr) */
  double (*acol)[];     /* column-panel work areas (double buffered) */
  double (*acol2)[];
  double (*dinv)[];     /* stored diagonal-block inverses */
  double *arow, *arow2; /* row-panel work areas (double buffered) */
  int * pv;
  double *b, *b0, *bcopy;
  long int nl=n;
  set_max_pid_for_print_time(parms.maxpidfortiming);
  gdrsetboardid(parms.firstcard);
  if (parms.twocards){
    gdrsetboardid(MP_myprocid() %2);  /* alternate the two boards between ranks */
  }
  gdrdgemm_set_stress_factor(parms.stress_factor);
  if (parms.maxpidfortiming){
    if (MP_myprocid() >= parms.maxpidfortiming)set_matmul_msg_level(0);
  }
  if(parms.ncards > 1) gdrsetnboards(parms.ncards);
  if (parms.usehugepage){
    dprintf(9,"hugetlb: usehuge = %d\n", parms.usehugepage);
    MP_allocate_hugetlbfs("/mnt/huge/aaa", (void**)&a, (void**)&acol,
                          (void**)&acol2,(void**)&dinv,(void**)&arow,
                          (void**)&arow2, controls.nnrow, nb,controls.nncol);
  }else{
    int nbb = nb+32;    /* pad the panel leading dimension */
    dprintf(9,"malloc: usehuge = %d\n", parms.usehugepage);
    a = (double(*)[]) malloc(sizeof(double)*controls.nnrow*controls.nncol);
    acol = (double(*)[]) malloc(sizeof(double)*controls.nnrow*nbb);
    acol2 = (double(*)[]) malloc(sizeof(double)*controls.nnrow*nbb);
    dinv = (double(*)[]) malloc(sizeof(double)*controls.nnrow*nbb);
    arow = (double*) malloc(sizeof(double)*nb*nbb*2);
    arow2 = (double*) malloc(sizeof(double)*nb*nbb*2);
  }
  fprintf(stderr,"Return from MP_allocate\n");
  // init_matoutfid();
  if(MP_myprocid())set_debug_level(8);
  {
    // omp_set_nested(1);
    // fprintf(stderr,"Omp_get_nested=%d\n", omp_get_nested());
  }
  b = (double*)malloc(sizeof(double)*controls.nnrow);
  b0 = (double*)malloc(sizeof(double)*controls.nnrow);
  bcopy = (double*)malloc(sizeof(double)*parms.n);
  pv = (int*)malloc(sizeof(int)*controls.nnrow);
MP_setup_communicators(&parms, &controls);   /* main(), continued */
  fprintf(stderr,"Return from MP_setup_comm");
  /* stagger the ranks a little so the reset_gdr calls below do not all
     hit the boards at the same instant */
  sleep(MP_myprocid()%10);
  /* reset/warm up the accelerator boards, using whichever work area is
     large enough to serve as scratch */
  if (controls.nnrow > nb*4){
    reset_gdr(nb, acol2, nb, acol, controls.nnrow);
  }else{
    reset_gdr(nb, arow2, nb, arow, nb*4);
  }
  fprintf(stderr,"Proc %d Return from reset_gdr\n", MP_myprocid());
  /* seed==0 means read the matrix from a file, otherwise generate it
     pseudo-randomly (flag 1: also build the right-hand side b0) */
  if (seed == 0){
    readmat(n,a);
  }else{
    MP_randomsetmat(controls.nncol, controls.nnrow, a,&parms, &controls,1,b0);
  }
#if 1
  MP_sync();
  if (controls.nnrow > nb*4){
    reset_gdr(nb, acol2, nb, acol, controls.nnrow);
  }else{
    reset_gdr(nb, arow2, nb, arow, nb*4);
  }
#endif
  MP_sync();
  fprintf(stderr,"Proc %d Return from reset_gdr\n", MP_myprocid());
  timer_init();
  init_timer();
  if (MP_myprocid()==0){
    print_current_datetime("Start calculation ");
  }
  init_current_time();
  MP_message("enter lu_mpi");
  /* the timed region: blocked LU factorization with look-ahead plus the
     triangular solves */
  lu_mpi_blocked_lookahead(controls.nnrow, controls.nncol, a,
                           acol,acol2,dinv,arow,arow2,
                           b,&controls, &parms);
  // dprintf(9,"end lu_mpi_lookahead\n");
  // printmat_MP(controls.nnrow, controls.nncol, a, &controls, &parms);
  double ctime=cpusec();
  double wtime=wsec();
  if (MP_myprocid()==0){
    print_current_datetime("End calculation ");
  }
  /* regenerate the original matrix (same seed, flag 0) so the residual
     check below compares against untouched data */
  if (seed == 0){
    readmat(n,a);
  }else{
    MP_randomsetmat(controls.nncol, controls.nnrow, a,&parms, &controls,0,b0);
  }
  dprintf(9,"end randomsetmat\n");
  double a1, ao, b1, bo;
  MP_calcnorm(controls.nncol, controls.nnrow, a, &parms, &controls, &a1, &ao);
  MP_calcvnorm(b, &parms, &controls, &b1, &bo);
  check_solution_mpi(controls.nnrow, controls.nncol, a, b, bcopy,
                     &controls, &parms);
  // dprintf(9,"end check_solution\n");
  // printmat_MP(controls.nnrow, controls.nncol, a, &controls, &parms);
  double einf = print_errors( b,b0,&controls, &parms);
  print_timers_mpi(0);
  if (MP_myprocid()==0){
    double nd = parms.n;
    /* HPL-style result report, duplicated on stderr and stdout */
    HPLlogprint(stderr, parms.n, parms.nb, parms.nprow, parms.npcol, wtime);
    HPLerrorprint(stderr, einf, a1, ao, b1, bo, nd);
    fflush(stderr);
    fflush(stdout);
    fprintf(stderr,"\n");
    fprintf(stderr,"\n");
    fprintf(stderr,"\n");
    fprintf(stderr,"\n");
    HPLlogprint(stdout, parms.n, parms.nb, parms.nprow, parms.npcol, wtime);
    HPLerrorprint(stdout, einf, a1, ao, b1, bo, nd);
    // printf("\n\n Norms = %e %e %e %e\n", a1, ao, b1, bo);
    double speed = nd*nd*nd*2.0/3.0/wtime/1e9;  /* LU flop count / wall time */
    fprintf(stderr,"\n\n cpusec = %g wsec=%g %g Gflops\n\n\n",
            ctime, wtime, speed);
    printf("\n\n cpusec = %g wsec=%g %g Gflops\n\n\n", ctime, wtime, speed);
    fflush(stdout);
  }
  // MP_message("ALL end --- perform MPI_Barrier\n");
  MPI_Barrier(MPI_COMM_WORLD);
  // MP_message("MPI_Barrier end\n");
  MPI_Barrier(MPI_COMM_WORLD);
  // MP_message("Second MPI_Barrier end\n");
  MPI_Finalize();
  // MP_message("MPI_Finalize end\n");
  return 0;
}
nestedfn-6.c
extern void abort (void); int j; int main (void) { int i; void nested (void) { i = 0; } #pragma omp parallel for lastprivate (i) for (i = 0; i < 50; i += 3) ; if (i != 51) abort (); #pragma omp parallel for lastprivate (j) for (j = -50; j < 70; j += 7) ; if (j != 76) abort (); return 0; }
fac_restrict2.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008,   Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 2.19 $
 ***********************************************************************EHEADER*/

/******************************************************************************
 * OpenMP Problems
 *
 * Are private static arrays a problem?
 *
 ******************************************************************************/

/******************************************************************************
 *  FAC composite level restriction.
 *  Injection away from the refinement patches; constant restriction
 *  inside patch.
 ******************************************************************************/

#include "_hypre_sstruct_ls.h"
#include "fac.h"

/* Linearize a 3d cell offset (i,j,k in {0,1}) into rank 0..7
   (i fastest, then j, then k). */
#define MapCellRank(i, j , k, rank) \
{ \
   rank = 4*k + 2*j + i; \
}

/* Inverse of MapCellRank: recover the (ii,jj,kk) offset index from rank. */
#define InverseMapCellRank(rank, stencil) \
{ \
   HYPRE_Int ij,ii,jj,kk; \
   ij = (rank%4); \
   ii = (ij%2); \
   jj = (ij-ii)/2; \
   kk = (rank-2*jj-ii)/4; \
   hypre_SetIndex(stencil, ii, jj, kk); \
}

/*--------------------------------------------------------------------------
 * hypre_FacSemiRestrictData data structure
 *--------------------------------------------------------------------------*/
typedef struct
{
   HYPRE_Int             nvars;            /* number of variables per part */
   hypre_Index           stride;           /* refinement factors */

   hypre_SStructPVector *fgrid_cvectors;   /* the grid of this vector may not be on the actual grid */
   hypre_BoxArrayArray **identity_arrayboxes; /* coarse boxes NOT under a fine box: plain injection */
   hypre_BoxArrayArray **fullwgt_ownboxes;    /* coarsened-fbox chunks kept on this proc */
   hypre_BoxArrayArray **fullwgt_sendboxes;   /* coarsened-fbox chunks sent to other procs */

   HYPRE_Int          ***own_cboxnums;     /* local crs boxnums of ownboxes */

   hypre_CommPkg       **interlevel_comm;  /* fine->coarse communication, one pkg per var */
/* hypre_CommPkg       **intralevel_comm;*/ /* may need to build an intra comm so that
                                               each processor only fullwts its own fine
                                               data- may need to add contrib */
} hypre_FacSemiRestrictData2;

/*--------------------------------------------------------------------------
 * hypre_FacSemiRestrictCreate2
 * Allocate a zero-initialized restriction data object; fields are filled
 * in later by hypre_FacSemiRestrictSetup2. Returns 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_FacSemiRestrictCreate2( void **fac_restrict_vdata_ptr)
{
   HYPRE_Int                   ierr = 0;
   hypre_FacSemiRestrictData2 *fac_restrict_data;

   fac_restrict_data = hypre_CTAlloc(hypre_FacSemiRestrictData2, 1);
   *fac_restrict_vdata_ptr = (void *) fac_restrict_data;

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_FacSemiRestrictSetup:
 * Two types of communication are needed- one for the interlevel coarsened
 * fine boxes, and the other for the ghostlayer of the restricted vector.
 *
 * Approach: Identity away from the patches & fullweighting in a patch.
 * Since a fbox may not have the desired mapping
 *   fbox= [a_0, a_1, a_2] x [b_0, b_1, b_2],  a_i= c_i*rfactor[i],
 *   b_i= f_i*rfactor[i] + g_i  with  g_i= (rfactor[i]-1),
 * attention must be paid to what the own_boxes, send_boxes, and recv_boxes
 * are. These map overlap. The reason: myproc fullwgts what it can or
 * equivalently, gets the restriction contributions of its data. Some
 * off_procs can compute the remaining part of the agglomerate belonging
 * to myproc and communicate it to myproc. Hence, myproc's own_boxes
 * contains these nodes as well as myproc's recv_boxes.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_FacSemiRestrictSetup2( void                 *fac_restrict_vdata,
                             hypre_SStructVector  *r,
                             HYPRE_Int             part_crse,
                             HYPRE_Int             part_fine,
                             hypre_SStructPVector *rc,
                             hypre_Index           rfactors )
{
   HYPRE_Int                   ierr = 0;

   hypre_FacSemiRestrictData2 *fac_restrict_data = fac_restrict_vdata;
   MPI_Comm                    comm= hypre_SStructPVectorComm(rc);
   hypre_CommInfo             *comm_info;
   hypre_CommPkg             **interlevel_comm;

   hypre_SStructPVector       *rf= hypre_SStructVectorPVector(r, part_fine);
   hypre_StructVector         *s_rc, *s_cvector;

   hypre_SStructPGrid         *pgrid;

   hypre_SStructPVector       *fgrid_cvectors;
   hypre_SStructPGrid         *fgrid_coarsen;
   hypre_BoxArrayArray       **identity_arrayboxes;
   hypre_BoxArrayArray       **fullwgt_ownboxes;
   hypre_BoxArrayArray       **fullwgt_sendboxes;
   hypre_BoxArray             *boxarray;
   hypre_BoxArray             *tmp_boxarray, *intersect_boxes;
   HYPRE_Int                ***own_cboxnums;

   hypre_BoxArrayArray       **send_boxes, *send_rboxes;
   HYPRE_Int                ***send_processes;
   HYPRE_Int                ***send_remote_boxnums;

   hypre_BoxArrayArray       **recv_boxes, *recv_rboxes;
   HYPRE_Int                ***recv_processes;
   HYPRE_Int                ***recv_remote_boxnums;

   hypre_BoxManager           *boxman;
   hypre_BoxManEntry         **boxman_entries;
   HYPRE_Int                   nboxman_entries;

   hypre_Box                   box, scaled_box;
   hypre_Index                 zero_index, index, ilower, iupper;

   HYPRE_Int                   ndim= hypre_SStructVectorNDim(r);
   HYPRE_Int                   myproc, proc;
   HYPRE_Int                   nvars, vars;
   HYPRE_Int                   num_values;

   HYPRE_Int                   i, cnt1, cnt2;
   HYPRE_Int                   fi, ci;

   hypre_MPI_Comm_rank(comm, &myproc);
   hypre_ClearIndex(zero_index);

   nvars= hypre_SStructPVectorNVars(rc);
   (fac_restrict_data -> nvars)= nvars;
   hypre_CopyIndex(rfactors, (fac_restrict_data -> stride));
   /* pad rfactors to 3d with unit factors */
   for (i= ndim; i< 3; i++)
   {
      rfactors[i]= 1;
   }

   /* work vector for storing the fullweighted fgrid boxes: build a pgrid
      of the coarsened fine boxes, create/assemble a pvector on it. */
   hypre_SStructPGridCreate(hypre_SStructPVectorComm(rf), ndim, &fgrid_coarsen);
   pgrid= hypre_SStructPVectorPGrid(rf);
   for (vars= 0; vars< nvars; vars++)
   {
      boxarray= hypre_StructGridBoxes(hypre_SStructPGridSGrid(pgrid, vars));
      hypre_ForBoxI(fi, boxarray)
      {
         hypre_CopyBox(hypre_BoxArrayBox(boxarray, fi), &box);
         hypre_StructMapFineToCoarse(hypre_BoxIMin(&box), zero_index,
                                     rfactors, hypre_BoxIMin(&box));
         hypre_StructMapFineToCoarse(hypre_BoxIMax(&box), zero_index,
                                     rfactors, hypre_BoxIMax(&box));
         hypre_SStructPGridSetExtents(fgrid_coarsen,
                                      hypre_BoxIMin(&box),
                                      hypre_BoxIMax(&box));
      }
   }
   hypre_SStructPGridSetVariables( fgrid_coarsen, nvars,
                                   hypre_SStructPGridVarTypes(pgrid) );
   hypre_SStructPGridAssemble(fgrid_coarsen);

   hypre_SStructPVectorCreate(hypre_SStructPGridComm(fgrid_coarsen),
                              fgrid_coarsen, &fgrid_cvectors);
   hypre_SStructPVectorInitialize(fgrid_cvectors);
   hypre_SStructPVectorAssemble(fgrid_cvectors);

   /* pgrid fgrid_coarsen no longer needed */
   hypre_SStructPGridDestroy(fgrid_coarsen);

   fac_restrict_data -> fgrid_cvectors= fgrid_cvectors;

   /*--------------------------------------------------------------------------
    * boxes that are not underlying a fine box:
    *
    * algorithm: subtract all coarsened fine grid boxes that intersect with
    * this processor's coarse boxes. Note that we cannot loop over all the
    * coarsened fine boxes and subtract them from the coarse grid since we do
    * not know if some of the overlying fine boxes belong on another
    * processor. For each cbox, we get a boxarray of boxes that are not
    * underlying-> size(identity_arrayboxes[vars])= #cboxes.
    *
    * Note that no contraction is needed for the intersect boxes since they
    * will be subtracted from the cbox. Contraction can erroneously lead
    * to bigger identity boxes.
    *--------------------------------------------------------------------------*/
   identity_arrayboxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars);

   pgrid= hypre_SStructPVectorPGrid(rc);
   hypre_ClearIndex(index);
   /* index = rfactors-1: upper-corner offset used when refining cboxes */
   for (i= 0; i< ndim; i++)
   {
      index[i]= rfactors[i]-1;
   }

   tmp_boxarray = hypre_BoxArrayCreate(0);
   for (vars= 0; vars< nvars; vars++)
   {
      boxman= hypre_SStructGridBoxManager(hypre_SStructVectorGrid(r),
                                          part_fine, vars);
      boxarray= hypre_StructGridBoxes(hypre_SStructPGridSGrid(pgrid, vars));

      identity_arrayboxes[vars]= hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray));

      hypre_ForBoxI(ci, boxarray)
      {
         hypre_CopyBox(hypre_BoxArrayBox(boxarray, ci), &box);
         hypre_AppendBox(&box,
                         hypre_BoxArrayArrayBoxArray(identity_arrayboxes[vars], ci));

         /* refine the cbox and query the fine-level box manager */
         hypre_StructMapCoarseToFine(hypre_BoxIMin(&box), zero_index,
                                     rfactors, hypre_BoxIMin(&scaled_box));
         hypre_StructMapCoarseToFine(hypre_BoxIMax(&box), index,
                                     rfactors, hypre_BoxIMax(&scaled_box));

         hypre_BoxManIntersect(boxman, hypre_BoxIMin(&scaled_box),
                               hypre_BoxIMax(&scaled_box), &boxman_entries,
                               &nboxman_entries);

         /* all send and coarsened fboxes on this processor are collected */
         intersect_boxes= hypre_BoxArrayCreate(0);
         for (i= 0; i< nboxman_entries; i++)
         {
            hypre_BoxManEntryGetExtents(boxman_entries[i], ilower, iupper);
            hypre_BoxSetExtents(&box, ilower, iupper);
            hypre_IntersectBoxes(&box, &scaled_box, &box);

            hypre_StructMapFineToCoarse(hypre_BoxIMin(&box), zero_index, rfactors,
                                        hypre_BoxIMin(&box));
            hypre_StructMapFineToCoarse(hypre_BoxIMax(&box), zero_index, rfactors,
                                        hypre_BoxIMax(&box));
            hypre_AppendBox(&box, intersect_boxes);
         }

         /* identity boxes = cbox minus all coarsened intersections */
         hypre_SubtractBoxArrays(hypre_BoxArrayArrayBoxArray(identity_arrayboxes[vars], ci),
                                 intersect_boxes, tmp_boxarray);
         hypre_MinUnionBoxes(hypre_BoxArrayArrayBoxArray(identity_arrayboxes[vars], ci));

         hypre_TFree(boxman_entries);
         hypre_BoxArrayDestroy(intersect_boxes);
      }
   }
   hypre_BoxArrayDestroy(tmp_boxarray);
   fac_restrict_data -> identity_arrayboxes= identity_arrayboxes;

   /*--------------------------------------------------------------------------
    * fboxes that are coarsened. Some will be sent. We create the communication
    * pattern. For each fbox, we need a boxarray of sendboxes or ownboxes.
    *
    * Algorithm: Coarsen each fbox and see which cboxes it intersects using
    * BoxManIntersect. Cboxes that do not belong on the processor will have
    * a chunk sent to it.
    *
    * Note that no contraction is needed. Contraction can lead to erroneous
    * send_boxes.
    *--------------------------------------------------------------------------*/
   interlevel_comm= hypre_CTAlloc(hypre_CommPkg *, nvars);
   fullwgt_sendboxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars);
   fullwgt_ownboxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars);
   own_cboxnums= hypre_CTAlloc(HYPRE_Int **, nvars);

   send_boxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars);
   send_processes= hypre_CTAlloc(HYPRE_Int **, nvars);
   send_remote_boxnums= hypre_CTAlloc(HYPRE_Int **, nvars);

   pgrid= hypre_SStructPVectorPGrid(rf);
   for (vars= 0; vars< nvars; vars++)
   {
      boxman= hypre_SStructGridBoxManager(hypre_SStructVectorGrid(r),
                                          part_crse, vars);
      boxarray= hypre_StructGridBoxes(hypre_SStructPGridSGrid(pgrid, vars));

      fullwgt_sendboxes[vars]= hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray));
      fullwgt_ownboxes[vars] = hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray));
      own_cboxnums[vars] = hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray));

      send_boxes[vars] = hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray));
      send_processes[vars] = hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray));
      send_remote_boxnums[vars]= hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray));

      hypre_ForBoxI(fi, boxarray)
      {
         hypre_CopyBox(hypre_BoxArrayBox(boxarray, fi), &box);
         /* coarsen the fbox and intersect with the coarse-level box manager */
         hypre_StructMapFineToCoarse(hypre_BoxIMin(&box), zero_index,
                                     rfactors, hypre_BoxIMin(&scaled_box));
         hypre_StructMapFineToCoarse(hypre_BoxIMax(&box), zero_index,
                                     rfactors, hypre_BoxIMax(&scaled_box));

         hypre_BoxManIntersect(boxman, hypre_BoxIMin(&scaled_box),
                               hypre_BoxIMax(&scaled_box), &boxman_entries,
                               &nboxman_entries);

         /* first pass: count off-proc (cnt1) and on-proc (cnt2) chunks */
         cnt1= 0; cnt2= 0;
         for (i= 0; i< nboxman_entries; i++)
         {
            hypre_SStructBoxManEntryGetProcess(boxman_entries[i], &proc);
            if (proc != myproc)
            {
               cnt1++;
            }
            else
            {
               cnt2++;
            }
         }
         send_processes[vars][fi]     = hypre_CTAlloc(HYPRE_Int, cnt1);
         send_remote_boxnums[vars][fi]= hypre_CTAlloc(HYPRE_Int, cnt1);
         own_cboxnums[vars][fi]       = hypre_CTAlloc(HYPRE_Int, cnt2);

         /* second pass: record the actual chunks and their destinations */
         cnt1= 0; cnt2= 0;
         for (i= 0; i< nboxman_entries; i++)
         {
            hypre_BoxManEntryGetExtents(boxman_entries[i], ilower, iupper);
            hypre_BoxSetExtents(&box, ilower, iupper);
            hypre_IntersectBoxes(&box, &scaled_box, &box);

            hypre_SStructBoxManEntryGetProcess(boxman_entries[i], &proc);
            if (proc != myproc)
            {
               hypre_AppendBox(&box,
                               hypre_BoxArrayArrayBoxArray(fullwgt_sendboxes[vars], fi));
               hypre_AppendBox(&box,
                               hypre_BoxArrayArrayBoxArray(send_boxes[vars], fi));
               send_processes[vars][fi][cnt1]= proc;
               hypre_SStructBoxManEntryGetBoxnum(boxman_entries[i],
                                                 &send_remote_boxnums[vars][fi][cnt1]);
               cnt1++;
            }
            else
            {
               hypre_AppendBox(&box,
                               hypre_BoxArrayArrayBoxArray(fullwgt_ownboxes[vars], fi));
               hypre_SStructBoxManEntryGetBoxnum(boxman_entries[i],
                                                 &own_cboxnums[vars][fi][cnt2]);
               cnt2++;
            }
         }
         hypre_TFree(boxman_entries);
      }  /* hypre_ForBoxI(fi, boxarray) */
   }     /* for (vars= 0; vars< nvars; vars++) */

   (fac_restrict_data -> fullwgt_sendboxes)= fullwgt_sendboxes;
   (fac_restrict_data -> fullwgt_ownboxes)= fullwgt_ownboxes;
   (fac_restrict_data -> own_cboxnums)= own_cboxnums;

   /*--------------------------------------------------------------------------
    * coarsened fboxes this processor will receive.
    *
    * Algorithm: For each cbox on this processor, refine it and find which
    * processors the refinement belongs in. The processors owning a chunk
    * are the recv_processors.
    *--------------------------------------------------------------------------*/
   recv_boxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars);
   recv_processes= hypre_CTAlloc(HYPRE_Int **, nvars);

   /* dummy pointer for CommInfoCreate */
   recv_remote_boxnums= hypre_CTAlloc(HYPRE_Int **, nvars);

   pgrid= hypre_SStructPVectorPGrid(rc);
   for (vars= 0; vars< nvars; vars++)
   {
      boxman= hypre_SStructGridBoxManager(hypre_SStructVectorGrid(r),
                                          part_fine, vars);
      boxarray= hypre_StructGridBoxes(hypre_SStructPGridSGrid(pgrid, vars));

      recv_boxes[vars]    = hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray));
      recv_processes[vars]= hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray));
      recv_remote_boxnums[vars]= hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray));

      hypre_ForBoxI(ci, boxarray)
      {
         hypre_CopyBox(hypre_BoxArrayBox(boxarray, ci), &box);
         /* refine the cbox and query the fine-level box manager */
         hypre_StructMapCoarseToFine(hypre_BoxIMin(&box), zero_index,
                                     rfactors, hypre_BoxIMin(&scaled_box));
         hypre_StructMapCoarseToFine(hypre_BoxIMax(&box), index,
                                     rfactors, hypre_BoxIMax(&scaled_box));

         hypre_BoxManIntersect(boxman, hypre_BoxIMin(&scaled_box),
                               hypre_BoxIMax(&scaled_box), &boxman_entries,
                               &nboxman_entries);

         /* count off-proc chunks to size the recv arrays */
         cnt1= 0;
         for (i= 0; i< nboxman_entries; i++)
         {
            hypre_SStructBoxManEntryGetProcess(boxman_entries[i], &proc);
            if (proc != myproc)
            {
               cnt1++;
            }
         }
         recv_processes[vars][ci]= hypre_CTAlloc(HYPRE_Int, cnt1);
         recv_remote_boxnums[vars][ci]= hypre_CTAlloc(HYPRE_Int , cnt1);

         cnt1= 0;
         for (i= 0; i< nboxman_entries; i++)
         {
            hypre_SStructBoxManEntryGetProcess(boxman_entries[i], &proc);
            if (proc != myproc)
            {
               hypre_BoxManEntryGetExtents(boxman_entries[i], ilower, iupper);
               hypre_BoxSetExtents(&box, ilower, iupper);
               hypre_IntersectBoxes(&box, &scaled_box, &box);

               /* no contracting needed */
               hypre_StructMapFineToCoarse(hypre_BoxIMin(&box), zero_index, rfactors,
                                           hypre_BoxIMin(&box));
               hypre_StructMapFineToCoarse(hypre_BoxIMax(&box), zero_index, rfactors,
                                           hypre_BoxIMax(&box));
               hypre_AppendBox(&box,
                               hypre_BoxArrayArrayBoxArray(recv_boxes[vars], ci));

               recv_processes[vars][ci][cnt1]= proc;
               cnt1++;
            }  /* if (proc != myproc) */
         }     /* for (i= 0; i< nmap_entries; i++) */
         hypre_TFree(boxman_entries);
      }  /* hypre_ForBoxI(ci, boxarray) */
   }     /* for (vars= 0; vars< nvars; vars++) */

   /* build one interlevel communication package per variable */
   num_values= 1;
   for (vars= 0; vars< nvars; vars++)
   {
      s_rc     = hypre_SStructPVectorSVector(rc, vars);
      s_cvector= hypre_SStructPVectorSVector(fgrid_cvectors, vars);
      send_rboxes= hypre_BoxArrayArrayDuplicate(send_boxes[vars]);
      recv_rboxes= hypre_BoxArrayArrayDuplicate(recv_boxes[vars]);

      hypre_CommInfoCreate(send_boxes[vars], recv_boxes[vars],
                           send_processes[vars], recv_processes[vars],
                           send_remote_boxnums[vars], recv_remote_boxnums[vars],
                           send_rboxes, recv_rboxes, 1, &comm_info);

      hypre_CommPkgCreate(comm_info,
                          hypre_StructVectorDataSpace(s_cvector),
                          hypre_StructVectorDataSpace(s_rc),
                          num_values, NULL, 0,
                          hypre_StructVectorComm(s_rc),
                          &interlevel_comm[vars]);
      hypre_CommInfoDestroy(comm_info);
   }
   /* outer arrays only: inner entries were consumed by CommInfoCreate */
   hypre_TFree(send_boxes);
   hypre_TFree(recv_boxes);
   hypre_TFree(send_processes);
   hypre_TFree(recv_processes);
   hypre_TFree(send_remote_boxnums);
   hypre_TFree(recv_remote_boxnums);

   (fac_restrict_data -> interlevel_comm)= interlevel_comm;

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_FACRestrict2:
 * Apply the composite restriction: injection on identity boxes, piecewise
 * constant (averaged) restriction under the refinement patch, then
 * communicate/accumulate the coarsened patch data into xc.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_FACRestrict2( void                 * fac_restrict_vdata,
                    hypre_SStructVector  * xf,
                    hypre_SStructPVector * xc)
{
   HYPRE_Int ierr = 0;
   hypre_FacSemiRestrictData2 *restrict_data = fac_restrict_vdata;
   hypre_SStructPVector *fgrid_cvectors = restrict_data->fgrid_cvectors;
   hypre_BoxArrayArray **identity_arrayboxes= restrict_data->identity_arrayboxes;
   hypre_BoxArrayArray **fullwgt_ownboxes = restrict_data->fullwgt_ownboxes;
   HYPRE_Int ***own_cboxnums = restrict_data->own_cboxnums;
   hypre_CommPkg **interlevel_comm= restrict_data-> interlevel_comm;
   hypre_CommHandle *comm_handle;

   HYPRE_Int ndim = hypre_SStructVectorNDim(xf);

   hypre_BoxArrayArray *arrayarray_ownboxes;

   hypre_IndexRef stride;  /* refinement factors */

   hypre_StructGrid *fgrid;
   hypre_BoxArray *fgrid_boxes;
   hypre_Box *fgrid_box;
   hypre_StructGrid *cgrid;
   hypre_BoxArray
/* (continuation of the hypre_FACRestrict2 declaration list) */
                  *cgrid_boxes;
   hypre_Box      *cgrid_box;
   hypre_BoxArray *own_boxes;
   hypre_Box      *own_box;
   HYPRE_Int      *boxnums;

   hypre_Box      *xc_temp_dbox;  /* data space of the work (coarsened-patch) vector */
   hypre_Box      *xf_dbox;       /* data space of the fine/coarse source vector */

   hypre_StructVector *xc_temp;
   hypre_StructVector *xc_var;
   hypre_StructVector *xf_var;

   HYPRE_Int xci;
   HYPRE_Int xfi;

   /* xfp: fine-node pointers, one per (k,j) offset inside an agglomerate;
      xcp/xcp_temp: coarse-cell pointers for the 2 candidate cells per dim */
   double ***xfp;
   double ***xcp;
   double ***xcp_temp;

   hypre_Index loop_size, lindex;
   hypre_Index start, fbox_size, node_offset;
   hypre_Index startc;
   hypre_Index stridec;
   hypre_Index rfactors;
   hypre_Index temp_index1, temp_index2;

   HYPRE_Int fi, ci;
   HYPRE_Int nvars, var;
   HYPRE_Int volume_crse_cell;
   HYPRE_Int i, j, k;
   HYPRE_Int imax, jmax, kmax;
   HYPRE_Int icell, jcell, kcell, ijkcell;

   double *sum;      /* per-coarse-cell partial sums within one agglomerate */
   double scaling;   /* divisor turning sums into averages */

   HYPRE_Int part_crse= 0;
   HYPRE_Int part_fine= 1;
   HYPRE_Int num_coarse_cells;

   /*-----------------------------------------------------------------------
    * Initialize some things
    *-----------------------------------------------------------------------*/
   stride= (restrict_data -> stride);
   hypre_ClearIndex(stridec);
   for (i= 0; i< ndim; i++)
   {
      stridec[i]= 1;
   }

   hypre_CopyIndex(stride, rfactors);
   for (i= ndim; i< 3; i++)
   {
      rfactors[i]= 1;   /* pad to 3d with unit factors */
   }

   volume_crse_cell= 1;
   for (i= 0; i< ndim; i++)
   {
      volume_crse_cell*= rfactors[i];
   }

   /*-----------------------------------------------------------------------
    * We are assuming the refinement and coarsening have same variable
    * types.
    *-----------------------------------------------------------------------*/
   nvars= hypre_SStructPVectorNVars(xc);

   /*-----------------------------------------------------------------------
    * For each coordinate direction, a fine node can contribute only to the
    * left or right cell=> only 2 coarse cells per direction.
    *-----------------------------------------------------------------------*/
   num_coarse_cells= 1;
   for (i= 0; i< ndim; i++)
   {
      num_coarse_cells*= 2;
   }
   sum= hypre_CTAlloc(double, num_coarse_cells);

   /*--------------------------------------------------------------------------
    * Scaling for averaging restriction.
    * NOTE(review): only rfactors[0] is multiplied in (ndim-2 times) --
    * presumably intentional for the semicoarsening used here; confirm
    * against upstream hypre before changing.
    *--------------------------------------------------------------------------*/
   scaling= 1.0;
   for (i= 0; i< ndim-2; i++)
   {
      scaling*= rfactors[0];
   }

   /*-----------------------------------------------------------------------
    * Initialize the coarse vector to zero.
    *-----------------------------------------------------------------------*/
   hypre_SStructPVectorSetConstantValues(xc, 0.0);

   /*-----------------------------------------------------------------------
    * Copy the coarse data: xf[part_crse] -> xc  (injection away from patch)
    *-----------------------------------------------------------------------*/
   hypre_SStructPartialPCopy(hypre_SStructVectorPVector(xf, part_crse),
                             xc, identity_arrayboxes);

   /*-----------------------------------------------------------------------
    * Piecewise constant restriction over the refinement patch.
    *
    * Initialize the work vector by setting to zero.
    *-----------------------------------------------------------------------*/
   hypre_SStructPVectorSetConstantValues(fgrid_cvectors, 0.0);

   /*-----------------------------------------------------------------------
    * Allocate memory for the data pointers. Assuming constant restriction.
    * We stride through the refinement patch by the refinement factors, and
    * so we must have pointers to the intermediate fine nodes=> xfp will
    * be size rfactors[2]*rfactors[1]. Because the fbox may not have the
    * ideal refinement form, we need to contribute to 2^ndim cells.
    *-----------------------------------------------------------------------*/
   if (ndim > 1)
   {
      xcp_temp= hypre_TAlloc(double **, (ndim-1));
      xcp     = hypre_TAlloc(double **, (ndim-1));
      for (k= 0; k< (ndim-1); k++)
      {
         xcp_temp[k]= hypre_TAlloc(double *, 2);
         xcp[k]     = hypre_TAlloc(double *, 2);
      }
   }
   else /* 1d does not really require these double ptrs */
   {
      xcp_temp   = hypre_TAlloc(double **, 1);
      xcp        = hypre_TAlloc(double **, 1);
      xcp_temp[0]= hypre_TAlloc(double *, 1);
      xcp[0]     = hypre_TAlloc(double *, 1);
   }

   /* memory allocation of xfp is okay for all dimensions */
   xfp= hypre_TAlloc(double **, rfactors[2]);
   for (k= 0; k< rfactors[2]; k++)
   {
      xfp[k]= hypre_TAlloc(double *, rfactors[1]);
   }

   for (var= 0; var< nvars; var++)
   {
      xc_temp= hypre_SStructPVectorSVector(fgrid_cvectors, var);
      xf_var= hypre_SStructPVectorSVector(hypre_SStructVectorPVector(xf,part_fine), var);

      fgrid      = hypre_StructVectorGrid(xf_var);
      fgrid_boxes= hypre_StructGridBoxes(fgrid);
      cgrid      = hypre_StructVectorGrid(xc_temp);
      cgrid_boxes= hypre_StructGridBoxes(cgrid);

      hypre_ForBoxI(fi, fgrid_boxes)
      {
         fgrid_box= hypre_BoxArrayBox(fgrid_boxes, fi);

         /*--------------------------------------------------------------------
          * Get the ptrs for the fine struct_vectors: one base pointer per
          * (j,k) offset inside the refinement agglomerate.
          *--------------------------------------------------------------------*/
         xf_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(xf_var), fi);
         for (k= 0; k< rfactors[2]; k++)
         {
            for (j=0; j< rfactors[1]; j++)
            {
               hypre_SetIndex(temp_index1, 0, j, k);
               xfp[k][j]= hypre_StructVectorBoxData(xf_var, fi) +
                  hypre_BoxOffsetDistance(xf_dbox, temp_index1);
            }
         }

         /*--------------------------------------------------------------------
          * Get the ptrs for the coarse struct_vectors. Note that the coarse
          * work vector is indexed with respect to the local fine box no.'s.
          * Work vectors were created this way. Dimensionally dependent.
          *--------------------------------------------------------------------*/
         xc_temp_dbox= hypre_BoxArrayBox(hypre_StructVectorDataSpace(xc_temp), fi);
         if (ndim > 1)
         {
            for (k= 0; k< (ndim-1); k++)
            {
               for (j=0; j< 2; j++)
               {
                  hypre_SetIndex(temp_index1, 0, j, k);
                  xcp_temp[k][j]= hypre_StructVectorBoxData(xc_temp, fi) +
                     hypre_BoxOffsetDistance(xc_temp_dbox, temp_index1);
               }
            }
         }
         else /* 1d case */
         {
            hypre_ClearIndex(temp_index1);
            xcp_temp[0][0]= hypre_StructVectorBoxData(xc_temp, fi) +
               hypre_BoxOffsetDistance(xc_temp_dbox, temp_index1);
         }

         hypre_CopyIndex(hypre_BoxIMin(fgrid_box), start);
         hypre_CopyIndex(hypre_BoxIMax(fgrid_box), fbox_size);

         /*--------------------------------------------------------------------
          * Adjust "fbox_size" so that this hypre_Index is appropriate for
          * ndim < 3.
          *    fbox_size= hypre_BoxIMax(fgrid_box)-hypre_BoxIMin(fgrid_box)+1.
          *--------------------------------------------------------------------*/
         for (i= 0; i< 3; i++)
         {
            fbox_size[i]-= (start[i]-1);
         }

         /*--------------------------------------------------------------------
          * The fine intersection box may not be divisible by the refinement
          * factor. We need to know the remainder to determine which
          * coarse node gets the restricted values.
          *--------------------------------------------------------------------*/
         hypre_ClearIndex(node_offset);
         for (i= 0; i< ndim; i++)
         {
            node_offset[i]= rfactors[i]-(start[i]%rfactors[i])-1;
         }

         hypre_SetIndex(temp_index2, 0, 0, 0);
         hypre_StructMapFineToCoarse(start, temp_index2, rfactors, startc);

         hypre_BoxGetSize(fgrid_box, temp_index1);
         hypre_StructMapFineToCoarse(temp_index1, temp_index2, rfactors, loop_size);

         hypre_BoxLoop2Begin(ndim, loop_size,
                             xf_dbox, start, stride, xfi,
                             xc_temp_dbox, startc, stridec, xci);
#if 0 /* Are private static arrays a problem? */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xfi,xci,imax,jmax,kmax,k,kcell,j,jcell,i,icell,ijkcell,temp_index2) HYPRE_SMP_SCHEDULE
#endif
#else
         hypre_BoxLoopSetOneBlock();
#endif
         hypre_BoxLoop2For(xfi, xci)
         {
            /*-----------------------------------------------------------------
             * Arithmetic average the refinement patch values to get
             * restricted coarse grid values in an agglomerate; i.e.,
             * piecewise constant restriction.
             *-----------------------------------------------------------------*/
            hypre_BoxLoopGetIndex(lindex);
            /* clip the agglomerate at the fbox boundary */
            imax= hypre_min( (fbox_size[0]-lindex[0]*stride[0]), rfactors[0] );
            jmax= hypre_min( (fbox_size[1]-lindex[1]*stride[1]), rfactors[1] );
            kmax= hypre_min( (fbox_size[2]-lindex[2]*stride[2]), rfactors[2] );

            for (i= 0; i< num_coarse_cells; i++)
            {
               sum[i]= 0.0;
            }

            for (k= 0; k< kmax; k++)
            {
               /* icell/jcell/kcell pick left(0)/right(1) coarse cell per dim */
               kcell= 1;
               if (k <= node_offset[2])
               {
                  kcell= 0;
               }
               for (j= 0; j< jmax; j++)
               {
                  jcell= 1;
                  if (j <= node_offset[1])
                  {
                     jcell= 0;
                  }
                  for (i= 0; i< imax; i++)
                  {
                     icell= 1;
                     if (i <= node_offset[0])
                     {
                        icell= 0;
                     }
                     MapCellRank(icell, jcell , kcell, ijkcell);
                     sum[ijkcell]+= xfp[k][j][xfi+i];
                  }
               }
            }

            /*-----------------------------------------------------------------
             * Add the computed averages to the correct coarse cell.
             *-----------------------------------------------------------------*/
            for (ijkcell= 0; ijkcell< num_coarse_cells; ijkcell++)
            {
               if (sum[ijkcell] != 0.0)
               {
                  sum[ijkcell]/= scaling;
                  InverseMapCellRank(ijkcell, temp_index2);
                  i= temp_index2[0];
                  j= temp_index2[1];
                  k= temp_index2[2];
                  xcp_temp[k][j][xci+i]+= sum[ijkcell];
               }
            }
         }
         hypre_BoxLoop2End(xfi, xci);
      }  /* hypre_ForBoxI(fi, fgrid_boxes) */
   }     /* for (var= 0; var< nvars; var++) */

   /*------------------------------------------------------------------
    * Communicate calculated restricted function over the coarsened
    * patch. Only actual communicated values will be put in the
    * coarse vector.
    *------------------------------------------------------------------*/
   for (var= 0; var< nvars; var++)
   {
      xc_temp= hypre_SStructPVectorSVector(fgrid_cvectors, var);
      xc_var= hypre_SStructPVectorSVector(xc, var);
      hypre_InitializeCommunication(interlevel_comm[var],
                                    hypre_StructVectorData(xc_temp),
                                    hypre_StructVectorData(xc_var), 0, 0,
                                    &comm_handle);
      hypre_FinalizeCommunication(comm_handle);
   }

   /*------------------------------------------------------------------
    * Need to add the coarsened patches that belong on this processor
    * to the coarse vector.
    *------------------------------------------------------------------*/
   for (var= 0; var< nvars; var++)
   {
      xc_temp= hypre_SStructPVectorSVector(fgrid_cvectors, var);
      xc_var= hypre_SStructPVectorSVector(xc, var);

      cgrid      = hypre_StructVectorGrid(xc_temp);
      cgrid_boxes= hypre_StructGridBoxes(cgrid);

      arrayarray_ownboxes= fullwgt_ownboxes[var];
      hypre_ForBoxI(ci, cgrid_boxes)
      {
         cgrid_box= hypre_BoxArrayBox(cgrid_boxes, ci);
         xc_temp_dbox= hypre_BoxArrayBox(hypre_StructVectorDataSpace(xc_temp), ci);
         xcp_temp[0][0]= hypre_StructVectorBoxData(xc_temp, ci);

         /*--------------------------------------------------------------
          * Each ci box of cgrid_box has a boxarray of subboxes. Copy
          * each of these subboxes to the coarse vector.
          *--------------------------------------------------------------*/
         own_boxes= hypre_BoxArrayArrayBoxArray(arrayarray_ownboxes, ci);
         boxnums  = own_cboxnums[var][ci];
         hypre_ForBoxI(i, own_boxes)
         {
            own_box= hypre_BoxArrayBox(own_boxes, i);
            xf_dbox= hypre_BoxArrayBox(hypre_StructVectorDataSpace(xc_var), boxnums[i]);
            xcp[0][0]= hypre_StructVectorBoxData(xc_var, boxnums[i]);

            hypre_BoxGetSize(own_box, loop_size);
            hypre_BoxLoop2Begin(ndim, loop_size,
                                xc_temp_dbox, hypre_BoxIMin(own_box), stridec, xfi,
                                xf_dbox, hypre_BoxIMin(own_box), stridec, xci);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xfi,xci) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(xfi, xci)
            {
               xcp[0][0][xci]+= xcp_temp[0][0][xfi];
            }
            hypre_BoxLoop2End(xfi, xci);
         }  /* hypre_ForBoxI(i, own_boxes) */
      }     /* hypre_ForBoxI(ci, cgrid_boxes) */
   }        /* for (var= 0; var< nvars; var++) */

   /* release the per-call workspace */
   hypre_TFree(sum);
   for (k= 0; k< rfactors[2]; k++)
   {
      hypre_TFree(xfp[k]);
   }
   hypre_TFree(xfp);
   if (ndim > 1)
   {
      for (k= 0; k< (ndim-1); k++)
      {
         hypre_TFree(xcp_temp[k]);
         hypre_TFree(xcp[k]);
      }
   }
   else
   {
      hypre_TFree(xcp_temp[0]);
      hypre_TFree(xcp[0]);
   }
   hypre_TFree(xcp_temp);
   hypre_TFree(xcp);

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_FacSemiRestrictDestroy2
 * Free everything Setup2 allocated (work vector, box arrays, boxnum
 * arrays, comm packages) and the data object itself. NULL-safe.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_FacSemiRestrictDestroy2( void *fac_restrict_vdata )
{
   HYPRE_Int ierr = 0;
   hypre_FacSemiRestrictData2 *fac_restrict_data = fac_restrict_vdata;

   HYPRE_Int nvars;
   HYPRE_Int i, j;

   if (fac_restrict_data)
   {
      nvars= (fac_restrict_data-> nvars);
      hypre_SStructPVectorDestroy(fac_restrict_data-> fgrid_cvectors);

      for (i= 0; i< nvars; i++)
      {
         hypre_BoxArrayArrayDestroy((fac_restrict_data -> identity_arrayboxes)[i]);
         hypre_BoxArrayArrayDestroy((fac_restrict_data -> fullwgt_sendboxes)[i]);
         for (j= 0; j< hypre_BoxArrayArraySize(fac_restrict_data->fullwgt_ownboxes[i]); j++)
         {
            hypre_TFree((fac_restrict_data -> own_cboxnums)[i][j]);
         }
         hypre_TFree((fac_restrict_data -> own_cboxnums)[i]);

         hypre_BoxArrayArrayDestroy((fac_restrict_data -> fullwgt_ownboxes)[i]);
         hypre_CommPkgDestroy((fac_restrict_data -> interlevel_comm)[i]);
      }
      hypre_TFree(fac_restrict_data -> identity_arrayboxes);
      hypre_TFree(fac_restrict_data -> fullwgt_sendboxes);
      hypre_TFree(fac_restrict_data -> own_cboxnums);
      hypre_TFree(fac_restrict_data -> fullwgt_ownboxes);
      hypre_TFree(fac_restrict_data -> interlevel_comm);

      hypre_TFree(fac_restrict_data);
   }

   return ierr;
}
YAKL_mem_transfers.h
#pragma once

// Low-level bulk memory-copy helpers, one per direction (host/device).
// The backend is selected at compile time by the YAKL_ARCH_* macros; the
// CUDA/HIP/SYCL paths issue *asynchronous* copies on the default stream,
// so callers that need completion must fence() (done automatically when
// YAKL_AUTO_FENCE or YAKL_DEBUG is defined).

// Copy `elems` elements of type T between two host buffers (plain serial loop).
template <class T> inline void memcpy_host_to_host(T *dst , T *src , index_t elems) {
  for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
}

// Copy `elems` elements from device memory to host memory.
template <class T> inline void memcpy_device_to_host(T *dst , T *src , index_t elems) {
  #ifdef YAKL_ARCH_CUDA
    cudaMemcpyAsync(dst,src,elems*sizeof(T),cudaMemcpyDeviceToHost,0);
    check_last_error();
  #elif defined(YAKL_ARCH_HIP)
    hipMemcpyAsync(dst,src,elems*sizeof(T),hipMemcpyDeviceToHost,0);
    check_last_error();
  #elif defined (YAKL_ARCH_SYCL)
    sycl_default_stream.memcpy(dst, src, elems*sizeof(T));
    check_last_error();
  #elif defined(YAKL_ARCH_OPENMP45)
    // omp_target_memcpy(dst, src, bytes, dst_off, src_off, dst_dev, src_dev):
    // destination is the initial (host) device, source is the default device.
    omp_target_memcpy(dst,src,elems*sizeof(T),0,0,omp_get_initial_device(),omp_get_default_device());
    check_last_error();
  #elif defined(YAKL_ARCH_OPENMP)
    // OpenMP-threading backend: "device" memory is directly addressable here,
    // so a parallel element copy suffices.
    #pragma omp parallel for
    for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
  #else
    // serial fallback (no accelerator backend configured)
    for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
  #endif
  #if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG)
    fence();
  #endif
}

// Copy `elems` elements from host memory to device memory.
template <class T> inline void memcpy_host_to_device(T *dst , T *src , index_t elems) {
  #ifdef YAKL_ARCH_CUDA
    cudaMemcpyAsync(dst,src,elems*sizeof(T),cudaMemcpyHostToDevice,0);
    check_last_error();
  #elif defined(YAKL_ARCH_HIP)
    hipMemcpyAsync(dst,src,elems*sizeof(T),hipMemcpyHostToDevice,0);
    check_last_error();
  #elif defined (YAKL_ARCH_SYCL)
    sycl_default_stream.memcpy(dst, src, elems*sizeof(T));
    check_last_error();
  #elif defined(YAKL_ARCH_OPENMP45)
    // destination is the default device, source is the initial (host) device
    omp_target_memcpy(dst,src,elems*sizeof(T),0,0,omp_get_default_device(),omp_get_initial_device());
    check_last_error();
  #elif defined(YAKL_ARCH_OPENMP)
    #pragma omp parallel for
    for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
  #else
    for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
  #endif
  #if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG)
    fence();
  #endif
}

// Copy `elems` elements between two device buffers.
template <class T> inline void memcpy_device_to_device(T *dst , T *src , index_t elems) {
  #ifdef YAKL_ARCH_CUDA
    cudaMemcpyAsync(dst,src,elems*sizeof(T),cudaMemcpyDeviceToDevice,0);
    check_last_error();
  #elif defined(YAKL_ARCH_HIP)
    hipMemcpyAsync(dst,src,elems*sizeof(T),hipMemcpyDeviceToDevice,0);
    check_last_error();
  #elif defined (YAKL_ARCH_SYCL)
    sycl_default_stream.memcpy(dst, src, elems*sizeof(T));
    check_last_error();
  #elif defined(YAKL_ARCH_OPENMP45)
    // both endpoints live on the default device
    omp_target_memcpy(dst,src,elems*sizeof(T),0,0,omp_get_default_device(),omp_get_default_device());
    check_last_error();
  #elif defined(YAKL_ARCH_OPENMP)
    #pragma omp parallel for
    for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
  #else
    for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
  #endif
  #if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG)
    fence();
  #endif
}
md5_bmark.c
/* * MD5 Benchmark * ------------- * File: md5_bmark.c * * This is the main file for the md5 benchmark kernel. This benchmark was * written as part of the StarBENCH benchmark suite at TU Berlin. It performs * MD5 computation on a number of self-generated input buffers in parallel, * automatically measuring execution time. * * Copyright (C) 2011 Michael Andersch * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <unistd.h> #include <string.h> #include <sys/time.h> #include <omp.h> #include "md5.h" #include "md5_bmark.h" typedef struct timeval timer; #define TIME(x) gettimeofday(&x, NULL) /* Function declarations */ int initialize(md5bench_t* args); int finalize(md5bench_t* args); void run(md5bench_t* args); void process(uint8_t* in, uint8_t* out, int bufsize); void listInputs(); long timediff(timer* starttime, timer* finishtime); // Input configurations static data_t datasets[] = { {64, 512, 0}, {64, 1024, 0}, {64, 2048, 0}, {64, 4096, 0}, {128, 1024*512, 1}, {128, 1024*1024, 1}, {128, 1024*2048, 1}, {128, 1024*4096, 1}, }; /* * Function: initialize * -------------------- * To initialize the benchmark parameters. Generates the input buffers from random data. 
*/ int initialize(md5bench_t* args) { int index = args->input_set; if(index < 0 || index >= sizeof(datasets)/sizeof(datasets[0])) { fprintf(stderr, "Invalid input set specified! Clamping to set 0\n"); index = 0; } args->numinputs = datasets[index].numbufs; args->size = datasets[index].bufsize; args->inputs = (uint8_t**)calloc(args->numinputs, sizeof(uint8_t*)); args->out = (uint8_t*)calloc(args->numinputs, DIGEST_SIZE); if(args->inputs == NULL || args->out == NULL) { fprintf(stderr, "Memory Allocation Error\n"); return -1; } //fprintf(stderr, "Reading input set: %d buffers, %d bytes per buffer\n", datasets[index].numbufs, datasets[index].bufsize); // Now the input buffers need to be generated, for replicability, use same seed srand(datasets[index].rseed); for(int i = 0; i < args->numinputs; i++) { args->inputs[i] = (uint8_t*)malloc(sizeof(uint8_t)*datasets[index].bufsize); uint8_t* p = args->inputs[i]; if(p == NULL) { fprintf(stderr, "Memory Allocation Error\n"); return -1; } for(int j = 0; j < datasets[index].bufsize; j++) *p++ = rand() % 255; } return 0; } /* * Function: process * ----------------- * Processes one input buffer, delivering the digest into out. */ void process(uint8_t* in, uint8_t* out, int bufsize) { MD5_CTX context; uint8_t digest[16]; MD5_Init(&context); MD5_Update(&context, in, bufsize); MD5_Final(digest, &context); memcpy(out, digest, DIGEST_SIZE); } /* * Function: run * -------------------- * Main benchmarking function. If called, processes buffers with MD5 * until no more buffers available. The resulting message digests * are written into consecutive locations in the preallocated output * buffer. 
*/ void run(md5bench_t* args) { for(int i = 0; i < args->iterations; i++) { #pragma omp master { int buffers_to_process = args->numinputs; int next = 0; uint8_t** in = args->inputs; uint8_t* out = args->out; for (next = 0; next < buffers_to_process; next++) { #pragma omp task { process(in[next], out+next*DIGEST_SIZE, args->size); } } } } } /* * Function: finalize * ------------------ * Cleans up memory used by the benchmark for input and output buffers. */ int finalize(md5bench_t* args) { char buffer[64]; int offset = 0; for(int i = 0; i < args->numinputs; i++) { #ifdef DEBUG sprintf(buffer, "Buffer %d has checksum ", i); fwrite(buffer, sizeof(char), strlen(buffer)+1, stdout); #endif for(int j = 0; j < DIGEST_SIZE*2; j+=2) { sprintf(buffer+j, "%x", args->out[DIGEST_SIZE*i+j/2] & 0xf); sprintf(buffer+j+1, "%x", args->out[DIGEST_SIZE*i+j/2] & 0xf0); } buffer[32] = '\0'; #ifdef DEBUG fwrite(buffer, sizeof(char), 32, stdout); fputc('\n', stdout); #else printf("%s ", buffer); #endif } #ifndef DEBUG printf("\n"); #endif if(args->inputs) { for(int i = 0; i < args->numinputs; i++) { if(args->inputs[i]) free(args->inputs[i]); } free(args->inputs); } if(args->out) free(args->out); return 0; } /* * Function: timediff * ------------------ * Compute the difference between timers starttime and finishtime in msecs. 
*/ long timediff(timer* starttime, timer* finishtime) { long msec; msec=(finishtime->tv_sec-starttime->tv_sec)*1000; msec+=(finishtime->tv_usec-starttime->tv_usec)/1000; return msec; } /** MAIN **/ int main(int argc, char** argv) { timer b_start, b_end; md5bench_t args; //nt = number of threads int nt; //Receber parâmetros scanf("%d", &nt); scanf("%d", &args.input_set); scanf("%d", &args.iterations); args.outflag = 1; // Parameter initialization if(initialize(&args)) { fprintf(stderr, "Initialization Error\n"); exit(EXIT_FAILURE); } TIME(b_start); #pragma omp parallel num_threads(nt) run(&args); TIME(b_end); // Free memory if(finalize(&args)) { fprintf(stderr, "Finalization Error\n"); exit(EXIT_FAILURE); } double b_time = (double)timediff(&b_start, &b_end)/1000; printf("%.3f\n", b_time); return 0; }
spmm_x_csc.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <stdbool.h>
#include <memory.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Sparse matrix-matrix product C = A * B for CSC matrices.
 *
 * Two-pass algorithm:
 *   pass 1 counts the number of nonzeros of C column by column (marking
 *   which rows of C each column touches);
 *   pass 2 accumulates the numeric values into a dense per-column scratch
 *   array and compacts the marked rows into the output in row order.
 *
 * A, B: input matrices (A->cols must equal B->rows).
 * matC: receives a newly allocated result matrix.
 * Returns ALPHA_SPARSE_STATUS_SUCCESS, or
 * ALPHA_SPARSE_STATUS_INVALID_VALUE on dimension mismatch.
 *
 * Changes vs. the previous version:
 *  - BUGFIX: removed the `#pragma omp parallel for` over the inner counting
 *    loop. All threads read-modify-wrote the shared `flag` array without
 *    synchronization -- a data race (undefined behavior) that could also
 *    overcount nnz. The count is now serial and exact.
 *  - Removed a duplicated `bv = B->values[bi];` assignment.
 *  - The per-column marker buffer is allocated once and reused instead of
 *    being allocated and freed for every column.
 */
alphasparse_status_t ONAME(const ALPHA_SPMAT_CSC *A, const ALPHA_SPMAT_CSC *B, ALPHA_SPMAT_CSC **matC)
{
    check_return(A->cols != B->rows, ALPHA_SPARSE_STATUS_INVALID_VALUE);

    ALPHA_SPMAT_CSC *mat = alpha_malloc(sizeof(ALPHA_SPMAT_CSC));
    *matC = mat;
    mat->rows = A->rows;
    mat->cols = B->cols;

    ALPHA_INT m = A->rows;
    ALPHA_INT n = B->cols;

    /* Pass 1: count nnz of C. flag[r] marks that row r of the current
       column of C has already been counted. */
    bool *flag = alpha_memalign(sizeof(bool) * m, DEFAULT_ALIGNMENT);
    ALPHA_INT nnz = 0;
    for (ALPHA_INT bc = 0; bc < n; bc++)
    {
        memset(flag, '\0', sizeof(bool) * m);
        for (ALPHA_INT bi = B->cols_start[bc]; bi < B->cols_end[bc]; bi++)
        {
            ALPHA_INT ac = B->row_indx[bi];
            for (ALPHA_INT ai = A->cols_start[ac]; ai < A->cols_end[ac]; ai++)
            {
                if (!flag[A->row_indx[ai]])
                {
                    nnz += 1;
                    flag[A->row_indx[ai]] = true;
                }
            }
        }
    }

    /* cols_end aliases cols_start shifted by one: cols_end[bc] == cols_start[bc+1]. */
    ALPHA_INT *col_offset = alpha_memalign(sizeof(ALPHA_INT) * (n + 1), DEFAULT_ALIGNMENT);
    mat->cols_start = col_offset;
    mat->cols_end = col_offset + 1;
    mat->row_indx = alpha_memalign(nnz * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);
    mat->values = alpha_memalign(nnz * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);

    /* Pass 2: numeric accumulation. `values` is a dense scratch column of C;
       `flag` is reused to mark which rows of it are populated. */
    ALPHA_Number *values = alpha_memalign(sizeof(ALPHA_Number) * m, DEFAULT_ALIGNMENT);
    ALPHA_INT index = 0;
    mat->cols_start[0] = 0;
    for (ALPHA_INT bc = 0; bc < n; bc++)
    {
        memset(values, '\0', sizeof(ALPHA_Number) * m);
        memset(flag, '\0', sizeof(bool) * m);
        for (ALPHA_INT bi = B->cols_start[bc]; bi < B->cols_end[bc]; bi++)
        {
            ALPHA_INT ac = B->row_indx[bi];
            ALPHA_Number bv = B->values[bi];
            for (ALPHA_INT ai = A->cols_start[ac]; ai < A->cols_end[ac]; ai++)
            {
                ALPHA_INT ar = A->row_indx[ai];
                ALPHA_Number tmp;
                alpha_mul(tmp, bv, A->values[ai]);
                alpha_adde(values[ar], tmp);
                flag[ar] = true;
            }
        }
        /* Compact the populated rows (ascending row order) into the output. */
        for (ALPHA_INT r = 0; r < m; r++)
        {
            if (flag[r])
            {
                mat->row_indx[index] = r;
                mat->values[index] = values[r];
                index += 1;
            }
        }
        mat->cols_end[bc] = index;
    }

    alpha_free(flag);
    alpha_free(values);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright @ 2008 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"

/*
  Definitions
*/
/* DDS_HEADER dwFlags bits: which header fields carry valid data. */
#define DDSD_CAPS         0x00000001
#define DDSD_HEIGHT       0x00000002
#define DDSD_WIDTH        0x00000004
#define DDSD_PITCH        0x00000008
#define DDSD_PIXELFORMAT  0x00001000
#define DDSD_MIPMAPCOUNT  0x00020000
#define DDSD_LINEARSIZE   0x00080000
#define DDSD_DEPTH        0x00800000

/* DDS_PIXELFORMAT dwFlags bits. */
#define DDPF_ALPHAPIXELS  0x00000001
#define DDPF_FOURCC       0x00000004
#define DDPF_RGB          0x00000040
#define DDPF_LUMINANCE    0x00020000

/* FourCC codes ("DXT1", "DXT3", "DXT5", "DX10" as little-endian uint32). */
#define FOURCC_DXT1       0x31545844
#define FOURCC_DXT3       0x33545844
#define FOURCC_DXT5       0x35545844
#define FOURCC_DX10       0x30315844

/* DDS_HEADER dwCaps / dwCaps2 bits. */
#define DDSCAPS_COMPLEX   0x00000008
#define DDSCAPS_TEXTURE   0x00001000
#define DDSCAPS_MIPMAP    0x00400000

#define DDSCAPS2_CUBEMAP  0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX  0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX  0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY  0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY  0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ  0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ  0x00008000
#define DDSCAPS2_VOLUME   0x00200000

/* DX10 extension header fields. */
#define DDSEXT_DIMENSION_TEX2D  0x00000003
#define DDSEXTFLAGS_CUBEMAP     0x00000004

/* Local copy of the DirectX DXGI_FORMAT enumeration.  Values are implicit
   (0, 1, 2, ...), so the declaration order below must not be changed. */
typedef enum DXGI_FORMAT
{
  DXGI_FORMAT_UNKNOWN,
  DXGI_FORMAT_R32G32B32A32_TYPELESS,
  DXGI_FORMAT_R32G32B32A32_FLOAT,
  DXGI_FORMAT_R32G32B32A32_UINT,
  DXGI_FORMAT_R32G32B32A32_SINT,
  DXGI_FORMAT_R32G32B32_TYPELESS,
  DXGI_FORMAT_R32G32B32_FLOAT,
  DXGI_FORMAT_R32G32B32_UINT,
  DXGI_FORMAT_R32G32B32_SINT,
  DXGI_FORMAT_R16G16B16A16_TYPELESS,
  DXGI_FORMAT_R16G16B16A16_FLOAT,
  DXGI_FORMAT_R16G16B16A16_UNORM,
  DXGI_FORMAT_R16G16B16A16_UINT,
  DXGI_FORMAT_R16G16B16A16_SNORM,
  DXGI_FORMAT_R16G16B16A16_SINT,
  DXGI_FORMAT_R32G32_TYPELESS,
  DXGI_FORMAT_R32G32_FLOAT,
  DXGI_FORMAT_R32G32_UINT,
  DXGI_FORMAT_R32G32_SINT,
  DXGI_FORMAT_R32G8X24_TYPELESS,
  DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
  DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS,
  DXGI_FORMAT_X32_TYPELESS_G8X24_UINT,
  DXGI_FORMAT_R10G10B10A2_TYPELESS,
  DXGI_FORMAT_R10G10B10A2_UNORM,
  DXGI_FORMAT_R10G10B10A2_UINT,
  DXGI_FORMAT_R11G11B10_FLOAT,
  DXGI_FORMAT_R8G8B8A8_TYPELESS,
  DXGI_FORMAT_R8G8B8A8_UNORM,
  DXGI_FORMAT_R8G8B8A8_UNORM_SRGB,
  DXGI_FORMAT_R8G8B8A8_UINT,
  DXGI_FORMAT_R8G8B8A8_SNORM,
  DXGI_FORMAT_R8G8B8A8_SINT,
  DXGI_FORMAT_R16G16_TYPELESS,
  DXGI_FORMAT_R16G16_FLOAT,
  DXGI_FORMAT_R16G16_UNORM,
  DXGI_FORMAT_R16G16_UINT,
  DXGI_FORMAT_R16G16_SNORM,
  DXGI_FORMAT_R16G16_SINT,
  DXGI_FORMAT_R32_TYPELESS,
  DXGI_FORMAT_D32_FLOAT,
  DXGI_FORMAT_R32_FLOAT,
  DXGI_FORMAT_R32_UINT,
  DXGI_FORMAT_R32_SINT,
  DXGI_FORMAT_R24G8_TYPELESS,
  DXGI_FORMAT_D24_UNORM_S8_UINT,
  DXGI_FORMAT_R24_UNORM_X8_TYPELESS,
  DXGI_FORMAT_X24_TYPELESS_G8_UINT,
  DXGI_FORMAT_R8G8_TYPELESS,
  DXGI_FORMAT_R8G8_UNORM,
  DXGI_FORMAT_R8G8_UINT,
  DXGI_FORMAT_R8G8_SNORM,
  DXGI_FORMAT_R8G8_SINT,
  DXGI_FORMAT_R16_TYPELESS,
  DXGI_FORMAT_R16_FLOAT,
  DXGI_FORMAT_D16_UNORM,
  DXGI_FORMAT_R16_UNORM,
  DXGI_FORMAT_R16_UINT,
  DXGI_FORMAT_R16_SNORM,
  DXGI_FORMAT_R16_SINT,
  DXGI_FORMAT_R8_TYPELESS,
  DXGI_FORMAT_R8_UNORM,
  DXGI_FORMAT_R8_UINT,
  DXGI_FORMAT_R8_SNORM,
  DXGI_FORMAT_R8_SINT,
  DXGI_FORMAT_A8_UNORM,
  DXGI_FORMAT_R1_UNORM,
  DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
  DXGI_FORMAT_R8G8_B8G8_UNORM,
  DXGI_FORMAT_G8R8_G8B8_UNORM,
  DXGI_FORMAT_BC1_TYPELESS,
  DXGI_FORMAT_BC1_UNORM,
  DXGI_FORMAT_BC1_UNORM_SRGB,
  DXGI_FORMAT_BC2_TYPELESS,
  DXGI_FORMAT_BC2_UNORM,
  DXGI_FORMAT_BC2_UNORM_SRGB,
  DXGI_FORMAT_BC3_TYPELESS,
  DXGI_FORMAT_BC3_UNORM,
  DXGI_FORMAT_BC3_UNORM_SRGB,
  DXGI_FORMAT_BC4_TYPELESS,
  DXGI_FORMAT_BC4_UNORM,
  DXGI_FORMAT_BC4_SNORM,
  DXGI_FORMAT_BC5_TYPELESS,
  DXGI_FORMAT_BC5_UNORM,
  DXGI_FORMAT_BC5_SNORM,
  DXGI_FORMAT_B5G6R5_UNORM,
  DXGI_FORMAT_B5G5R5A1_UNORM,
  DXGI_FORMAT_B8G8R8A8_UNORM,
  DXGI_FORMAT_B8G8R8X8_UNORM,
  DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM,
  DXGI_FORMAT_B8G8R8A8_TYPELESS,
  DXGI_FORMAT_B8G8R8A8_UNORM_SRGB,
  DXGI_FORMAT_B8G8R8X8_TYPELESS,
  DXGI_FORMAT_B8G8R8X8_UNORM_SRGB,
  DXGI_FORMAT_BC6H_TYPELESS,
  DXGI_FORMAT_BC6H_UF16,
  DXGI_FORMAT_BC6H_SF16,
  DXGI_FORMAT_BC7_TYPELESS,
  DXGI_FORMAT_BC7_UNORM,
  DXGI_FORMAT_BC7_UNORM_SRGB,
  DXGI_FORMAT_AYUV,
  DXGI_FORMAT_Y410,
  DXGI_FORMAT_Y416,
  DXGI_FORMAT_NV12,
  DXGI_FORMAT_P010,
  DXGI_FORMAT_P016,
  DXGI_FORMAT_420_OPAQUE,
  DXGI_FORMAT_YUY2,
  DXGI_FORMAT_Y210,
  DXGI_FORMAT_Y216,
  DXGI_FORMAT_NV11,
  DXGI_FORMAT_AI44,
  DXGI_FORMAT_IA44,
  DXGI_FORMAT_P8,
  DXGI_FORMAT_A8P8,
  DXGI_FORMAT_B4G4R4A4_UNORM,
  DXGI_FORMAT_P208,
  DXGI_FORMAT_V208,
  DXGI_FORMAT_V408,
  DXGI_FORMAT_SAMPLER_FEEDBACK_MIN_MIP_OPAQUE,
  DXGI_FORMAT_SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE,
  DXGI_FORMAT_FORCE_UINT
} DXGI_FORMAT;

#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif

/*
  Structure declarations.
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2, extFormat, extDimension, extFlags, extArraySize, extFlags2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _BC7Colors { unsigned char r[6], g[6], b[6], a[6]; } BC7Colors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColorLookup { DDSSourceBlock sources[2]; } DDSSingleColorLookup; typedef struct _BC7ModeInfo { unsigned char partition_bits, num_subsets, color_precision, alpha_precision, num_pbits, index_precision, index2_precision; } BC7ModeInfo; typedef MagickBooleanType DDSDecoder(const ImageInfo *,Image *,const DDSInfo *,const MagickBooleanType, ExceptionInfo *); typedef MagickBooleanType DDSPixelDecoder(Image *,const DDSInfo *,ExceptionInfo *); static const DDSSingleColorLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 
8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } 
}, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 
3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 
}, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 
}, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColorLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, 
{ 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } 
}, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 
37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 
51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } }, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColorLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; static const unsigned char BC7_weight2[] = { 0, 21, 
43, 64 }; static const unsigned char BC7_weight3[] = { 0, 9, 18, 27, 37, 46, 55, 64 }; static const unsigned char BC7_weight4[] = { 0, 4, 9, 13, 17, 21, 26, 30, 34, 38, 43, 47, 51, 55, 60, 64 }; /* stores info for each mode of BC7 */ static const BC7ModeInfo BC7_mode_info[8] = { { 4, 3, 4, 0, 6, 3, 0 }, /* mode 0 */ { 6, 2, 6, 0, 2, 3, 0 }, /* mode 1 */ { 6, 3, 5, 0, 0, 2, 0 }, /* mode 2 */ { 6, 2, 7, 0, 4, 2, 0 }, /* mode 3 */ { 0, 1, 5, 6, 0, 2, 3 }, /* mode 4 */ { 0, 1, 7, 8, 0, 2, 2 }, /* mode 5 */ { 0, 1, 7, 7, 2, 4, 0 }, /* mode 6 */ { 6, 2, 5, 5, 4, 2, 0 }, /* mode 7 */ }; static const unsigned char BC7_partition_table[2][64][16] = { { /* BC7 Partition Set for 2 Subsets */ { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 }, { 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1 }, { 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1 }, { 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1 }, { 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1 }, { 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1 }, { 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0 }, { 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0 }, { 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 }, { 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1 }, { 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0 }, { 0, 0, 0, 
0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 }, { 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0 }, { 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0 }, { 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0 }, { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 }, { 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0 }, { 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0 }, { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 }, { 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1 }, { 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0 }, { 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0 }, { 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0 }, { 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0 }, { 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1 }, { 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1 }, { 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0 }, { 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0 }, { 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0 }, { 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0 }, { 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0 }, { 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1 }, { 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1 }, { 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0 }, { 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0 }, { 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0 }, { 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1 }, { 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0 }, { 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0 }, { 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1 }, { 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1 }, { 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1 }, { 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1 }, { 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0 }, { 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 
1, 0, 1, 1, 1 } }, { /* BC7 Partition Set for 3 Subsets */ { 0, 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 1, 2, 2, 2, 2 }, { 0, 0, 0, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 2, 1 }, { 0, 0, 0, 0, 2, 0, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1 }, { 0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 1, 1, 0, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2 }, { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2 }, { 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2 }, { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2 }, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 }, { 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2 }, { 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2 }, { 0, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 2 }, { 0, 0, 1, 1, 0, 1, 1, 2, 1, 1, 2, 2, 1, 2, 2, 2 }, { 0, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0, 0, 2, 2, 2, 0 }, { 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 2, 1, 1, 2, 2 }, { 0, 1, 1, 1, 0, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0, 0 }, { 0, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2 }, { 0, 0, 2, 2, 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1 }, { 0, 1, 1, 1, 0, 1, 1, 1, 0, 2, 2, 2, 0, 2, 2, 2 }, { 0, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 1, 2, 2, 2, 1 }, { 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 0, 1, 2, 2 }, { 0, 0, 0, 0, 1, 1, 0, 0, 2, 2, 1, 0, 2, 2, 1, 0 }, { 0, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0 }, { 0, 0, 1, 2, 0, 0, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2 }, { 0, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 0, 1, 1, 0 }, { 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1 }, { 0, 0, 2, 2, 1, 1, 0, 2, 1, 1, 0, 2, 0, 0, 2, 2 }, { 0, 1, 1, 0, 0, 1, 1, 0, 2, 0, 0, 2, 2, 2, 2, 2 }, { 0, 0, 1, 1, 0, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 1 }, { 0, 0, 0, 0, 2, 0, 0, 0, 2, 2, 1, 1, 2, 2, 2, 1 }, { 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 2, 2, 2 }, { 0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 1, 2, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 0, 1, 2, 0, 0, 2, 2, 0, 2, 2, 2 }, { 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0 }, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0 }, { 0, 1, 2, 0, 1, 
2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 }, { 0, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 1, 0, 1, 2, 0 }, { 0, 0, 1, 1, 2, 2, 0, 0, 1, 1, 2, 2, 0, 0, 1, 1 }, { 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 1, 1 }, { 0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2 }, { 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1 }, { 0, 0, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2, 1, 1, 2, 2 }, { 0, 0, 2, 2, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 1, 1 }, { 0, 2, 2, 0, 1, 2, 2, 1, 0, 2, 2, 0, 1, 2, 2, 1 }, { 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 0, 1, 0, 1 }, { 0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1 }, { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2 }, { 0, 2, 2, 2, 0, 1, 1, 1, 0, 2, 2, 2, 0, 1, 1, 1 }, { 0, 0, 0, 2, 1, 1, 1, 2, 0, 0, 0, 2, 1, 1, 1, 2 }, { 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2 }, { 0, 2, 2, 2, 0, 1, 1, 1, 0, 1, 1, 1, 0, 2, 2, 2 }, { 0, 0, 0, 2, 1, 1, 1, 2, 1, 1, 1, 2, 0, 0, 0, 2 }, { 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 2, 2 }, { 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 1, 2 }, { 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 2 }, { 0, 0, 2, 2, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 2 }, { 0, 0, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2 }, { 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1 }, { 0, 2, 2, 2, 1, 2, 2, 2, 0, 2, 2, 2, 1, 2, 2, 2 }, { 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }, { 0, 1, 1, 1, 2, 0, 1, 1, 2, 2, 0, 1, 2, 2, 2, 0 } } }; static const unsigned char BC7_anchor_index_table[4][64] = { /* Anchor index values for the first subset */ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* Anchor index values for the second subset of two-subset partitioning */ { 15,15,15,15,15,15,15,15, 15,15,15,15,15,15,15,15, 15, 2, 8, 2, 2, 8, 8,15, 2, 8, 2, 2, 8, 8, 2, 2, 15,15, 6, 8, 2, 8,15,15, 2, 8, 2, 2, 2,15,15, 6, 6, 2, 6, 8,15,15, 2, 2, 15,15,15,15,15, 2, 2,15 }, /* Anchor index values for 
the second subset of three-subset partitioning */ { 3, 3,15,15, 8, 3,15,15, 8, 8, 6, 6, 6, 5, 3, 3, 3, 3, 8,15, 3, 3, 6,10, 5, 8, 8, 6, 8, 5,15,15, 8,15, 3, 5, 6,10, 8,15, 15, 3,15, 5,15,15,15,15, 3,15, 5, 5, 5, 8, 5,10, 5,10, 8,13,15,12, 3, 3 }, /* Anchor index values for the third subset of three-subset partitioning */ { 15, 8, 8, 3,15,15, 3, 8, 15,15,15,15,15,15,15, 8, 15, 8,15, 3,15, 8,15, 8, 3,15, 6,10,15,15,10, 8, 15, 3,15,10,10, 8, 9,10, 6,15, 8,15, 3, 6, 6, 8, 15, 3,15,15,15,15,15,15, 15,15,15,15, 3,15,15, 8 } }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ static MagickBooleanType WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *); static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x + right.x; destination->y = left.y + right.y; destination->z = left.z + right.z; destination->w = left.w + right.w; } static inline void VectorClamp(DDSVector4 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = 
MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}

/* Clamp every component of a 3-vector to the [0,1] range. */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}

/* Copy the x/y/z components of a 4-vector into a 3-vector (w is dropped). */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
}

/* Copy all four components of a 4-vector. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
  destination->w = source.w;
}

/* destination = c - a*b, component-wise. */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = c.x - (a.x * b.x);
  destination->y = c.y - (a.y * b.y);
  destination->z = c.z - (a.z * b.z);
  destination->w = c.w - (a.w * b.w);
}

/* Component-wise product of two 4-vectors. */
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
  destination->w = left.w * right.w;
}

/* Component-wise product of two 3-vectors. */
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
}

/* destination = a*b + c, component-wise. */
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
  destination->w = (a.w * b.w) + c.w;
}

/* destination = a*b + c for 3-vectors. */
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
}

/* Component-wise reciprocal; no guard against zero components. */
static inline void VectorReciprocal(const DDSVector4 value, DDSVector4
  *destination)
{
  destination->x = 1.0f / value.x;
  destination->y = 1.0f / value.y;
  destination->z = 1.0f / value.z;
  destination->w = 1.0f / value.w;
}

/* Component-wise difference of two 4-vectors. */
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
  destination->w = left.w - right.w;
}

/* Component-wise difference of two 3-vectors. */
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
}

/* Truncate each component toward zero (floor for positive values, ceil for
   negative values). */
static inline void VectorTruncate(DDSVector4 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
  value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}

/* Truncate each component of a 3-vector toward zero. */
static inline void VectorTruncate3(DDSVector3 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ?
floor(value->z) : ceil(value->z); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static inline unsigned char GetSubsetIndex(unsigned char numSubsets, unsigned char partition_id,size_t pixelIndex) { if (numSubsets == 2) return BC7_partition_table[0][partition_id][pixelIndex]; if (numSubsets == 3) return BC7_partition_table[1][partition_id][pixelIndex]; return 0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. 
% */

/* Return MagickTrue when the leading bytes are the "DDS " magic. */
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
  Parse the DDS_HEADER (and the optional DX10 extension header) into
  dds_info.  Returns MagickFalse when a structure-size field or a required
  header flag does not match the DDS specification.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header (past the 4-byte magic) */
  (void) SeekBlob(image, 4, SEEK_SET);

  /* Check header field: DDS_HEADER is always 124 bytes */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;

  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);

  /* Check required flags */
  required=(size_t) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;

  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */

  /* Read pixel format structure: DDS_PIXELFORMAT is always 32 bytes */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;

  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);

  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */

  /* Read optional DX10 header if available */
  if ((dds_info->pixelformat.flags & DDPF_FOURCC) &&
      (dds_info->pixelformat.fourcc == FOURCC_DX10))
    {
      dds_info->extFormat = ReadBlobLSBLong(image);
      dds_info->extDimension = ReadBlobLSBLong(image);
      dds_info->extFlags = ReadBlobLSBLong(image);
      dds_info->extArraySize = ReadBlobLSBLong(image);
      dds_info->extFlags2 = ReadBlobLSBLong(image);
    }
  else
    {
      dds_info->extFormat = 0;
      dds_info->extDimension = 0;
      dds_info->extFlags = 0;
      dds_info->extArraySize = 0;
      dds_info->extFlags2 = 0;
    }

  return(MagickTrue);
}

/*
  Write a decoded 4x4 DXT1 color block into the pixel patch at (x,y),
  clipping against the image edges.  Returns MagickFalse when a non-zero
  alpha value is met while the image has no alpha channel yet, so the
  caller can enable alpha and decode the block again.
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          /* two bits per texel select one of the four palette colors */
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}

/*
  Decode the mipmap chain that follows the main surface: each mipmap is
  appended as a new image in the list and decoded with `decoder`.  Mipmaps
  are only read for textures and cube maps; dimensions halve each level.
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /* an EOF at this point means the main surface was truncated */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /* Mipmapcount includes the main image, so start from one */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (image->next == (Image *) NULL)
          return(MagickFalse);
        image->next->alpha_trait=image->alpha_trait;
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
status=decoder(image,dds_info,exception); if (status == MagickFalse) break; if ((w == 1) && (h == 1)) break; w=DIV2(w); h=DIV2(h); } } return(status); } static void CalculateColors(unsigned short c0, unsigned short c1, DDSColors *c, MagickBooleanType ignoreAlpha) { c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0; c->r[0] = (unsigned char) C565_red(c0); c->g[0] = (unsigned char) C565_green(c0); c->b[0] = (unsigned char) C565_blue(c0); c->r[1] = (unsigned char) C565_red(c1); c->g[1] = (unsigned char) C565_green(c1); c->b[1] = (unsigned char) C565_blue(c1); if (ignoreAlpha != MagickFalse || c0 > c1) { c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3); c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3); c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3); c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3); c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3); c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3); } else { c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2); c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2); c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2); c->r[3] = c->g[3] = c->b[3] = 0; c->a[3] = 255; } } static MagickBooleanType ReadDXT1Pixels(Image *image, const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; Quantum *q; ssize_t x; size_t bits; ssize_t y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read 8 bytes of data from the image */ c0=ReadBlobLSBShort(image); c1=ReadBlobLSBShort(image); bits=ReadBlobLSBLong(image); CalculateColors(c0,c1,&colors,MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ if (SetDXT1Pixels(image,x,y,colors,bits,q) == 
MagickFalse) { /* Correct alpha */ SetImageAlpha(image,QuantumRange,exception); q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q != (Quantum *) NULL) SetDXT1Pixels(image,x,y,colors,bits,q); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } /* Skip the mipmap images for compressed (DXTn) dds files */ static MagickBooleanType SkipDXTMipmaps(Image *image,const DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image, const DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,8,exception)); } static MagickBooleanType ReadDXT3Pixels(Image *image, const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; Quantum *q; ssize_t i, x; unsigned char alpha; size_t a0, a1, bits, code; ssize_t j, y; unsigned short c0, c1; 
magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x), MagickMin(4, image->rows - y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read alpha values (8 bytes) */ a0 = ReadBlobLSBLong(image); a1 = ReadBlobLSBLong(image); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value: multiply 0..15 by 17 to get range 0..255 */ if (j < 2) alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf); else alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image, const DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadDXT5Pixels(Image *image, const DDSInfo 
*magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; MagickSizeType alpha_bits; Quantum *q; ssize_t i, x; unsigned char a0, a1; size_t alpha, bits, code, alpha_code; ssize_t j, y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x), MagickMin(4, image->rows - y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read alpha values (8 bytes) */ a0 = (unsigned char) ReadBlobByte(image); a1 = (unsigned char) ReadBlobByte(image); alpha_bits = (MagickSizeType)ReadBlobLSBLong(image); alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value */ alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7; if (alpha_code == 0) alpha = a0; else if (alpha_code == 1) alpha = a1; else if (a0 > a1) alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7; else if (alpha_code == 6) alpha = 0; else if (alpha_code == 7) alpha = 255; else alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) 
return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image, const DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); } static unsigned char GetBit(const unsigned char *block,size_t *start_bit) { size_t base, index; index=(*start_bit) >> 3; base=(*start_bit) - (index << 3); (*start_bit)++; if (index > 15) return(0); return((block[index] >> base) & 0x01); } static unsigned char GetBits(const unsigned char *block,size_t *start_bit, unsigned char num_bits) { size_t base, first_bits, index, next_bits; unsigned char ret; index=(*start_bit) >> 3; base=(*start_bit)-(index << 3); if (index > 15) return(0); if (base + num_bits > 8) { first_bits=8-base; next_bits=num_bits-first_bits; ret=((block[index] >> base) | (((block[index + 1]) & ((1u << next_bits) - 1)) << first_bits)); } else { ret=((block[index] >> base) & ((1 << num_bits) - 1)); } (*start_bit)+=num_bits; return(ret); } static MagickBooleanType IsPixelAnchorIndex(unsigned char subset_index, unsigned char num_subsets,size_t pixelIndex,unsigned char partition_id) { size_t table_index; /* for first subset */ if (subset_index == 0) table_index=0; /* for second subset of two subset partitioning */ else if ((subset_index == 1) && (num_subsets == 2)) table_index=1; /* for second subset of three subset partitioning */ else if ((subset_index == 1) && (num_subsets == 3)) table_index=2; /* for third subset of three subset partitioning */ else table_index=3; if (BC7_anchor_index_table[table_index][partition_id] == pixelIndex) return(MagickTrue); else return(MagickFalse); } static void ReadEndpoints(BC7Colors *endpoints,const unsigned char *block, size_t mode,size_t *start_bit) { 
MagickBooleanType has_alpha, has_pbits; unsigned char alpha_bits, color_bits, pbit, pbit0, pbit1; size_t num_subsets, i; num_subsets=(size_t) BC7_mode_info[mode].num_subsets; color_bits=BC7_mode_info[mode].color_precision; /* red */ for (i=0; i < num_subsets * 2; i++) endpoints->r[i]=GetBits(block,start_bit,color_bits); /* green */ for (i=0; i < num_subsets * 2; i++) endpoints->g[i]=GetBits(block,start_bit,color_bits); /* blue */ for (i=0; i < num_subsets * 2; i++) endpoints->b[i]=GetBits(block,start_bit,color_bits); /* alpha */ alpha_bits=BC7_mode_info[mode].alpha_precision; has_alpha=mode >= 4 ? MagickTrue : MagickFalse; if (has_alpha != MagickFalse) { for (i=0; i < num_subsets * 2; i++) endpoints->a[i]=GetBits(block,start_bit,alpha_bits); } /* handle modes that have p bits */ has_pbits=(mode == 0) || (mode == 1) || (mode == 3) || (mode == 6) || (mode == 7) ? MagickTrue : MagickFalse; if (has_pbits != MagickFalse) { for (i=0; i < num_subsets * 2; i++) { endpoints->r[i] <<= 1; endpoints->g[i] <<= 1; endpoints->b[i] <<= 1; endpoints->a[i] <<= 1; } /* mode 1 shares a p-bit for both endpoints */ if (mode == 1) { pbit0=GetBit(block,start_bit); pbit1=GetBit(block,start_bit); endpoints->r[0] |= pbit0; endpoints->g[0] |= pbit0; endpoints->b[0] |= pbit0; endpoints->r[1] |= pbit0; endpoints->g[1] |= pbit0; endpoints->b[1] |= pbit0; endpoints->r[2] |= pbit1; endpoints->g[2] |= pbit1; endpoints->b[2] |= pbit1; endpoints->r[3] |= pbit1; endpoints->g[3] |= pbit1; endpoints->b[3] |= pbit1; } else { for (i=0; i < num_subsets * 2; i++) { pbit=GetBit(block,start_bit); endpoints->r[i] |= pbit; endpoints->g[i] |= pbit; endpoints->b[i] |= pbit; endpoints->a[i] |= pbit; } } } /* 1 bit increased due to the pbit */ if (has_pbits != MagickFalse) { color_bits++; alpha_bits++; } /* color and alpha bit shifting so that MSB lies in bit 7 */ for (i=0; i < num_subsets * 2; i++) { endpoints->r[i] <<= (8 - color_bits); endpoints->g[i] <<= (8 - color_bits); endpoints->b[i] <<= (8 - color_bits); 
endpoints->a[i] <<= (8 - alpha_bits); endpoints->r[i]=endpoints->r[i] | (endpoints->r[i] >> color_bits); endpoints->g[i]=endpoints->g[i] | (endpoints->g[i] >> color_bits); endpoints->b[i]=endpoints->b[i] | (endpoints->b[i] >> color_bits); endpoints->a[i]=endpoints->a[i] | (endpoints->a[i] >> alpha_bits); } if (has_alpha == MagickFalse) { for (i=0; i < num_subsets * 2; i++) endpoints->a[i]=255; } } static MagickBooleanType ReadBC7Pixels(Image *image, const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { BC7Colors colors; Quantum *q; size_t mode, start_bit; ssize_t count, i, x, y; unsigned char a, alpha_indices[16], b, block[16], c0, c1, color_indices[16], g, index_prec, index2_prec, num_bits, num_subsets, partition_id, r, rotation, selector_bit, subset_indices[16], weight; magick_unreferenced(dds_info); memset(alpha_indices,0,sizeof(alpha_indices)); memset(block,0,sizeof(block)); memset(color_indices,0,sizeof(color_indices)); memset(subset_indices,0,sizeof(subset_indices)); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { size_t area; /* Get 4x4 patch of pixels to write on */ q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read 16 bytes of data from the image */ count=ReadBlob(image,16,block); if (count != 16) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Get the mode of the block */ start_bit=0; while (start_bit <= 8 && !GetBit(block, &start_bit)) {} mode=start_bit-1; if (mode > 7) return(MagickFalse); num_subsets=BC7_mode_info[mode].num_subsets; partition_id=0; /* only these modes have more than 1 subset */ if ((mode == 0) || (mode == 1) || (mode == 2) || (mode == 3) || (mode == 7)) { partition_id=GetBits(block,&start_bit,BC7_mode_info[mode].partition_bits); if (partition_id > 63) return(MagickFalse); } rotation=0; if ((mode == 4) || (mode == 5)) 
rotation=GetBits(block,&start_bit,2);
      selector_bit=0;
      if (mode == 4)
        selector_bit=GetBit(block, &start_bit);
      ReadEndpoints(&colors,block,mode,&start_bit);
      index_prec=BC7_mode_info[mode].index_precision;
      index2_prec=BC7_mode_info[mode].index2_precision;
      /* mode 4 with selector set swaps index widths: 3-bit color, 2-bit alpha
         read first */
      if ((mode == 4) && (selector_bit == 1))
        {
          index_prec=3;
          alpha_indices[0]=GetBit(block,&start_bit);
          for (i = 1; i < 16; i++)
            alpha_indices[i]=GetBits(block,&start_bit,2);
        }
      /* get color and subset indices */
      for (i=0; i < 16; i++)
      {
        subset_indices[i]=GetSubsetIndex(num_subsets,partition_id,i);
        num_bits=index_prec;
        /* anchor pixels store one fewer index bit (implicit MSB of 0) */
        if (IsPixelAnchorIndex(subset_indices[i],num_subsets,i,partition_id))
          num_bits--;
        color_indices[i]=GetBits(block,&start_bit,num_bits);
      }
      /* get alpha indices if the block has it */
      if ((mode == 5) || ((mode == 4) && (selector_bit == 0)))
        {
          alpha_indices[0]=GetBits(block,&start_bit,index2_prec - 1);
          for (i=1; i < 16; i++)
            alpha_indices[i]=GetBits(block,&start_bit,index2_prec);
        }
      /* Write the pixels (area clamps the tile at the image edges) */
      area=MagickMin(MagickMin(4,image->columns-x)*MagickMin(4,image->rows-y),
        16);
      for (i=0; i < (ssize_t) area; i++)
      {
        unsigned char
          c2;

        /* endpoint pair of this pixel's subset */
        c0=2 * subset_indices[i];
        c1=(2 * subset_indices[i]) + 1;
        c2=color_indices[i];
        weight=64;
        /* Color Interpolation: weight table selected by index precision */
        switch(index_prec)
        {
          case 2:
            if (c2 < sizeof(BC7_weight2))
              weight=BC7_weight2[c2];
            break;
          case 3:
            if (c2 < sizeof(BC7_weight3))
              weight=BC7_weight3[c2];
            break;
          default:
            if (c2 < sizeof(BC7_weight4))
              weight=BC7_weight4[c2];
        }
        /* 6-bit fixed-point lerp with rounding, per the BC7 specification */
        r=((64 - weight) * colors.r[c0] + weight * colors.r[c1] + 32) >> 6;
        g=((64 - weight) * colors.g[c0] + weight * colors.g[c1] + 32) >> 6;
        b=((64 - weight) * colors.b[c0] + weight * colors.b[c1] + 32) >> 6;
        a=((64 - weight) * colors.a[c0] + weight * colors.a[c1] + 32) >> 6;
        /* Interpolate alpha for mode 4 and 5 blocks */
        if (mode == 4 || mode == 5)
          {
            unsigned char
              a0;

            a0=alpha_indices[i];
            if (a0 < sizeof(BC7_weight2))
              weight=BC7_weight2[a0];
            if ((mode == 4) && (selector_bit == 0) &&
                (a0 < sizeof(BC7_weight3)))
              weight=BC7_weight3[a0];
            if ((c0 < sizeof(colors.a)) && (c1 < sizeof(colors.a)))
              a=((64 - weight) * colors.a[c0] + weight * colors.a[c1] +
                32) >> 6;
          }
        /* undo the channel rotation stored in the block */
        switch (rotation)
        {
          case 1:
            Swap(a,r);
            break;
          case 2:
            Swap(a,g);
            break;
          case 3:
            Swap(a,b);
            break;
        }
        SetPixelRed(image,ScaleCharToQuantum((unsigned char)r),q);
        SetPixelGreen(image,ScaleCharToQuantum((unsigned char)g),q);
        SetPixelBlue(image,ScaleCharToQuantum((unsigned char)b),q);
        SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)a),q);
        q+=GetPixelChannels(image);
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  ReadBC7(): decode the top-level BC7 surface, then either decode the mipmap
  chain (read_mipmaps) or seek past it (16 bytes per 4x4 block).
*/
static MagickBooleanType ReadBC7(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadBC7Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadBC7Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/*
  ReadUncompressedRGBPixels(): decode an uncompressed surface: 8-bit gray,
  16-bit B5G6R5, 24-bit BGR, or 32-bit BGRX (pad byte discarded).
*/
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  const DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    x,
    y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8 ||
          dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G6R5_UNORM)
        {
          /* 5:6:5 fields expanded to 8 bits via scaling */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >>
11)/31.0)*255)),q);
        }
      else
        {
          /* 24/32-bit data is stored BGR(X) */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32 ||
              dds_info->extFormat == DXGI_FORMAT_B8G8R8X8_UNORM)
            (void) ReadBlobByte(image);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  Skip the mipmap images for uncompressed (RGB or RGBA) dds files:
  seek past each level, halving width/height per level.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,const DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /* Only skip mipmaps for textures and cube maps */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /* Mipmapcount includes the main image, so start from one */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        offset=(MagickOffsetType)w*h*pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}

/*
  ReadUncompressedRGB(): dispatch for alpha-less uncompressed surfaces;
  sets grayscale type for 8-bit data and rejects non-565 16-bit masks,
  then reads pixels and handles/skips mipmaps.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (dds_info->pixelformat.rgb_bitcount == 8 ||
      dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
      dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);
  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
      exception));
  else
    /* NOTE(review): pixel_size 3 is passed even for 8/16/32-bit layouts --
       verify the mipmap skip distance for non-24-bit data. */
    return(SkipRGBMipmaps(image,dds_info,3,exception));
}

/*
  ReadUncompressedRGBAPixels(): decode uncompressed surfaces with alpha:
  16-bit 1555/4444 or luminance+alpha 8:8, and 32-bit RGBA/BGRA.
*/
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  const DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          /* 8-bit luminance with 8-bit alpha */
          alphaBits=2;
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  if (dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
    alphaBits=1;
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* 1555: single alpha bit */
              SetPixelAlpha(image,(color & (1 << 15)) ?
QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* luminance (low byte) + alpha (high byte) */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* 4444: four bits per channel */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else if (dds_info->extFormat == DXGI_FORMAT_R8G8B8A8_UNORM ||
          IsBitMask(dds_info->pixelformat,0x000000ff,0x0000ff00,0x00ff0000,
          0xff000000))
        {
          /* byte order R,G,B,A */
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      else
        {
          /* byte order B,G,R,A */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  ReadUncompressedRGBA(): read the top-level surface, then either decode the
  mipmap chain (read_mipmaps) or seek past it (4 bytes per pixel).
*/
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image *image,const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
      exception));
  else
    return(SkipRGBMipmaps(image,dds_info,4,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d D D S I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadDDSImage() reads a DirectDraw Surface image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadDDSImage method is:
%
%      Image *ReadDDSImage(const ImageInfo *image_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: The image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse, volume=MagickFalse, read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.
*/
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;
  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;
  /*
    Determine pixel format: choose decoder, compression tag and alpha trait.
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        case FOURCC_DX10:
        {
          /* DX10 extension header: only 2D textures are supported */
          if (dds_info.extDimension != DDSEXT_DIMENSION_TEX2D)
            {
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          switch (dds_info.extFormat)
          {
            case DXGI_FORMAT_R8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G6R5_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G5R5A1_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_R8G8B8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8X8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_BC1_UNORM:
            {
              alpha_trait = UndefinedPixelTrait;
              compression = DXT1Compression;
              decoder = ReadDXT1;
              break;
            }
            case DXGI_FORMAT_BC2_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT3Compression;
              decoder = ReadDXT3;
              break;
            }
            case DXGI_FORMAT_BC3_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT5Compression;
              decoder = ReadDXT5;
              break;
            }
            case DXGI_FORMAT_BC7_UNORM:
            case DXGI_FORMAT_BC7_UNORM_SRGB:
            {
              alpha_trait = BlendPixelTrait;
              compression = BC7Compression;
              decoder = ReadBC7;
              break;
            }
            default:
            {
              /* Unknown format */
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          }
          if (dds_info.extFlags & DDSEXTFLAGS_CUBEMAP)
            cubemap = MagickTrue;
          /* NOTE(review): this assignment appears dead -- num_images is
             unconditionally reset to 1 after the dispatch below; confirm
             whether DX10 array textures should honor extArraySize. */
          num_images = dds_info.extArraySize;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }
  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }
  if (volume)
    num_images = dds_info.depth;
  /* sanity: at least one image, and not more than the blob could hold */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");
  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;
  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /*
          Start a new image
        */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }
    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        /* decode failed: keep any fully-read earlier frames */
        (void) CloseBlob(image);
        if (n == 0)
          return(DestroyImageList(image));
        return(GetFirstImageInList(image));
      }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r D D S I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterDDSImage() adds attributes for the DDS image format to
%  the list of supported formats.  The attributes include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterDDSImage method is:
%
%      RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  MagickInfo
    *entry;

  entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
  RemapIndices(): scatter the 16 packed per-pixel codes back to tile order
  using map; pixels with map[i] == -1 (transparent in DXT1) get code 3.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    i;

  for (i = 0; i < 16; i++)
  {
    if (map[i] == -1)
      target[i] = 3;
    else
      target[i] =
source[map[i]];
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r D D S I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterDDSImage() removes format registrations made by the
%  DDS module from the list of supported formats.
%
%  The format of the UnregisterDDSImage method is:
%
%      UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
  (void) UnregisterMagickInfo("DDS");
  (void) UnregisterMagickInfo("DXT1");
  (void) UnregisterMagickInfo("DXT5");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e D D S I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
%  The format of the WriteDDSImage method is:
%
%      MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
%        Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
%
*/

/*
  CompressAlpha(): quantize the 16 alpha samples onto the DXT5 alpha codebook
  built from (min,max) with `steps` interpolated entries (5-step mode adds
  the fixed 0 and 255 codes).  Returns the accumulated squared error and
  fills indices[] with the chosen code per pixel (-1 samples get index 0).
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  unsigned char
    codes[8];

  ssize_t
    i;

  size_t
    error,
    index,
    j,
    least,
    value;

  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;
  /* interpolated codes; for steps == 7 this overwrites codes[6]/[7] too */
  for (i=1; i <  (ssize_t) steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);
  error = 0;
  for (i=0; i<16; i++)
  {
    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }
    value = alphas[i];
    least = SIZE_MAX;
    index = 0;
    for (j=0; j<8; j++)
    {
      size_t
        dist;

      /* unsigned wrap is harmless: (-d)^2 == d^2 modulo 2^64 */
      dist = value - (size_t)codes[j];
      dist *= dist;
      if (dist < least)
        {
          least = dist;
          index = j;
        }
    }
    indices[i] = (unsigned char)index;
    error += least;
  }
  return error;
}

/*
  ConstructOrdering(): sort the color points along `axis` (insertion sort of
  dot products) into order[16*iteration].  Returns MagickFalse if the same
  ordering was already produced by an earlier iteration (fixed point reached).
  Also recomputes the weighted points and their running sum for the fit.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  o = order + (16*iteration);
  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;
      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;
    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }
    if (same != MagickFalse)
      return MagickFalse;
  }
  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;
  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];
    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;
    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }
  return MagickTrue;
}

/*
  CompressClusterFit(): squish-style iterative cluster fit for DXT color
  endpoints -- tries every split of the axis-ordered points into the four
  codebook clusters and keeps the least-squares best endpoints.
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const
DDSVector3 principle, const DDSVector4 metric,
  DDSVector3 *start, DDSVector3* end, unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);
  /* 5:6:5 quantization grid and its reciprocal */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;
  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;
  /* initial ordering along the principal axis */
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    /* (i,j,k) delimit the 0 / 2/3 / 1/3 / 1 clusters of the ordered points */
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);
      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }
        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          /* closed-form least-squares solve for the two endpoints given
             this cluster split, then snap to the 5:6:5 grid and evaluate
             the metric-weighted error */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);
          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);
          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);
          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);
          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);
          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);
          error = e2.x + e2.y + e2.z;
          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                /* double-checked under the critical section */
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }
          if (k == count)
            break;
          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }
        if (j == count)
          break;
        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }
    /* stop when the last iteration produced no improvement, after 8
       iterations, or when the reordering repeats a previous one */
    if (bestIteration != iterationIndex)
      break;
    iterationIndex++;
    if (iterationIndex == 8)
      break;
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
        iterationIndex) == MagickFalse)
      break;
  }
  /* translate the winning split into per-point codebook indices */
  o = order + (16*bestIteration);
  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;
  RemapIndices(map,unordered,indices);
}

/*
  CompressRangeFit(): fast DXT endpoint fit -- project all points on the
  principal axis, take the extremes as endpoints, snap them to the 5:6:5
  grid and assign each point to the nearest of the four codebook colors.
*/
static void CompressRangeFit(const size_t count,
  const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  if (count > 0)
    {
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);
      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }
  /* snap both endpoints to the 5:6:5 grid */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);
  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);
  /* four-entry codebook: endpoints plus the two 1/3-2/3 interpolants */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;
      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist =
d; bestj = j; } } closest[i] = (unsigned char) bestj; } RemapIndices(map, closest, indices); } static void ComputeEndPoints(const DDSSingleColorLookup *lookup[], const unsigned char *color, DDSVector3 *start, DDSVector3 *end, unsigned char *index) { ssize_t i; size_t c, maxError = SIZE_MAX; for (i=0; i < 2; i++) { const DDSSourceBlock* sources[3]; size_t error = 0; for (c=0; c < 3; c++) { sources[c] = &lookup[c][color[c]].sources[i]; error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error); } if (error > maxError) continue; start->x = (float) sources[0]->start / 31.0f; start->y = (float) sources[1]->start / 63.0f; start->z = (float) sources[2]->start / 31.0f; end->x = (float) sources[0]->end / 31.0f; end->y = (float) sources[1]->end / 63.0f; end->z = (float) sources[2]->end / 31.0f; *index = (unsigned char) (2*i); maxError = error; } } static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z))); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } static void ComputeWeightedCovariance(const size_t count, const DDSVector4 *points, float *covariance) { DDSVector3 centroid; float total; size_t i; total = 0.0f; VectorInit3(centroid,0.0f); for (i=0; i < count; 
i++) { total += points[i].w; centroid.x += (points[i].x * points[i].w); centroid.y += (points[i].y * points[i].w); centroid.z += (points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5, size_t max5, size_t min7, size_t max7) { ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char *indices) { ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) 
WriteBlobByte(image,(unsigned char) (a & 0xff)); (void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static void WriteCompressed(Image *image, const size_t count, DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if ((clusterFit == MagickFalse) || (count == 0)) CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); } static void WriteSingleColorFit(Image *image, const DDSVector4 *points, const ssize_t *map) { DDSVector3 start, end; ssize_t i; unsigned char color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { ssize_t x; ssize_t i, y, bx, by; const Quantum *p; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, 
rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const Quantum *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(image,p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f; point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f; p+=GetPixelChannels(image); match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { const Quantum *p; ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) 
NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info, const size_t pixelFormat,const size_t compression,const size_t mipmaps, const MagickBooleanType fromlist,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,ExceptionInfo *exception) { const char *option; Image *mipmap_image, *resize_image; MagickBooleanType fast_mipmaps, status; ssize_t i; size_t columns, rows; columns=DIV2(image->columns); rows=DIV2(image->rows); option=GetImageOption(image_info,"dds:fast-mipmaps"); fast_mipmaps=IsStringTrue(option); mipmap_image=image; resize_image=image; status=MagickTrue; for (i=0; i < (ssize_t) mipmaps; i++) { if (fromlist == MagickFalse) { mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter, exception); if (mipmap_image == (Image *) NULL) { status=MagickFalse; break; } } else { mipmap_image=mipmap_image->next; if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows)) ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported", image->filename); } DestroyBlob(mipmap_image); mipmap_image->blob=ReferenceBlob(image->blob); WriteImageData(mipmap_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); if (fromlist == MagickFalse) { if 
(fast_mipmaps == MagickFalse) mipmap_image=DestroyImage(mipmap_image); else { if (resize_image != image) resize_image=DestroyImage(resize_image); resize_image=mipmap_image; } } columns=DIV2(columns); rows=DIV2(rows); } if (resize_image != image) resize_image=DestroyImage(resize_image); return(status); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MagickPathExtent]; ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) 
memset(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) /* bitcount / masks */ (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->alpha_trait != UndefinedPixelTrait) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) /* ddscaps2 + reserved region */ (void) WriteBlobLSBLong(image,0x00); } static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image, ExceptionInfo *exception) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, fromlist, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (image->alpha_trait == UndefinedPixelTrait) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) 
compression=FOURCC_DXT1; if (image_info->compression == DXT1Compression) compression=FOURCC_DXT1; else if (image_info->compression == NoCompression) pixelFormat=DDPF_RGB; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } mipmaps=0; fromlist=MagickFalse; option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) NULL) { if (LocaleNCompare(option,"fromlist",8) == 0) { Image *next; fromlist=MagickTrue; next=image->next; while(next != (Image *) NULL) { mipmaps++; next=next->next; } } } if ((mipmaps == 0) && ((image->columns & (image->columns - 1)) == 0) && ((image->rows & (image->rows - 1)) == 0)) { maxMipmaps=SIZE_MAX; if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } option=GetImageOption(image_info,"dds:raw"); if (IsStringTrue(option) == MagickFalse) WriteDDSInfo(image,pixelFormat,compression,mipmaps); else mipmaps=0; WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, exception); if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression, mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse)) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); }
DRB090-static-local-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * DataRaceBench DRB090: data-sharing of block-local variables in OpenMP.
 * For a variable declared in a scope inside an OpenMP construct:
 *   - private if the variable has an automatic storage duration
 *   - shared if the variable has a static storage duration.
 *
 * Dependence pairs:
 *   tmp@73:5 vs. tmp@73:5
 *   tmp@73:5 vs. tmp@74:12
 */
#include<stdio.h>

int main(int argc, char* argv[])
{
  int i;
  int len=100;
  /* C99 variable-length arrays sized by len. */
  int a[len], b[len];

  #pragma omp parallel for private(i)
  for (i=0;i<len;i++)
  {
    a[i]=i;
    b[i]=i;
  }

  /* static storage for a local variable */
  /* NOTE(review): tmp has static storage duration, so by the OpenMP
     data-sharing rules quoted above it is shared — the intended race site of
     this benchmark.  The private(tmp) clause here may alter that; confirm
     against the upstream DRB090 source, which declares tmp without
     privatizing it. */
  #pragma omp parallel
  {
    static int tmp;
    #pragma omp for private(i, tmp)
    for (i=0;i<len;i++)
    {
      tmp = a[i]+i;
      a[i] = tmp;
    }
  }

  /* automatic storage for a local variable */
  /* tmp here is automatic, hence private per thread: race-free baseline. */
  #pragma omp parallel
  {
    int tmp;
    #pragma omp for private(i, tmp)
    for (i=0;i<len;i++)
    {
      tmp = b[i]+i;
      b[i] = tmp;
    }
  }

  printf("a[50]=%d b[50]=%d\n", a[50], b[50]);
  return 0;
}
mwac_utils.c
#include <stdlib.h> #include <stdio.h> #include <omp.h> //#include <plot.h> #include "mwac_utils.h" #include "antenna_mapping.h" void fill_mapping_matrix() { extern map_t corr_mapping[NINPUT][NINPUT]; extern int pfb_output_to_input[NINPUT]; extern int single_pfb_mapping[64]; extern int npol; extern int nstation; int inp1 = 0, inp2 = 0; int pol1 = 0, pol2 = 0; int index1 = 0, index2 = 0; int p=0,npfb = 4; // Output matrix has ordering // [channel][station][station][polarization][polarization][complexity] for (p=0;p<npfb;p++) { for (inp1=0;inp1<64;inp1++) { pfb_output_to_input[(p*64) + inp1] = single_pfb_mapping[inp1] + (p*64); } } for (inp1 = 0; inp1 < nstation; inp1++) { for (inp2 = 0; inp2 < nstation; inp2++) { for (pol1 = 0; pol1 < npol; pol1++) { for (pol2 = 0; pol2 < npol; pol2++) { index1 = inp1 * npol + pol1; index2 = inp2 * npol + pol2; /* fprintf(stdout, "inp1 %d pol1 %d inp2 %d pol2 %d map to index1 %d and index2 %d\n", inp1, pol1, inp2, pol2, index1, index2); fprintf(stdout, "these map to PFB input numbers: %d and %d\n", pfb_output_to_input[index1], pfb_output_to_input[index2]); */ corr_mapping[pfb_output_to_input[index1]][pfb_output_to_input[index2]].stn1 = inp1; // this should give us the pfb input corr_mapping[pfb_output_to_input[index1]][pfb_output_to_input[index2]].stn2 = inp2; corr_mapping[pfb_output_to_input[index1]][pfb_output_to_input[index2]].pol1 = pol1; corr_mapping[pfb_output_to_input[index1]][pfb_output_to_input[index2]].pol2 = pol2; } } } } } void get_baseline(int st1, int st2, int pol1, int pol2, float complex *data, float complex *baseline) { int i, j, k, l, m; float complex *in, *out; extern int npol; extern int nstation; extern int nfrequency; in = data; out = baseline; for (i = 0; i < nfrequency; i++) { for (j = 0; j < nstation; j++) { for (k = 0; k < nstation; k++) { for (l = 0; l < npol; l++) { for (m = 0; m < npol; m++) { if (j == st1 && k == st2) { if (l == pol1 && m == pol2) { *out = *in; out++; // fprintf(stdout,"%f 
%f\n",crealf(*in),cimagf(*in)); } } in++; } } } } } } void get_baseline_lu(int st1, int st2, int pol1, int pol2, float complex *data, float complex *baseline) { int i=0; float complex *in, *out; extern int npol; extern int nstation; extern int nfrequency; off_t in_index=0,offset,stride; in = data; out = baseline; /* direct lookup */ // offset = (st1*nstation*npol*npol) + (st2*npol*npol) + (pol1*npol) + pol2; offset = npol*((st1*nstation*npol) + (st2*npol) + pol1) + pol2; stride = (nstation*nstation*npol*npol); for (i=0;i<nfrequency;i++) { in_index = i*stride + offset; out[i] = in[in_index]; } } void get_baseline_r(int st1, int st2, int pol1, int pol2, float complex *data, float complex *reorder,int npol, int nstation, int nfrequency,int true_st1,int true_st2, int true_pol1,int true_pol2,int conjugate) { int i=0; float complex *in, *out; size_t out_index =0, in_index=0;; in = data; out = reorder; /* direct lookup */ for (i=0;i<nfrequency;i++) { in_index = i*(nstation*nstation*npol*npol) + (st1*nstation*npol*npol) + (st2*npol*npol) + (pol1*npol) + pol2; out_index = i*(nstation*(nstation+1)*npol*npol/2) + (((true_st1*nstation) - ((true_st1+1)/2)*true_st1) + true_st2)*npol*npol + (pol1*npol) + pol2; if (!conjugate) { out[out_index] = in[in_index]; } else { if (st2>st1) { out[out_index] = conj(in[in_index]); } } } } // full reorder using the correct mapping - takes the input cube and produces a packed triangular output // in the correct order // wacky packed tile order to packed triangular void full_reorder(float complex *full_matrix_h, float complex *reordered) { extern int npol; extern int nstation; extern int nfrequency; extern map_t corr_mapping[NINPUT][NINPUT]; int t1=0; int t2=0; int p1=0; int p2=0; long long baseline_count = 0; for (t1 = 0; t1 < nstation; t1++) { for (t2 = t1; t2 < nstation; t2++) { for (p1 = 0;p1 < npol;p1++) { for (p2 =0; p2 < npol; p2++) { baseline_count++; int index1 = t1 * npol + p1; int index2 = t2 * npol + p2; /* fprintf(stdout, 
"requesting ant1 %d ant 2 %d pol1 %d pol2 %d", antenna1, antenna2, pol1, pol2); */ map_t the_mapping = corr_mapping[index1][index2]; int conjugate = 0; /* fprintf(stdout, "input ant/pol combination decodes to stn1 %d stn2 %d pol1 %d pol2 %d\n", the_mapping.stn1, the_mapping.stn2, the_mapping.pol1, the_mapping.pol2); */ if (the_mapping.stn2 > the_mapping.stn1) { conjugate = 1; } else { conjugate = 0; } get_baseline_r(the_mapping.stn1, the_mapping.stn2, the_mapping.pol1, the_mapping.pol2, full_matrix_h, reordered,npol,nstation,nfrequency,conjugate,t1,t2,p1,p2); } } } } // now reoredered should contain a triagular packed array in the correct order } // Extracts the full matrix from the packed Hermitian form void extractMatrix(float complex *matrix, float complex *packed) { int f; extern int npol; extern int nstation; extern int nfrequency; /* use openmp to parallelise this. In single threaded version, this task takes 1/3 the overall CPU time, so 4 threads should be plenty to make this negligible */ omp_set_num_threads(4); #pragma omp parallel private (f) { #pragma omp for for (f = 0; f < nfrequency; f++) { int i,j,pol1,pol2; for (i = 0; i < nstation; i++) { for (j = 0; j <= i; j++) { int k = f * (nstation + 1) * (nstation / 2) + i * (i + 1) / 2 + j; for (pol1 = 0; pol1 < npol; pol1++) { for (pol2 = 0; pol2 < npol; pol2++) { int index = (k * npol + pol1) * npol + pol2; matrix[(((f * nstation + i) * nstation + j) * npol + pol1) * npol + pol2] = packed[index]; matrix[(((f * nstation + j) * nstation + i) * npol + pol2) * npol + pol1] = conjf(packed[index]); // printf("f:%d s1:%d s2:%d %d p1:%d p2:%d %d\n",f,i,j,k,pol1,pol2,index); } } } } } } // end openmp } void extractMatrix_slow(float complex *matrix, float complex *packed) { int f, i, j, pol1, pol2; extern int npol; extern int nstation; extern int nfrequency; int in_index=0; int out_index=0; int out_index_conj=0; for (f = 0; f < nfrequency; f++) { for (i = 0; i < nstation; i++) { for (j = 0; j <= i; j++) { for (pol1 = 
0; pol1 < npol; pol1++) { for (pol2 = 0; pol2 < npol; pol2++) { out_index = f*(nstation*nstation*npol*npol) + i*(nstation*npol*npol) + j*(npol*npol) + pol1*(npol) + pol2; out_index_conj = f*(nstation*nstation*npol*npol) + j*(nstation*npol*npol) + i*(npol*npol) + pol1*(npol) + pol2; matrix[out_index] = packed[in_index]; matrix[out_index_conj] = conjf(packed[in_index]); in_index++; } } } } } }
DRB013-nowait-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * DataRaceBench DRB013.  This example is extracted from a paper:
 * Ma etc. Symbolic Analysis of Concurrency Errors in OpenMP Programs,
 * ICPP 2013.
 *
 * Some threads may finish the for loop early and execute errors = dt[9]+1
 * while another thread may still be simultaneously executing the for
 * worksharing region by writing to d[9], causing data races.
 * Data race pair: a[i]@72:7 vs. a[9]@75:13.
 *
 * NOTE(review): as transcribed here the read of a[9] happens after the
 * parallel region (hence after its implicit barrier); the upstream DRB013
 * uses a nowait worksharing loop with the read inside the region — confirm
 * this copy still exhibits the documented race.
 */
#include <stdio.h>

int main()
{
  int i,error;
  int len = 1000;
  int a[len], b=5;   /* a is a C99 VLA */

  /* Initialize a[i] = i in parallel. */
  #pragma omp parallel for
  for (i=0; i<len; i++)
    a[i]= i;

  #pragma omp parallel shared(b, error)
  {
    #pragma omp for
    for(i = 0; i < len; i++)
      a[i] = b + a[i]*5;
  }

  error = a[9] + 1;
  printf ("error = %d\n", error);
  return 0;
}
test.c
#include <stdio.h>

#pragma omp requires unified_shared_memory

#define M (1024*1024)
#define BUFF_SIZE (1*M)
#define N (8*BUFF_SIZE)

/* Shared data array; unified_shared_memory lets host and device access it
   without explicit mapping clauses. */
int b[N];

/*
 * Test(): fill b[] with -1 outside [start, size) and b[i] = i inside, run a
 * target parallel loop that increments the inside range, then verify only
 * the inside range changed (reporting at most 25 mismatches).
 * Returns 1 if any mismatch was found, 0 otherwise.
 */
int Test(int start, int size)
{
  int i;
  int errors = 0;

  for(i=0; i<start; i++)
    b[i] = -1;
  for(i=start; i<size; i++)
    b[i] = i;
  for(i=size; i<N; i++)
    b[i] = -1;

  #pragma omp target parallel for
  for(int i=start; i<size; i++)
    b[i] += 1;

  for(i=0; i<start && errors<25; i++)
  {
    if (b[i] != -1)
      printf("%4i: before, got %d, expected %d, %d error\n", i, b[i], -1,
        ++errors);
  }
  for(i=start; i<size && errors<25; i++)
  {
    if (b[i] != i+1)
      printf("%4i: in, got %d, expected %d, %d error\n", i, b[i], i+1,
        ++errors);
  }
  for(i=size; i<N && errors<25; i++)
  {
    if (b[i] != -1)
      printf("%4i: after, got %d, expected %d, %d error\n", i, b[i], -1,
        ++errors);
  }

  /* FIX: the two summary messages were swapped — "success" was printed when
     mismatches were found and the error-count line when there were none. */
  if (errors>0)
  {
    printf("%d errors with start %d, size %d (%d mod buff size)\n\n",
      errors, start, size, size % BUFF_SIZE);
  }
  else
  {
    printf("success with start %d, size %d (%d mod buff size)\n\n",
      start, size, size % BUFF_SIZE);
  }
  return (errors>0);
}

/*
 * main(): exercise Test() over all combinations of interesting start/end
 * offsets around the buffer-size boundary; abort once the error budget is
 * exhausted.  Returns the total number of failing combinations.
 */
int main()
{
  int offset[] = {0, 1, 2, BUFF_SIZE/2, BUFF_SIZE-2, BUFF_SIZE-1};
  int onum = 6;   /* number of entries in offset[] */
  int errors = 0;

  for(int s1=0; s1<onum; s1++)
  {
    for(int s2=0; s2<onum; s2++)
    {
      errors += Test(offset[s1], N-offset[s2]);
      if (errors>20)
      {
        printf("abort due to errors\n");
        return errors;
      }
    }
  }
  printf("finished with %d errors\n", errors);
  return errors;
}
no_option.c
// RUN: %clang_cc1 -verify -o - %s // expected-no-diagnostics int a; #pragma omp threadprivate(a, b) #pragma omp parallel
DRB012-minusminus-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * DataRaceBench DRB012: the -- operation is not protected, causing race
 * condition.  Data race pair: numNodes2@75 vs. numNodes2@75
 *
 * NOTE(review): as transcribed here the counting loop carries a
 * reduction(+:numNodes2) clause, which would make the update race-free;
 * confirm against the upstream DRB012-minusminus-var-yes.c, which performs
 * an unprotected decrement.  The pragma is left untouched so the benchmark's
 * semantics are not altered by this edit.
 */
#include <stdlib.h>
#include <stdio.h>   /* FIX: printf was called without a prototype (implicit
                        declaration is an error in modern C) */

int main(int argc, char* argv[])
{
  int i;
  int len=100;

  if (argc>1)
    len = atoi(argv[1]);

  int numNodes=len, numNodes2=0;
  int x[len];   /* C99 VLA sized from the (optional) CLI argument */

  /* Mark even positions positive (+5) and odd positions negative (-5). */
  #pragma omp parallel for private(i)
  for (i=0; i< len; i++)
  {
    if (i%2==0)
      x[i]=5;
    else
      x[i]= -5;
  }

  /* Count non-positive entries by subtracting one per hit. */
  #pragma omp parallel for private(i) reduction(+:numNodes2)
  for (i=numNodes-1 ; i>-1 ; --i)
  {
    if (x[i]<=0)
    {
      numNodes2+=-1;
    }
  }

  printf("%d\n", numNodes2);
  return 0;
}
matsub.c
#include "matrix.h"

/** \brief Subtracts a matrix from another matrix
 *
 * B may have the same shape as A (element-wise difference), a single
 * column (broadcast across the columns of A), or a single row
 * (broadcast across the rows of A); any other shape is reported as a
 * size mismatch.
 *
 * \param[in] A First input matrix
 * \param[in] B Second input matrix
 * \param[in] result Matrix to store the result (allocated when NULL)
 * \return \f$ \mathbf{A}- \mathbf{B} \f$
 *
 */
MATRIX mat_sub(MATRIX A, MATRIX B, MATRIX result)
{
    int r, c;
    int aCols, aRows, bCols, bRows;

    aCols = MatCol(A);
    aRows = MatRow(A);
    bCols = MatCol(B);
    bRows = MatRow(B);

    if(result==NULL)
        if((result = mat_creat(MatRow(A), MatCol(A), UNDEFINED))==NULL)
            return mat_error(MAT_MALLOC);

    if(bCols==aCols && bRows==aRows)
    {
        /* shapes agree: plain element-wise subtraction */
        #pragma omp parallel for private(c)
        for(r=0; r<aRows; ++r)
        {
            for(c=0; c<aCols; ++c)
            {
                result[r][c] = A[r][c]-B[r][c];
            }
        }
    }
    else if(bCols==1 && bRows!=1)
    {
        /* B is a column vector: subtract it from every column of A */
        #pragma omp parallel for private(c)
        for(r=0; r<aRows; ++r)
        {
            for(c=0; c<aCols; ++c)
            {
                result[r][c] = A[r][c]-B[r][0];
            }
        }
    }
    else if(bRows==1 && bCols!=1)
    {
        /* B is a row vector: subtract it from every row of A */
        #pragma omp parallel for private(c)
        for(r=0; r<aRows; ++r)
        {
            for(c=0; c<aCols; ++c)
            {
                result[r][c] = A[r][c]-B[0][c];
            }
        }
    }
    else
        gen_error(GEN_SIZEMISMATCH);

    return result;
}

/** \brief Subtracts a scalar from a matrix
 *
 * \param[in] A Input matrix
 * \param[in] s Input scalar
 * \param[in] result Matrix to store the result (allocated when NULL)
 * \return \f$ \mathbf{A}-s\mathbf{11}^T \f$
 *
 */
MATRIX mat_subs(MATRIX A, mtype s, MATRIX result)
{
    int r, c;
    int nCols, nRows;

    nCols = MatCol(A);
    nRows = MatRow(A);

    if(result==NULL)
        if((result = mat_creat(MatRow(A), MatCol(A), UNDEFINED))==NULL)
            return mat_error(MAT_MALLOC);

    #pragma omp parallel for private(c)
    for(r=0; r<nRows; ++r)
    {
        for(c=0; c<nCols; ++c)
        {
            result[r][c] = A[r][c]-s;
        }
    }

    return result;
}

/** \brief Subtracts a matrix from a scalar
 *
 * \param[in] A Input matrix
 * \param[in] s Input scalar
 * \param[in] result Matrix to store the result (allocated when NULL)
 * \return \f$ s\mathbf{11}^T -\mathbf{A} \f$
 *
 */
MATRIX mat_subs_neg(MATRIX A, mtype s, MATRIX result)
{
    int r, c;
    int nCols, nRows;

    nCols = MatCol(A);
    nRows = MatRow(A);

    if(result==NULL)
        if((result = mat_creat(MatRow(A), MatCol(A), UNDEFINED))==NULL)
            return mat_error(MAT_MALLOC);

    #pragma omp parallel for private(c)
    for(r=0; r<nRows; ++r)
    {
        for(c=0; c<nCols; ++c)
        {
            result[r][c] = s-A[r][c];
        }
    }

    return result;
}

/** \brief Subtracts an integer vector from integer vector
 *
 * \param[in] A Input vector
 * \param[in] B Input vector
 * \param[in] result Vector to store the result (allocated when NULL)
 * \return \f$ \mathbf{A}-\mathbf{B} \f$
 *
 */
INT_VECTOR int_vec_sub(INT_VECTOR A, INT_VECTOR B, INT_VECTOR result)
{
    int k, len;

    len = Int_VecLen(A);

    if(result==NULL)
        if((result = int_vec_creat(len, UNDEFINED))==NULL)
            int_vec_error(INT_VEC_MALLOC);

    /* Mismatched lengths are reported here; NOTE(review): if gen_error
     * returns, control falls through to the loop, as in the other
     * routines of this library. */
    if(len!=Int_VecLen(B))
        gen_error(GEN_SIZEMISMATCH);

    #pragma omp parallel for
    for(k=0; k<len; ++k)
        result[k] = A[k]-B[k];

    return result;
}

/** \brief Subtracts an integer from integer vector
 *
 * \param[in] A Input vector
 * \param[in] s Input scalar
 * \param[in] result Vector to store the result (allocated when NULL)
 * \return \f$ \mathbf{A}-s\mathbf{1} \f$
 *
 */
INT_VECTOR int_vec_subs(INT_VECTOR A, int s, INT_VECTOR result)
{
    int k, len;

    len = Int_VecLen(A);

    if(result==NULL)
        if((result = int_vec_creat(len, UNDEFINED))==NULL)
            int_vec_error(INT_VEC_MALLOC);

    #pragma omp parallel for
    for(k=0; k<len; ++k)
        result[k] = A[k]-s;

    return result;
}

/** \brief Subtracts an integer vector from an integer
 *
 * \param[in] A Input vector
 * \param[in] s Input scalar
 * \param[in] result Vector to store the result (allocated when NULL)
 * \return \f$ s\mathbf{1}-\mathbf{A} \f$
 *
 */
INT_VECTOR int_vec_subs_neg(INT_VECTOR A, int s, INT_VECTOR result)
{
    int k, len;

    len = Int_VecLen(A);

    if(result==NULL)
        if((result = int_vec_creat(len, UNDEFINED))==NULL)
            int_vec_error(INT_VEC_MALLOC);

    #pragma omp parallel for
    for(k=0; k<len; ++k)
        result[k] = s-A[k];

    return result;
}
core_zunmqr.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> c d s * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "plasma_internal.h" #include "core_lapack.h" #include <omp.h> /***************************************************************************//** * * @ingroup core_unmqr * * Overwrites the general m-by-n tile C with * * side = PlasmaLeft side = PlasmaRight * trans = PlasmaNoTrans Q * C C * Q * trans = Plasma_ConjTrans Q^H * C C * Q^H * * where Q is a unitary matrix defined as the product of k * elementary reflectors * \f[ * Q = H(1) H(2) ... H(k) * \f] * as returned by plasma_core_zgeqrt. Q is of order m if side = PlasmaLeft * and of order n if side = PlasmaRight. * ******************************************************************************* * * @param[in] side * - PlasmaLeft : apply Q or Q^H from the Left; * - PlasmaRight : apply Q or Q^H from the Right. * * @param[in] trans * - PlasmaNoTrans : No transpose, apply Q; * - Plasma_ConjTrans : Transpose, apply Q^H. * * @param[in] m * The number of rows of the tile C. m >= 0. * * @param[in] n * The number of columns of the tile C. n >= 0. * * @param[in] k * The number of elementary reflectors whose product defines * the matrix Q. * If side = PlasmaLeft, m >= k >= 0; * if side = PlasmaRight, n >= k >= 0. * * @param[in] ib * The inner-blocking size. ib >= 0. * * @param[in] A * Dimension: (lda,k) * The i-th column must contain the vector which defines the * elementary reflector H(i), for i = 1,2,...,k, * as returned by plasma_core_zgeqrt in the first k columns of its * array argument A. * * @param[in] lda * The leading dimension of the array A. * If side = PlasmaLeft, lda >= max(1,m); * if side = PlasmaRight, lda >= max(1,n). * * @param[in] T * The ib-by-k triangular factor T of the block reflector. * T is upper triangular by block (economic storage); * The rest of the array is not referenced. 
 *
 * @param[in] ldt
 *          The leading dimension of the array T. ldt >= ib.
 *
 * @param[in,out] C
 *          On entry, the m-by-n tile C.
 *          On exit, C is overwritten by Q*C or Q^T*C or C*Q^T or C*Q.
 *
 * @param[in] ldc
 *          The leading dimension of the array C. ldc >= max(1,m).
 *
 * @param work
 *          Auxiliary workspace array of length
 *          ldwork-by-n  if side == PlasmaLeft
 *          ldwork-by-ib if side == PlasmaRight
 *
 * @param[in] ldwork
 *          The leading dimension of the array work.
 *          ldwork >= max(1,ib) if side == PlasmaLeft
 *          ldwork >= max(1,m)  if side == PlasmaRight
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
__attribute__((weak))
int plasma_core_zunmqr(plasma_enum_t side, plasma_enum_t trans,
                       int m, int n, int k, int ib,
                       const plasma_complex64_t *A,    int lda,
                       const plasma_complex64_t *T,    int ldt,
                             plasma_complex64_t *C,    int ldc,
                             plasma_complex64_t *work, int ldwork)
{
    // Check input arguments.
    // Arguments are validated in positional order so that the negative
    // return code identifies the offending parameter.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_coreblas_error("illegal value of side");
        return -1;
    }

    int nq; // order of Q
    int nw; // dimension of work
    if (side == PlasmaLeft) {
        nq = m;
        nw = n;
    }
    else {
        nq = n;
        nw = m;
    }

    if (trans != PlasmaNoTrans && trans != Plasma_ConjTrans) {
        plasma_coreblas_error("illegal value of trans");
        return -2;
    }
    if (m < 0) {
        plasma_coreblas_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_coreblas_error("illegal value of n");
        return -4;
    }
    if (k < 0 || k > nq) {
        plasma_coreblas_error("illegal value of k");
        return -5;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -6;
    }
    if (A == NULL) {
        plasma_coreblas_error("NULL A");
        return -7;
    }
    if (lda < imax(1, nq) && nq > 0) {
        plasma_coreblas_error("illegal value of lda");
        return -8;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -9;
    }
    if (ldt < imax(1, ib)) {
        plasma_coreblas_error("illegal value of ldt");
        return -10;
    }
    if (C == NULL) {
        plasma_coreblas_error("NULL C");
        return -11;
    }
    if (ldc < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of ldc");
        return -12;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -13;
    }
    // NOTE(review): this run-time check requires ldwork >= nw (n or m),
    // while the header above documents ldwork >= ib for the left case —
    // confirm which contract is intended.
    if (ldwork < imax(1, nw) && nw > 0) {
        plasma_coreblas_error("illegal value of ldwork");
        return -14;
    }

    // quick return
    if (m == 0 || n == 0 || k == 0)
        return PlasmaSuccess;

    // Choose the panel sweep direction: the reflectors H(1)..H(k) must be
    // applied in the order dictated by the side/trans combination.
    int i1, i3;
    if ((side == PlasmaLeft  && trans != PlasmaNoTrans) ||
        (side == PlasmaRight && trans == PlasmaNoTrans)) {
        // forward sweep, in panels of width ib
        i1 = 0;
        i3 = ib;
    }
    else {
        // backward sweep, starting from the last full panel
        i1 = ((k-1)/ib)*ib;
        i3 = -ib;
    }

    for (int i = i1; i > -1 && i < k; i += i3) {
        int kb = imin(ib, k-i); // width of the current reflector panel
        int ic = 0;
        int jc = 0;
        int ni = n;
        int mi = m;

        if (side == PlasmaLeft) {
            // H or H^H is applied to C(i:m,1:n).
            mi = m - i;
            ic = i;
        }
        else {
            // H or H^H is applied to C(1:m,i:n).
            ni = n - i;
            jc = i;
        }

        // Apply H or H^H.
        LAPACKE_zlarfb_work(LAPACK_COL_MAJOR,
                            lapack_const(side), lapack_const(trans),
                            lapack_const(PlasmaForward),
                            lapack_const(PlasmaColumnwise),
                            mi, ni, kb,
                            &A[lda*i+i], lda,
                            &T[ldt*i],   ldt,
                            &C[ldc*jc+ic], ldc,
                            work, ldwork);
    }

    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP-task wrapper around plasma_core_zunmqr: declares the tile data
// dependencies (A and T read, C read-write) so the runtime can schedule the
// task, picks the per-thread workspace, and records failures on the sequence.
void plasma_core_omp_zunmqr(plasma_enum_t side, plasma_enum_t trans,
                            int m, int n, int k, int ib,
                            const plasma_complex64_t *A, int lda,
                            const plasma_complex64_t *T, int ldt,
                                  plasma_complex64_t *C, int ldc,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence,
                            plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*k]) \
                     depend(in:T[0:ib*k]) \
                     depend(inout:C[0:ldc*n])
    {
        // Skip the work entirely if an earlier task in the sequence failed.
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            int tid = omp_get_thread_num();
            plasma_complex64_t *W = (plasma_complex64_t*)work.spaces[tid];
            int ldwork = side == PlasmaLeft ? n : m; // TODO: double check

            // Call the kernel.
            int info = plasma_core_zunmqr(side, trans,
                                          m, n, k, ib,
                                          A, lda,
                                          T, ldt,
                                          C, ldc,
                                          W, ldwork);

            if (info != PlasmaSuccess) {
                plasma_error("core_zunmqr() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
indexreduce.h
/* * * Created on: Dec 28, 2015 * Author: agibsonccc */ #ifndef INDEXREDUCE_H_ #define INDEXREDUCE_H_ #include "../helpers/shape.h" #ifdef _OPENMP #include <omp.h> #endif #include <dll.h> #include <ops/ops.h> #include <op_boilerplate.h> #ifdef __CUDACC__ #include <helper_cuda.h> #include <cuda.h> #include <cuda_runtime.h> #endif #ifndef _OPENMP #define omp_get_thread_num() 0 #define omp_get_max_threads() 1 #endif #include <helpers/TAD.h> #include "../pairwise_util.h" #include "legacy_ops.h" namespace functions { namespace indexreduce { #ifdef __CUDACC__ // This is the un-specialized struct. Note that we prevent instantiation of this // struct by putting an undefined symbol in the function body so it won't compile. template<typename T> struct SharedIndexValue { // Ensure that we won't compile any un-specialized types __device__ T * getPointer() { extern __device__ void error(void); error(); return 0; } }; // Following are the specializations for the following types. // int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double // One could also specialize it for user-defined types. template<> struct SharedIndexValue<float> { __device__ IndexValue<float> * getPointer() { extern __shared__ IndexValue<float> s_int2[]; return s_int2; } }; // Following are the specializations for the following types. // int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double // One could also specialize it for user-defined types. 
template<> struct SharedIndexValue<double> { __device__ IndexValue<double> * getPointer() { extern __shared__ IndexValue<double> s_int6[]; return s_int6; } }; #endif template<typename T> class IndexReduce { public: #ifdef __CUDACC__ static inline __device__ void transform( const int opNum, T *x, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, T *reductionBuffer, UnifiedSharedMemory *manager, int *tadShapeInfo, Nd4jIndex *tadOffset) { DISPATCH_BY_OPNUM(transform, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, manager, tadShapeInfo, tadOffset), INDEX_REDUCE_OPS); } /** * * @param sPartialsRef * @param tid * @param extraParams */ template<typename OpType> static inline __device__ void aggregatePartials(IndexValue<T> **sPartialsRef,int tid,int numElements,T *extraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
IndexValue<T> *sPartials = *sPartialsRef; int floorPow2 = blockDim.x; if (floorPow2 & (floorPow2 - 1)) { while ( floorPow2 & (floorPow2 - 1) ) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { IndexValue<T> prev = sPartials[tid - floorPow2]; IndexValue<T> curr = sPartials[tid]; sPartials[tid - floorPow2] = OpType::update(prev,curr,extraParams); } __syncthreads(); } for (int activeThreads = floorPow2 >> 1;activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numElements) { IndexValue<T> curr = sPartials[tid]; IndexValue<T> next = sPartials[tid + activeThreads]; sPartials[tid] = OpType::update(curr,next,extraParams); } __syncthreads(); } } /** * @param n n is the number of * elements to loop through * @param dx the data to operate on * @param xVectorInfo the meta data for the vector: * 0 is the offset * 1 is the increment/stride * 2 is the real length of the buffer (n and dx.length won't always be the same) * 3 is the element wise stride for the buffer * 4 is the number of elements it takes to get to the next row/column/tensor * @param gpuInformation * 0 is the block size * 1 is the grid size * 2 is the shared memory size * @param problemDefinition * 0 is the number of elements per vector * 1 is the number of vectors */ template<typename OpType> static inline __device__ void transform( T *dx, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, T *reductionBuffer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) { /** * Gpu information for the problem */ int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ volatile int resultScalar; //shared memory space for storing intermediate results IndexValue<T> *sPartials; sPartials = (IndexValue<T> *)manager->getSharedReductionBuffer(); //holder.getPointer(); // T startingVal = OpType::startingValue(dx); // IndexValue <T> val = {startingVal, threadIdx.x}; 
sPartials[threadIdx.x] = OpType::startingIndexValue(dx); //length for the tad __shared__ volatile Nd4jIndex xLength; __shared__ volatile Nd4jIndex resultLength; //only compute the tad indexes once IndexValue <T> reduction = OpType::startingIndexValue(dx); if (threadIdx.x == 0) { if (resultShapeInfo != nullptr) resultLength = shape::length(resultShapeInfo); else resultLength = 1; if (dimensionLength == 1) { if (dimension == nullptr || dimension[0] == MAX_DIMENSION) resultScalar = 1; else resultScalar = 0; } else resultScalar = 0; if (resultLength == 1) resultScalar = 1; // xElementWiseStride = shape::elementWiseStride(xShapeInfo); xLength = shape::length(xShapeInfo); } __syncthreads(); if (!resultScalar) { __shared__ Nd4jIndex tadLength; __shared__ int tadEWS; __shared__ int tadRank; __shared__ int numTads; __shared__ int *tadShape; __shared__ int *tadStride; __shared__ char tadOrder; if (threadIdx.x == 0) { tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); tadEWS = shape::elementWiseStride(tadOnlyShapeInfo); tadRank = shape::rank(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; tadShape = shape::shapeOf(tadOnlyShapeInfo); tadStride = shape::stride(tadOnlyShapeInfo); tadOrder = shape::order(tadOnlyShapeInfo); } __syncthreads(); if (dimensionLength > 1 || tadEWS < 1) { int xCoord[MAX_RANK]; for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jIndex tadOffsetForBlock = tadOffsets[r]; sPartials[threadIdx.x] = OpType::startingIndexValue(dx); for(int i = threadIdx.x;i < tadLength; i += blockDim.x) { shape::ind2subC(tadRank,tadShape, i, xCoord); Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank); IndexValue<T> comp {dx[xOffset], i}; sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength),extraParams); __syncthreads(); if (threadIdx.x == 0) 
{ result[r] = (T) sPartials[threadIdx.x].index; } } } else { for(int i = blockIdx.x; i < numTads; i+= gridDim.x) { Nd4jIndex tadOffsetForBlock = tadOffsets[i]; sPartials[threadIdx.x] = OpType::startingIndexValue(dx); for (int x = threadIdx.x; x < tadLength; x+= blockDim.x) { IndexValue<T> comp {dx[tadOffsetForBlock + x * tadEWS], x}; sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength),extraParams); __syncthreads(); if (threadIdx.x == 0) { result[i] = (T) sPartials[threadIdx.x].index; //postProcess(sPartials[0],tadLength ,extraParams); } } } } //reduce to 1 result else if (resultScalar) { Nd4jIndex n = shape::length(xShapeInfo); int xElementWiseStride = shape::elementWiseStride(xShapeInfo); if(xElementWiseStride >= 1) { for(Nd4jIndex i = tid;i < n; i += (blockDim.x * gridDim.x)) { IndexValue <T> indexVal = {dx[i * xElementWiseStride], i}; reduction = OpType::update(reduction, indexVal, extraParams); } } else { int rank = shape::rank(xShapeInfo); int ind2sub[MAX_RANK]; for(Nd4jIndex i = tid;i < n; i += blockDim.x * gridDim.x) { shape::ind2subC(rank,shape::shapeOf(xShapeInfo),i,ind2sub); Nd4jIndex offset = shape::getOffset(0,shape::shapeOf(xShapeInfo),shape::stride(xShapeInfo),ind2sub,rank); IndexValue <T> indexVal = {dx[offset], i}; reduction = OpType::update(reduction, indexVal, extraParams); } } sPartials[threadIdx.x] = reduction; __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, (int) n),extraParams); __syncthreads(); if (gridDim.x > 1) { __shared__ bool amLast; unsigned int *tc = (unsigned int *) reductionBuffer; int rank = shape::rank(xShapeInfo); tid = threadIdx.x; if (threadIdx.x == 0) { IndexValue<T> *pBuffer = (IndexValue<T> *) reductionBuffer; pBuffer[blockIdx.x] = {sPartials[0].value, sPartials[0].index}; } __threadfence(); __syncthreads(); if (tid==0) 
{ unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x-1); } __syncthreads(); if (amLast) { tc[16384] = 0; IndexValue<T> *pBuffer = (IndexValue<T> *) reductionBuffer; sPartials[threadIdx.x] = OpType::startingIndexValue(dx); for (Nd4jIndex i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], pBuffer[i], extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x),extraParams); __syncthreads(); if (tid == 0) { result[0] = (T) sPartials[0].index; } } } else { if (tid == 0) { unsigned int *tc = (unsigned *) reductionBuffer; tc[16384] = 0; result[0] = (T) sPartials[0].index; } } } } #endif static T execScalar( const int opNum, T *x, int *xShapeInfo, T *extraParams) { RETURNING_DISPATCH_BY_OPNUM(execScalar, PARAMS(x, xShapeInfo, extraParams), INDEX_REDUCE_OPS); } static void exec(const int opNum, T *x, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfoBuffer, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffset) { DISPATCH_BY_OPNUM(exec, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfoBuffer, dimension, dimensionLength, tadShapeInfo, tadOffset), INDEX_REDUCE_OPS); } template<typename OpType> #ifdef __CUDACC__ __host__ #elif defined(__GNUC__) #endif static inline T execScalar(T *x, int *xShapeInfo, T *extraParams) { //T startingVal = OpType::startingValue(x); IndexValue<T> startingIndex = OpType::startingIndexValue(x); Nd4jIndex length = shape::length(xShapeInfo); int xElementWiseStride = shape::elementWiseStride(xShapeInfo); if(xElementWiseStride < 1) { int *xShape = shape::shapeOf(xShapeInfo); int *xStride = shape::stride(xShapeInfo); int tadRank = shape::rank(xShapeInfo); int xCoord[MAX_RANK]; for (Nd4jIndex i = 0; i < length; i++) { shape::ind2subC(tadRank,xShape, i, xCoord); Nd4jIndex xOffset = shape::getOffset(0, xShape, xStride, xCoord, tadRank); 
IndexValue<T> curr; curr.value = x[xOffset]; curr.index = i; startingIndex = OpType::update(startingIndex, curr, extraParams); } return startingIndex.index; } else { if (xElementWiseStride == 1) { if(length < ELEMENT_THRESHOLD) { // FIXME: proper reduction to be used here //#pragma omp simd for (Nd4jIndex i = 0; i < length; i++) { IndexValue<T> curr; curr.value = x[i]; curr.index = i; startingIndex = OpType::update(startingIndex, curr, extraParams); } return startingIndex.index; } else { BlockInformation info(length, ELEMENT_THRESHOLD); #pragma omp parallel num_threads(info.threads) if (info.threads > 1) default(shared) { IndexValue<T> local = OpType::startingIndexValue(x); for (Nd4jIndex i = omp_get_thread_num(); i < info.chunks; i+= info.threads) { Nd4jIndex newOffset = (i * info.items); T *chunk = x + newOffset; Nd4jIndex itemsToLoop = info.items; if(newOffset >= length) { break; } //handle modulo case if(newOffset + info.items >= length) { itemsToLoop = length - newOffset; } for (Nd4jIndex j = 0; j < itemsToLoop; j++) { IndexValue<T> curr; curr.value = chunk[j]; curr.index = newOffset + j; local = OpType::update(local, curr, extraParams); } #pragma omp critical { startingIndex = OpType::update(startingIndex, local, extraParams); } } } return startingIndex.index; } } else { for (Nd4jIndex i = 0; i < length; i++) { IndexValue<T> curr; curr.value = x[i * xElementWiseStride]; curr.index = i; startingIndex = OpType::update(startingIndex, curr, extraParams); } } } return startingIndex.index; } template<typename OpType> #ifdef __CUDACC__ __host__ #elif defined(__GNUC__) #endif static inline void exec(T *x, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfoBuffer, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffset) { if(shape::isScalar(resultShapeInfoBuffer)) { result[0] = execScalar<OpType>(x,xShapeInfo,extraParams); return; } const Nd4jIndex resultLength = shape::length(resultShapeInfoBuffer); IndexValue<T> *startingIndex = new 
IndexValue<T>[resultLength]; #pragma omp parallel for schedule(guided) if (resultLength > TAD_THRESHOLD) default(shared) for (Nd4jIndex i = 0; i < resultLength; i++) { IndexValue<T> val = OpType::startingIndexValue(x); startingIndex[i] = val; } int *tadOnlyShapeInfo = tadShapeInfo; Nd4jIndex *tadOffsets = tadOffset; shape::TAD *tad = nullptr; if (tadOnlyShapeInfo == nullptr || tadOffsets == nullptr) { tad = new shape::TAD(xShapeInfo, dimension, dimensionLength); tad->createTadOnlyShapeInfo(); tad->createOffsets(); if (tad->dimensionLength < 1) { delete tad; delete[] startingIndex; return; } tadOnlyShapeInfo = tad->tadOnlyShapeInfo; tadOffsets = tad->tadOffsets; } int tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); int numTads = shape::length(xShapeInfo) / tadLength; if(!(shape::elementWiseStride(tadOnlyShapeInfo) > 0 && (numTads == 1 || shape::isVector(tadOnlyShapeInfo) || shape::isScalar(tadOnlyShapeInfo)))) { /** * The element wise stride belong longs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along long arr * we can use arr.stride(1) as a representation * along long which to iterate. 
*/ int *tadShapeShapeInfo = tadOnlyShapeInfo; int *xShape = shape::shapeOf(tadShapeShapeInfo); int *xStride = shape::stride(tadShapeShapeInfo); int rank = shape::rank(tadShapeShapeInfo); #pragma omp parallel for schedule(guided) if (resultLength > TAD_THRESHOLD) default(shared) for(Nd4jIndex i = 0; i < resultLength; i++) { Nd4jIndex offset = tadOffsets[i]; IndexValue<T> indexValue = OpType::startingIndexValue(&x[offset]); int xCoord[MAX_RANK]; for(int j = 0; j < tadLength; j++) { shape::ind2subC(rank,xShape, j, xCoord); Nd4jIndex xOffset = shape::getOffset(offset, xShape, xStride, xCoord, rank); IndexValue<T> comp; comp.index = j; comp.value = x[xOffset]; indexValue = OpType::update(indexValue,comp,extraParams); } result[i] = indexValue.index; } } else { int tadElementWiseStride = shape::elementWiseStride(tadOnlyShapeInfo); //const int tadLength = shape::length(tadOnlyShapeInfo); //#pragma omp parallel for schedule(guided) if (resultLength > TAD_THRESHOLD) default(shared) for(Nd4jIndex i = 0; i < resultLength; i++) { Nd4jIndex baseOffset = tadOffsets[i]; IndexValue<T> indexValue = OpType::startingIndexValue(&x[baseOffset]); // FIXME: proper reduction required here for(int j = 0; j < tadLength; j++) { IndexValue<T> comp; comp.index = j; comp.value = x[baseOffset + tadElementWiseStride * j]; indexValue = OpType::update(indexValue,comp,extraParams); } result[i] = indexValue.index; } } delete[] startingIndex; } }; } } #ifdef __CUDACC__ /** * The external driver * api interface to the cuda kernel * @param op the operation number to execute * @param n the length of the input * @param dx the input data * @param xShapeInfo the input data shape information * @param extraParams the extra parameters for the reduce * @param result the result buffer * @param resultShapeInfo the shape information for the result * @param gpuInformation the shape information for the data * @param dimension the dimension to do reduce along long * @param dimensionLength the length of the dimension 
buffer * @param postProcessOrNot whether to pre process or not */ template <typename T> __device__ void indexReduceGeneric( const int op, T *dx, int *xShapeInfo, int xRank, T *extraParams, T *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, T *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::indexreduce::IndexReduce<T>), sizeof(shape::TAD), xRank); } __syncthreads(); functions::indexreduce::IndexReduce<T>::transform( op, dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); } /** * The external driver * api interface to the cuda kernel * @param op the operation number to execute * @param n the length of the input * @param dx the input data * @param xShapeInfo the input data shape information * @param extraParams the extra parameters for the reduce * @param result the result buffer * @param resultShapeInfo the shape information for the result * @param gpuInformation the shape information for the data * @param dimension the dimension to do reduce along long * @param dimensionLength the length of the dimension buffer * @param postProcessOrNot whether to pre process or not */ __global__ void indexReduceDouble( int op, double *dx, int *xShapeInfo, int xRank, double *extraParams, double *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) { indexReduceGeneric<double>( op, dx, xShapeInfo, xRank, extraParams, result, resultShapeInfo, zRank, dimension, dimensionLength, postProcessOrNot, allocationBuffer, 
reductionBuffer, tadOnlyShapeInfo, tadOffsets); } /** * The external driver * api interface to the cuda kernel * @param op the operation number to execute * @param n the length of the input * @param dx the input data * @param xShapeInfo the input data shape information * @param extraParams the extra parameters for the reduce * @param result the result buffer * @param resultShapeInfo the shape information for the result * @param gpuInformation the shape information for the data * @param dimension the dimension to do reduce along long * @param dimensionLength the length of the dimension buffer * @param postProcessOrNot whether to pre process or not */ __global__ void indexReduceFloat( int op, float *dx, int *xShapeInfo, int xRank, float *extraParams, float *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) { indexReduceGeneric<float>( op, dx, xShapeInfo, xRank, extraParams, result, resultShapeInfo, zRank, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets); } __global__ void indexReduceHalf( int op, float16 *dx, int *xShapeInfo, int xRank, float16 *extraParams, float16 *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) { indexReduceGeneric<float16>( op, dx, xShapeInfo, xRank, extraParams, result, resultShapeInfo, zRank, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets); } #endif #endif /* INDEXREDUCE_H_ */
GnatNearestNeighbors.h
// // Copyright (c) 2009, Markus Rickert // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // #ifndef RL_MATH_GNATNEARESTNEIGHBORS_H #define RL_MATH_GNATNEARESTNEIGHBORS_H #include <algorithm> #include <iterator> #include <limits> #include <random> #include <type_traits> #include <utility> #include <vector> #include <boost/optional.hpp> namespace rl { namespace math { /** * Geometric Near-Neighbor Access Tree (GNAT). * * Sergey Brin. Near neighbor search in large metric spaces. In Proceedings of * the International Conference on Very Large Data Bases, pages 574-584, * Zurich, Switzerland, September, 1985. 
* * http://www.vldb.org/conf/1995/P574.PDF */ template<typename MetricT> class GnatNearestNeighbors { private: struct Node; public: typedef const typename MetricT::Value& const_reference; typedef ::std::ptrdiff_t difference_type; typedef typename MetricT::Value& reference; typedef ::std::size_t size_type; typedef typename MetricT::Value value_type; typedef typename MetricT::Distance Distance; typedef MetricT Metric; typedef typename MetricT::Value Value; typedef ::std::pair<Distance, Value> Neighbor; explicit GnatNearestNeighbors(const Metric& metric) : checks(), generator(::std::random_device()()), metric(metric), nodeDataMax(50), nodeDegree(8), nodeDegreeMax(12), nodeDegreeMin(4), root(0, 0, nodeDegree, nodeDataMax, true), values(0) { } explicit GnatNearestNeighbors(Metric&& metric = Metric()) : checks(), generator(::std::random_device()()), metric(::std::move(metric)), nodeDataMax(50), nodeDegree(8), nodeDegreeMax(12), nodeDegreeMin(4), root(0, 0, nodeDegree, nodeDataMax, true), values(0) { } template<typename InputIterator> GnatNearestNeighbors(InputIterator first, InputIterator last, const Metric& metric) : checks(), generator(::std::random_device()()), metric(metric), nodeDataMax(50), nodeDegree(8), nodeDegreeMax(12), nodeDegreeMin(4), root(first, last, 0, 0, nodeDegree, nodeDataMax, true), values(::std::distance(first, last)) { if (this->root.data.size() > this->nodeDataMax && this->root.data.size() > this->root.degree) { this->split(this->root); } } template<typename InputIterator> GnatNearestNeighbors(InputIterator first, InputIterator last, Metric&& metric = Metric()) : checks(), generator(::std::random_device()()), metric(::std::move(metric)), nodeDataMax(50), nodeDegree(8), nodeDegreeMax(12), nodeDegreeMin(4), root(first, last, nullptr, 0, 0, nodeDegree, nodeDataMax, true), values(::std::distance(first, last)) { if (this->root.data.size() > this->nodeDataMax && this->root.data.size() > this->root.degree) { this->split(this->root); } } 
~GnatNearestNeighbors() { } void clear() { this->root.children.clear(); this->root.children.reserve(this->nodeDegree); this->root.data.clear(); this->root.data.reserve(this->nodeDataMax + 1); this->values = 0; } ::std::vector<Value> data() const { ::std::vector<Value> data; data.reserve(this->values); this->data(this->root, data); return data; } bool empty() const { return this->root.removed && this->root.data.empty() && this->root.children.empty(); } ::boost::optional<::std::size_t> getChecks() const { return this->checks; } ::std::size_t getNodeDataMax() const { return this->nodeDataMax; } ::std::size_t getNodeDegree() const { return this->nodeDegree; } ::std::size_t getNodeDegreeMax() const { return this->nodeDegreeMax; } ::std::size_t getNodeDegreeMin() const { return this->nodeDegreeMin; } template<typename InputIterator> void insert(InputIterator first, InputIterator last) { if (this->empty()) { this->root.data.insert(this->root.data.end(), first, last); if (this->root.data.size() > this->nodeDataMax && this->root.data.size() > this->root.degree) { this->split(this->root); } this->values += ::std::distance(first, last); } else { for (InputIterator i = first; i != last; ++i) { this->push(*i); } } } ::std::vector<Neighbor> nearest(const Value& query, const ::std::size_t& k, const bool& sorted = true) const { return this->search(query, &k, nullptr, sorted); } void push(const Value& value) { this->push(this->root, value); ++this->values; } ::std::vector<Neighbor> radius(const Value& query, const Distance& radius, const bool& sorted = true) const { return this->search(query, nullptr, &radius, sorted); } void seed(const ::std::mt19937::result_type& value) { this->generator.seed(value); } void setChecks(const ::boost::optional<::std::size_t>& checks) { this->checks = checks; } void setNodeDataMax(const ::std::size_t& nodeDataMax) { this->nodeDataMax = nodeDataMax; } void setNodeDegree(const ::std::size_t& nodeDegree) { this->nodeDegree = nodeDegree; } void 
setNodeDegreeMax(const ::std::size_t& nodeDegreeMax) { this->nodeDegreeMax = nodeDegreeMax; } void setNodeDegreeMin(const ::std::size_t& nodeDegreeMin) { this->nodeDegreeMin = nodeDegreeMin; } ::std::size_t size() const { return this->values; } void swap(GnatNearestNeighbors& other) { using ::std::swap; swap(this->generator, other.generator); swap(this->metric, other.metric); swap(this->nodeDegree, other.nodeDegree); swap(this->nodeDegreeMax, other.nodeDegreeMax); swap(this->nodeDegreeMin, other.nodeDegreeMin); swap(this->nodeDataMax, other.nodeDataMax); swap(this->root, other.root); swap(this->values, other.values); } friend void swap(GnatNearestNeighbors& lhs, GnatNearestNeighbors& rhs) { lhs.swap(rhs); } protected: private: typedef ::std::pair<Distance, const Node*> Branch; struct BranchCompare { bool operator()(const Branch& lhs, const Branch& rhs) const { return lhs.first - lhs.second->max[lhs.second->index] > rhs.first - rhs.second->max[rhs.second->index]; } }; struct NeighborCompare { bool operator()(const Neighbor& lhs, const Neighbor& rhs) const { return lhs.first < rhs.first; } }; struct Node { Node(const ::std::size_t& index, const ::std::size_t& siblings, const ::std::size_t& degree, const ::std::size_t& capacity, const bool& removed = false) : children(), data(), degree(degree), index(index), max(siblings + 1, -::std::numeric_limits<Distance>::infinity()), min(siblings + 1, ::std::numeric_limits<Distance>::infinity()), pivot(), removed(removed) { this->children.reserve(degree); this->data.reserve(capacity + 1); } template<typename InputIterator> Node(InputIterator first, InputIterator last, const ::std::size_t& index, const ::std::size_t& siblings, const ::std::size_t& degree, const ::std::size_t& capacity, const bool& removed = false) : children(), data(first, last), degree(degree), index(index), max(siblings + 1, -::std::numeric_limits<Distance>::infinity()), min(siblings + 1, ::std::numeric_limits<Distance>::infinity()), pivot(), removed(removed) { 
this->children.reserve(degree); this->data.reserve(capacity + 1); } ~Node() { } void swap(Node& other) { using ::std::swap; swap(this->children, other.children); swap(this->data, other.data); swap(this->degree, other.degree); swap(this->index, other.index); swap(this->max, other.max); swap(this->min, other.min); swap(this->pivot, other.pivot); swap(this->removed, other.removed); } friend void swap(Node& lhs, Node& rhs) { lhs.swap(rhs); } ::std::vector<Node> children; ::std::vector<Value> data; ::std::size_t degree; ::std::size_t index; ::std::vector<Distance> max; ::std::vector<Distance> min; Value pivot; bool removed; }; void choose(const Node& node, ::std::vector<::std::size_t>& centers, ::std::vector<::std::vector<Distance>>& distances) { ::std::size_t k = node.degree; ::std::vector<Distance> min(node.data.size(), ::std::numeric_limits<Distance>::infinity()); ::std::uniform_int_distribution<::std::size_t> distribution(0, node.data.size() - 1); centers[0] = distribution(this->generator); for (::std::size_t i = 0; i < k - 1; ++i) { Distance max = Distance(); for (::std::size_t j = 0; j < node.data.size(); ++j) { distances[i][j] = j != centers[i] ? 
this->metric(node.data[j], node.data[centers[i]]) : 0; min[j] = ::std::min(min[j], distances[i][j]); if (min[j] > max) { max = min[j]; centers[i + 1] = j; } } } for (::std::size_t j = 0; j < node.data.size(); ++j) { distances[k - 1][j] = this->metric(node.data[j], node.data[centers[k - 1]]); } } void data(const Node& node, ::std::vector<Value>& data) const { data.insert(data.end(), node.data.begin(), node.data.end()); for (::std::size_t i = 0; i < node.children.size(); ++i) { data.push_back(node.children[i].pivot); this->data(node.children[i], data); } } void push(Node& node, const Value& value) { if (node.children.empty()) { node.data.push_back(value); if (node.data.size() > this->nodeDataMax && node.data.size() > node.degree) { this->split(node); } } else { ::std::vector<Distance> distances(node.children.size()); ::std::size_t index = 0; Distance min = ::std::numeric_limits<Distance>::infinity(); for (::std::size_t i = 0; i < node.children.size(); ++i) { distances[i] = this->metric(value, node.children[i].pivot); if (distances[i] < min) { index = i; min = distances[i]; } } for (::std::size_t i = 0; i < node.children.size(); ++i) { node.children[i].max[index] = ::std::max(node.children[i].max[index], distances[i]); node.children[i].min[index] = ::std::min(node.children[i].min[index], distances[i]); } this->push(node.children[index], value); } } ::std::vector<Neighbor> search(const Value& query, const ::std::size_t* k, const Distance* radius, const bool& sorted) const { ::std::vector<Neighbor> neighbors; if (this->empty()) { return neighbors; } if (nullptr != k) { neighbors.reserve(::std::min(*k, this->size())); } ::std::size_t checks = 0; ::std::vector<Branch> branches; this->search(this->root, query, k, radius, branches, neighbors, checks); while (!branches.empty() && (!this->checks || checks < this->checks)) { Branch branch = ::std::move(branches.front()); ::std::pop_heap(branches.begin(), branches.end(), BranchCompare()); branches.pop_back(); if (nullptr == k 
|| *k == neighbors.size()) { Distance distance = nullptr != radius ? *radius : neighbors.front().first; if (branch.first - distance > branch.second->max[branch.second->index] || branch.first + distance < branch.second->min[branch.second->index]) { continue; } } this->search(*branch.second, query, k, radius, branches, neighbors, checks); } if (sorted) { ::std::sort_heap(neighbors.begin(), neighbors.end(), NeighborCompare()); } return neighbors; } void search(const Node& node, const Value& query, const ::std::size_t* k, const Distance* radius, ::std::vector<Branch>& branches, ::std::vector<Neighbor>& neighbors, ::std::size_t& checks) const { if (node.children.empty()) { for (::std::size_t i = 0; i < node.data.size(); ++i) { Distance distance = this->metric(query, node.data[i]); if (nullptr == k || neighbors.size() < *k || distance < neighbors.front().first) { if (nullptr == radius || distance < *radius) { if (nullptr != k && *k == neighbors.size()) { ::std::pop_heap(neighbors.begin(), neighbors.end(), NeighborCompare()); neighbors.pop_back(); } neighbors.emplace_back(distance, node.data[i]); ::std::push_heap(neighbors.begin(), neighbors.end(), NeighborCompare()); } } if (this->checks && ++checks > this->checks) { return; } } } else { ::std::vector<Distance> distances(node.children.size()); ::std::vector<bool> removed(node.children.size(), false); for (::std::size_t i = 0; i < node.children.size(); ++i) { if (!removed[i]) { distances[i] = this->metric(query, node.children[i].pivot); if (!node.children[i].removed) { if (nullptr == k || neighbors.size() < *k || distances[i] < neighbors.front().first) { if (nullptr == radius || distances[i] < *radius) { if (nullptr != k && *k == neighbors.size()) { ::std::pop_heap(neighbors.begin(), neighbors.end(), NeighborCompare()); neighbors.pop_back(); } neighbors.emplace_back(distances[i], node.children[i].pivot); ::std::push_heap(neighbors.begin(), neighbors.end(), NeighborCompare()); } } } if (nullptr == k || *k == 
neighbors.size()) { Distance distance = nullptr != radius ? *radius : neighbors.front().first; for (::std::size_t j = 0; j < node.children.size(); ++j) { if (i != j && !removed[j]) { if (distances[i] - distance > node.children[i].max[j] || distances[i] + distance < node.children[i].min[j]) { removed[j] = true; } } } } if (this->checks && ++checks > this->checks) { return; } } } for (::std::size_t i = 0; i < node.children.size(); ++i) { if (!removed[i]) { Distance distance = nullptr != radius ? *radius : neighbors.front().first; if (distances[i] - distance <= node.children[i].max[i] && distances[i] + distance >= node.children[i].min[i]) { branches.emplace_back(distances[i], &node.children[i]); ::std::push_heap(branches.begin(), branches.end(), BranchCompare()); } } } } } void split(Node& node) { ::std::vector<::std::vector<Distance>> distances(node.degree, ::std::vector<Distance>(node.data.size())); ::std::vector<::std::size_t> centers(node.degree); this->choose(node, centers, distances); for (::std::size_t i = 0; i < centers.size(); ++i) { node.children.emplace_back(i, node.degree - 1, this->nodeDegree, this->nodeDataMax); node.children[i].pivot = ::std::move(node.data[centers[i]]); } for (::std::size_t i = 0; i < node.data.size(); ++i) { ::std::size_t index = 0; Distance min = ::std::numeric_limits<Distance>::infinity(); for (::std::size_t j = 0; j < centers.size(); ++j) { Distance distance = distances[j][i]; if (distance < min) { index = j; min = distance; } } for (::std::size_t j = 0; j < centers.size(); ++j) { if (i != centers[j]) { node.children[j].max[index] = ::std::max(node.children[j].max[index], distances[j][i]); node.children[j].min[index] = ::std::min(node.children[j].min[index], distances[j][i]); } } if (i != centers[index]) { node.children[index].data.push_back(::std::move(node.data[i])); } } for (::std::size_t i = 0; i < node.children.size(); ++i) { node.children[i].degree = ::std::min(::std::max(this->nodeDegree * node.children[i].data.size() / 
node.data.size(), this->nodeDegreeMin), this->nodeDegreeMax); if (node.children[i].data.empty()) { node.children[i].max[i] = Distance(); node.children[i].min[i] = Distance(); } } #ifdef _OPENMP ::std::size_t size = node.data.size(); #endif node.data.clear(); node.data.shrink_to_fit(); #ifdef _OPENMP #pragma omp parallel for if (size > 2 * this->nodeDataMax) #if _OPENMP < 200805 for (::std::ptrdiff_t i = 0; i < node.children.size(); ++i) #else for (::std::size_t i = 0; i < node.children.size(); ++i) #endif #else for (::std::size_t i = 0; i < node.children.size(); ++i) #endif { if (node.children[i].data.size() > this->nodeDataMax && node.children[i].data.size() > node.children[i].degree) { this->split(node.children[i]); } } } ::boost::optional<::std::size_t> checks; ::std::mt19937 generator; Metric metric; ::std::size_t nodeDataMax; ::std::size_t nodeDegree; ::std::size_t nodeDegreeMax; ::std::size_t nodeDegreeMin; Node root; ::std::size_t values; }; } } #endif // RL_MATH_GNATNEARESTNEIGHBORS_H
stats_tools.c
/*Daala video codec
Copyright (c) 2013 Daala project contributors.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

- Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

- Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.*/

#include <omp.h>
#include <stdlib.h>
#include <string.h>
#include "stats_tools.h"
#include "od_defs.h"
#include "od_filter.h"
#include "od_intra.h"
#include "../src/dct.h"
#include "../src/intra.h"

#define PRINT_SCALE (0)

/*Initializes the counters and covariance accumulators of a mode_data.*/
void mode_data_init(mode_data *_md){
  int i;
  _md->n=0;
  _md->mean=0;
  _md->var=0;
  for(i=0;i<B_SZ*B_SZ;i++){
    _md->satd_avg[i]=0;
  }
  od_covmat_init(&_md->ref,B_SZ*B_SZ);
  od_covmat_init(&_md->res,B_SZ*B_SZ);
}

/*Releases the covariance accumulators of a mode_data.*/
void mode_data_clear(mode_data *_md){
  od_covmat_clear(&_md->ref);
  od_covmat_clear(&_md->res);
}

/*Resets a mode_data to its freshly-initialized state without freeing.*/
void mode_data_reset(mode_data *_md){
  int i;
  _md->n=0;
  _md->mean=0;
  _md->var=0;
  for(i=0;i<B_SZ*B_SZ;i++){
    _md->satd_avg[i]=0;
  }
  od_covmat_reset(&_md->ref);
  od_covmat_reset(&_md->res);
}

/*Update the input mean and variance with one block of pixels using
   Welford's online algorithm (var accumulates the sum of squared
   deviations; mode_data_correct() divides it out later).*/
void mode_data_add_input(mode_data *_md,const unsigned char *_data,int _stride){
  int n;
  int i;
  int j;
  n=_md->n*B_SZ*B_SZ;
  for(j=0;j<B_SZ;j++){
    for(i=0;i<B_SZ;i++){
      double delta;
      double s;
      n++;
      s=1.0/n;
      delta=_data[_stride*j+i]*INPUT_SCALE-_md->mean;
      _md->mean+=delta*s;
      _md->var+=delta*delta*(n-1)*s;
    }
  }
  _md->n++;
}

/*Adds one coefficient block to the reference (_ref!=0) or residual
   covariance accumulator.*/
void mode_data_add_block(mode_data *_md,const od_coeff *_block,int _stride,
 int _ref){
  int    j;
  int    i;
  double buf[B_SZ*B_SZ];
  for(j=0;j<B_SZ;j++){
    for(i=0;i<B_SZ;i++){
      buf[B_SZ*j+i]=_block[_stride*j+i];
    }
  }
  if(_ref){
    od_covmat_add(&_md->ref,buf,1);
  }
  else{
    od_covmat_add(&_md->res,buf,1);
  }
}

/*Merges the statistics of _b into _a (parallel combination).*/
void mode_data_combine(mode_data *_a,const mode_data *_b){
  double s;
  double delta;
  int    i;
  if(_b->n==0){
    return;
  }
  s=((double)_b->n)/(_a->n+_b->n);
  delta=_b->mean-_a->mean;
  _a->mean+=delta*s;
  for(i=0;i<B_SZ*B_SZ;i++){
    _a->satd_avg[i]+=(_b->satd_avg[i]-_a->satd_avg[i])*s;
  }
  s*=_a->n;
  /*Chan et al.'s formula: M2+=M2_b+delta^2*n_a*n_b/(n_a+n_b).
    The second delta factor was missing, biasing the combined variance.*/
  _a->var+=_b->var+delta*delta*s;
  od_covmat_combine(&_a->ref,&_b->ref);
  od_covmat_combine(&_a->res,&_b->res);
  _a->n+=_b->n;
}

/*Converts accumulated sums into proper (co)variances.*/
void mode_data_correct(mode_data *_md){
  _md->var/=_md->n*B_SZ*B_SZ;
  od_covmat_correct(&_md->ref);
  od_covmat_correct(&_md->res);
}

/*Prints coding-gain, SATD and bit-estimate summaries for one mode.*/
void mode_data_print(mode_data *_md,const char *_label,double *_scale){
  double cg_ref;
  double cg_res;
  int    v;
  int    u;
  double satd_avg;
  double bits_avg;
  cg_ref=10*log10(_md->var);
  cg_res=10*log10(_md->var);
  satd_avg=0;
  bits_avg=0;
  for(v=0;v<B_SZ;v++){
    for(u=0;u<B_SZ;u++){
      int    i;
      int    ii;
      double b;
      i=B_SZ*v+u;
      ii=B_SZ*B_SZ*i+i;
      cg_ref-=10*log10(_md->ref.cov[ii]*_scale[v]*_scale[u])/(B_SZ*B_SZ);
      cg_res-=10*log10(_md->res.cov[ii]*_scale[v]*_scale[u])/(B_SZ*B_SZ);
      satd_avg+=sqrt(_scale[v]*_scale[u])*_md->satd_avg[i];
      /*Laplacian bit-rate model with parameter b.*/
      b=sqrt(_scale[v]*_scale[u]*_md->res.cov[ii]/2);
      bits_avg+=1+OD_LOG2(b)+M_LOG2E/b*_md->satd_avg[i];
    }
  }
  printf("%s Blocks %5i SATD %G Bits %G Mean %G Var %G CgRef %G CgRes %G Pg %G\n",
   _label,_md->n,satd_avg,bits_avg,_md->mean,_md->var,cg_ref,cg_res,
   cg_res-cg_ref);
}

/*Extracts the per-coefficient Laplacian parameters b from the residual
   covariance diagonal.*/
void mode_data_params(mode_data *_this,double _b[B_SZ*B_SZ],double *_scale){
  int v;
  int u;
  int i;
  int ii;
  for(v=0;v<B_SZ;v++){
    for(u=0;u<B_SZ;u++){
      i=(v*B_SZ+u);
      ii=B_SZ*B_SZ*i+i;
      _b[i]=sqrt(_scale[v]*_scale[u]*_this->res.cov[ii]/2);
    }
  }
}

/*Initializes the pooled statistics and the per-mode statistics.*/
void intra_stats_init(intra_stats *_this){
  int mode;
  mode_data_init(&_this->fr);
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    mode_data_init(&_this->md[mode]);
  }
}

void intra_stats_clear(intra_stats *_this){
  int i;
  mode_data_clear(&_this->fr);
  for(i=0;i<OD_INTRA_NMODES;i++){
    mode_data_clear(&_this->md[i]);
  }
}

void intra_stats_reset(intra_stats *_this){
  int i;
  mode_data_reset(&_this->fr);
  for(i=0;i<OD_INTRA_NMODES;i++){
    mode_data_reset(&_this->md[i]);
  }
}

/*Accumulates one predicted block into the pooled and per-mode stats.*/
void intra_stats_update(intra_stats *_this,const unsigned char *_data,
 int _stride,int _mode,const od_coeff *_ref,int _ref_stride,
 const double *_res,int _res_stride){
  mode_data *fr;
  mode_data *md;
  int        j;
  int        i;
  double     buf[B_SZ*B_SZ];
  fr=&_this->fr;
  md=&_this->md[_mode];
  /*Update the input mean and variance.*/
  mode_data_add_input(fr,_data,_stride);
  mode_data_add_input(md,_data,_stride);
  /*Update the reference mean and covariance.*/
  for(j=0;j<B_SZ;j++){
    for(i=0;i<B_SZ;i++){
      buf[B_SZ*j+i]=_ref[_ref_stride*j+i];
    }
  }
  od_covmat_add(&fr->ref,buf,1);
  od_covmat_add(&md->ref,buf,1);
  /*Update the residual mean and covariance.*/
  for(j=0;j<B_SZ;j++){
    for(i=0;i<B_SZ;i++){
      buf[B_SZ*j+i]=_res[_res_stride*j+i];
    }
  }
  od_covmat_add(&fr->res,buf,1);
  od_covmat_add(&md->res,buf,1);
  /*Update the average SATD.*/
  for(j=0;j<B_SZ;j++){
    for(i=0;i<B_SZ;i++){
      double satd;
      /*buf holds doubles; the integer abs() here previously truncated the
        fractional part of each residual before averaging.*/
      satd=fabs(buf[B_SZ*j+i]);
      fr->satd_avg[B_SZ*j+i]+=(satd-fr->satd_avg[B_SZ*j+i])/fr->n;
      md->satd_avg[B_SZ*j+i]+=(satd-md->satd_avg[B_SZ*j+i])/md->n;
    }
  }
}

void intra_stats_correct(intra_stats *_this){
  int mode;
  mode_data_correct(&_this->fr);
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    mode_data_correct(&_this->md[mode]);
  }
}

void intra_stats_print(intra_stats *_this,const char *_label,
 double *_scale){
  int mode;
  printf("%s\n",_label);
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    char label[16];
    sprintf(label,"Mode %i",mode);
    mode_data_print(&_this->md[mode],label,_scale);
  }
  mode_data_print(&_this->fr,"Pooled",_scale);
}

void intra_stats_combine(intra_stats *_this,const intra_stats *_that){
  int mode;
  mode_data_combine(&_this->fr,&_that->fr);
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    mode_data_combine(&_this->md[mode],&_that->md[mode]);
  }
}

/*Compute the scale factors for the DCT and TDLT transforms.*/
double VP8_SCALE[B_SZ];
double OD_SCALE[B_SZ];

#define SCALE_BITS (14)

/*Per-row energy of the inverse DCT basis functions (VP8 transform).*/
void vp8_scale_init(double _vp8_scale[B_SZ]){
  int      j;
  int      i;
  od_coeff buf[B_SZ];
  for(i=0;i<B_SZ;i++){
    /*Feed a unit impulse (scaled to fixed point) through the iDCT.*/
    for(j=0;j<B_SZ;j++){
      buf[j]=i!=j?0:(1<<SCALE_BITS);
    }
#if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
    (*OD_IDCT_1D[B_SZ_LOG-OD_LOG_BSIZE0])(buf,1,buf);
#else
# error "Need an iDCT implementation for this block size."
#endif
    _vp8_scale[i]=0;
    for(j=0;j<B_SZ;j++){
      double c=((double)buf[j])/(1<<SCALE_BITS);
      _vp8_scale[i]+=c*c;
    }
#if PRINT_SCALE
    printf("%s%- 24.18G",i==0?"":" ",_vp8_scale[i]);
#endif
  }
#if PRINT_SCALE
  printf("\n");
#endif
}

#define APPLY_PREFILTER (1)
#define APPLY_POSTFILTER (1)

/*Per-row energy of the inverse lapped transform (iDCT + postfilter).*/
void od_scale_init(double _od_scale[B_SZ]){
  int      i;
  int      j;
  od_coeff buf[2*B_SZ];
  for(i=0;i<B_SZ;i++){
    for(j=0;j<2*B_SZ;j++){
      buf[j]=(B_SZ>>1)+i!=j?0:(1<<SCALE_BITS);
    }
#if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
    (*OD_IDCT_1D[B_SZ_LOG-OD_LOG_BSIZE0])(&buf[B_SZ>>1],1,&buf[B_SZ>>1]);
#else
# error "Need an iDCT implementation for this block size."
#endif
#if APPLY_POSTFILTER
#if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
    (*NE_POST_FILTER[B_SZ_LOG-OD_LOG_BSIZE0])(buf,buf);
    (*NE_POST_FILTER[B_SZ_LOG-OD_LOG_BSIZE0])(&buf[B_SZ],&buf[B_SZ]);
#else
# error "Need a postfilter implementation for this block size."
#endif
#endif
    _od_scale[i]=0;
    for(j=0;j<2*B_SZ;j++){
      double c=((double)buf[j])/(1<<SCALE_BITS);
      _od_scale[i]+=c*c;
    }
#if PRINT_SCALE
    printf("%s%- 24.18G",i==0?"":" ",_od_scale[i]);
#endif
  }
#if PRINT_SCALE
  printf("\n");
#endif
}

#define SCALE_SATD (1)

/*Find the best vp8 mode by minimum (scaled) SATD; optionally returns the
   SATD margin to the runner-up as a training weight.*/
int vp8_select_mode(const unsigned char *_data,int _stride,double *_weight){
  double best_satd;
  double next_best_satd;
  int    mode;
  int    best_mode;
  best_mode=0;
  best_satd=UINT_MAX;
  next_best_satd=best_satd;
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    unsigned char block[B_SZ*B_SZ];
    od_coeff      buf[B_SZ*B_SZ];
    int           j;
    int           i;
    double        satd;
    memset(block,0,B_SZ*B_SZ);
    vp8_intra_predict(block,B_SZ,_data,_stride,mode);
    for(j=0;j<B_SZ;j++){
      for(i=0;i<B_SZ;i++){
        buf[B_SZ*j+i]=block[B_SZ*j+i]-_data[_stride*j+i];
      }
    }
#if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
    (*OD_FDCT_2D[B_SZ_LOG-OD_LOG_BSIZE0])(buf,B_SZ,buf,B_SZ);
#else
# error "Need an fDCT implementation for this block size."
#endif
    satd=0;
    for(j=0;j<B_SZ;j++){
      for(i=0;i<B_SZ;i++){
        /*buf holds integer od_coeff values here, so abs() is correct.*/
#if SCALE_SATD
        satd+=sqrt(VP8_SCALE[j]*VP8_SCALE[i])*abs(buf[B_SZ*j+i]);
#else
        satd+=abs(buf[B_SZ*j+i]);
#endif
      }
    }
    if(satd<best_satd){
      next_best_satd=best_satd;
      best_satd=satd;
      best_mode=mode;
    }
    else{
      if(satd<next_best_satd){
        next_best_satd=satd;
      }
    }
  }
  if(_weight!=NULL){
    *_weight=best_mode!=0?next_best_satd-best_satd:1;
  }
  return best_mode;
}

/*Find the best Daala mode by minimum estimated bits under the Laplacian
   model with per-mode parameters _b.*/
int od_select_mode_bits(const od_coeff *_block,int _stride,double *_weight,
 double _b[OD_INTRA_NMODES][B_SZ*B_SZ]){
  int    best_mode;
  double best_bits;
  double next_best_bits;
  int    mode;
  best_mode=0;
  best_bits=UINT_MAX;
  next_best_bits=best_bits;
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    double p[B_SZ*B_SZ];
    double bits;
    int    j;
    int    i;
#if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
#if 0
    (*OD_INTRA_MULT[B_SZ_LOG-OD_LOG_BSIZE0])(p,_block,_stride,mode);
#else
    (*NE_INTRA_MULT[B_SZ_LOG-OD_LOG_BSIZE0])(p,B_SZ,_block,_stride,mode);
#endif
#else
# error "Need a predictor implementation for this block size."
#endif
    bits=0;
    for(j=0;j<B_SZ;j++){
      for(i=0;i<B_SZ;i++){
        double res;
        res=sqrt(OD_SCALE[j]*OD_SCALE[i])*
         abs(_block[_stride*j+i]-(od_coeff)floor(p[B_SZ*j+i]+0.5));
        bits+=1+OD_LOG2(_b[mode][j*B_SZ+i])+M_LOG2E/_b[mode][j*B_SZ+i]*res;
      }
    }
    if(bits<best_bits){
      next_best_bits=best_bits;
      best_bits=bits;
      best_mode=mode;
    }
    else{
      if(bits<next_best_bits){
        next_best_bits=bits;
      }
    }
  }
  if(_weight!=NULL){
    *_weight=best_mode!=0?next_best_bits-best_bits:1;
  }
  return best_mode;
}

/*Find the best Daala mode by minimum (scaled) SATD of the prediction
   residual.*/
int od_select_mode_satd(const od_coeff *_block,int _stride,double *_weight){
  int    best_mode;
  double best_satd;
  double next_best_satd;
  int    mode;
  best_mode=0;
  best_satd=UINT_MAX;
  next_best_satd=best_satd;
  for(mode=0;mode<OD_INTRA_NMODES;mode++){
    double p[B_SZ*B_SZ];
    double satd;
    int    j;
    int    i;
#if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
#if 0
    (*OD_INTRA_MULT[B_SZ_LOG-OD_LOG_BSIZE0])(p,_block,_stride,mode);
#else
    (*NE_INTRA_MULT[B_SZ_LOG-OD_LOG_BSIZE0])(p,B_SZ,_block,_stride,mode);
#endif
#else
# error "Need a predictor implementation for this block size."
#endif
    satd=0;
    for(j=0;j<B_SZ;j++){
      for(i=0;i<B_SZ;i++){
#if SCALE_SATD
        satd+=sqrt(OD_SCALE[j]*OD_SCALE[i])*
         abs(_block[_stride*j+i]-(od_coeff)floor(p[B_SZ*j+i]+0.5));
#else
        satd+=abs(_block[_stride*j+i]-(od_coeff)floor(p[B_SZ*j+i]+0.5));
#endif
      }
    }
    if(satd<best_satd){
      next_best_satd=best_satd;
      best_mode=mode;
      best_satd=satd;
    }
    else{
      if(satd<next_best_satd){
        next_best_satd=satd;
      }
    }
  }
  if(_weight!=NULL){
    *_weight=best_mode!=0?next_best_satd-best_satd:1;
  }
  return best_mode;
}

/*Runs the given per-block callbacks over every input video, one OpenMP
   thread per file; each thread gets its own _ctx_sz slice of _ctx.*/
int ne_apply_to_blocks(void *_ctx,int _ctx_sz,int _plmask,int _padding,
 plane_start_func _start,int _nfuncs,const block_func *_funcs,
 plane_finish_func _finish,int _argc,const char *_argv[]){
  int ai;
#pragma omp parallel for schedule(dynamic)
  for(ai=1;ai<_argc;ai++){
    FILE            *fin;
    video_input      vid;
    th_info          ti;
    th_ycbcr_buffer  ycbcr;
    int              pli;
    int              tid;
    unsigned char   *ctx;
    fin=fopen(_argv[ai],"rb");
    if(fin==NULL){
      fprintf(stderr,"Could not open '%s' for reading.\n",_argv[ai]);
      continue;
    }
    /*NOTE(review): the continue paths below appear to leak fin/vid unless
      video_input_open()/video_input_close() take ownership of the FILE —
      confirm against the video_input API before adding fclose() here.*/
    if(video_input_open(&vid,fin)<0){
      fprintf(stderr,"Error reading video info from '%s'.\n",_argv[ai]);
      continue;
    }
    video_input_get_info(&vid,&ti);
    if(video_input_fetch_frame(&vid,ycbcr,NULL)<0){
      fprintf(stderr,"Error reading first frame from '%s'.\n",_argv[ai]);
      continue;
    }
    tid=omp_get_thread_num();
    ctx=((unsigned char *)_ctx)+tid*_ctx_sz;
    for(pli=0;pli<3;pli++){
      if(_plmask&1<<pli){
        int x0;
        int y0;
        int nxblocks;
        int nyblocks;
        get_intra_dims(&ti,pli,_padding,&x0,&y0,&nxblocks,&nyblocks);
        if(_start!=NULL){
          (*_start)(ctx,_argv[ai],&ti,pli,nxblocks,nyblocks);
        }
        if(_funcs!=NULL){
          int f;
          for(f=0;f<_nfuncs;f++){
            if(_funcs[f]!=NULL){
              const unsigned char *data;
              int                  stride;
              int                  bj;
              int                  bi;
              data=ycbcr[pli].data;
              stride=ycbcr[pli].stride;
              for(bj=0;bj<nyblocks;bj++){
                int y;
                y=y0+B_SZ*bj;
                for(bi=0;bi<nxblocks;bi++){
                  int x;
                  x=x0+B_SZ*bi;
                  (*_funcs[f])(ctx,&data[stride*y+x],stride,bi,bj);
                }
              }
            }
          }
        }
        if(_finish!=NULL){
          (*_finish)(ctx);
        }
      }
    }
    video_input_close(&vid);
  }
  return EXIT_SUCCESS;
}
ParticleBConds3DSoa.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp. // Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp. // // File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp. ////////////////////////////////////////////////////////////////////////////////////// // -*- C++ -*- #ifndef QMCPLUSPLUS_PARTICLE_BCONDS_3D_SOA_H #define QMCPLUSPLUS_PARTICLE_BCONDS_3D_SOA_H #include <config.h> #include <algorithm> #include <Lattice/CrystalLattice.h> #include <OhmmsSoA/VectorSoaContainer.h> namespace qmcplusplus { /** specialization for an open 3D */ template<class T> struct DTD_BConds<T,3,SUPERCELL_OPEN+SOA_OFFSET> { /** constructor: doing nothing */ inline DTD_BConds(const CrystalLattice<T,3>& lat) {} template<typename PT, typename RSoA> void computeDistances(const PT& pos, const RSoA& R0, T* restrict temp_r, RSoA& temp_dr, int first, int last, int flip_ind=0) { const T x0=pos[0]; const T y0=pos[1]; const T z0=pos[2]; const T* restrict px=R0.data(0); const T* restrict py=R0.data(1); const T* restrict pz=R0.data(2); T* restrict dx=temp_dr.data(0); T* restrict dy=temp_dr.data(1); T* restrict dz=temp_dr.data(2); #pragma omp simd aligned(temp_r,px,py,pz,dx,dy,dz) for(int iat=first; iat<last; ++iat) { dx[iat]=px[iat]-x0; dy[iat]=py[iat]-y0; dz[iat]=pz[iat]-z0; temp_r[iat]=std::sqrt(dx[iat]*dx[iat]+dy[iat]*dy[iat]+dz[iat]*dz[iat]); } } }; /** specialization for a periodic 3D, orthorombic cell */ template<class T> struct DTD_BConds<T,3,PPPO+SOA_OFFSET> { T Linv0,L0,Linv1,L1,Linv2,L2,r2max,dummy; inline DTD_BConds(const CrystalLattice<T,3>& lat) : Linv0(lat.OneOverLength[0]), L0(lat.Length[0]) ,Linv1(lat.OneOverLength[1]), L1(lat.Length[1]) 
,Linv2(lat.OneOverLength[2]), L2(lat.Length[2]) ,r2max(lat.CellRadiusSq),dummy(T()) { } template<typename PT, typename RSoA> void computeDistances(const PT& pos, const RSoA& R0, T* restrict temp_r, RSoA& temp_dr, int first, int last, int flip_ind=0) { const T x0=pos[0]; const T y0=pos[1]; const T z0=pos[2]; const T* restrict px=R0.data(0); const T* restrict py=R0.data(1); const T* restrict pz=R0.data(2); T* restrict dx=temp_dr.data(0); T* restrict dy=temp_dr.data(1); T* restrict dz=temp_dr.data(2); #pragma omp simd aligned(temp_r,px,py,pz,dx,dy,dz) for(int iat=first; iat<last; ++iat) { const T x=(px[iat]-x0)*Linv0; const T y=(py[iat]-y0)*Linv1; const T z=(pz[iat]-z0)*Linv2; dx[iat]=L0*(x-round(x)); dy[iat]=L1*(y-round(y)); dz[iat]=L2*(z-round(z)); temp_r[iat]=std::sqrt(dx[iat]*dx[iat]+dy[iat]*dy[iat]+dz[iat]*dz[iat]); } } }; /** specialization for a periodic 3D general cell with wigner-seitz==simulation cell * * Skip image cells. */ template<class T> struct DTD_BConds<T,3,PPPS+SOA_OFFSET> { T r00,r10,r20,r01,r11,r21,r02,r12,r22; T g00,g10,g20,g01,g11,g21,g02,g12,g22; DTD_BConds(const CrystalLattice<T,3>& lat) : r00(lat.R(0)),r10(lat.R(3)),r20(lat.R(6)) ,r01(lat.R(1)),r11(lat.R(4)),r21(lat.R(7)) ,r02(lat.R(2)),r12(lat.R(5)),r22(lat.R(8)) ,g00(lat.G(0)),g10(lat.G(3)),g20(lat.G(6)) ,g01(lat.G(1)),g11(lat.G(4)),g21(lat.G(7)) ,g02(lat.G(2)),g12(lat.G(5)),g22(lat.G(8)) { } template<typename PT, typename RSoA> void computeDistances(const PT& pos, const RSoA& R0, T* restrict temp_r, RSoA& temp_dr, int first, int last, int flip_ind=0) { const T x0=pos[0]; const T y0=pos[1]; const T z0=pos[2]; const T* restrict px=R0.data(0); const T* restrict py=R0.data(1); const T* restrict pz=R0.data(2); T* restrict dx=temp_dr.data(0); T* restrict dy=temp_dr.data(1); T* restrict dz=temp_dr.data(2); #pragma omp simd aligned(temp_r,px,py,pz,dx,dy,dz) for(int iat=first; iat<last; ++iat) { T displ_0 =px[iat]-x0; T displ_1 =py[iat]-y0; T displ_2 =pz[iat]-z0; T 
ar_0=displ_0*g00+displ_1*g10+displ_2*g20; T ar_1=displ_0*g01+displ_1*g11+displ_2*g21; T ar_2=displ_0*g02+displ_1*g12+displ_2*g22; //put them in the box ar_0-=round(ar_0); ar_1-=round(ar_1); ar_2-=round(ar_2); //unit2cart dx[iat] = ar_0*r00+ar_1*r10+ar_2*r20; dy[iat] = ar_0*r01+ar_1*r11+ar_2*r21; dz[iat] = ar_0*r02+ar_1*r12+ar_2*r22; temp_r[iat] = std::sqrt(dx[iat]*dx[iat]+dy[iat]*dy[iat]+dz[iat]*dz[iat]); } } }; /** specialization for a periodic 3D general cell * * Wigner-Seitz cell radius > simulation cell radius * Need to check image cells */ template<class T> struct DTD_BConds<T,3,PPPG+SOA_OFFSET> { T g00,g10,g20,g01,g11,g21,g02,g12,g22; T r00,r10,r20,r01,r11,r21,r02,r12,r22; VectorSoaContainer<T,3> corners; DTD_BConds(const CrystalLattice<T,3>& lat) { TinyVector<TinyVector<T,3>,3> rb; rb[0]=lat.a(0); rb[1]=lat.a(1); rb[2]=lat.a(2); find_reduced_basis(rb); r00=rb[0][0];r10=rb[1][0];r20=rb[2][0]; r01=rb[0][1];r11=rb[1][1];r21=rb[2][1]; r02=rb[0][2];r12=rb[1][2];r22=rb[2][2]; Tensor<T,3> rbt; for(int i=0; i<3; ++i) for(int j=0; j<3; ++j) rbt(i,j)=rb[i][j]; Tensor<T,3> g=inverse(rbt); g00=g(0); g10=g(3); g20=g(6); g01=g(1); g11=g(4); g21=g(7); g02=g(2); g12=g(5); g22=g(8); constexpr T minusone(-1); constexpr T zero(0); corners.resize(8); corners(0)=zero; corners(1)=minusone*(rb[0]); corners(2)=minusone*(rb[1]); corners(3)=minusone*(rb[2]); corners(4)=minusone*(rb[0]+rb[1]); corners(5)=minusone*(rb[0]+rb[2]); corners(6)=minusone*(rb[1]+rb[2]); corners(7)=minusone*(rb[0]+rb[1]+rb[2]); } template<typename PT, typename RSoA> void computeDistances(const PT& pos, const RSoA& R0, T* restrict temp_r, RSoA& temp_dr, int first, int last, int flip_ind=0) { const T x0=pos[0]; const T y0=pos[1]; const T z0=pos[2]; const T* restrict px=R0.data(0); const T* restrict py=R0.data(1); const T* restrict pz=R0.data(2); T* restrict dx=temp_dr.data(0); T* restrict dy=temp_dr.data(1); T* restrict dz=temp_dr.data(2); const T* restrict cellx=corners.data(0); ASSUME_ALIGNED(cellx); const T* 
restrict celly=corners.data(1); ASSUME_ALIGNED(celly); const T* restrict cellz=corners.data(2); ASSUME_ALIGNED(cellz); constexpr T minusone(-1); constexpr T one(1); #pragma omp simd aligned(temp_r,px,py,pz,dx,dy,dz) for(int iat=first; iat<last; ++iat) { const T flip=iat<flip_ind?one:minusone; const T displ_0 =(px[iat]-x0)*flip; const T displ_1 =(py[iat]-y0)*flip; const T displ_2 =(pz[iat]-z0)*flip; const T ar_0=-std::floor(displ_0*g00+displ_1*g10+displ_2*g20); const T ar_1=-std::floor(displ_0*g01+displ_1*g11+displ_2*g21); const T ar_2=-std::floor(displ_0*g02+displ_1*g12+displ_2*g22); const T delx = displ_0+ar_0*r00+ar_1*r10+ar_2*r20; const T dely = displ_1+ar_0*r01+ar_1*r11+ar_2*r21; const T delz = displ_2+ar_0*r02+ar_1*r12+ar_2*r22; T rmin=delx*delx+dely*dely+delz*delz; int ic=0; #pragma unroll(7) for(int c=1; c<8; ++c) { const T x=delx+cellx[c]; const T y=dely+celly[c]; const T z=delz+cellz[c]; const T r2=x*x+y*y+z*z; ic=(r2<rmin)? c:ic; rmin=(r2<rmin)?r2:rmin; } temp_r[iat]=std::sqrt(rmin); dx[iat] = flip*(delx+cellx[ic]); dy[iat] = flip*(dely+celly[ic]); dz[iat] = flip*(delz+cellz[ic]); } } }; /** specialization for a slab, general cell */ template<class T> struct DTD_BConds<T,3,PPNG+SOA_OFFSET> { T g00,g10,g01,g11; T r00,r10,r01,r11; TinyVector<TinyVector<T,3>,3> rb; VectorSoaContainer<T,3> corners; DTD_BConds(const CrystalLattice<T,3>& lat) { rb[0]=lat.a(0); rb[1]=lat.a(1); rb[2]=lat.a(2); //rb[2]=0.0; r00=rb[0][0];r10=rb[1][0]; r01=rb[0][1];r11=rb[1][1]; g00=lat.G(0); g10=lat.G(3); g01=lat.G(1); g11=lat.G(4); T minusone=-1.0; corners.resize(4); corners(0)=0.0; corners(1)=minusone*(rb[0]); corners(2)=minusone*(rb[1]); corners(3)=minusone*(rb[0]+rb[1]); } template<typename PT, typename RSoA> void computeDistances(const PT& pos, const RSoA& R0, T* restrict temp_r, RSoA& temp_dr, int first, int last, int flip_ind=0) { const T x0=pos[0]; const T y0=pos[1]; const T z0=pos[2]; const T* restrict px=R0.data(0); const T* restrict py=R0.data(1); const T* restrict 
pz=R0.data(2); T* restrict dx=temp_dr.data(0); T* restrict dy=temp_dr.data(1); T* restrict dz=temp_dr.data(2); const T* restrict cellx=corners.data(0); ASSUME_ALIGNED(cellx); const T* restrict celly=corners.data(1); ASSUME_ALIGNED(celly); constexpr T minusone(-1); constexpr T one(1); #pragma omp simd aligned(temp_r,px,py,pz,dx,dy,dz) for(int iat=first; iat<last; ++iat) { const T flip=iat<flip_ind?one:minusone; const T displ_0 =(px[iat]-x0)*flip; const T displ_1 =(py[iat]-y0)*flip; const T delz = pz[iat]-z0; const T ar_0=-std::floor(displ_0*g00+displ_1*g10); const T ar_1=-std::floor(displ_0*g01+displ_1*g11); const T delx = displ_0+ar_0*r00+ar_1*r10; const T dely = displ_1+ar_0*r01+ar_1*r11; T rmin=delx*delx+dely*dely; int ic=0; #pragma unroll(3) for(int c=1; c<4; ++c) { const T x=delx+cellx[c]; const T y=dely+celly[c]; const T r2=x*x+y*y; ic=(r2<rmin)? c:ic; rmin=(r2<rmin)?r2:rmin; } temp_r[iat]=std::sqrt(rmin+delz*delz); dx[iat] = flip*(delx+cellx[ic]); dy[iat] = flip*(dely+celly[ic]); dz[iat] = delz; } } }; /** specialization for a slab, orthorombic cell */ template<class T> struct DTD_BConds<T,3,PPNO+SOA_OFFSET> { T Linv0,L0,Linv1,L1; inline DTD_BConds(const CrystalLattice<T,3>& lat) : Linv0(lat.OneOverLength[0]), L0(lat.Length[0]) ,Linv1(lat.OneOverLength[1]), L1(lat.Length[1]) { } template<typename PT, typename RSoA> void computeDistances(const PT& pos, const RSoA& R0, T* restrict temp_r, RSoA& temp_dr, int first, int last, int flip_ind=0) { const T x0=pos[0]; const T y0=pos[1]; const T z0=pos[2]; const T* restrict px=R0.data(0); const T* restrict py=R0.data(1); const T* restrict pz=R0.data(2); T* restrict dx=temp_dr.data(0); T* restrict dy=temp_dr.data(1); T* restrict dz=temp_dr.data(2); #pragma omp simd aligned(temp_r,px,py,pz,dx,dy,dz) for(int iat=first; iat<last; ++iat) { T x=(px[iat]-x0)*Linv0; dx[iat]=L0*(x-round(x)); T y=(py[iat]-y0)*Linv1; dy[iat]=L1*(y-round(y)); dz[iat]=pz[iat]-z0; 
temp_r[iat]=std::sqrt(dx[iat]*dx[iat]+dy[iat]*dy[iat]+dz[iat]*dz[iat]); } } }; /** specialization for a slab, general cell */ template<class T> struct DTD_BConds<T,3,PPNS+SOA_OFFSET> { T r00,r10,r01,r11; T g00,g10,g01,g11; DTD_BConds(const CrystalLattice<T,3>& lat) : r00(lat.R(0)),r10(lat.R(3)) ,r01(lat.R(1)),r11(lat.R(4)) ,g00(lat.G(0)),g10(lat.G(3)) ,g01(lat.G(1)),g11(lat.G(4)) { } template<typename PT, typename RSoA> void computeDistances(const PT& pos, const RSoA& R0, T* restrict temp_r, RSoA& temp_dr, int first, int last, int flip_ind=0) { const T x0=pos[0]; const T y0=pos[1]; const T z0=pos[2]; const T* restrict px=R0.data(0); const T* restrict py=R0.data(1); const T* restrict pz=R0.data(2); T* restrict dx=temp_dr.data(0); T* restrict dy=temp_dr.data(1); T* restrict dz=temp_dr.data(2); #pragma omp simd aligned(temp_r,px,py,pz,dx,dy,dz) for(int iat=first; iat<last; ++iat) { T displ_0 =px[iat]-x0; T displ_1 =py[iat]-y0; T ar_0=displ_0*g00+displ_1*g10; T ar_1=displ_0*g01+displ_1*g11; //put them in the box ar_0-=round(ar_0); ar_1-=round(ar_1); //unit2cart dx[iat] = ar_0*r00+ar_1*r10; dy[iat] = ar_0*r01+ar_1*r11; dz[iat] = pz[iat]-z0; temp_r[iat] = std::sqrt(dx[iat]*dx[iat]+dy[iat]*dy[iat]+dz[iat]*dz[iat]); } } }; /** specialization for a wire */ template<class T> struct DTD_BConds<T,3,SUPERCELL_WIRE+SOA_OFFSET> { T Linv0,L0; inline DTD_BConds(const CrystalLattice<T,3>& lat) : Linv0(lat.OneOverLength[0]), L0(lat.Length[0]) { } template<typename PT, typename RSoA> void computeDistances(const PT& pos, const RSoA& R0, T* restrict temp_r, RSoA& temp_dr, int first, int last, int flip_ind=0) { const T x0=pos[0]; const T y0=pos[1]; const T z0=pos[2]; const T* restrict px=R0.data(0); const T* restrict py=R0.data(1); const T* restrict pz=R0.data(2); T* restrict dx=temp_dr.data(0); T* restrict dy=temp_dr.data(1); T* restrict dz=temp_dr.data(2); #pragma omp simd aligned(temp_r,px,py,pz,dx,dy,dz) for(int iat=first; iat<last; ++iat) { T x=(px[iat]-x0)*Linv0; 
dx[iat]=L0*(x-round(x)); dy[iat]=py[iat]-y0; dz[iat]=pz[iat]-z0; temp_r[iat]=std::sqrt(dx[iat]*dx[iat]+dy[iat]*dy[iat]+dz[iat]*dz[iat]); } } }; /** specialization for a periodic 3D general cell * * Slow method and not used unless one needs to check if faster methods fail */ template<class T> struct DTD_BConds<T,3,PPPX+SOA_OFFSET> { T r00,r10,r20,r01,r11,r21,r02,r12,r22; T g00,g10,g20,g01,g11,g21,g02,g12,g22; T r2max; VectorSoaContainer<T,3> nextcells; DTD_BConds(const CrystalLattice<T,3>& lat) : r00(lat.R(0)),r10(lat.R(3)),r20(lat.R(6)) ,r01(lat.R(1)),r11(lat.R(4)),r21(lat.R(7)) ,r02(lat.R(2)),r12(lat.R(5)),r22(lat.R(8)) ,g00(lat.G(0)),g10(lat.G(3)),g20(lat.G(6)) ,g01(lat.G(1)),g11(lat.G(4)),g21(lat.G(7)) ,g02(lat.G(2)),g12(lat.G(5)),g22(lat.G(8)) ,r2max(lat.CellRadiusSq) { nextcells.resize(26); T* restrict cellx=nextcells.data(0); T* restrict celly=nextcells.data(1); T* restrict cellz=nextcells.data(2); int ic=0; for(int i=-1; i<=1; ++i) for(int j=-1; j<=1; ++j) for(int k=-1; k<=1; ++k) { if(i==0 && j==0 && j==0) continue;//exclude zero cellx[ic]=i*r00+j*r10+k*r20; celly[ic]=i*r01+j*r11+k*r21; cellz[ic]=i*r02+j*r12+k*r22; ++ic; } } template<typename PT, typename RSoA> void computeDistances(const PT& pos, const RSoA& R0, T* restrict temp_r, RSoA& temp_dr, int first, int last, int flip_ind=0) { APP_ABORT("DTD_BConds<T,3,PPPX> not implemented"); } }; /** specialization for a slab, general cell */ template<class T> struct DTD_BConds<T,3,PPNX+SOA_OFFSET> { T r00,r10,r01,r11; T g00,g10,g01,g11; T r2max; VectorSoaContainer<T,3> nextcells; DTD_BConds(const CrystalLattice<T,3>& lat) : r00(lat.R(0)),r10(lat.R(3)) ,r01(lat.R(1)),r11(lat.R(4)) ,g00(lat.G(0)),g10(lat.G(3)) ,g01(lat.G(1)),g11(lat.G(4)) ,r2max(lat.CellRadiusSq) { nextcells.resize(8); T* restrict cellx=nextcells.data(0); T* restrict celly=nextcells.data(1); T* restrict cellz=nextcells.data(2); int ic=0; for(int i=-1; i<=1; ++i) for(int j=-1; j<=1; ++j) { if(i==0 && j ==0) continue; //exclude zero 
cellx[ic]=i*r00+j*r10; celly[ic]=i*r01+j*r11; cellz[ic]=T(); ++ic; } } template<typename PT, typename RSoA> void computeDistances(const PT& pos, const RSoA& R0, T* restrict temp_r, RSoA& temp_dr, int first, int last, int flip_ind=0) { APP_ABORT("DTD_BConds<T,3,PPNX> not implemented"); } }; } #endif // OHMMS_PARTICLE_BCONDS_3D_H
y_solve.c
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB BT code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include "header.h" #include "work_lhs.h" #include "timers.h" //--------------------------------------------------------------------- // Performs line solves in Y direction by first factoring // the block-tridiagonal matrix into an upper triangular matrix, // and then performing back substitution to solve for the unknow // vectors of each line. // // Make sure we treat elements zero to cell_size in the direction // of the sweep. 
//---------------------------------------------------------------------
// Performs the block-tridiagonal line solve in the Y direction:
// for each (k,i) pencil, build the 5x5 block Jacobians, factor the
// system by forward Gaussian elimination, and back-substitute into rhs.
//---------------------------------------------------------------------
void y_solve()
{
  // printf("yyyyyyyyyy\n");
  int i, j, k, m, n, jsize;
  //kai
  // int k13;
  //consistent_data(&k13, "int", 1);
  // NOTE(review): the local declaration of k13 is commented out above, yet
  // k13 is read in the k-loop bound and assigned below — it must be declared
  // elsewhere (header.h?); confirm, otherwise this does not compile.

  //---------------------------------------------------------------------
  //---------------------------------------------------------------------
  if (timeron) timer_start(t_ysolve);

  //---------------------------------------------------------------------
  //---------------------------------------------------------------------

  //---------------------------------------------------------------------
  // This function computes the left hand side for the three y-factors
  //---------------------------------------------------------------------
  jsize = grid_points[1]-1;

  //---------------------------------------------------------------------
  // Compute the indices for storing the tri-diagonal matrix;
  // determine a (labeled f) and n jacobians for cell c
  //---------------------------------------------------------------------
  // NOTE(review): standard NPB starts this loop at k = 1; the k13 offset and
  // the "k13 = 0;" store at the bottom of the loop body are someone's
  // instrumentation. tmp1/tmp2/tmp3, fjac, njac and lhs appear to be globals
  // from work_lhs.h — presumably threadprivate; verify, else this parallel
  // for races on them. The k13 = 0 store itself is shared and unsynchronized.
  #pragma omp parallel for default(shared) shared(jsize) private(i,j,k,m,n)
  for (k = k13+1; k <= grid_points[2]-2; k++) {
    for (i = 1; i <= grid_points[0]-2; i++) {
      // Build the flux Jacobian (fjac) and viscous Jacobian (njac) for
      // every j along this pencil. tmp1..3 are 1/rho, 1/rho^2, 1/rho^3.
      for (j = 0; j <= jsize; j++) {
        tmp1 = rho_i[k][j][i];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;

        fjac[j][0][0] = 0.0;
        fjac[j][1][0] = 0.0;
        fjac[j][2][0] = 1.0;
        fjac[j][3][0] = 0.0;
        fjac[j][4][0] = 0.0;

        fjac[j][0][1] = - ( u[k][j][i][1]*u[k][j][i][2] ) * tmp2;
        fjac[j][1][1] = u[k][j][i][2] * tmp1;
        fjac[j][2][1] = u[k][j][i][1] * tmp1;
        fjac[j][3][1] = 0.0;
        fjac[j][4][1] = 0.0;

        fjac[j][0][2] = - ( u[k][j][i][2]*u[k][j][i][2]*tmp2)
          + c2 * qs[k][j][i];
        fjac[j][1][2] = - c2 * u[k][j][i][1] * tmp1;
        fjac[j][2][2] = ( 2.0 - c2 ) * u[k][j][i][2] * tmp1;
        fjac[j][3][2] = - c2 * u[k][j][i][3] * tmp1;
        fjac[j][4][2] = c2;

        fjac[j][0][3] = - ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2;
        fjac[j][1][3] = 0.0;
        fjac[j][2][3] = u[k][j][i][3] * tmp1;
        fjac[j][3][3] = u[k][j][i][2] * tmp1;
        fjac[j][4][3] = 0.0;

        fjac[j][0][4] = ( c2 * 2.0 * square[k][j][i]
          - c1 * u[k][j][i][4] ) * u[k][j][i][2] * tmp2;
        fjac[j][1][4] = - c2 * u[k][j][i][1]*u[k][j][i][2] * tmp2;
        fjac[j][2][4] = c1 * u[k][j][i][4] * tmp1
          - c2 * ( qs[k][j][i] + u[k][j][i][2]*u[k][j][i][2] * tmp2 );
        fjac[j][3][4] = - c2 * ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2;
        fjac[j][4][4] = c1 * u[k][j][i][2] * tmp1;

        njac[j][0][0] = 0.0;
        njac[j][1][0] = 0.0;
        njac[j][2][0] = 0.0;
        njac[j][3][0] = 0.0;
        njac[j][4][0] = 0.0;

        njac[j][0][1] = - c3c4 * tmp2 * u[k][j][i][1];
        njac[j][1][1] = c3c4 * tmp1;
        njac[j][2][1] = 0.0;
        njac[j][3][1] = 0.0;
        njac[j][4][1] = 0.0;

        njac[j][0][2] = - con43 * c3c4 * tmp2 * u[k][j][i][2];
        njac[j][1][2] = 0.0;
        njac[j][2][2] = con43 * c3c4 * tmp1;
        njac[j][3][2] = 0.0;
        njac[j][4][2] = 0.0;

        njac[j][0][3] = - c3c4 * tmp2 * u[k][j][i][3];
        njac[j][1][3] = 0.0;
        njac[j][2][3] = 0.0;
        njac[j][3][3] = c3c4 * tmp1;
        njac[j][4][3] = 0.0;

        njac[j][0][4] = - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][1]*u[k][j][i][1])
          - ( con43 * c3c4 - c1345 ) * tmp3 * (u[k][j][i][2]*u[k][j][i][2])
          - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][3]*u[k][j][i][3])
          - c1345 * tmp2 * u[k][j][i][4];
        njac[j][1][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][1];
        njac[j][2][4] = ( con43 * c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
        njac[j][3][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][3];
        njac[j][4][4] = ( c1345 ) * tmp1;
      }

      //---------------------------------------------------------------------
      // now jacobians set, so form left hand side in y direction:
      // lhs[j] = (-A, B, C) blocks of the tridiagonal system
      //---------------------------------------------------------------------
      lhsinit(lhs, jsize);
      for (j = 1; j <= jsize-1; j++) {
        tmp1 = dt * ty1;
        tmp2 = dt * ty2;

        // Sub-diagonal block A(j): coupling to j-1, diagonal damped by dy*.
        lhs[j][AA][0][0] = - tmp2 * fjac[j-1][0][0]
          - tmp1 * njac[j-1][0][0]
          - tmp1 * dy1;
        lhs[j][AA][1][0] = - tmp2 * fjac[j-1][1][0]
          - tmp1 * njac[j-1][1][0];
        lhs[j][AA][2][0] = - tmp2 * fjac[j-1][2][0]
          - tmp1 * njac[j-1][2][0];
        lhs[j][AA][3][0] = - tmp2 * fjac[j-1][3][0]
          - tmp1 * njac[j-1][3][0];
        lhs[j][AA][4][0] = - tmp2 * fjac[j-1][4][0]
          - tmp1 * njac[j-1][4][0];

        lhs[j][AA][0][1] = - tmp2 * fjac[j-1][0][1]
          - tmp1 * njac[j-1][0][1];
        lhs[j][AA][1][1] = - tmp2 * fjac[j-1][1][1]
          - tmp1 * njac[j-1][1][1]
          - tmp1 * dy2;
        lhs[j][AA][2][1] = - tmp2 * fjac[j-1][2][1]
          - tmp1 * njac[j-1][2][1];
        lhs[j][AA][3][1] = - tmp2 * fjac[j-1][3][1]
          - tmp1 * njac[j-1][3][1];
        lhs[j][AA][4][1] = - tmp2 * fjac[j-1][4][1]
          - tmp1 * njac[j-1][4][1];

        lhs[j][AA][0][2] = - tmp2 * fjac[j-1][0][2]
          - tmp1 * njac[j-1][0][2];
        lhs[j][AA][1][2] = - tmp2 * fjac[j-1][1][2]
          - tmp1 * njac[j-1][1][2];
        lhs[j][AA][2][2] = - tmp2 * fjac[j-1][2][2]
          - tmp1 * njac[j-1][2][2]
          - tmp1 * dy3;
        lhs[j][AA][3][2] = - tmp2 * fjac[j-1][3][2]
          - tmp1 * njac[j-1][3][2];
        lhs[j][AA][4][2] = - tmp2 * fjac[j-1][4][2]
          - tmp1 * njac[j-1][4][2];

        lhs[j][AA][0][3] = - tmp2 * fjac[j-1][0][3]
          - tmp1 * njac[j-1][0][3];
        lhs[j][AA][1][3] = - tmp2 * fjac[j-1][1][3]
          - tmp1 * njac[j-1][1][3];
        lhs[j][AA][2][3] = - tmp2 * fjac[j-1][2][3]
          - tmp1 * njac[j-1][2][3];
        lhs[j][AA][3][3] = - tmp2 * fjac[j-1][3][3]
          - tmp1 * njac[j-1][3][3]
          - tmp1 * dy4;
        lhs[j][AA][4][3] = - tmp2 * fjac[j-1][4][3]
          - tmp1 * njac[j-1][4][3];

        lhs[j][AA][0][4] = - tmp2 * fjac[j-1][0][4]
          - tmp1 * njac[j-1][0][4];
        lhs[j][AA][1][4] = - tmp2 * fjac[j-1][1][4]
          - tmp1 * njac[j-1][1][4];
        lhs[j][AA][2][4] = - tmp2 * fjac[j-1][2][4]
          - tmp1 * njac[j-1][2][4];
        lhs[j][AA][3][4] = - tmp2 * fjac[j-1][3][4]
          - tmp1 * njac[j-1][3][4];
        lhs[j][AA][4][4] = - tmp2 * fjac[j-1][4][4]
          - tmp1 * njac[j-1][4][4]
          - tmp1 * dy5;

        // Diagonal block B(j): identity plus viscous terms at j.
        lhs[j][BB][0][0] = 1.0
          + tmp1 * 2.0 * njac[j][0][0]
          + tmp1 * 2.0 * dy1;
        lhs[j][BB][1][0] = tmp1 * 2.0 * njac[j][1][0];
        lhs[j][BB][2][0] = tmp1 * 2.0 * njac[j][2][0];
        lhs[j][BB][3][0] = tmp1 * 2.0 * njac[j][3][0];
        lhs[j][BB][4][0] = tmp1 * 2.0 * njac[j][4][0];

        lhs[j][BB][0][1] = tmp1 * 2.0 * njac[j][0][1];
        lhs[j][BB][1][1] = 1.0
          + tmp1 * 2.0 * njac[j][1][1]
          + tmp1 * 2.0 * dy2;
        lhs[j][BB][2][1] = tmp1 * 2.0 * njac[j][2][1];
        lhs[j][BB][3][1] = tmp1 * 2.0 * njac[j][3][1];
        lhs[j][BB][4][1] = tmp1 * 2.0 * njac[j][4][1];

        lhs[j][BB][0][2] = tmp1 * 2.0 * njac[j][0][2];
        lhs[j][BB][1][2] = tmp1 * 2.0 * njac[j][1][2];
        lhs[j][BB][2][2] = 1.0
          + tmp1 * 2.0 * njac[j][2][2]
          + tmp1 * 2.0 * dy3;
        lhs[j][BB][3][2] = tmp1 * 2.0 * njac[j][3][2];
        lhs[j][BB][4][2] = tmp1 * 2.0 * njac[j][4][2];

        lhs[j][BB][0][3] = tmp1 * 2.0 * njac[j][0][3];
        lhs[j][BB][1][3] = tmp1 * 2.0 * njac[j][1][3];
        lhs[j][BB][2][3] = tmp1 * 2.0 * njac[j][2][3];
        lhs[j][BB][3][3] = 1.0
          + tmp1 * 2.0 * njac[j][3][3]
          + tmp1 * 2.0 * dy4;
        lhs[j][BB][4][3] = tmp1 * 2.0 * njac[j][4][3];

        lhs[j][BB][0][4] = tmp1 * 2.0 * njac[j][0][4];
        lhs[j][BB][1][4] = tmp1 * 2.0 * njac[j][1][4];
        lhs[j][BB][2][4] = tmp1 * 2.0 * njac[j][2][4];
        lhs[j][BB][3][4] = tmp1 * 2.0 * njac[j][3][4];
        lhs[j][BB][4][4] = 1.0
          + tmp1 * 2.0 * njac[j][4][4]
          + tmp1 * 2.0 * dy5;

        // Super-diagonal block C(j): coupling to j+1.
        lhs[j][CC][0][0] = tmp2 * fjac[j+1][0][0]
          - tmp1 * njac[j+1][0][0]
          - tmp1 * dy1;
        lhs[j][CC][1][0] = tmp2 * fjac[j+1][1][0]
          - tmp1 * njac[j+1][1][0];
        lhs[j][CC][2][0] = tmp2 * fjac[j+1][2][0]
          - tmp1 * njac[j+1][2][0];
        lhs[j][CC][3][0] = tmp2 * fjac[j+1][3][0]
          - tmp1 * njac[j+1][3][0];
        lhs[j][CC][4][0] = tmp2 * fjac[j+1][4][0]
          - tmp1 * njac[j+1][4][0];

        lhs[j][CC][0][1] = tmp2 * fjac[j+1][0][1]
          - tmp1 * njac[j+1][0][1];
        lhs[j][CC][1][1] = tmp2 * fjac[j+1][1][1]
          - tmp1 * njac[j+1][1][1]
          - tmp1 * dy2;
        lhs[j][CC][2][1] = tmp2 * fjac[j+1][2][1]
          - tmp1 * njac[j+1][2][1];
        lhs[j][CC][3][1] = tmp2 * fjac[j+1][3][1]
          - tmp1 * njac[j+1][3][1];
        lhs[j][CC][4][1] = tmp2 * fjac[j+1][4][1]
          - tmp1 * njac[j+1][4][1];

        lhs[j][CC][0][2] = tmp2 * fjac[j+1][0][2]
          - tmp1 * njac[j+1][0][2];
        lhs[j][CC][1][2] = tmp2 * fjac[j+1][1][2]
          - tmp1 * njac[j+1][1][2];
        lhs[j][CC][2][2] = tmp2 * fjac[j+1][2][2]
          - tmp1 * njac[j+1][2][2]
          - tmp1 * dy3;
        lhs[j][CC][3][2] = tmp2 * fjac[j+1][3][2]
          - tmp1 * njac[j+1][3][2];
        lhs[j][CC][4][2] = tmp2 * fjac[j+1][4][2]
          - tmp1 * njac[j+1][4][2];

        lhs[j][CC][0][3] = tmp2 * fjac[j+1][0][3]
          - tmp1 * njac[j+1][0][3];
        lhs[j][CC][1][3] = tmp2 * fjac[j+1][1][3]
          - tmp1 * njac[j+1][1][3];
        lhs[j][CC][2][3] = tmp2 * fjac[j+1][2][3]
          - tmp1 * njac[j+1][2][3];
        lhs[j][CC][3][3] = tmp2 * fjac[j+1][3][3]
          - tmp1 * njac[j+1][3][3]
          - tmp1 * dy4;
        lhs[j][CC][4][3] = tmp2 * fjac[j+1][4][3]
          - tmp1 * njac[j+1][4][3];

        lhs[j][CC][0][4] = tmp2 * fjac[j+1][0][4]
          - tmp1 * njac[j+1][0][4];
        lhs[j][CC][1][4] = tmp2 * fjac[j+1][1][4]
          - tmp1 * njac[j+1][1][4];
        lhs[j][CC][2][4] = tmp2 * fjac[j+1][2][4]
          - tmp1 * njac[j+1][2][4];
        lhs[j][CC][3][4] = tmp2 * fjac[j+1][3][4]
          - tmp1 * njac[j+1][3][4];
        lhs[j][CC][4][4] = tmp2 * fjac[j+1][4][4]
          - tmp1 * njac[j+1][4][4]
          - tmp1 * dy5;
      }

      //---------------------------------------------------------------------
      //---------------------------------------------------------------------

      //---------------------------------------------------------------------
      // performs gaussian elimination on this cell.
      //
      // assumes that unpacking routines for non-first cells
      // preload C' and rhs' from previous cell.
      //
      // assumed send happens outside this routine, but that
      // c'(JMAX) and rhs'(JMAX) will be sent to next cell
      //---------------------------------------------------------------------

      //---------------------------------------------------------------------
      // multiply c[k][0][i] by b_inverse and copy back to c
      // multiply rhs(0) by b_inverse(0) and copy to rhs
      //---------------------------------------------------------------------
      binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][0][i] );

      //---------------------------------------------------------------------
      // begin inner most do loop
      // do all the elements of the cell unless last
      //---------------------------------------------------------------------
      for (j = 1; j <= jsize-1; j++) {
        //-------------------------------------------------------------------
        // subtract A*lhs_vector(j-1) from lhs_vector(j)
        //
        // rhs(j) = rhs(j) - A*rhs(j-1)
        //-------------------------------------------------------------------
        matvec_sub(lhs[j][AA], rhs[k][j-1][i], rhs[k][j][i]);

        //-------------------------------------------------------------------
        // B(j) = B(j) - C(j-1)*A(j)
        //-------------------------------------------------------------------
        matmul_sub(lhs[j][AA], lhs[j-1][CC], lhs[j][BB]);

        //-------------------------------------------------------------------
        // multiply c[k][j][i] by b_inverse and copy back to c
        // multiply rhs[k][0][i] by b_inverse[k][0][i] and copy to rhs
        //-------------------------------------------------------------------
        binvcrhs( lhs[j][BB], lhs[j][CC], rhs[k][j][i] );
      }

      //---------------------------------------------------------------------
      // rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
      //---------------------------------------------------------------------
      matvec_sub(lhs[jsize][AA], rhs[k][jsize-1][i], rhs[k][jsize][i]);

      //---------------------------------------------------------------------
      // B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
      // matmul_sub(AA,i,jsize,k,c,
      // $ CC,i,jsize-1,k,c,BB,i,jsize,k)
      //---------------------------------------------------------------------
      matmul_sub(lhs[jsize][AA], lhs[jsize-1][CC], lhs[jsize][BB]);

      //---------------------------------------------------------------------
      // multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
      //---------------------------------------------------------------------
      binvrhs( lhs[jsize][BB], rhs[k][jsize][i] );

      //---------------------------------------------------------------------
      // back solve: if last cell, then generate U(jsize)=rhs(jsize)
      // else assume U(jsize) is loaded in un pack backsub_info
      // so just use it
      // after u(jstart) will be sent to next cell
      //---------------------------------------------------------------------
      for (j = jsize-1; j >= 0; j--) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          for (n = 0; n < BLOCK_SIZE; n++) {
            rhs[k][j][i][m] = rhs[k][j][i][m]
              - lhs[j][CC][n][m]*rhs[k][j+1][i][n];
          }
        }
      }
    }
    //kai
    // NOTE(review): unsynchronized store to shared k13 from every iteration
    // of the parallel k loop — a data race; also resets the loop's own
    // starting offset. Confirm the intent of this instrumentation.
    k13 = 0;
    // printf("k13=%p\n", &k13);
  }
  if (timeron) timer_stop(t_ysolve);
}
convolution_4x4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// 4x4 convolution with stride 4 (non-overlapping 4x4 windows), optionally
// NEON-accelerated.  Each output pixel is the dot product of one 4x4 input
// patch with a per-(outch, inch) 4x4 kernel, accumulated over all input
// channels, plus an optional per-output-channel bias.
//
// bottom_blob : input feature map (w x h x inch), float32
// top_blob    : pre-sized output feature map (outw x outh x outch); overwritten
// _kernel     : packed weights, 16 floats per (p, q) channel pair
// _bias       : per-output-channel bias, may be empty (bias pointer NULL)
// opt         : ncnn options; only num_threads is used here
//
// NOTE(review): assumes top_blob was allocated with outw == w/4, outh == h/4
// by the caller — not checked here; confirm against the calling layer.
static void conv4x4s4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // After scanning one output row, r0..r3 sit at the end of the consumed
    // 4-row band; advance past the rest of row 0 (w - 4*outw) plus three full
    // rows so the next iteration starts on the next 4-row band (stride 4
    // vertically).
    const int tailstep = w - 4*outw + w*3;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // Output channels are independent — parallelize over p.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        // Seed the whole output channel with the bias; the q-loop below then
        // accumulates (+=) each input channel's contribution on top.
        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            // 16 weights for this (output p, input q) pair.
            const float* kernel0 = kernel + p*inch*16 + q*16;

            // Four row pointers covering the current 4-row input band.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;

#if __ARM_NEON
            // One kernel row per NEON register.
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0+4);
            float32x4_t _k891011 = vld1q_f32(kernel0+8);
            float32x4_t _k12131415 = vld1q_f32(kernel0+12);
#else
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 4;
            const float* k2 = kernel0 + 8;
            const float* k3 = kernel0 + 12;
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                // Main loop does 4 output pixels per iteration; 'remain'
                // pixels are handled one at a time afterwards.
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                // Per iteration: four 4x4 patches are reduced with
                // fmul/fmla into v5/v6/v14/v15, pairwise-added (faddp) down
                // to one lane each, and accumulated into 4 output pixels.
                asm volatile(
                    "prfm   pldl1keep, [%1, #128]       \n"
                    "0:                                 \n"

                    "prfm   pldl1keep, [%2, #512]       \n"
                    "prfm   pldl1keep, [%3, #512]       \n"

                    "ld1    {v7.4s}, [%1]               \n" // v7 = outptr

                    "ld1    {v8.4s}, [%2], #16          \n"// v8 = r0
                    "ld1    {v9.4s}, [%3], #16          \n"// v9 = r1

                    "prfm   pldl1keep, [%4, #512]       \n"
                    "prfm   pldl1keep, [%5, #512]       \n"

                    "fmul   v12.4s, v8.4s, %12.4s       \n"
                    "fmul   v13.4s, v9.4s, %13.4s       \n"

                    "ld1    {v10.4s}, [%4], #16         \n"// v10 = r2
                    "ld1    {v11.4s}, [%5], #16         \n"// v11 = r3

                    "fmla   v12.4s, v10.4s, %14.4s      \n"
                    "fmla   v13.4s, v11.4s, %15.4s      \n"

                    "fadd   v5.4s, v12.4s, v13.4s       \n"

                    "ld1    {v8.4s}, [%2], #16          \n"// v8 = r0
                    "ld1    {v9.4s}, [%3], #16          \n"// v9 = r1

                    "fmul   v12.4s, v8.4s, %12.4s       \n"
                    "fmul   v13.4s, v9.4s, %13.4s       \n"

                    "ld1    {v10.4s}, [%4], #16         \n"// v10 = r2
                    "ld1    {v11.4s}, [%5], #16         \n"// v11 = r3

                    "fmla   v12.4s, v10.4s, %14.4s      \n"
                    "fmla   v13.4s, v11.4s, %15.4s      \n"

                    "fadd   v6.4s, v12.4s, v13.4s       \n"

                    "ld1    {v8.4s}, [%2], #16          \n"// v8 = r0
                    "ld1    {v9.4s}, [%3], #16          \n"// v9 = r1

                    "fmul   v12.4s, v8.4s, %12.4s       \n"
                    "fmul   v13.4s, v9.4s, %13.4s       \n"

                    "ld1    {v10.4s}, [%4], #16         \n"// v10 = r2
                    "ld1    {v11.4s}, [%5], #16         \n"// v11 = r3

                    "fmla   v12.4s, v10.4s, %14.4s      \n"
                    "fmla   v13.4s, v11.4s, %15.4s      \n"

                    "fadd   v14.4s, v12.4s, v13.4s      \n"

                    "faddp  v5.4s, v5.4s, v6.4s         \n" // Move to here to enhance ILP

                    "ld1    {v8.4s}, [%2], #16          \n"// v8 = r0
                    "ld1    {v9.4s}, [%3], #16          \n"// v9 = r1

                    "fmul   v12.4s, v8.4s, %12.4s       \n"
                    "fmul   v13.4s, v9.4s, %13.4s       \n"

                    "ld1    {v10.4s}, [%4], #16         \n"// v10 = r2
                    "ld1    {v11.4s}, [%5], #16         \n"// v11 = r3

                    "fmla   v12.4s, v10.4s, %14.4s      \n"
                    "fmla   v13.4s, v11.4s, %15.4s      \n"

                    "fadd   v15.4s, v12.4s, v13.4s      \n"

//                    "faddp  v5.4s , v5.4s, v6.4s        \n" // Move this line upward.
                    "faddp  v14.4s, v14.4s, v15.4s      \n"

                    "faddp  v5.4s , v5.4s, v14.4s       \n"

                    "fadd   v7.4s, v7.4s, v5.4s         \n"

                    "st1    {v7.4s}, [%1], #16          \n"

                    "prfm   pldl1keep, [%1, #128]       \n"

                    "subs   %w0, %w0, #1                \n"
                    "bne    0b                          \n"
                    : "=r"(nn),     // %0
                      "=r"(outptr), // %1
                      "=r"(r0),     // %2
                      "=r"(r1),     // %3
                      "=r"(r2),     // %4
                      "=r"(r3)      // %5
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "w"(_k0123),     // %12
                      "w"(_k4567),     // %13
                      "w"(_k891011),   // %14
                      "w"(_k12131415)  // %15
                    : "cc", "memory", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                // ARMv7 variant of the same 4-pixels-per-iteration reduction
                // (vadd/vpadd replace faddp for the horizontal sums).
                asm volatile(
                    "pld        [%1, #128]          \n"
                    "0:                             \n"

                    "pld        [%2, #512]          \n"
                    "pld        [%3, #512]          \n"

                    "vld1.f32   {d14-d15}, [%1]     \n"// q7 = outptr

                    "vld1.f32   {d16-d17}, [%2]!    \n"// q8 = r0
                    "vld1.f32   {d18-d19}, [%3]!    \n"// q9 = r1

                    "pld        [%4, #512]          \n"
                    "pld        [%5, #512]          \n"

                    "vmul.f32   q12, q8, %q12       \n"
                    "vmul.f32   q13, q9, %q13       \n"

                    "vld1.f32   {d20-d21}, [%4]!    \n"// q10 = r2
                    "vld1.f32   {d22-d23}, [%5]!    \n"// q11 = r3

                    "vmla.f32   q12, q10, %q14      \n"
                    "vmla.f32   q13, q11, %q15      \n"

                    "vadd.f32   q5, q12, q13        \n"

                    "vld1.f32   {d16-d17}, [%2]!    \n"// q8 = r0
                    "vld1.f32   {d18-d19}, [%3]!    \n"// q9 = r1

                    "vmul.f32   q12, q8, %q12       \n"
                    "vmul.f32   q13, q9, %q13       \n"

                    "vld1.f32   {d20-d21}, [%4]!    \n"// q10 = r2
                    "vld1.f32   {d22-d23}, [%5]!    \n"// q11 = r3

                    "vmla.f32   q12, q10, %q14      \n"
                    "vmla.f32   q13, q11, %q15      \n"

                    "vadd.f32   q6, q12, q13        \n"

                    "vld1.f32   {d16-d17}, [%2]!    \n"// q8 = r0
                    "vld1.f32   {d18-d19}, [%3]!    \n"// q9 = r1

                    "vmul.f32   q12, q8, %q12       \n"
                    "vmul.f32   q13, q9, %q13       \n"

                    "vld1.f32   {d20-d21}, [%4]!    \n"// q10 = r2
                    "vld1.f32   {d22-d23}, [%5]!    \n"// q11 = r3

                    "vmla.f32   q12, q10, %q14      \n"
                    "vmla.f32   q13, q11, %q15      \n"

                    "vadd.f32   q14, q12, q13       \n"

                    "vld1.f32   {d16-d17}, [%2]!    \n"// q8 = r0
                    "vld1.f32   {d18-d19}, [%3]!    \n"// q9 = r1

                    "vmul.f32   q12, q8, %q12       \n"
                    "vmul.f32   q13, q9, %q13       \n"

                    "vld1.f32   {d20-d21}, [%4]!    \n"// q10 = r2
                    "vld1.f32   {d22-d23}, [%5]!    \n"// q11 = r3

                    "vmla.f32   q12, q10, %q14      \n"
                    "vmla.f32   q13, q11, %q15      \n"

                    "vadd.f32   q15, q12, q13       \n"

                    "vadd.f32   d10, d10, d11       \n"
                    "vadd.f32   d28, d28, d29       \n"
                    "vadd.f32   d11, d12, d13       \n"
                    "vadd.f32   d29, d30, d31       \n"

                    "vpadd.f32  d10, d10, d11       \n"
                    "vpadd.f32  d11, d28, d29       \n"

                    "vadd.f32   q7, q7, q5          \n"

                    "vst1.f32   {d14-d15}, [%1]!    \n"

                    "pld        [%1, #128]          \n"

                    "subs       %0, #1              \n"
                    "bne        0b                  \n"
                    : "=r"(nn),     // %0
                      "=r"(outptr), // %1
                      "=r"(r0),     // %2
                      "=r"(r1),     // %3
                      "=r"(r2),     // %4
                      "=r"(r3)      // %5
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "w"(_k0123),     // %12
                      "w"(_k4567),     // %13
                      "w"(_k891011),   // %14
                      "w"(_k12131415)  // %15
                    : "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar/one-pixel tail: one 4x4 patch -> one output value.
                for (; remain>0; remain--)
                {
#if __ARM_NEON
#if __aarch64__
                    float sum = 0.f;

                    asm volatile(
                        "ld1    {v8.4s}, [%0], #16      \n"// v8 = r0
                        "ld1    {v9.4s}, [%1], #16      \n"// v9 = r1

                        "fmul   v12.4s, v8.4s, %9.4s    \n"
                        "fmul   v13.4s, v9.4s, %10.4s   \n"

                        "ld1    {v10.4s}, [%2], #16     \n"// v10 = r2
                        "ld1    {v11.4s}, [%3], #16     \n"// v11 = r3

                        "fmla   v12.4s, v10.4s, %11.4s  \n"
                        "fmla   v13.4s, v11.4s, %12.4s  \n"

                        "fadd   v5.4s, v12.4s, v13.4s   \n"

                        "faddp  v5.4s, v5.4s, v5.4s     \n"
                        "faddp  s5, v5.2s               \n"

                        "fmov   %w4, s5                 \n"
                        : "=r"(r0),  // %0
                          "=r"(r1),  // %1
                          "=r"(r2),  // %2
                          "=r"(r3),  // %3
                          "=r"(sum)  // %4
                        : "0"(r0),
                          "1"(r1),
                          "2"(r2),
                          "3"(r3),
                          "w"(_k0123),     // %9
                          "w"(_k4567),     // %10
                          "w"(_k891011),   // %11
                          "w"(_k12131415)  // %12
                        : "cc", "memory", "v5", "v6", "v8", "v9", "v10", "v11", "v12", "v13"
                    );

                    *outptr += sum;
#else
                    float sum = 0.f;

                    asm volatile(
                        "vld1.f32   {d16-d17}, [%0]!    \n"// q8 = r0
                        "vld1.f32   {d18-d19}, [%1]!    \n"// q9 = r1

                        "vmul.f32   q12, q8, %q9        \n"
                        "vmul.f32   q13, q9, %q10       \n"

                        "vld1.f32   {d20-d21}, [%2]!    \n"// q10 = r2
                        "vld1.f32   {d22-d23}, [%3]!    \n"// q11 = r3

                        "vmla.f32   q12, q10, %q11      \n"
                        "vmla.f32   q13, q11, %q12      \n"

                        "vadd.f32   q5, q12, q13        \n"

                        "vadd.f32   d10, d10, d11       \n"
                        "vpadd.f32  d10, d10, d10       \n"

                        "vmov.f32   %4, d10[0]          \n"
                        : "=r"(r0),  // %0
                          "=r"(r1),  // %1
                          "=r"(r2),  // %2
                          "=r"(r3),  // %3
                          "=r"(sum)  // %4
                        : "0"(r0),
                          "1"(r1),
                          "2"(r2),
                          "3"(r3),
                          "w"(_k0123),     // %9
                          "w"(_k4567),     // %10
                          "w"(_k891011),   // %11
                          "w"(_k12131415)  // %12
                        : "cc", "memory", "q5", "q6", "q8", "q9", "q10", "q11", "q12", "q13"
                    );

                    *outptr += sum;
#endif // __aarch64__
#else
                    // Plain C fallback: full 4x4 dot product.
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];

                    *outptr += sum;

                    // Horizontal stride 4: jump to the next input patch.
                    // (The NEON paths advance r0..r3 inside the asm.)
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
#endif // __ARM_NEON
                    outptr++;
                }

                // Skip down to the next 4-row input band (vertical stride 4).
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
            }
        }
    }
}
SparsifyCollapsedGalerkinMex_new.c
#include "mex.h"
#include <omp.h>
#include <math.h>

//cd .\MEXfunc ; mex -O 'CXXOPTIMFLAGS=-DNDEBUG -O3' -largeArrayDims SparsifyCollapsedGalerkinMex_new.c COMPFLAGS="$COMPFLAGS -openmp" LINKFALGS="$LINKFALGS -openmp"; cd .\..

///////////////////////////////////////////////////////////////////////////
// Min-heap of sparse-row cursors, used to merge several matrix rows in
// ascending column order while forming (sparse row vector) x (sparse matrix).
///////////////////////////////////////////////////////////////////////////

// One cursor into a sparse matrix row: v_vec is the scalar that multiplies
// row i_vec; global_mat/j_mat track that row's current nonzero.
typedef struct member_t{
    double v_vec;       // value from the sparse vector
    mwIndex i_vec;      // index in the vector == row index in the matrix
    mwIndex j_mat;      // column of the row's current nonzero (heap key)
    mwIndex global_mat; // position of that nonzero in the matrix data arrays
} member;

// Heap ordering predicate: smaller current column wins.
int FirstSmaller(member* A, member* B){
    return (A->j_mat < B->j_mat);
}

///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////

// Binary min-heap keyed by member->j_mat.
typedef struct Heap_t{
    int heapSize;  // number of elements currently stored
    int maxSize;   // capacity of arr
    member** arr;
} Heap;

// Push element onto the heap (sift-up).  On overflow prints an error and
// drops the element, leaving the product silently incomplete.
void Insert(Heap* heap, member* element)
{
    int par;
    int now = heap->heapSize;
    // FIX: the original test (now == maxSize-1) refused an insert even though
    // index maxSize-1 is still in bounds, wasting the last slot.
    if (now >= heap->maxSize){
        printf("ERROR: Heap not big enough for size %d Increase MAX_HEAP_SIZE)!\n",heap->maxSize);
        return;
    }
    heap->heapSize++;
    while (now > 0){
        par = (now - 1)>>1;
        if (FirstSmaller(element,heap->arr[par])){
            heap->arr[now] = heap->arr[par]; // move parent down
            now = par;
        }else{
            break;
        }
    }
    heap->arr[now] = element;
}

// Pop and return the minimum element (smallest j_mat); NULL when empty.
member* DeleteMin(Heap* heap)
{
    int l,r,child,k=0;
    member* least;
    member* x;
    // FIX: check for emptiness BEFORE reading arr[0]; the original read an
    // indeterminate pointer first (never dereferenced, but still UB-ish).
    if (heap->heapSize==0){
        return NULL;
    }
    least = heap->arr[k];
    --heap->heapSize;
    x = heap->arr[heap->heapSize]; // last element sifts down from the root
    while (1) {
        l = 2 * k + 1; // FIX: stray ';;' removed
        if (l >= heap->heapSize) {
            break;
        }else {
            r = 2 * (k + 1);
            // pick the smaller child (or the left one if right is absent)
            child = (r >= heap->heapSize || (FirstSmaller(heap->arr[l],heap->arr[r]))) ? l : r;
            if (FirstSmaller(heap->arr[child],x)) {
                heap->arr[k] = heap->arr[child];
                k = child;
            }
            else{
                break;
            }
        }
    }
    heap->arr[k] = x;
    return least;
}

///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////

// Forward declarations.  FIX: the last parameter of SparsifyAki was declared
// 'int' here but defined as 'mwIndex' below — a prototype/definition
// mismatch (undefined behavior in C); both now use mwIndex.
int intersect(mwIndex* A, mwIndex* B, mwIndex startA, mwIndex endA, mwIndex startB, mwIndex endB, mwIndex* AB, mwIndex* iA, mwIndex* iB);

mwIndex findLinearSearch(mwIndex* A, mwIndex startA, mwIndex endA, mwIndex toFind);

mwIndex findBinarySearch(mwIndex* A, mwIndex startA, mwIndex endA, mwIndex toFind);

mwIndex MultyplyRowTimesMat(mwIndex *vec_ind , double *vec_vals, mwIndex nnz_vec,
                            mwIndex *C_mat , mwIndex *starts_mat , double *V_mat,
                            mwIndex *ans_ind , double *ans_vals, Heap* heap, member* members);

int SparsifyAki(mwIndex k, mwIndex i, double Aki,
                mwIndex* R_A0 , mwIndex *starts_A0 , double *V_A0,
                mwIndex *R_RP0 , mwIndex *starts_RP0 , double *V_RP0,
                mwIndex *C_R0P , mwIndex *starts_R0P , double *V_R0P,
                mwIndex *C_A0_cols, mwIndex *starts_A0_cols, double *V_A0_cols,
                mwIndex *intI1I2 , mwIndex *auxI1 , mwIndex *auxI2,
                mwIndex *auxm1 , mwIndex *auxm2 , double* thetta,
                mwIndex* globalDiagonal , mwIndex MAX_HEAP_SIZE_nc);

// For rows [startRow, endRow) of R, form the Galerkin row (R*A*P)(k,:) and
// fold it into A0: entries whose column already exists in A0's pattern are
// added in place; entries outside the pattern are redistributed along
// distance-2/3 paths by SparsifyAki.  Runs inside an OpenMP parallel region;
// all writes to V_A0 are done with '#pragma omp atomic' (rows owned by other
// threads may be touched through the path redistribution).
void MultyplyAndSparsify(mwIndex startRow, mwIndex endRow, mwIndex n,
                         mwIndex* C_A0 , mwIndex *starts_A0, double *V_A0,
                         mwIndex *C_R , mwIndex *starts_R , double *V_R,
                         mwIndex *C_A , mwIndex *starts_A , double *V_A,
                         mwIndex *C_P , mwIndex *starts_P , double *V_P,
                         mwIndex *R_RP0 , mwIndex *starts_RP0 , double *V_RP0,
                         mwIndex *C_R0P , mwIndex *starts_R0P , double *V_R0P,
                         mwIndex *C_A0_cols, mwIndex *starts_A0_cols,double *V_A0_cols,
                         mwIndex* globalDiagonal, int MAX_HEAP_SIZE_nc,int MAX_HEAP_SIZE_n){
    mwIndex k,global_idx,nnz_in_row,i, *aux_ind , global_idx0,tmp0, tmpOmega ,ans;
    mwIndex *intI1I2, *auxI1, *auxI2, *auxm1, *auxm2;
    double *thetta;
    member* members;
    double *aux_vals;
    Heap heap;
    heap.heapSize = 0;
    // Cubic bound on the fill of a (row of R) * A * P product.
    heap.maxSize = MAX_HEAP_SIZE_n*MAX_HEAP_SIZE_n*MAX_HEAP_SIZE_n;
    members  = (member*)malloc(heap.maxSize*sizeof(member));
    heap.arr = (member**)malloc(heap.maxSize*sizeof(member*));
    aux_ind  = (mwIndex*)malloc(MAX_HEAP_SIZE_n*MAX_HEAP_SIZE_n*MAX_HEAP_SIZE_n*sizeof(mwIndex));
    aux_vals = (double*)malloc(MAX_HEAP_SIZE_n*MAX_HEAP_SIZE_n*MAX_HEAP_SIZE_n*sizeof(double));
    intI1I2  = (mwIndex*)malloc(MAX_HEAP_SIZE_nc*sizeof(mwIndex));
    auxI1    = (mwIndex*)malloc(MAX_HEAP_SIZE_nc*sizeof(mwIndex));
    auxI2    = (mwIndex*)malloc(MAX_HEAP_SIZE_nc*sizeof(mwIndex));
    auxm1    = (mwIndex*)malloc(MAX_HEAP_SIZE_nc*MAX_HEAP_SIZE_nc*MAX_HEAP_SIZE_nc*sizeof(mwIndex));
    auxm2    = (mwIndex*)malloc(MAX_HEAP_SIZE_nc*MAX_HEAP_SIZE_nc*MAX_HEAP_SIZE_nc*sizeof(mwIndex));
    thetta   = (double*)malloc(MAX_HEAP_SIZE_nc*MAX_HEAP_SIZE_nc*MAX_HEAP_SIZE_nc*sizeof(double));
    for (k = startRow ; k < endRow ;++k){
        global_idx = starts_R[k];
        nnz_in_row = starts_R[k+1] - starts_R[k];
        if (nnz_in_row > (mwIndex)heap.maxSize){
            // FIX: %d with an mwIndex argument is undefined behavior — cast.
            printf("heap size is set to %d and we need %ld",heap.maxSize,(long)nnz_in_row);
        }
        // row_k(R) * A  — aux_ind/aux_vals receive the intermediate row.
        nnz_in_row = MultyplyRowTimesMat(C_R+global_idx, V_R+global_idx, nnz_in_row,
                                         C_A, starts_A, V_A, aux_ind , aux_vals, &heap, members);
        if (nnz_in_row > (mwIndex)heap.maxSize){
            printf("heap size is set to %d and we need %ld",heap.maxSize,(long)nnz_in_row);
        }
        // (row_k(R)*A) * P — in-place reuse of aux_ind/aux_vals is safe:
        // MultyplyRowTimesMat finishes reading the input before writing output.
        nnz_in_row = MultyplyRowTimesMat(aux_ind, aux_vals, nnz_in_row,
                                         C_P, starts_P, V_P, aux_ind , aux_vals, &heap, members);
        // SPLITTING: merge the computed row into A0's fixed pattern (both
        // column lists are sorted), sparsifying what falls outside it.
        global_idx0 = starts_A0[k];
        i = 0;
        while (i<nnz_in_row && global_idx0 < starts_A0[k+1]){
            tmp0     = C_A0[global_idx0];
            tmpOmega = aux_ind[i];
            if (tmp0 == tmpOmega){
                // column already in A0's pattern — accumulate in place
                #pragma omp atomic
                V_A0[global_idx0] += aux_vals[i];
                global_idx0++;
                i++;
            }else{
                if (tmp0 < tmpOmega){
                    // pattern entry with no contribution — skip it
                    global_idx0++;
                }else{
                    // contribution outside A0's pattern — redistribute
                    ans = SparsifyAki(k,aux_ind[i],aux_vals[i],C_A0 , starts_A0, V_A0,
                                      R_RP0, starts_RP0, V_RP0, C_R0P, starts_R0P, V_R0P,
                                      C_A0_cols, starts_A0_cols, V_A0_cols,
                                      intI1I2, auxI1, auxI2, auxm1, auxm2, thetta,
                                      globalDiagonal,MAX_HEAP_SIZE_nc);
                    i++;
                }
            }
        }
        // tail: every remaining contribution is outside the pattern
        while (i<nnz_in_row){
            ans = SparsifyAki(k,aux_ind[i],aux_vals[i],C_A0 , starts_A0, V_A0,
                              R_RP0, starts_RP0, V_RP0, C_R0P, starts_R0P, V_R0P,
                              C_A0_cols, starts_A0_cols, V_A0_cols,
                              intI1I2, auxI1, auxI2, auxm1, auxm2, thetta,
                              globalDiagonal,MAX_HEAP_SIZE_nc);
            i++;
        }
    }
    (void)ans; // diagnostics only; failures are reported by SparsifyAki itself
    free(aux_ind);
    free(aux_vals);
    free(members);
    free(heap.arr);
    free(intI1I2);
    free(auxI1);
    free(auxI2);
    free(auxm1);
    free(auxm2);
    free(thetta);
}

// Redistribute the out-of-pattern Galerkin entry A(k,i)=Aki along paths that
// DO lie in A0's pattern.  First tries distance-2 paths i -> m1 -> k (m1 in
// both R0P(i,:) and RP0(k,:)); if none exist, distance-3 paths
// i -> m1 -> m2 -> k through A0's column structure.  Weights thetta are
// proportional to |path product| and scaled so they sum to Aki; updates to
// V_A0 preserve row sums (diagonal compensation via globalDiagonal).
// Returns 1 if the element was absorbed, 0 otherwise (also prints a warning).
// FIX: the function is declared 'int' but the original used bare 'return;'
// and could fall off the end without returning a value.
int SparsifyAki(mwIndex k, mwIndex i, double Aki,
                mwIndex* R_A0 , mwIndex *starts_A0 , double *V_A0,
                mwIndex *R_RP0 , mwIndex *starts_RP0 , double *V_RP0,
                mwIndex *C_R0P , mwIndex *starts_R0P , double *V_R0P,
                mwIndex *C_A0_cols, mwIndex *starts_A0_cols, double *V_A0_cols,
                mwIndex *intI1I2 , mwIndex *auxI1 , mwIndex *auxI2,
                mwIndex *auxm1 , mwIndex *auxm2 , double* thetta,
                mwIndex* globalDiagonal ,mwIndex MAX_HEAP_SIZE_nc){
    mwIndex m1,m2, it, n_intersect, n_intersect2, m1it, list_it, global_km2,global_m1i,global_m1m1,global_m2m2,global_m2m1;
    double delta,R0Pm1i;
    int ans = 0;
    // Negligible element: nothing to redistribute.
    if (fabs(Aki)<1e-14){
        return 1;
    }
    // Distance-2 paths between i and k.
    n_intersect = intersect(C_R0P , R_RP0, starts_R0P[i] , starts_R0P[i+1],
                            starts_RP0[k], starts_RP0[k+1],intI1I2, auxI1, auxI2);
    if (n_intersect > 0){
        delta = 0;
        for (it = 0 ; it < n_intersect ; it++){
            thetta[it] = fabs(V_R0P[auxI1[it]]*V_RP0[auxI2[it]]);
            delta += thetta[it];
        }
        // normalize path weights so they sum to Aki
        // NOTE(review): if every path product is exactly zero this divides by
        // zero, as in the original — presumably ruled out by the inputs.
        delta = Aki/delta;
        for (it = 0 ; it < n_intersect ; it++){
            thetta[it] *= delta;
        }
        for (it = 0 ; it < n_intersect ; it++){
            m1 = intI1I2[it];
            delta = thetta[it];
            // Distance 2: the intermediate plays both m1 and m2 roles.
            // Assumes A0's pattern contains R0P and P0R (stated by original).
            global_km2  = findBinarySearch(R_A0, starts_A0[k], starts_A0[k+1], m1);
            global_m1i  = findBinarySearch(R_A0, starts_A0[m1], starts_A0[m1+1], i);
            global_m1m1 = globalDiagonal[m1];
            #pragma omp atomic
            V_A0[global_km2] += delta;
            #pragma omp atomic
            V_A0[global_m1i] += delta;
            #pragma omp atomic
            V_A0[global_m1m1] -= delta;
            ans = 1;
        }
    }else{
        // No distance-2 path: enumerate distance-3 paths i -> m1 -> m2 -> k.
        list_it = 0;
        delta = 0;
        for (m1it = starts_R0P[i] ; m1it < starts_R0P[i+1] ; m1it++){
            m1 = C_R0P[m1it];
            R0Pm1i = V_R0P[m1it];
            n_intersect2 = intersect(C_A0_cols , R_RP0, starts_A0_cols[m1] , starts_A0_cols[m1+1],
                                     starts_RP0[k], starts_RP0[k+1],intI1I2, auxI1, auxI2);
            for (it = 0 ; it < n_intersect2 ; it++){
                auxm1[list_it]  = m1;
                auxm2[list_it]  = intI1I2[it];
                thetta[list_it] = fabs(R0Pm1i*V_A0_cols[auxI1[it]]*V_RP0[auxI2[it]]);
                delta += thetta[list_it];
                ++list_it;
                if (list_it == MAX_HEAP_SIZE_nc){
                    printf("Error: we're out of place in auxiliary arrays, increase MAX_HEAP_SIZE_nc\n");
                    return 0; // FIX: was a bare 'return;' from an int function
                }
            }
        }
        // FIX: guard the division — with no distance-3 path the original
        // divided Aki by zero before entering an empty loop.
        if (list_it > 0){
            delta = Aki/delta;
            for (it = 0 ; it < list_it ; it++){
                thetta[it] *= delta;
            }
            for (it = 0 ; it < list_it ; it++){
                m1 = auxm1[it];
                m2 = auxm2[it];
                delta = thetta[it];
                global_km2  = findBinarySearch(R_A0, starts_A0[k], starts_A0[k+1], m2);
                global_m1i  = findBinarySearch(R_A0, starts_A0[m1], starts_A0[m1+1], i);
                global_m2m2 = globalDiagonal[m2];
                global_m1m1 = globalDiagonal[m1];
                global_m2m1 = findBinarySearch(R_A0, starts_A0[m2], starts_A0[m2+1], m1);
                #pragma omp atomic
                V_A0[global_m2m2] -= delta;
                #pragma omp atomic
                V_A0[global_m2m1] += delta;
                #pragma omp atomic
                V_A0[global_km2] += delta;
                #pragma omp atomic
                V_A0[global_m1i] += delta;
                #pragma omp atomic
                V_A0[global_m1m1] -= delta;
                ans = 1;
            }
        }
    }
    if (ans==0){
        printf("Element (%ld,%ld,%.16lf) not sparsified!!!\n",(long)k,(long)i,Aki);
    }
    return ans; // FIX: original fell off the end of a non-void function
}

// Multiply a sparse row vector (vec_ind/vec_vals, nnz_vec entries, sorted)
// by a sparse matrix (C_mat/starts_mat/V_mat) using a k-way heap merge of the
// referenced matrix rows.  Writes the sorted result to ans_ind/ans_vals and
// returns its nnz.  ans_ind/ans_vals MAY alias vec_ind/vec_vals: the input is
// fully read into 'members' before any output slot is written.
// NOTE(review): assumes every row referenced by vec_ind has at least one
// nonzero — an empty row would make the initial cursor read past its range;
// confirm this holds for the R/A/P operands.
mwIndex MultyplyRowTimesMat(mwIndex *vec_ind , double *vec_vals, mwIndex nnz_vec,
                            mwIndex *C_mat , mwIndex *starts_mat , double *V_mat,
                            mwIndex *ans_ind , double *ans_vals, Heap* heap, member* members){
    mwIndex k;
    double ans_tmp_v = 0.0;
    mwIndex ans_tmp_global = 0;
    member *curr_min;
    heap->heapSize = 0;
    if (nnz_vec == 0){
        return ans_tmp_global;
    }
    // Seed one cursor per referenced matrix row.
    for (k = 0 ; k < nnz_vec ; ++k){
        curr_min = &members[k];
        curr_min->v_vec      = vec_vals[k];
        curr_min->i_vec      = vec_ind[k];
        curr_min->global_mat = starts_mat[vec_ind[k]];
        curr_min->j_mat      = C_mat[curr_min->global_mat];
        Insert(heap,curr_min);
    }
    // Done reading vec_ind — it may now be overwritten if it aliases ans_ind.
    curr_min = DeleteMin(heap);
    ans_ind[0] = curr_min->j_mat;
    while (curr_min!=NULL){
        if (curr_min->j_mat == ans_ind[ans_tmp_global]){
            // same output column — accumulate
            ans_tmp_v += curr_min->v_vec*V_mat[curr_min->global_mat];
        }else{
            // new column — flush the finished one and start the next
            ans_vals[ans_tmp_global] = ans_tmp_v;
            ans_tmp_global++;
            ans_tmp_v = curr_min->v_vec*V_mat[curr_min->global_mat];
            ans_ind[ans_tmp_global] = curr_min->j_mat;
        }
        // advance this row's cursor; re-insert if the row has more nonzeros
        curr_min->global_mat++;
        if (curr_min->global_mat < starts_mat[curr_min->i_vec+1]){
            curr_min->j_mat = C_mat[curr_min->global_mat];
            Insert(heap,curr_min);
        }
        curr_min = DeleteMin(heap);
    }
    ans_vals[ans_tmp_global]=ans_tmp_v; // flush the final column
    ans_tmp_global++;
    return ans_tmp_global;
}

// Cache the data-array position of each diagonal entry A0(k,k) for rows
// [startRow, endRow), so the sparsification hot path avoids repeated searches.
void GenerateDiagonalGlobalIndices(mwIndex startRow, mwIndex endRow, mwIndex* C_A0, mwIndex* starts_A0, mwIndex* globalDiagonal){
    mwIndex k;
    for (k = startRow ; k < endRow ;++k){
        globalDiagonal[k] = findLinearSearch(C_A0, starts_A0[k], starts_A0[k+1], k);
    }
}

// MEX gateway.  prhs: {A0 (pattern, values overwritten), R, A, P, RP0, R0P,
// A0_cols (column-oriented copy of A0)}.  Computes A0 := sparsified(R*A*P)
// restricted to A0's pattern, in parallel over row blocks.
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    mwIndex id, Nthrds, istart, iend,k;
    int nThreadsWanted;
    mwIndex *C_A0       = mxGetIr(prhs[0]);
    mwIndex *starts_A0  = mxGetJc(prhs[0]);
    double  *V_A0       = mxGetPr(prhs[0]);
    mwIndex nzmax_A0    = mxGetNzmax(prhs[0]);
    mwIndex nc          = mxGetN(prhs[0]);
    mwIndex *C_R        = mxGetIr(prhs[1]);
    mwIndex *starts_R   = mxGetJc(prhs[1]);
    double  *V_R        = mxGetPr(prhs[1]);
    mwIndex *C_A        = mxGetIr(prhs[2]);
    mwIndex *starts_A   = mxGetJc(prhs[2]);
    double  *V_A        = mxGetPr(prhs[2]);
    mwIndex n           = mxGetN(prhs[2]);
    mwIndex *C_P        = mxGetIr(prhs[3]);
    mwIndex *starts_P   = mxGetJc(prhs[3]);
    double  *V_P        = mxGetPr(prhs[3]);
    mwIndex *R_RP0      = mxGetIr(prhs[4]);
    mwIndex *starts_RP0 = mxGetJc(prhs[4]);
    double  *V_RP0      = mxGetPr(prhs[4]);
    mwIndex *C_R0P      = mxGetIr(prhs[5]);
    mwIndex *starts_R0P = mxGetJc(prhs[5]);
    double  *V_R0P      = mxGetPr(prhs[5]);
    mwIndex *C_A0_cols      = mxGetIr(prhs[6]);
    mwIndex *starts_A0_cols = mxGetJc(prhs[6]);
    double  *V_A0_cols      = mxGetPr(prhs[6]);
    mwIndex* globalDiagonal = (mwIndex*)malloc(nc*sizeof(mwIndex));
    mwIndex MAX_HEAP_SIZE_nc = 0;
    mwIndex MAX_HEAP_SIZE_n  = 0;
    // Worst-case row nnz of A0 resp. A — sizing for the work buffers.
    for (k = 0 ; k < nc ;++k){
        if ((starts_A0[k+1] - starts_A0[k]) > MAX_HEAP_SIZE_nc){
            MAX_HEAP_SIZE_nc = (starts_A0[k+1] - starts_A0[k]);
        }
    }
    for (k = 0 ; k < n ;++k){
        if ((starts_A[k+1] - starts_A[k])>MAX_HEAP_SIZE_n){
            MAX_HEAP_SIZE_n = (starts_A[k+1] - starts_A[k]);
        }
    }
    // FIX: num_threads(omp_get_num_procs()/2) requested 0 threads on a
    // single-core machine; clamp to at least 1.
    nThreadsWanted = omp_get_num_procs()/2;
    if (nThreadsWanted < 1) nThreadsWanted = 1;
    #pragma omp parallel private(id, Nthrds, istart, iend, k) num_threads(nThreadsWanted)
    {
        id = omp_get_thread_num();
        Nthrds = omp_get_num_threads();
        // Phase 1: zero V_A0 (pattern is kept, values rebuilt from scratch).
        istart = id * nzmax_A0 / Nthrds;
        iend   = (id+1) * nzmax_A0 / Nthrds;
        if (id == Nthrds-1) iend = nzmax_A0;
        for (k = istart ; k < iend ; ++k){
            V_A0[k] = 0.0;
        }
        #pragma omp barrier
        // Phase 2: per-row diagonal positions; Phase 3: multiply + sparsify.
        istart = id * nc / Nthrds;
        iend   = (id+1) * nc / Nthrds;
        if (id == Nthrds-1) iend = nc;
        GenerateDiagonalGlobalIndices(istart,iend,C_A0,starts_A0,globalDiagonal);
        #pragma omp barrier
        MultyplyAndSparsify(istart, iend, nc,
                            C_A0 , starts_A0, V_A0,
                            C_R, starts_R, V_R,
                            C_A , starts_A , V_A,
                            C_P, starts_P, V_P,
                            R_RP0, starts_RP0, V_RP0,
                            C_R0P, starts_R0P, V_R0P,
                            C_A0_cols, starts_A0_cols, V_A0_cols,
                            globalDiagonal, MAX_HEAP_SIZE_nc,MAX_HEAP_SIZE_n);
        #pragma omp barrier
    }
    free(globalDiagonal);
}

// Linear scan for toFind in A[startA..endA); returns its position, or 0 with
// an error message if absent (callers rely on the index always existing).
mwIndex findLinearSearch(mwIndex* A, mwIndex startA, mwIndex endA, mwIndex toFind){
    mwIndex mid;
    for (mid = startA ; mid < endA ; ++mid){
        if (A[mid]==toFind){
            return mid;
        }
    }
    // FIX: %d with an mwIndex argument is undefined behavior — cast to long.
    printf("LINEAR: INDEX NOT FOUND!!! IMPOSSIBLE - THERE'S A BUG see %ld\n",(long)toFind);
    return 0;
}

// Binary search for toFind in sorted A[startA..endA); falls back to a linear
// scan for short ranges (< 50) where it is faster in practice.
mwIndex findBinarySearch(mwIndex* A, mwIndex startA, mwIndex endA, mwIndex toFind){
    mwIndex mid;
    if (endA - startA < 50){
        mid = findLinearSearch( A, startA, endA, toFind);
        return mid;
    }else{
        endA--;
        // continually narrow search until just one element remains
        while (startA < endA) {
            mid = (startA+endA)>>1;
            // reduce the search
            if (A[mid] < toFind){
                startA = mid+1;
            }else{
                endA = mid;
            }
        }
        if (A[startA] == toFind)
            return startA;
        else{
            printf("BINARY: INDEX NOT FOUND!!! IMPOSSIBLE - THERE'S A BUG\n");
            return 0;
        }
    }
}

// Intersect two sorted index ranges A[startA..endA) and B[startB..endB).
// Writes the common indices to AB and the matching positions within A and B
// to iA/iB; returns the number of matches.
int intersect(mwIndex* A, mwIndex* B, mwIndex startA, mwIndex endA, mwIndex startB, mwIndex endB, mwIndex* AB, mwIndex* iA, mwIndex* iB)
{
    mwIndex ia = startA, ib = startB;
    mwIndex k = 0;
    mwIndex tmpA,tmpB;
    // FIX: guard empty ranges — the original read A[startA] and B[endB-1]
    // unconditionally, an out-of-bounds access when a range is empty.
    if (startA >= endA || startB >= endB){
        return k;
    }
    // quick reject when the ranges cannot overlap
    if (A[startA] > B[endB-1] || A[endA-1] < B[startB]){
        return k;
    }
    while ((ia < endA) && (ib < endB)){
        tmpA = A[ia];
        tmpB = B[ib];
        if (tmpA == tmpB){
            AB[k] = tmpA;
            iA[k] = ia++;
            iB[k] = ib++;
            ++k;
        }else{
            ia += tmpA < tmpB;
            ib += tmpA > tmpB;
        }
    }
    return k;
}
pr59669-1.c
/* PR middle-end/59669 */ /* { dg-do compile } */ /* { dg-options "-fopenmp" } */ #pragma omp declare simd linear(a) void foo (int a) { }
GB_dense_subassign_06d_template.c
//------------------------------------------------------------------------------ // GB_dense_subassign_06d_template: C<A> = A where C is dense or bitmap //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ { //-------------------------------------------------------------------------- // get C and A //-------------------------------------------------------------------------- ASSERT (!GB_ZOMBIES (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; ASSERT (!GB_PENDING (A)) ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t *restrict Ai = A->i ; const int8_t *restrict Ab = A->b ; const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ; const int64_t avlen = A->vlen ; const bool A_is_bitmap = GB_IS_BITMAP (A) ; const bool A_is_dense = GB_as_if_full (A) ; const int64_t anz = GB_NNZ_HELD (A) ; GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ; int8_t *restrict Cb = C->b ; const int64_t cvlen = C->vlen ; const bool C_is_bitmap = GB_IS_BITMAP (C) ; //-------------------------------------------------------------------------- // C<A> = A //-------------------------------------------------------------------------- int64_t cnvals = C->nvals ; // for C bitmap if (A_is_dense) { //---------------------------------------------------------------------- // A is dense: all entries present //---------------------------------------------------------------------- if (C_is_bitmap) { //------------------------------------------------------------------ // C is bitmap, A is dense //------------------------------------------------------------------ if (Mask_struct) { // C<A,struct>=A with C bitmap, A dense int64_t p ; #pragma omp parallel for num_threads(A_nthreads) \ schedule(static) for (p = 0 ; p < anz ; p++) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) 
; } GB_memset (Cb, 1, anz, A_nthreads) ; cnvals = anz ; } else { // C<A>=A with C bitmap, A dense int tid ; #pragma omp parallel for num_threads(A_nthreads) \ schedule(static) reduction(+:cnvals) for (tid = 0 ; tid < A_nthreads ; tid++) { int64_t pA_start, pA_end, task_cnvals = 0 ; GB_PARTITION (pA_start, pA_end, anz, tid, A_nthreads) ; for (int64_t p = pA_start ; p < pA_end ; p++) { if (GB_AX_MASK (Ax, p, asize)) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) ; task_cnvals += (Cb [p] == 0) ; Cb [p] = 1 ; } } cnvals += task_cnvals ; } } } else { //------------------------------------------------------------------ // C is hypersparse, sparse, or full, with all entries present //------------------------------------------------------------------ if (Mask_struct) { // C<A,struct>=A with C sparse/hyper/full int64_t p ; #pragma omp parallel for num_threads(A_nthreads) \ schedule(static) for (p = 0 ; p < anz ; p++) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) ; } } else { // C<A>=A with C sparse/hyper/full int64_t p ; #pragma omp parallel for num_threads(A_nthreads) \ schedule(static) for (p = 0 ; p < anz ; p++) { if (GB_AX_MASK (Ax, p, asize)) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) ; } } } } } else if (A_is_bitmap) { //---------------------------------------------------------------------- // A is bitmap //---------------------------------------------------------------------- if (C_is_bitmap) { //------------------------------------------------------------------ // C is bitmap, A is bitmap //------------------------------------------------------------------ if (Mask_struct) { // C<A,struct>=A with A and C bitmap int tid ; #pragma omp parallel for num_threads(A_nthreads) \ schedule(static) reduction(+:cnvals) for (tid = 0 ; tid < A_nthreads ; tid++) { int64_t pA_start, pA_end, task_cnvals = 0 ; GB_PARTITION (pA_start, pA_end, anz, tid, A_nthreads) ; for (int64_t p = pA_start ; p < pA_end ; p++) { if (Ab [p]) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, 
Ax, p) ; task_cnvals += (Cb [p] == 0) ; Cb [p] = 1 ; } } cnvals += task_cnvals ; } } else { // C<A>=A with A and C bitmap int tid ; #pragma omp parallel for num_threads(A_nthreads) \ schedule(static) reduction(+:cnvals) for (tid = 0 ; tid < A_nthreads ; tid++) { int64_t pA_start, pA_end, task_cnvals = 0 ; GB_PARTITION (pA_start, pA_end, anz, tid, A_nthreads) ; for (int64_t p = pA_start ; p < pA_end ; p++) { if (Ab [p] && GB_AX_MASK (Ax, p, asize)) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) ; task_cnvals += (Cb [p] == 0) ; Cb [p] = 1 ; } } cnvals += task_cnvals ; } } } else { //------------------------------------------------------------------ // C is hypersparse, sparse, or full, with all entries present //------------------------------------------------------------------ if (Mask_struct) { // C<A,struct>=A with A bitmap, and C hyper/sparse/full // this method is used by LAGraph_bfs_parent when q is // a bitmap and pi is full. int64_t p ; #pragma omp parallel for num_threads(A_nthreads) \ schedule(static) for (p = 0 ; p < anz ; p++) { // Cx [p] = Ax [p] if (Ab [p]) { GB_COPY_A_TO_C (Cx, p, Ax, p) ; } } } else { // C<A>=A with A bitmap, and C hyper/sparse/full int64_t p ; #pragma omp parallel for num_threads(A_nthreads) \ schedule(static) for (p = 0 ; p < anz ; p++) { if (Ab [p] && GB_AX_MASK (Ax, p, asize)) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) ; } } } } } else { //---------------------------------------------------------------------- // A is hypersparse or sparse; C is dense or a bitmap //---------------------------------------------------------------------- const int64_t *restrict kfirst_Aslice = A_ek_slicing ; const int64_t *restrict klast_Aslice = A_ek_slicing + A_ntasks ; const int64_t *restrict pstart_Aslice = A_ek_slicing + A_ntasks * 2 ; int taskid ; if (Mask_struct) { if (C_is_bitmap) { //-------------------------------------------------------------- // C is bitmap, mask is structural 
//-------------------------------------------------------------- #pragma omp parallel for num_threads(A_nthreads) \ schedule(dynamic,1) reduction(+:cnvals) for (taskid = 0 ; taskid < A_ntasks ; taskid++) { // if kfirst > klast then taskid does no work at all int64_t kfirst = kfirst_Aslice [taskid] ; int64_t klast = klast_Aslice [taskid] ; int64_t task_cnvals = 0 ; // C<A(:,kfirst:klast)> = A(:,kfirst:klast) for (int64_t k = kfirst ; k <= klast ; k++) { // get A(:,j), the kth vector of A int64_t j = GBH (Ah, k) ; int64_t pA_start, pA_end ; GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst, klast, pstart_Aslice, Ap, avlen) ; // pC is the start of C(:,j) int64_t pC = j * cvlen ; // C<A(:,j),struct>=A(:,j) with C bitmap, A sparse GB_PRAGMA_SIMD_REDUCTION (+,task_cnvals) for (int64_t pA = pA_start ; pA < pA_end ; pA++) { int64_t p = pC + Ai [pA] ; // Cx [p] = Ax [pA] GB_COPY_A_TO_C (Cx, p, Ax, pA) ; task_cnvals += (Cb [p] == 0) ; Cb [p] = 1 ; } } cnvals += task_cnvals ; } } else { //-------------------------------------------------------------- // C is full, mask is structural //-------------------------------------------------------------- #pragma omp parallel for num_threads(A_nthreads) \ schedule(dynamic,1) for (taskid = 0 ; taskid < A_ntasks ; taskid++) { // if kfirst > klast then taskid does no work at all int64_t kfirst = kfirst_Aslice [taskid] ; int64_t klast = klast_Aslice [taskid] ; // C<A(:,kfirst:klast)> = A(:,kfirst:klast) for (int64_t k = kfirst ; k <= klast ; k++) { // get A(:,j), the kth vector of A int64_t j = GBH (Ah, k) ; int64_t pA_start, pA_end ; GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst, klast, pstart_Aslice, Ap, avlen) ; // pC is the start of C(:,j) int64_t pC = j * cvlen ; // C<A(:,j),struct>=A(:,j) with C full, A sparse GB_PRAGMA_SIMD_VECTORIZE for (int64_t pA = pA_start ; pA < pA_end ; pA++) { int64_t p = pC + Ai [pA] ; // Cx [p] = Ax [pA] GB_COPY_A_TO_C (Cx, p, Ax, pA) ; } } } } } else { if (C_is_bitmap) { 
//-------------------------------------------------------------- // C is bitmap, mask is valued //-------------------------------------------------------------- #pragma omp parallel for num_threads(A_nthreads) \ schedule(dynamic,1) reduction(+:cnvals) for (taskid = 0 ; taskid < A_ntasks ; taskid++) { // if kfirst > klast then taskid does no work at all int64_t kfirst = kfirst_Aslice [taskid] ; int64_t klast = klast_Aslice [taskid] ; int64_t task_cnvals = 0 ; // C<A(:,kfirst:klast)> = A(:,kfirst:klast) for (int64_t k = kfirst ; k <= klast ; k++) { // get A(:,j), the kth vector of A int64_t j = GBH (Ah, k) ; int64_t pA_start, pA_end ; GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst, klast, pstart_Aslice, Ap, avlen) ; // pC is the start of C(:,j) int64_t pC = j * cvlen ; // C<A(:,j),struct>=A(:,j) with C bitmap, A sparse GB_PRAGMA_SIMD_REDUCTION (+,task_cnvals) for (int64_t pA = pA_start ; pA < pA_end ; pA++) { if (GB_AX_MASK (Ax, pA, asize)) { int64_t p = pC + Ai [pA] ; // Cx [p] = Ax [pA] GB_COPY_A_TO_C (Cx, p, Ax, pA) ; task_cnvals += (Cb [p] == 0) ; Cb [p] = 1 ; } } } cnvals += task_cnvals ; } } else { //-------------------------------------------------------------- // C is full, mask is valued //-------------------------------------------------------------- #pragma omp parallel for num_threads(A_nthreads) \ schedule(dynamic,1) reduction(+:cnvals) for (taskid = 0 ; taskid < A_ntasks ; taskid++) { // if kfirst > klast then taskid does no work at all int64_t kfirst = kfirst_Aslice [taskid] ; int64_t klast = klast_Aslice [taskid] ; // C<A(:,kfirst:klast)> = A(:,kfirst:klast) for (int64_t k = kfirst ; k <= klast ; k++) { // get A(:,j), the kth vector of A int64_t j = GBH (Ah, k) ; int64_t pA_start, pA_end ; GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst, klast, pstart_Aslice, Ap, avlen) ; // pC is the start of C(:,j) int64_t pC = j * cvlen ; // C<A(:,j),struct>=A(:,j) with C dense, A sparse GB_PRAGMA_SIMD_VECTORIZE for (int64_t pA = pA_start ; pA < pA_end ; pA++) { 
if (GB_AX_MASK (Ax, pA, asize)) { int64_t p = pC + Ai [pA] ; // Cx [p] = Ax [pA] GB_COPY_A_TO_C (Cx, p, Ax, pA) ; } } } } } } } //-------------------------------------------------------------------------- // log the number of entries in the C bitmap //-------------------------------------------------------------------------- if (C_is_bitmap) { C->nvals = cnvals ; } }
GB_assign_zombie3.c
//------------------------------------------------------------------------------
// GB_assign_zombie3: delete entries in C(:,j) for C_replace_phase
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// For GrB_Row_assign or GrB_Col_assign, C(I,j)<#M,repl>=any must delete all
// entries C(i,j) outside of C(I,j), if the mask M(i,0) (or its complement) is
// zero. This step is not done for GxB_*_subassign, since that method does not
// modify anything outside IxJ.

// GB_assign_zombie3 and GB_assign_zombie4 are transposes of each other.

// C must be sparse or hypersparse.
// M can have any sparsity structure: hypersparse, sparse, bitmap, or full

// C->iso is not affected.

// Entries are not removed immediately; they are marked as zombies (their row
// index is flipped with GB_FLIP) and reclaimed later by a wait/prune phase.

#include "GB_assign.h"
#include "GB_assign_zombie.h"
#include "GB_subassign_methods.h"

void GB_assign_zombie3
(
    GrB_Matrix C,                   // the matrix C, or a copy
    const GrB_Matrix M,             // mask, one vector M(:,0)
    const bool Mask_comp,           // if true, use the complement of M
    const bool Mask_struct,         // if true, use M structurally (values ignored)
    const int64_t j,                // vector index with entries to delete
    const GrB_Index *I,             // row index list; C(I,j) is protected
    const int64_t nI,               // length of I
    const int Ikind,                // kind of I (list, range, stride, ...)
    const int64_t Icolon [3],       // begin/inc/end when I is a colon expression
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (GB_ZOMBIES_OK (C)) ;        // C may already contain zombies
    ASSERT (GB_JUMBLED_OK (C)) ;
    ASSERT (!GB_PENDING (C)) ;
    ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (!GB_JUMBLED (M)) ;          // binary search on M
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (!GB_aliased (C, M)) ;       // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // get C (:,j)
    //--------------------------------------------------------------------------

    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    int64_t *restrict Ci = C->i ;
    // locate the range [pC_start, pC_end) of C(:,j) in Ci, by lookup in the
    // hyperlist Ch (if C is hypersparse) or directly in Cp
    int64_t pC_start, pC_end, pleft = 0, pright = C->nvec-1 ;
    GB_lookup (C->h != NULL, Ch, Cp, C->vlen, &pleft, pright, j,
        &pC_start, &pC_end) ;
    int64_t nzombies = C->nzombies ;
    const int64_t zjnz = pC_end - pC_start ;    // # of entries in C(:,j)

    //--------------------------------------------------------------------------
    // get M(:,0)
    //--------------------------------------------------------------------------

    const int64_t *restrict Mp = M->p ;
    const int8_t *restrict Mb = M->b ;
    const int64_t *restrict Mi = M->i ;
    // Mx is NULL for a structural mask: only the pattern of M is consulted
    const GB_void *restrict Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
    const size_t msize = M->type->size ;
    const int64_t Mvlen = M->vlen ;
    int64_t pM_start = 0 ;  // Mp [0]
    int64_t pM_end = GBP (Mp, 1, Mvlen) ;
    const bool M_is_bitmap = GB_IS_BITMAP (M) ;
    // mjdense: M(:,0) has all Mvlen entries, so a direct lookup by row index
    // can be used instead of a binary search
    const bool mjdense = (pM_end - pM_start) == Mvlen ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (zjnz, chunk, nthreads_max) ;
    // oversubscribe tasks (64 per thread) for dynamic load balance
    int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;

    //--------------------------------------------------------------------------
    // delete entries from C(:,j) that are outside I, if the mask M allows it
    //--------------------------------------------------------------------------

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {
        // each task handles a contiguous slice [p1,p2) of C(:,j)
        int64_t p1, p2 ;
        GB_PARTITION (p1, p2, zjnz, taskid, ntasks) ;
        for (int64_t pC = pC_start + p1 ; pC < pC_start + p2 ; pC++)
        {

            //------------------------------------------------------------------
            // get C(i,j)
            //------------------------------------------------------------------

            int64_t i = Ci [pC] ;
            if (!GB_IS_ZOMBIE (i))      // skip entries already deleted
            {

                //--------------------------------------------------------------
                // C(i,j) is outside C(I,j) if i is not in the list I
                //--------------------------------------------------------------

                bool i_outside = !GB_ij_is_in_list (I, nI, i, Ikind, Icolon) ;
                if (i_outside)
                {

                    //----------------------------------------------------------
                    // C(i,j) is a live entry not in the C(I,J) submatrix
                    //----------------------------------------------------------

                    // Check the mask M to see if it should be deleted.
                    // This macro declares and sets mij = value of M(i,0),
                    // via binary search or dense lookup (mjdense).
                    GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (i) ;
                    if (Mask_comp)
                    {
                        // negate the mask if Mask_comp is true
                        mij = !mij ;
                    }
                    if (!mij)
                    {
                        // delete C(i,j) by marking it as a zombie
                        nzombies++ ;
                        Ci [pC] = GB_FLIP (i) ;
                    }
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    C->nzombies = nzombies ;
}
GB_binop__land_bool.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__land_bool)
// A.*B function (eWiseMult):       GB (_AemultB_08__land_bool)
// A.*B function (eWiseMult):       GB (_AemultB_02__land_bool)
// A.*B function (eWiseMult):       GB (_AemultB_04__land_bool)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__land_bool)
// A*D function (colscale):         GB (_AxD__land_bool)
// D*A function (rowscale):         GB (_DxB__land_bool)
// C+=B function (dense accum):     GB (_Cdense_accumB__land_bool)
// C+=b function (dense accum):     GB (_Cdense_accumb__land_bool)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__land_bool)
// C=scalar+B                       GB (_bind1st__land_bool)
// C=scalar+B'                      GB (_bind1st_tran__land_bool)
// C=A+scalar                       GB (_bind2nd__land_bool)
// C=A'+scalar                      GB (_bind2nd_tran__land_bool)

// C type:   bool
// A type:   bool
// B,b type: bool

// BinaryOp: cij = (aij && bij)

// The GB_* macros below configure the generic template files #include'd
// by each function; the templates read these macros to specialize their
// inner loops for the LAND operator on bool.

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]  (GBX handles the iso case, where all entries share Ax [0])
#define GB_GETA(aij,Ax,pA,A_iso) \
    bool aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    bool bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (i,j are unused by LAND; present for positional ops)
#define GB_BINOP(z,x,y,i,j) \
    z = (x && y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out via GB_control.h; functions then return GrB_NO_VALUE)
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_BOOL || GxB_NO_LAND_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LAND is none of these, so this variant is compiled out for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__land_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__land_bool)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,    // pre-sliced tasks over B's entries/vectors
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__land_bool)
(
    GrB_Matrix C,
    const GB_void *p_bwork,         // scalar b, passed as untyped pointer
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (generated-code artifact): the inner block always returns
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__land_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__land_bool)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__land_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,            // true if C's hyperlist is M's hyperlist
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__land_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__land_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (LAND is commutative, so GB_BINOP_FLIP is 0 and this branch is used.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__land_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__land_bool)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__land_bool)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B has no bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool   x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        bool bij = GBX (Bx, p, false) ;
        Cx [p] = (x && bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__land_bool)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A has no bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool   y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        bool aij = GBX (Ax, p, false) ;
        Cx [p] = (aij && y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    bool aij = GBX (Ax, pA, false) ;            \
    Cx [pC] = (x && aij) ;                      \
}

GrB_Info GB (_bind1st_tran__land_bool)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    bool aij = GBX (Ax, pA, false) ;            \
    Cx [pC] = (aij && y) ;                      \
}

GrB_Info GB (_bind2nd_tran__land_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
core_zsyssq.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include "core_blas.h"
#include "plasma_types.h"
#include "core_lapack.h"

#include <math.h>

// Fold one (sc2, sq2) scaled-sum-of-squares pair into the running accumulator
// (*scl, *sum), preserving the invariant: sum of squares == (*scl)^2 * (*sum).
// This is the same rescaling rule LAPACK's zlassq uses internally.
static inline void zsyssq_merge(double *scl, double *sum,
                                double sc2, double sq2)
{
    if (*scl < sc2) {
        *sum = sq2 + (*sum)*((*scl/sc2)*(*scl/sc2));
        *scl = sc2;
    }
    else {
        *sum = *sum + sq2*((sc2/(*scl))*(sc2/(*scl)));
    }
}

/******************************************************************************/
// Accumulate the scaled sum of squares of an n-by-n symmetric tile A into
// (*scale, *sumsq), reading only the triangle selected by uplo.
void core_zsyssq(plasma_enum_t uplo,
                 int n,
                 const plasma_complex64_t *A, int lda,
                 double *scale, double *sumsq)
{
    int inc = 1;

    // Accumulate the strictly off-diagonal entries, one column at a time.
    if (uplo == PlasmaUpper) {
        // Column col holds col entries strictly above the diagonal.
        for (int col = 1; col < n; col++)
            // TODO: Inline this operation.
            LAPACK_zlassq(&col, &A[lda*col], &inc, scale, sumsq);
    }
    else { // PlasmaLower
        // Column col holds n-col-1 entries strictly below the diagonal.
        for (int col = 0; col < n-1; col++) {
            int len = n-col-1;
            // TODO: Inline this operation.
            LAPACK_zlassq(&len, &A[lda*col+col+1], &inc, scale, sumsq);
        }
    }

    // A symmetric matrix contains every off-diagonal entry twice.
    *sumsq *= 2.0;

    // Add the diagonal.  For a symmetric (not Hermitian) complex matrix the
    // diagonal may be complex, so take its full magnitude with cabs().
    for (int k = 0; k < n; k++) {
        double absa = cabs(A[lda*k+k]);
        if (absa != 0.0) { // != propagates nan
            // merge the single value absa, i.e. the pair (absa, 1.0)
            zsyssq_merge(scale, sumsq, absa, 1.0);
        }
    }
}

/******************************************************************************/
// OpenMP task wrapper: initialize (*scale, *sumsq) to the neutral pair and
// run core_zsyssq on tile A once its data is ready.
void core_omp_zsyssq(plasma_enum_t uplo,
                     int n,
                     const plasma_complex64_t *A, int lda,
                     double *scale, double *sumsq,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:scale[0:n]) \
                     depend(out:sumsq[0:n])
    {
        // skip the work entirely if an earlier task already failed
        if (sequence->status == PlasmaSuccess) {
            *scale = 0.0;
            *sumsq = 1.0;
            core_zsyssq(uplo, n, A, lda, scale, sumsq);
        }
    }
}

/******************************************************************************/
// Reduce the per-tile (scale, sumsq) pairs — stored column-major with leading
// dimension m — into the final norm *value = scl*sqrt(sum).
void core_omp_zsyssq_aux(int m, int n,
                         const double *scale, const double *sumsq,
                         double *value,
                         plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:scale[0:n]) \
                     depend(in:sumsq[0:n]) \
                     depend(out:value[0:1])
    {
        if (sequence->status == PlasmaSuccess) {
            double scl = 0.0;
            double sum = 1.0;

            // Strictly lower-triangular tiles of the tile grid; by symmetry
            // each contributes twice, hence the doubling below.
            for (int col = 0; col < n; col++) {
                for (int row = col+1; row < n; row++) {
                    int idx = m*col+row;
                    zsyssq_merge(&scl, &sum, scale[idx], sumsq[idx]);
                }
            }
            sum = 2.0*sum;

            // Diagonal tiles, counted once.
            for (int col = 0; col < n; col++) {
                int idx = m*col+col;
                zsyssq_merge(&scl, &sum, scale[idx], sumsq[idx]);
            }

            *value = scl*sqrt(sum);
        }
    }
}